Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig                  |   25
-rw-r--r--  drivers/char/agp/Kconfig              |    2
-rw-r--r--  drivers/char/agp/agp.h                |    7
-rw-r--r--  drivers/char/agp/ali-agp.c            |   27
-rw-r--r--  drivers/char/agp/amd-k7-agp.c         |    9
-rw-r--r--  drivers/char/agp/backend.c            |   12
-rw-r--r--  drivers/char/agp/generic.c            |   19
-rw-r--r--  drivers/char/agp/hp-agp.c             |    9
-rw-r--r--  drivers/char/agp/i460-agp.c           |    9
-rw-r--r--  drivers/char/agp/intel-agp.c          |    6
-rw-r--r--  drivers/char/agp/parisc-agp.c         |    7
-rw-r--r--  drivers/char/consolemap.c             |   25
-rw-r--r--  drivers/char/defkeymap.c_shipped      |    2
-rw-r--r--  drivers/char/drm/Kconfig              |    2
-rw-r--r--  drivers/char/drm/drm.h                |   20
-rw-r--r--  drivers/char/drm/drmP.h               |  237
-rw-r--r--  drivers/char/drm/drm_agpsupport.c     |  130
-rw-r--r--  drivers/char/drm/drm_auth.c           |   48
-rw-r--r--  drivers/char/drm/drm_bufs.c           |  203
-rw-r--r--  drivers/char/drm/drm_context.c        |  177
-rw-r--r--  drivers/char/drm/drm_dma.c            |   11
-rw-r--r--  drivers/char/drm/drm_drawable.c       |   67
-rw-r--r--  drivers/char/drm/drm_drv.c            |  186
-rw-r--r--  drivers/char/drm/drm_fops.c           |   34
-rw-r--r--  drivers/char/drm/drm_ioc32.c          |    2
-rw-r--r--  drivers/char/drm/drm_ioctl.c          |  196
-rw-r--r--  drivers/char/drm/drm_irq.c            |   98
-rw-r--r--  drivers/char/drm/drm_lock.c           |   75
-rw-r--r--  drivers/char/drm/drm_os_linux.h       |   10
-rw-r--r--  drivers/char/drm/drm_pciids.h         |    2
-rw-r--r--  drivers/char/drm/drm_scatter.c        |   48
-rw-r--r--  drivers/char/drm/drm_vm.c             |    4
-rw-r--r--  drivers/char/drm/i810_dma.c           |  312
-rw-r--r--  drivers/char/drm/i810_drm.h           |    5
-rw-r--r--  drivers/char/drm/i810_drv.h           |    9
-rw-r--r--  drivers/char/drm/i830_dma.c           |  210
-rw-r--r--  drivers/char/drm/i830_drv.h           |   15
-rw-r--r--  drivers/char/drm/i830_irq.c           |   30
-rw-r--r--  drivers/char/drm/i915_dma.c           |  214
-rw-r--r--  drivers/char/drm/i915_drv.h           |   36
-rw-r--r--  drivers/char/drm/i915_irq.c           |  128
-rw-r--r--  drivers/char/drm/i915_mem.c           |  125
-rw-r--r--  drivers/char/drm/mga_dma.c            |  140
-rw-r--r--  drivers/char/drm/mga_drv.h            |   21
-rw-r--r--  drivers/char/drm/mga_state.c          |  197
-rw-r--r--  drivers/char/drm/mga_warp.c           |    8
-rw-r--r--  drivers/char/drm/r128_cce.c           |  138
-rw-r--r--  drivers/char/drm/r128_drm.h           |   18
-rw-r--r--  drivers/char/drm/r128_drv.h           |   23
-rw-r--r--  drivers/char/drm/r128_state.c         |  351
-rw-r--r--  drivers/char/drm/r300_cmdbuf.c        |   68
-rw-r--r--  drivers/char/drm/radeon_cp.c          |  146
-rw-r--r--  drivers/char/drm/radeon_drv.h         |   43
-rw-r--r--  drivers/char/drm/radeon_irq.c         |   38
-rw-r--r--  drivers/char/drm/radeon_mem.c         |  108
-rw-r--r--  drivers/char/drm/radeon_state.c       |  683
-rw-r--r--  drivers/char/drm/savage_bci.c         |  145
-rw-r--r--  drivers/char/drm/savage_drv.h         |    9
-rw-r--r--  drivers/char/drm/savage_state.c       |  200
-rw-r--r--  drivers/char/drm/sis_drv.c            |    2
-rw-r--r--  drivers/char/drm/sis_drv.h            |    5
-rw-r--r--  drivers/char/drm/sis_mm.c             |  112
-rw-r--r--  drivers/char/drm/via_dma.c            |  144
-rw-r--r--  drivers/char/drm/via_dmablit.c        |   54
-rw-r--r--  drivers/char/drm/via_drv.h            |   22
-rw-r--r--  drivers/char/drm/via_irq.c            |   47
-rw-r--r--  drivers/char/drm/via_map.c            |   14
-rw-r--r--  drivers/char/drm/via_mm.c             |   83
-rw-r--r--  drivers/char/drm/via_verifier.c       |    8
-rw-r--r--  drivers/char/drm/via_video.c          |   20
-rw-r--r--  drivers/char/dsp56k.c                 |    2
-rw-r--r--  drivers/char/ec3104_keyb.c            |  457
-rw-r--r--  drivers/char/epca.c                   | 2611
-rw-r--r--  drivers/char/hvc_console.c            |   56
-rw-r--r--  drivers/char/ip2/ip2main.c            |   11
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c   |    2
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c      |   18
-rw-r--r--  drivers/char/keyboard.c               |   37
-rw-r--r--  drivers/char/lp.c                     |   17
-rw-r--r--  drivers/char/mbcs.c                   |    1
-rw-r--r--  drivers/char/mem.c                    |  130
-rw-r--r--  drivers/char/mspec.c                  |    2
-rw-r--r--  drivers/char/mxser.h                  |    3
-rw-r--r--  drivers/char/n_hdlc.c                 |    2
-rw-r--r--  drivers/char/ppdev.c                  |    1
-rw-r--r--  drivers/char/rio/host.h               |    2
-rw-r--r--  drivers/char/riscom8.h                |    2
-rw-r--r--  drivers/char/sx.h                     |    4
-rw-r--r--  drivers/char/synclink.c               |    4
-rw-r--r--  drivers/char/synclink_gt.c            |    8
-rw-r--r--  drivers/char/sysrq.c                  |    5
-rw-r--r--  drivers/char/tpm/tpm_tis.c            |   22
-rw-r--r--  drivers/char/tty_ioctl.c              |   82
-rw-r--r--  drivers/char/vt.c                     |   19
-rw-r--r--  drivers/char/vt_ioctl.c               |   63
-rw-r--r--  drivers/char/watchdog/mpc5200_wdt.c   |    2
 96 files changed, 3941 insertions(+), 5262 deletions(-)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index b391776e5bf3..204d53e506de 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -36,6 +36,23 @@ config VT
 	  If unsure, say Y, or else you won't be able to do much with your new
 	  shiny Linux system :-)
 
+config VT_UNICODE
+	bool "Virtual console is Unicode by default"
+	depends on VT
+	default n
+	---help---
+	  If you say Y here, the virtual terminal will be in UTF-8 by default,
+	  and the keyboard will run in unicode mode.
+
+	  If you say N here, the virtual terminal will not be in UTF-8 by
+	  default, and the keyboard will run in XLATE mode.
+
+	  This can also be changed by passing 'default_utf8=<0|1>' on the
+	  kernel command line.
+
+	  Historically, the kernel has defaulted to non-UTF8 and XLATE mode.
+	  If unsure, say N here.
+
 config VT_CONSOLE
 	bool "Support for console on virtual terminal" if EMBEDDED
 	depends on VT
@@ -568,8 +585,8 @@ config TIPAR
 config HVC_DRIVER
 	bool
 	help
-	  Users of pSeries machines that want to utilize the hvc console front-end
-	  module for their backend console driver should select this option.
+	  Generic "hypervisor virtual console" infrastructure for various
+	  hypervisors (pSeries, iSeries, Xen, lguest).
 	  It will automatically be selected if one of the back-end console drivers
 	  is selected.
 
@@ -896,10 +913,6 @@ config GPIO_TB0219
 	depends on TANBAC_TB022X
 	select GPIO_VR41XX
 
-source "drivers/char/agp/Kconfig"
-
-source "drivers/char/drm/Kconfig"
-
 source "drivers/char/pcmcia/Kconfig"
 
 config MWAVE
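The VT_UNICODE help text above names both ways of selecting the new behaviour; as an illustrative fragment (not part of the patch), the build-time and boot-time forms would be:

	CONFIG_VT_UNICODE=y		# .config/defconfig fragment selecting the new default

	default_utf8=1			# appended to the kernel command line of an existing kernel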
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 713533d8a86e..f22c253bc09f 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -1,4 +1,4 @@
-config AGP
+menuconfig AGP
 	tristate "/dev/agpgart (AGP Support)"
 	depends on ALPHA || IA64 || PARISC || PPC || X86
 	depends on PCI
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 8955e7ff759a..b83824c41329 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -58,6 +58,9 @@ struct gatt_mask {
 			 * devices this will probably be ignored */
 };
 
+#define AGP_PAGE_DESTROY_UNMAP 1
+#define AGP_PAGE_DESTROY_FREE 2
+
 struct aper_size_info_8 {
 	int size;
 	int num_entries;
@@ -113,7 +116,7 @@ struct agp_bridge_driver {
 	struct agp_memory *(*alloc_by_type) (size_t, int);
 	void (*free_by_type)(struct agp_memory *);
 	void *(*agp_alloc_page)(struct agp_bridge_data *);
-	void (*agp_destroy_page)(void *);
+	void (*agp_destroy_page)(void *, int flags);
 	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
 };
 
@@ -267,7 +270,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
void agp_generic_free_by_type(struct agp_memory *curr);
void *agp_generic_alloc_page(struct agp_bridge_data *bridge);
-void agp_generic_destroy_page(void *addr);
+void agp_generic_destroy_page(void *addr, int flags);
void agp_free_key(int key);
int agp_num_entries(void);
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
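The two AGP_PAGE_DESTROY_* flags split page teardown into an unmap step and a free step, so a whole set of pages can be unmapped and flush_agp_mappings() run before any page is handed back to the allocator (previously each page was unmapped and freed in a single call, ahead of the flush). A minimal sketch of the resulting calling pattern, mirroring the agp_free_memory() change below — 'pages', 'count' and 'bridge' are illustrative names, not from this patch:

	int i;

	for (i = 0; i < count; i++)		/* pass 1: unmap only */
		bridge->driver->agp_destroy_page(gart_to_virt(pages[i]),
						 AGP_PAGE_DESTROY_UNMAP);
	flush_agp_mappings();			/* one flush for the whole batch */
	for (i = 0; i < count; i++)		/* pass 2: release the pages */
		bridge->driver->agp_destroy_page(gart_to_virt(pages[i]),
						 AGP_PAGE_DESTROY_FREE);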
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 4941ddb78939..aa5ddb716ffb 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -156,29 +156,34 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge)
 	return addr;
 }
 
-static void ali_destroy_page(void * addr)
+static void ali_destroy_page(void * addr, int flags)
 {
 	if (addr) {
-		global_cache_flush();	/* is this really needed?  --hch */
-		agp_generic_destroy_page(addr);
-		global_flush_tlb();
+		if (flags & AGP_PAGE_DESTROY_UNMAP) {
+			global_cache_flush();	/* is this really needed?  --hch */
+			agp_generic_destroy_page(addr, flags);
+			global_flush_tlb();
+		} else
+			agp_generic_destroy_page(addr, flags);
 	}
 }
 
-static void m1541_destroy_page(void * addr)
+static void m1541_destroy_page(void * addr, int flags)
 {
 	u32 temp;
 
 	if (addr == NULL)
 		return;
 
-	global_cache_flush();
+	if (flags & AGP_PAGE_DESTROY_UNMAP) {
+		global_cache_flush();
 
 	pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
 	pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
 			       (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
				 virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN));
-	agp_generic_destroy_page(addr);
+	}
+	agp_generic_destroy_page(addr, flags);
 }
 
 
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index f60bca70d1fb..1405a42585e1 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -100,21 +100,16 @@ static int amd_create_gatt_pages(int nr_tables)
 
 	for (i = 0; i < nr_tables; i++) {
 		entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
+		tables[i] = entry;
 		if (entry == NULL) {
-			while (i > 0) {
-				kfree(tables[i-1]);
-				i--;
-			}
-			kfree(tables);
 			retval = -ENOMEM;
 			break;
 		}
-		tables[i] = entry;
 		retval = amd_create_page_map(entry);
 		if (retval != 0)
 			break;
 	}
-	amd_irongate_private.num_tables = nr_tables;
+	amd_irongate_private.num_tables = i;
 	amd_irongate_private.gatt_pages = tables;
 
 	if (retval != 0)
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 1b47c89a1b99..832ded20fe70 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -189,9 +189,11 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 
 err_out:
 	if (bridge->driver->needs_scratch_page) {
-		bridge->driver->agp_destroy_page(
-				gart_to_virt(bridge->scratch_page_real));
+		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
+						 AGP_PAGE_DESTROY_UNMAP);
 		flush_agp_mappings();
+		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
+						 AGP_PAGE_DESTROY_FREE);
 	}
 	if (got_gatt)
 		bridge->driver->free_gatt_table(bridge);
@@ -215,9 +217,11 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
 
 	if (bridge->driver->agp_destroy_page &&
 	    bridge->driver->needs_scratch_page) {
-		bridge->driver->agp_destroy_page(
-				gart_to_virt(bridge->scratch_page_real));
+		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
+						 AGP_PAGE_DESTROY_UNMAP);
 		flush_agp_mappings();
+		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
+						 AGP_PAGE_DESTROY_FREE);
 	}
 }
 
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 3db4f4076ed4..64b2f6d7059d 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -195,9 +195,12 @@ void agp_free_memory(struct agp_memory *curr)
 	}
 	if (curr->page_count != 0) {
 		for (i = 0; i < curr->page_count; i++) {
-			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
+			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP);
 		}
 		flush_agp_mappings();
+		for (i = 0; i < curr->page_count; i++) {
+			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE);
+		}
 	}
 	agp_free_key(curr->key);
 	agp_free_page_array(curr);
@@ -1176,7 +1179,7 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
 EXPORT_SYMBOL(agp_generic_alloc_page);
 
 
-void agp_generic_destroy_page(void *addr)
+void agp_generic_destroy_page(void *addr, int flags)
 {
 	struct page *page;
 
@@ -1184,10 +1187,14 @@ void agp_generic_destroy_page(void *addr)
 		return;
 
 	page = virt_to_page(addr);
-	unmap_page_from_agp(page);
-	put_page(page);
-	free_page((unsigned long)addr);
-	atomic_dec(&agp_bridge->current_memory_agp);
+	if (flags & AGP_PAGE_DESTROY_UNMAP)
+		unmap_page_from_agp(page);
+
+	if (flags & AGP_PAGE_DESTROY_FREE) {
+		put_page(page);
+		free_page((unsigned long)addr);
+		atomic_dec(&agp_bridge->current_memory_agp);
+	}
 }
 EXPORT_SYMBOL(agp_generic_destroy_page);
 
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 313a133a1172..cbb0444467ba 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -14,15 +14,12 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/agp_backend.h>
+#include <linux/log2.h>
 
 #include <asm/acpi-ext.h>
 
 #include "agp.h"
 
-#ifndef log2
-#define log2(x)		ffz(~(x))
-#endif
-
 #define HP_ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */
 
 /* HP ZX1 IOC registers */
@@ -257,7 +254,7 @@ hp_zx1_configure (void)
 		readl(hp->ioc_regs+HP_ZX1_IMASK);
 		writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
 		readl(hp->ioc_regs+HP_ZX1_IBASE);
-		writel(hp->iova_base|log2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
+		writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
 		readl(hp->ioc_regs+HP_ZX1_PCOM);
 	}
 
@@ -285,7 +282,7 @@ hp_zx1_tlbflush (struct agp_memory *mem)
 {
 	struct _hp_private *hp = &hp_private;
 
-	writeq(hp->gart_base | log2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
+	writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
 	readq(hp->ioc_regs+HP_ZX1_PCOM);
 }
 
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index 75d2aca6353d..e72a83e2bad5 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -13,6 +13,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/agp_backend.h>
+#include <linux/log2.h>
 
 #include "agp.h"
 
@@ -59,8 +60,6 @@
  */
 #define WR_FLUSH_GATT(index)	RD_GATT(index)
 
-#define log2(x)			ffz(~(x))
-
 static struct {
 	void *gatt;		/* ioremap'd GATT area */
 
@@ -148,7 +147,7 @@ static int i460_fetch_size (void)
 		 * values[i].size.
 		 */
 		values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
-		values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
+		values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
 	}
 
 	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
@@ -536,10 +535,10 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge)
 	return page;
 }
 
-static void i460_destroy_page (void *page)
+static void i460_destroy_page (void *page, int flags)
 {
 	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
-		agp_generic_destroy_page(page);
+		agp_generic_destroy_page(page, flags);
 		global_flush_tlb();
 	}
 }
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 141ca176c397..d87961993ccf 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -400,9 +400,11 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
 		if (curr->page_count == 4)
 			i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
 		else {
-			agp_bridge->driver->agp_destroy_page(
-					gart_to_virt(curr->memory[0]));
+			agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
+							     AGP_PAGE_DESTROY_UNMAP);
 			global_flush_tlb();
+			agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
+							     AGP_PAGE_DESTROY_FREE);
 		}
 		agp_free_page_array(curr);
 	}
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index f4562cc22343..2939e3570f9d 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/klist.h>
 #include <linux/agp_backend.h>
+#include <linux/log2.h>
 
 #include <asm-parisc/parisc-device.h>
 #include <asm-parisc/ropes.h>
@@ -27,10 +28,6 @@
 #define DRVNAME	"quicksilver"
 #define DRVPFX	DRVNAME ": "
 
-#ifndef log2
-#define log2(x)		ffz(~(x))
-#endif
-
 #define AGP8X_MODE_BIT		3
 #define AGP8X_MODE		(1 << AGP8X_MODE_BIT)
 
@@ -92,7 +89,7 @@ parisc_agp_tlbflush(struct agp_memory *mem)
 {
 	struct _parisc_agp_info *info = &parisc_agp_info;
 
-	writeq(info->gart_base | log2(info->gart_size), info->ioc_regs+IOC_PCOM);
+	writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
 	readq(info->ioc_regs+IOC_PCOM);	/* flush */
 }
 
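The hp-agp, i460-agp and parisc-agp hunks above all drop a private log2() macro built on ffz(~(x)) in favour of ilog2() from <linux/log2.h>. A one-line illustration (the value is an example, not taken from the patch):

	u32 pcom_size_bits = ilog2(0x40000);	/* 0x40000 == 2^18, so this evaluates to 18 */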
diff --git a/drivers/char/consolemap.c b/drivers/char/consolemap.c
index 4b3916f54909..6b104e45a322 100644
--- a/drivers/char/consolemap.c
+++ b/drivers/char/consolemap.c
@@ -494,12 +494,11 @@ int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
 	p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
 	if (p && p->readonly) return -EIO;
 	if (!p || --p->refcount) {
-		q = kmalloc(sizeof(*p), GFP_KERNEL);
+		q = kzalloc(sizeof(*p), GFP_KERNEL);
 		if (!q) {
 			if (p) p->refcount++;
 			return -ENOMEM;
 		}
-		memset(q, 0, sizeof(*q));
 		q->refcount=1;
 		*vc->vc_uni_pagedir_loc = (unsigned long)q;
 	} else {
@@ -670,19 +669,29 @@ void con_protect_unimap(struct vc_data *vc, int rdonly)
 		p->readonly = rdonly;
 }
 
+/*
+ * Always use USER_MAP. These functions are used by the keyboard,
+ * which shouldn't be affected by G0/G1 switching, etc.
+ * If the user map still contains default values, i.e. the
+ * direct-to-font mapping, then assume user is using Latin1.
+ */
 /* may be called during an interrupt */
 u32 conv_8bit_to_uni(unsigned char c)
 {
-	/*
-	 * Always use USER_MAP. This function is used by the keyboard,
-	 * which shouldn't be affected by G0/G1 switching, etc.
-	 * If the user map still contains default values, i.e. the
-	 * direct-to-font mapping, then assume user is using Latin1.
-	 */
 	unsigned short uni = translations[USER_MAP][c];
 	return uni == (0xf000 | c) ? c : uni;
 }
 
+int conv_uni_to_8bit(u32 uni)
+{
+	int c;
+	for (c = 0; c < 0x100; c++)
+		if (translations[USER_MAP][c] == uni ||
+		    (translations[USER_MAP][c] == (c | 0xf000) && uni == c))
+			return c;
+	return -1;
+}
+
 int
 conv_uni_to_pc(struct vc_data *conp, long ucs)
 {
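The conv_uni_to_8bit() added above is the inverse of conv_8bit_to_uni() for the user map: it scans translations[USER_MAP] and falls back to the identity mapping for the default direct-to-font map. A hedged usage sketch (the calling code is illustrative, not part of the patch):

	/* With the default (direct-to-font, effectively Latin-1) user map,
	 * U+00E9 maps straight back to 0xe9; -1 means no 8-bit equivalent. */
	int c = conv_uni_to_8bit(0xe9);

	if (c >= 0)
		emit_8bit_char(c);	/* emit_8bit_char() is a hypothetical consumer */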
diff --git a/drivers/char/defkeymap.c_shipped b/drivers/char/defkeymap.c_shipped
index 453a2f1ffa15..0aa419a61767 100644
--- a/drivers/char/defkeymap.c_shipped
+++ b/drivers/char/defkeymap.c_shipped
@@ -222,7 +222,7 @@ char *func_table[MAX_NR_FUNC] = {
 	NULL,
 };
 
-struct kbdiacr accent_table[MAX_DIACR] = {
+struct kbdiacruc accent_table[MAX_DIACR] = {
 	{'`', 'A', '\300'}, {'`', 'a', '\340'},
 	{'\'', 'A', '\301'}, {'\'', 'a', '\341'},
 	{'^', 'A', '\302'}, {'^', 'a', '\342'},
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index 0b7ffa5191c6..ba3058dd39a7 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -4,7 +4,7 @@
 # This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 #
-config DRM
+menuconfig DRM
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
 	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
 	help
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 2d6f2d0bd02b..82fb3d0d2785 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -63,27 +63,9 @@
 #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
 #endif
 
-#define XFREE86_VERSION(major,minor,patch,snap) \
-		((major << 16) | (minor << 8) | patch)
-
-#ifndef CONFIG_XFREE86_VERSION
-#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4,1,0,0)
-#endif
-
-#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
-#define DRM_PROC_DEVICES "/proc/devices"
-#define DRM_PROC_MISC	 "/proc/misc"
-#define DRM_PROC_DRM	 "/proc/drm"
-#define DRM_DEV_DRM	 "/dev/drm"
-#define DRM_DEV_MODE	 (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
-#define DRM_DEV_UID	 0
-#define DRM_DEV_GID	 0
-#endif
-
-#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
 #define DRM_MAJOR       226
 #define DRM_MAX_MINOR   15
-#endif
+
 #define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
 #define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
 #define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 0df87fc3dcb2..9dd0760dd87a 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -80,6 +80,9 @@
 #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define __OS_HAS_MTRR (defined(CONFIG_MTRR))
 
+struct drm_file;
+struct drm_device;
+
 #include "drm_os_linux.h"
 #include "drm_hashtab.h"
 
@@ -231,12 +234,13 @@
  * \param dev DRM device.
  * \param filp file pointer of the caller.
  */
-#define LOCK_TEST_WITH_RETURN( dev, filp )				\
+#define LOCK_TEST_WITH_RETURN( dev, file_priv )				\
 do {									\
 	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||		\
-	     dev->lock.filp != filp ) {					\
-		DRM_ERROR( "%s called without lock held\n",		\
-			   __FUNCTION__ );				\
+	     dev->lock.file_priv != file_priv ) {			\
+		DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
+			   __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
+			   dev->lock.file_priv, file_priv );		\
 		return -EINVAL;						\
 	}								\
 } while (0)
@@ -257,12 +261,12 @@ do { \
  * Ioctl function type.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private pointer.
 * \param cmd command.
 * \param arg argument.
 */
-typedef int drm_ioctl_t(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
+typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
 
 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
			       unsigned long arg);
@@ -271,10 +275,18 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
 #define DRM_MASTER	0x2
 #define DRM_ROOT_ONLY	0x4
 
-typedef struct drm_ioctl_desc {
+struct drm_ioctl_desc {
+	unsigned int cmd;
 	drm_ioctl_t *func;
 	int flags;
-} drm_ioctl_desc_t;
+};
+
+/**
+ * Creates a driver or general drm_ioctl_desc array entry for the given
+ * ioctl, for use by drm_ioctl().
+ */
+#define DRM_IOCTL_DEF(ioctl, func, flags) \
+	[DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
 
 struct drm_magic_entry {
 	struct list_head head;
@@ -304,7 +316,7 @@ struct drm_buf {
 	__volatile__ int waiting;	/**< On kernel DMA queue */
 	__volatile__ int pending;	/**< On hardware DMA queue */
 	wait_queue_head_t dma_wait;	/**< Processes waiting */
-	struct file *filp;		/**< Pointer to holding file descr */
+	struct drm_file *file_priv;	/**< Private of holding file descr */
 	int context;			/**< Kernel queue for this buffer */
 	int while_locked;		/**< Dispatch this buffer while locked */
 	enum {
@@ -377,6 +389,7 @@ struct drm_file {
 	int remove_auth_on_close;
 	unsigned long lock_count;
 	void *driver_priv;
+	struct file *filp;
 };
 
 /** Wait queue */
@@ -403,7 +416,7 @@
 */
 struct drm_lock_data {
 	struct drm_hw_lock *hw_lock;	/**< Hardware lock */
-	struct file *filp;		/**< File descr of lock holder (0=kernel) */
+	struct drm_file *file_priv;	/**< File descr of lock holder (0=kernel) */
 	wait_queue_head_t lock_queue;	/**< Queue of blocked processes */
 	unsigned long lock_time;	/**< Time of last lock in jiffies */
 	spinlock_t spinlock;
@@ -552,11 +565,11 @@ struct drm_driver {
 	int (*load) (struct drm_device *, unsigned long flags);
 	int (*firstopen) (struct drm_device *);
 	int (*open) (struct drm_device *, struct drm_file *);
-	void (*preclose) (struct drm_device *, struct file * filp);
+	void (*preclose) (struct drm_device *, struct drm_file *file_priv);
 	void (*postclose) (struct drm_device *, struct drm_file *);
 	void (*lastclose) (struct drm_device *);
 	int (*unload) (struct drm_device *);
-	int (*dma_ioctl) (DRM_IOCTL_ARGS);
+	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
 	void (*dma_ready) (struct drm_device *);
 	int (*dma_quiescent) (struct drm_device *);
 	int (*context_ctor) (struct drm_device *dev, int context);
@@ -587,11 +600,12 @@ struct drm_driver {
 	void (*irq_preinstall) (struct drm_device *dev);
 	void (*irq_postinstall) (struct drm_device *dev);
 	void (*irq_uninstall) (struct drm_device *dev);
-	void (*reclaim_buffers) (struct drm_device *dev, struct file * filp);
+	void (*reclaim_buffers) (struct drm_device *dev,
+				 struct drm_file * file_priv);
 	void (*reclaim_buffers_locked) (struct drm_device *dev,
-					struct file *filp);
+					struct drm_file *file_priv);
 	void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
-					    struct file * filp);
+					    struct drm_file *file_priv);
 	unsigned long (*get_map_ofs) (struct drm_map * map);
 	unsigned long (*get_reg_ofs) (struct drm_device *dev);
 	void (*set_version) (struct drm_device *dev,
@@ -606,7 +620,7 @@ struct drm_driver {
 
 	u32 driver_features;
 	int dev_priv_size;
-	drm_ioctl_desc_t *ioctls;
+	struct drm_ioctl_desc *ioctls;
 	int num_ioctls;
 	struct file_operations fops;
 	struct pci_driver pci_driver;
@@ -850,70 +864,70 @@ extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
 				/* Misc. IOCTL support (drm_ioctl.h) */
-extern int drm_irq_by_busid(struct inode *inode, struct file *filp,
-			    unsigned int cmd, unsigned long arg);
-extern int drm_getunique(struct inode *inode, struct file *filp,
-			 unsigned int cmd, unsigned long arg);
-extern int drm_setunique(struct inode *inode, struct file *filp,
-			 unsigned int cmd, unsigned long arg);
-extern int drm_getmap(struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg);
-extern int drm_getclient(struct inode *inode, struct file *filp,
-			 unsigned int cmd, unsigned long arg);
-extern int drm_getstats(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
-extern int drm_setversion(struct inode *inode, struct file *filp,
-			  unsigned int cmd, unsigned long arg);
-extern int drm_noop(struct inode *inode, struct file *filp,
-		    unsigned int cmd, unsigned long arg);
+extern int drm_irq_by_busid(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+extern int drm_getunique(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int drm_setunique(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int drm_getmap(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_getclient(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int drm_getstats(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_setversion(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+extern int drm_noop(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv);
 
 				/* Context IOCTL support (drm_context.h) */
-extern int drm_resctx(struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg);
-extern int drm_addctx(struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg);
-extern int drm_modctx(struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg);
-extern int drm_getctx(struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg);
-extern int drm_switchctx(struct inode *inode, struct file *filp,
-			 unsigned int cmd, unsigned long arg);
-extern int drm_newctx(struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg);
-extern int drm_rmctx(struct inode *inode, struct file *filp,
-		     unsigned int cmd, unsigned long arg);
+extern int drm_resctx(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_addctx(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_modctx(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_getctx(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_switchctx(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int drm_newctx(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_rmctx(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv);
 
 extern int drm_ctxbitmap_init(struct drm_device *dev);
 extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
 extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 
-extern int drm_setsareactx(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
-extern int drm_getsareactx(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
+extern int drm_setsareactx(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+extern int drm_getsareactx(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
 
 				/* Drawable IOCTL support (drm_drawable.h) */
-extern int drm_adddraw(struct inode *inode, struct file *filp,
-		       unsigned int cmd, unsigned long arg);
-extern int drm_rmdraw(struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg);
-extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
-				    unsigned int cmd, unsigned long arg);
+extern int drm_adddraw(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+extern int drm_rmdraw(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+extern int drm_update_drawable_info(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
 extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
							drm_drawable_t id);
 extern void drm_drawable_free_all(struct drm_device *dev);
 
				/* Authentication IOCTL support (drm_auth.h) */
-extern int drm_getmagic(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
-extern int drm_authmagic(struct inode *inode, struct file *filp,
-			 unsigned int cmd, unsigned long arg);
+extern int drm_getmagic(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_authmagic(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
 
				/* Locking IOCTL support (drm_lock.h) */
-extern int drm_lock(struct inode *inode, struct file *filp,
-		    unsigned int cmd, unsigned long arg);
-extern int drm_unlock(struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg);
+extern int drm_lock(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv);
+extern int drm_unlock(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
 extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
 extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
 extern void drm_idlelock_take(struct drm_lock_data *lock_data);
@@ -924,8 +938,7 @@ extern void drm_idlelock_release(struct drm_lock_data *lock_data);
 * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
 */
 
-extern int drm_i_have_hw_lock(struct file *filp);
-extern int drm_kernel_take_hw_lock(struct file *filp);
+extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
 
				/* Buffer management support (drm_bufs.h) */
 extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
@@ -933,24 +946,23 @@ extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request
 extern int drm_addmap(struct drm_device *dev, unsigned int offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, drm_local_map_t ** map_ptr);
-extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
-			    unsigned int cmd, unsigned long arg);
-extern int drm_rmmap(struct drm_device *dev, drm_local_map_t * map);
-extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t * map);
-extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
-
+extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
+extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
+extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+extern int drm_addbufs(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+extern int drm_infobufs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_markbufs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_freebufs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int drm_mapbufs(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
 extern int drm_order(unsigned long size);
-extern int drm_addbufs(struct inode *inode, struct file *filp,
-		       unsigned int cmd, unsigned long arg);
-extern int drm_infobufs(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
-extern int drm_markbufs(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
-extern int drm_freebufs(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
-extern int drm_mapbufs(struct inode *inode, struct file *filp,
-		       unsigned int cmd, unsigned long arg);
 extern unsigned long drm_get_resource_start(struct drm_device *dev,
					    unsigned int resource);
 extern unsigned long drm_get_resource_len(struct drm_device *dev,
@@ -960,19 +972,20 @@ extern unsigned long drm_get_resource_len(struct drm_device *dev,
 extern int drm_dma_setup(struct drm_device *dev);
 extern void drm_dma_takedown(struct drm_device *dev);
 extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
-extern void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp);
+extern void drm_core_reclaim_buffers(struct drm_device *dev,
+				     struct drm_file *filp);
 
				/* IRQ support (drm_irq.h) */
-extern int drm_control(struct inode *inode, struct file *filp,
-		       unsigned int cmd, unsigned long arg);
+extern int drm_control(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
 extern int drm_irq_uninstall(struct drm_device *dev);
 extern void drm_driver_irq_preinstall(struct drm_device *dev);
 extern void drm_driver_irq_postinstall(struct drm_device *dev);
 extern void drm_driver_irq_uninstall(struct drm_device *dev);
 
-extern int drm_wait_vblank(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
+extern int drm_wait_vblank(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern void drm_vbl_send_signals(struct drm_device *dev);
 extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
@@ -980,31 +993,30 @@ extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_de
				/* AGP/GART support (drm_agpsupport.h) */
 extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
 extern int drm_agp_acquire(struct drm_device *dev);
-extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
-				 unsigned int cmd, unsigned long arg);
+extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
 extern int drm_agp_release(struct drm_device *dev);
-extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
-				 unsigned int cmd, unsigned long arg);
+extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
 extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
-				unsigned int cmd, unsigned long arg);
-extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info * info);
-extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
-			      unsigned int cmd, unsigned long arg);
+extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
 extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
-			       unsigned int cmd, unsigned long arg);
+extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
 extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
-			      unsigned int cmd, unsigned long arg);
+extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
 extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
-				unsigned int cmd, unsigned long arg);
+extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
 extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
-			      unsigned int cmd, unsigned long arg);
-extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
-					    size_t pages, u32 type);
+extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
 extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
@@ -1033,10 +1045,11 @@ extern int drm_proc_cleanup(int minor,
 
			       /* Scatter Gather Support (drm_scatter.h) */
 extern void drm_sg_cleanup(struct drm_sg_mem * entry);
-extern int drm_sg_alloc(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
-extern int drm_sg_free(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
+extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
+extern int drm_sg_free(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
 
			       /* ATI PCIGART support (ati_pcigart.h) */
 extern int drm_ati_pcigart_init(struct drm_device *dev,
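The drmP.h changes above replace the (inode, filp, cmd, arg) ioctl prototype with (dev, data, file_priv) and add DRM_IOCTL_DEF() for building drm_ioctl_desc tables; the copy_from_user()/copy_to_user() calls removed from the handlers below imply that the core drm_ioctl() path now performs the user copy itself. A sketch of what a converted handler and its table entry look like — the handler body, array name and flag choice are illustrative, not lifted from this patch:

	/* New-style handler: receives a kernel copy of the argument in 'data'. */
	static int example_agp_enable(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
	{
		struct drm_agp_mode *mode = data;

		return drm_agp_enable(dev, *mode);
	}

	/* Descriptor built with the DRM_IOCTL_DEF() helper: */
	static struct drm_ioctl_desc example_ioctls[] = {
		DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, example_agp_enable,
			      DRM_MASTER | DRM_ROOT_ONLY),
	};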
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 354f0e3674bf..214f4fbcba73 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -40,7 +40,7 @@
40 * Get AGP information. 40 * Get AGP information.
41 * 41 *
42 * \param inode device inode. 42 * \param inode device inode.
43 * \param filp file pointer. 43 * \param file_priv DRM file private.
44 * \param cmd command. 44 * \param cmd command.
45 * \param arg pointer to a (output) drm_agp_info structure. 45 * \param arg pointer to a (output) drm_agp_info structure.
46 * \return zero on success or a negative number on failure. 46 * \return zero on success or a negative number on failure.
@@ -71,20 +71,16 @@ int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
71 71
72EXPORT_SYMBOL(drm_agp_info); 72EXPORT_SYMBOL(drm_agp_info);
73 73
74int drm_agp_info_ioctl(struct inode *inode, struct file *filp, 74int drm_agp_info_ioctl(struct drm_device *dev, void *data,
75 unsigned int cmd, unsigned long arg) 75 struct drm_file *file_priv)
76{ 76{
77 struct drm_file *priv = filp->private_data; 77 struct drm_agp_info *info = data;
78 struct drm_device *dev = priv->head->dev;
79 struct drm_agp_info info;
80 int err; 78 int err;
81 79
82 err = drm_agp_info(dev, &info); 80 err = drm_agp_info(dev, info);
83 if (err) 81 if (err)
84 return err; 82 return err;
85 83
86 if (copy_to_user((struct drm_agp_info __user *) arg, &info, sizeof(info)))
87 return -EFAULT;
88 return 0; 84 return 0;
89} 85}
90 86
@@ -115,7 +111,7 @@ EXPORT_SYMBOL(drm_agp_acquire);
115 * Acquire the AGP device (ioctl). 111 * Acquire the AGP device (ioctl).
116 * 112 *
117 * \param inode device inode. 113 * \param inode device inode.
118 * \param filp file pointer. 114 * \param file_priv DRM file private.
119 * \param cmd command. 115 * \param cmd command.
120 * \param arg user argument. 116 * \param arg user argument.
121 * \return zero on success or a negative number on failure. 117 * \return zero on success or a negative number on failure.
@@ -123,12 +119,10 @@ EXPORT_SYMBOL(drm_agp_acquire);
123 * Verifies the AGP device hasn't been acquired before and calls 119 * Verifies the AGP device hasn't been acquired before and calls
124 * \c agp_backend_acquire. 120 * \c agp_backend_acquire.
125 */ 121 */
126int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, 122int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
127 unsigned int cmd, unsigned long arg) 123 struct drm_file *file_priv)
128{ 124{
129 struct drm_file *priv = filp->private_data; 125 return drm_agp_acquire((struct drm_device *) file_priv->head->dev);
130
131 return drm_agp_acquire((struct drm_device *) priv->head->dev);
132} 126}
133 127
134/** 128/**
@@ -149,12 +143,9 @@ int drm_agp_release(struct drm_device * dev)
149} 143}
150EXPORT_SYMBOL(drm_agp_release); 144EXPORT_SYMBOL(drm_agp_release);
151 145
152int drm_agp_release_ioctl(struct inode *inode, struct file *filp, 146int drm_agp_release_ioctl(struct drm_device *dev, void *data,
153 unsigned int cmd, unsigned long arg) 147 struct drm_file *file_priv)
154{ 148{
155 struct drm_file *priv = filp->private_data;
156 struct drm_device *dev = priv->head->dev;
157
158 return drm_agp_release(dev); 149 return drm_agp_release(dev);
159} 150}
160 151
@@ -182,24 +173,19 @@ int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
182 173
183EXPORT_SYMBOL(drm_agp_enable); 174EXPORT_SYMBOL(drm_agp_enable);
184 175
185int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, 176int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
186 unsigned int cmd, unsigned long arg) 177 struct drm_file *file_priv)
187{ 178{
188 struct drm_file *priv = filp->private_data; 179 struct drm_agp_mode *mode = data;
189 struct drm_device *dev = priv->head->dev;
190 struct drm_agp_mode mode;
191
192 if (copy_from_user(&mode, (struct drm_agp_mode __user *) arg, sizeof(mode)))
193 return -EFAULT;
194 180
195 return drm_agp_enable(dev, mode); 181 return drm_agp_enable(dev, *mode);
196} 182}
197 183
198/** 184/**
199 * Allocate AGP memory. 185 * Allocate AGP memory.
200 * 186 *
201 * \param inode device inode. 187 * \param inode device inode.
202 * \param filp file pointer. 188 * \param file_priv file private pointer.
203 * \param cmd command. 189 * \param cmd command.
204 * \param arg pointer to a drm_agp_buffer structure. 190 * \param arg pointer to a drm_agp_buffer structure.
205 * \return zero on success or a negative number on failure. 191 * \return zero on success or a negative number on failure.
@@ -241,35 +227,13 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
241} 227}
242EXPORT_SYMBOL(drm_agp_alloc); 228EXPORT_SYMBOL(drm_agp_alloc);
243 229
244int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
245 unsigned int cmd, unsigned long arg)
246{
247 struct drm_file *priv = filp->private_data;
248 struct drm_device *dev = priv->head->dev;
249 struct drm_agp_buffer request;
250 struct drm_agp_buffer __user *argp = (void __user *)arg;
251 int err;
252
253 if (copy_from_user(&request, argp, sizeof(request)))
254 return -EFAULT;
255 230
256 err = drm_agp_alloc(dev, &request); 231int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
257 if (err) 232 struct drm_file *file_priv)
258 return err; 233{
259 234 struct drm_agp_buffer *request = data;
260 if (copy_to_user(argp, &request, sizeof(request))) {
261 struct drm_agp_mem *entry;
262 list_for_each_entry(entry, &dev->agp->memory, head) {
263 if (entry->handle == request.handle)
264 break;
265 }
266 list_del(&entry->head);
267 drm_free_agp(entry->memory, entry->pages);
268 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
269 return -EFAULT;
270 }
271 235
272 return 0; 236 return drm_agp_alloc(dev, request);
273} 237}
274 238
275/** 239/**
@@ -297,7 +261,7 @@ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
297 * Unbind AGP memory from the GATT (ioctl). 261 * Unbind AGP memory from the GATT (ioctl).
298 * 262 *
299 * \param inode device inode. 263 * \param inode device inode.
300 * \param filp file pointer. 264 * \param file_priv DRM file private.
301 * \param cmd command. 265 * \param cmd command.
302 * \param arg pointer to a drm_agp_binding structure. 266 * \param arg pointer to a drm_agp_binding structure.
303 * \return zero on success or a negative number on failure. 267 * \return zero on success or a negative number on failure.
@@ -323,25 +287,20 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
 }
 EXPORT_SYMBOL(drm_agp_unbind);
 
-int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
-			 unsigned int cmd, unsigned long arg)
-{
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
-	struct drm_agp_binding request;
 
-	if (copy_from_user
-	    (&request, (struct drm_agp_binding __user *) arg, sizeof(request)))
-		return -EFAULT;
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_agp_binding *request = data;
 
-	return drm_agp_unbind(dev, &request);
+	return drm_agp_unbind(dev, request);
 }
 
 /**
  * Bind AGP memory into the GATT (ioctl)
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a drm_agp_binding structure.
  * \return zero on success or a negative number on failure.
@@ -372,25 +331,20 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
 }
 EXPORT_SYMBOL(drm_agp_bind);
 
-int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
-		       unsigned int cmd, unsigned long arg)
-{
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
-	struct drm_agp_binding request;
 
-	if (copy_from_user
-	    (&request, (struct drm_agp_binding __user *) arg, sizeof(request)))
-		return -EFAULT;
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_agp_binding *request = data;
 
-	return drm_agp_bind(dev, &request);
+	return drm_agp_bind(dev, request);
 }
 
 /**
  * Free AGP memory (ioctl).
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a drm_agp_buffer structure.
  * \return zero on success or a negative number on failure.
@@ -419,18 +373,14 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
 }
 EXPORT_SYMBOL(drm_agp_free);
 
-int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
-		       unsigned int cmd, unsigned long arg)
-{
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
-	struct drm_agp_buffer request;
 
-	if (copy_from_user
-	    (&request, (struct drm_agp_buffer __user *) arg, sizeof(request)))
-		return -EFAULT;
 
-	return drm_agp_free(dev, &request);
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_agp_buffer *request = data;
+
+	return drm_agp_free(dev, request);
 }
 
 /**
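Every AGP handler above gets the same mechanical treatment: the (inode, filp, cmd, arg) prototype becomes (dev, data, file_priv), the filp->private_data / priv->head->dev dance disappears, and the per-handler copy_from_user()/copy_to_user() calls drop out because the caller now hands the handler a kernel copy of the argument through data. A minimal user-space sketch of the new handler shape, with stand-in types rather than the kernel's drmP.h definitions:

/*
 * Illustrative only: stand-in types, not the kernel's headers.  Shows the
 * handler shape the AGP hunks above converge on.
 */
struct drm_agp_mode { unsigned long mode; };
struct drm_device   { int unused; };
struct drm_file     { struct drm_device *dev; };

static int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
{
	(void)dev;
	(void)mode;		/* the real GATT programming is elided */
	return 0;
}

/* New-style handler: 'data' already holds a kernel copy of the argument. */
static int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_agp_mode *mode = data;

	(void)file_priv;	/* unused here, but part of the new prototype */
	return drm_agp_enable(dev, *mode);
}

int main(void)
{
	struct drm_device dev = { 0 };
	struct drm_file file = { &dev };
	struct drm_agp_mode mode = { 1 };

	return drm_agp_enable_ioctl(&dev, &mode, &file);
}

Because the argument already lives in kernel memory, the -EFAULT paths vanish from each handler; the user copy is evidently handled once, by the ioctl dispatcher, instead of in every function.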
diff --git a/drivers/char/drm/drm_auth.c b/drivers/char/drm/drm_auth.c
index 7f777da872cd..a73462723d2d 100644
--- a/drivers/char/drm/drm_auth.c
+++ b/drivers/char/drm/drm_auth.c
@@ -128,42 +128,38 @@ static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
  * Get a unique magic number (ioctl).
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a resulting drm_auth structure.
  * \return zero on success, or a negative number on failure.
  *
  * If there is a magic number in drm_file::magic then use it, otherwise
  * searches an unique non-zero magic number and add it associating it with \p
- * filp.
+ * file_priv.
  */
-int drm_getmagic(struct inode *inode, struct file *filp,
-		 unsigned int cmd, unsigned long arg)
+int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	static drm_magic_t sequence = 0;
 	static DEFINE_SPINLOCK(lock);
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
-	struct drm_auth auth;
+	struct drm_auth *auth = data;
 
 	/* Find unique magic */
-	if (priv->magic) {
-		auth.magic = priv->magic;
+	if (file_priv->magic) {
+		auth->magic = file_priv->magic;
 	} else {
 		do {
 			spin_lock(&lock);
 			if (!sequence)
 				++sequence;	/* reserve 0 */
-			auth.magic = sequence++;
+			auth->magic = sequence++;
 			spin_unlock(&lock);
-		} while (drm_find_file(dev, auth.magic));
-		priv->magic = auth.magic;
-		drm_add_magic(dev, priv, auth.magic);
+		} while (drm_find_file(dev, auth->magic));
+		file_priv->magic = auth->magic;
+		drm_add_magic(dev, file_priv, auth->magic);
 	}
 
-	DRM_DEBUG("%u\n", auth.magic);
-	if (copy_to_user((struct drm_auth __user *) arg, &auth, sizeof(auth)))
-		return -EFAULT;
+	DRM_DEBUG("%u\n", auth->magic);
+
 	return 0;
 }
 
@@ -171,27 +167,23 @@ int drm_getmagic(struct inode *inode, struct file *filp,
  * Authenticate with a magic.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a drm_auth structure.
  * \return zero if authentication successed, or a negative number otherwise.
  *
- * Checks if \p filp is associated with the magic number passed in \arg.
+ * Checks if \p file_priv is associated with the magic number passed in \arg.
  */
-int drm_authmagic(struct inode *inode, struct file *filp,
-		  unsigned int cmd, unsigned long arg)
+int drm_authmagic(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
-	struct drm_auth auth;
+	struct drm_auth *auth = data;
 	struct drm_file *file;
 
-	if (copy_from_user(&auth, (struct drm_auth __user *) arg, sizeof(auth)))
-		return -EFAULT;
-	DRM_DEBUG("%u\n", auth.magic);
-	if ((file = drm_find_file(dev, auth.magic))) {
+	DRM_DEBUG("%u\n", auth->magic);
+	if ((file = drm_find_file(dev, auth->magic))) {
 		file->authenticated = 1;
-		drm_remove_magic(dev, auth.magic);
+		drm_remove_magic(dev, auth->magic);
 		return 0;
 	}
 	return -EINVAL;
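drm_getmagic() now writes the magic straight into *auth and drm_authmagic() reads it from *auth; neither handler touches user memory itself. A toy user-space model of the handshake (not the kernel code, only the control flow it implements):

/* Toy model of the magic handshake; everything here is a stand-in. */
#include <stdio.h>

struct drm_auth { unsigned int magic; };

static unsigned int sequence;		/* stands in for the static in drm_getmagic() */
static unsigned int authenticated_magic;

static int toy_getmagic(struct drm_auth *auth)
{
	if (!sequence)
		++sequence;		/* reserve 0, as the kernel code does */
	auth->magic = sequence++;
	return 0;
}

static int toy_authmagic(const struct drm_auth *auth)
{
	if (auth->magic && auth->magic < sequence) {
		authenticated_magic = auth->magic;
		return 0;
	}
	return -1;			/* -EINVAL in the real handler */
}

int main(void)
{
	struct drm_auth auth = { 0 };

	toy_getmagic(&auth);		/* client asks for a magic */
	printf("magic %u auth %d\n", auth.magic, toy_authmagic(&auth));
	return 0;
}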
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index c115b39b8517..856774fbe025 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -92,7 +92,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
92 * Ioctl to specify a range of memory that is available for mapping by a non-root process. 92 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
93 * 93 *
94 * \param inode device inode. 94 * \param inode device inode.
95 * \param filp file pointer. 95 * \param file_priv DRM file private.
96 * \param cmd command. 96 * \param cmd command.
97 * \param arg pointer to a drm_map structure. 97 * \param arg pointer to a drm_map structure.
98 * \return zero on success or a negative value on error. 98 * \return zero on success or a negative value on error.
@@ -332,38 +332,24 @@ int drm_addmap(struct drm_device * dev, unsigned int offset,
 
 EXPORT_SYMBOL(drm_addmap);
 
-int drm_addmap_ioctl(struct inode *inode, struct file *filp,
-		     unsigned int cmd, unsigned long arg)
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
-	struct drm_map map;
+	struct drm_map *map = data;
 	struct drm_map_list *maplist;
-	struct drm_map __user *argp = (void __user *)arg;
 	int err;
 
-	if (!(filp->f_mode & 3))
-		return -EACCES;	/* Require read/write */
-
-	if (copy_from_user(&map, argp, sizeof(map))) {
-		return -EFAULT;
-	}
-
-	if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
+	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
 		return -EPERM;
 
-	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
-			      &maplist);
+	err = drm_addmap_core(dev, map->offset, map->size, map->type,
+			      map->flags, &maplist);
 
 	if (err)
 		return err;
 
-	if (copy_to_user(argp, maplist->map, sizeof(struct drm_map)))
-		return -EFAULT;
-
 	/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
-	if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
-		return -EFAULT;
+	map->handle = (void *)(unsigned long)maplist->user_token;
 	return 0;
 }
 
@@ -372,7 +358,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
372 * isn't in use. 358 * isn't in use.
373 * 359 *
374 * \param inode device inode. 360 * \param inode device inode.
375 * \param filp file pointer. 361 * \param file_priv DRM file private.
376 * \param cmd command. 362 * \param cmd command.
377 * \param arg pointer to a struct drm_map structure. 363 * \param arg pointer to a struct drm_map structure.
378 * \return zero on success or a negative value on error. 364 * \return zero on success or a negative value on error.
@@ -453,24 +439,18 @@ int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
453 * gets used by drivers that the server doesn't need to care about. This seems 439 * gets used by drivers that the server doesn't need to care about. This seems
454 * unlikely. 440 * unlikely.
455 */ 441 */
456int drm_rmmap_ioctl(struct inode *inode, struct file *filp, 442int drm_rmmap_ioctl(struct drm_device *dev, void *data,
457 unsigned int cmd, unsigned long arg) 443 struct drm_file *file_priv)
458{ 444{
459 struct drm_file *priv = filp->private_data; 445 struct drm_map *request = data;
460 struct drm_device *dev = priv->head->dev;
461 struct drm_map request;
462 drm_local_map_t *map = NULL; 446 drm_local_map_t *map = NULL;
463 struct drm_map_list *r_list; 447 struct drm_map_list *r_list;
464 int ret; 448 int ret;
465 449
466 if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) {
467 return -EFAULT;
468 }
469
470 mutex_lock(&dev->struct_mutex); 450 mutex_lock(&dev->struct_mutex);
471 list_for_each_entry(r_list, &dev->maplist, head) { 451 list_for_each_entry(r_list, &dev->maplist, head) {
472 if (r_list->map && 452 if (r_list->map &&
473 r_list->user_token == (unsigned long)request.handle && 453 r_list->user_token == (unsigned long)request->handle &&
474 r_list->map->flags & _DRM_REMOVABLE) { 454 r_list->map->flags & _DRM_REMOVABLE) {
475 map = r_list->map; 455 map = r_list->map;
476 break; 456 break;
@@ -661,7 +641,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
661 buf->waiting = 0; 641 buf->waiting = 0;
662 buf->pending = 0; 642 buf->pending = 0;
663 init_waitqueue_head(&buf->dma_wait); 643 init_waitqueue_head(&buf->dma_wait);
664 buf->filp = NULL; 644 buf->file_priv = NULL;
665 645
666 buf->dev_priv_size = dev->driver->dev_priv_size; 646 buf->dev_priv_size = dev->driver->dev_priv_size;
667 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); 647 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
@@ -872,7 +852,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
872 buf->waiting = 0; 852 buf->waiting = 0;
873 buf->pending = 0; 853 buf->pending = 0;
874 init_waitqueue_head(&buf->dma_wait); 854 init_waitqueue_head(&buf->dma_wait);
875 buf->filp = NULL; 855 buf->file_priv = NULL;
876 856
877 buf->dev_priv_size = dev->driver->dev_priv_size; 857 buf->dev_priv_size = dev->driver->dev_priv_size;
878 buf->dev_private = drm_alloc(buf->dev_priv_size, 858 buf->dev_private = drm_alloc(buf->dev_priv_size,
@@ -1050,7 +1030,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
1050 buf->waiting = 0; 1030 buf->waiting = 0;
1051 buf->pending = 0; 1031 buf->pending = 0;
1052 init_waitqueue_head(&buf->dma_wait); 1032 init_waitqueue_head(&buf->dma_wait);
1053 buf->filp = NULL; 1033 buf->file_priv = NULL;
1054 1034
1055 buf->dev_priv_size = dev->driver->dev_priv_size; 1035 buf->dev_priv_size = dev->driver->dev_priv_size;
1056 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); 1036 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
@@ -1211,7 +1191,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
1211 buf->waiting = 0; 1191 buf->waiting = 0;
1212 buf->pending = 0; 1192 buf->pending = 0;
1213 init_waitqueue_head(&buf->dma_wait); 1193 init_waitqueue_head(&buf->dma_wait);
1214 buf->filp = NULL; 1194 buf->file_priv = NULL;
1215 1195
1216 buf->dev_priv_size = dev->driver->dev_priv_size; 1196 buf->dev_priv_size = dev->driver->dev_priv_size;
1217 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); 1197 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
@@ -1275,7 +1255,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
1275 * Add buffers for DMA transfers (ioctl). 1255 * Add buffers for DMA transfers (ioctl).
1276 * 1256 *
1277 * \param inode device inode. 1257 * \param inode device inode.
1278 * \param filp file pointer. 1258 * \param file_priv DRM file private.
1279 * \param cmd command. 1259 * \param cmd command.
1280 * \param arg pointer to a struct drm_buf_desc request. 1260 * \param arg pointer to a struct drm_buf_desc request.
1281 * \return zero on success or a negative number on failure. 1261 * \return zero on success or a negative number on failure.
@@ -1285,38 +1265,27 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
  * PCI memory respectively.
  */
-int drm_addbufs(struct inode *inode, struct file *filp,
-		unsigned int cmd, unsigned long arg)
+int drm_addbufs(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
 {
-	struct drm_buf_desc request;
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
+	struct drm_buf_desc *request = data;
 	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
 
-	if (copy_from_user(&request, (struct drm_buf_desc __user *) arg,
-			   sizeof(request)))
-		return -EFAULT;
-
 #if __OS_HAS_AGP
-	if (request.flags & _DRM_AGP_BUFFER)
-		ret = drm_addbufs_agp(dev, &request);
+	if (request->flags & _DRM_AGP_BUFFER)
+		ret = drm_addbufs_agp(dev, request);
 	else
 #endif
-	if (request.flags & _DRM_SG_BUFFER)
-		ret = drm_addbufs_sg(dev, &request);
-	else if (request.flags & _DRM_FB_BUFFER)
-		ret = drm_addbufs_fb(dev, &request);
+	if (request->flags & _DRM_SG_BUFFER)
+		ret = drm_addbufs_sg(dev, request);
+	else if (request->flags & _DRM_FB_BUFFER)
+		ret = drm_addbufs_fb(dev, request);
 	else
-		ret = drm_addbufs_pci(dev, &request);
+		ret = drm_addbufs_pci(dev, request);
 
-	if (ret == 0) {
-		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
-			ret = -EFAULT;
-		}
-	}
 	return ret;
 }
 
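drm_addbufs() is now a pure dispatcher on request->flags: exactly one of the AGP, scatter-gather, framebuffer or PCI back ends runs, and the chosen back end updates the request structure in place. A self-contained sketch of that dispatch, with made-up flag values standing in for the _DRM_*_BUFFER constants:

/* Sketch of the flag dispatch in drm_addbufs(); constants and types here
 * are stand-ins, not the values from drm.h. */
#include <stdio.h>

#define AGP_BUFFER 0x1
#define SG_BUFFER  0x2
#define FB_BUFFER  0x4

struct buf_desc { unsigned int flags; int count; };

static int addbufs_agp(struct buf_desc *r) { (void)r; return 0; }
static int addbufs_sg(struct buf_desc *r)  { (void)r; return 0; }
static int addbufs_fb(struct buf_desc *r)  { (void)r; return 0; }
static int addbufs_pci(struct buf_desc *r) { (void)r; return 0; }

static int addbufs(struct buf_desc *request)
{
	/* Exactly one back end is chosen; PCI is the fallback. */
	if (request->flags & AGP_BUFFER)
		return addbufs_agp(request);
	if (request->flags & SG_BUFFER)
		return addbufs_sg(request);
	if (request->flags & FB_BUFFER)
		return addbufs_fb(request);
	return addbufs_pci(request);
}

int main(void)
{
	struct buf_desc req = { .flags = SG_BUFFER, .count = 16 };

	printf("%d\n", addbufs(&req));
	return 0;
}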
@@ -1328,7 +1297,7 @@ int drm_addbufs(struct inode *inode, struct file *filp,
1328 * large buffers can be used for image transfer). 1297 * large buffers can be used for image transfer).
1329 * 1298 *
1330 * \param inode device inode. 1299 * \param inode device inode.
1331 * \param filp file pointer. 1300 * \param file_priv DRM file private.
1332 * \param cmd command. 1301 * \param cmd command.
1333 * \param arg pointer to a drm_buf_info structure. 1302 * \param arg pointer to a drm_buf_info structure.
1334 * \return zero on success or a negative number on failure. 1303 * \return zero on success or a negative number on failure.
@@ -1337,14 +1306,11 @@ int drm_addbufs(struct inode *inode, struct file *filp,
1337 * lock, preventing of allocating more buffers after this call. Information 1306 * lock, preventing of allocating more buffers after this call. Information
1338 * about each requested buffer is then copied into user space. 1307 * about each requested buffer is then copied into user space.
1339 */ 1308 */
1340int drm_infobufs(struct inode *inode, struct file *filp, 1309int drm_infobufs(struct drm_device *dev, void *data,
1341 unsigned int cmd, unsigned long arg) 1310 struct drm_file *file_priv)
1342{ 1311{
1343 struct drm_file *priv = filp->private_data;
1344 struct drm_device *dev = priv->head->dev;
1345 struct drm_device_dma *dma = dev->dma; 1312 struct drm_device_dma *dma = dev->dma;
1346 struct drm_buf_info request; 1313 struct drm_buf_info *request = data;
1347 struct drm_buf_info __user *argp = (void __user *)arg;
1348 int i; 1314 int i;
1349 int count; 1315 int count;
1350 1316
@@ -1362,9 +1328,6 @@ int drm_infobufs(struct inode *inode, struct file *filp,
1362 ++dev->buf_use; /* Can't allocate more after this call */ 1328 ++dev->buf_use; /* Can't allocate more after this call */
1363 spin_unlock(&dev->count_lock); 1329 spin_unlock(&dev->count_lock);
1364 1330
1365 if (copy_from_user(&request, argp, sizeof(request)))
1366 return -EFAULT;
1367
1368 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { 1331 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1369 if (dma->bufs[i].buf_count) 1332 if (dma->bufs[i].buf_count)
1370 ++count; 1333 ++count;
@@ -1372,11 +1335,11 @@ int drm_infobufs(struct inode *inode, struct file *filp,
1372 1335
1373 DRM_DEBUG("count = %d\n", count); 1336 DRM_DEBUG("count = %d\n", count);
1374 1337
1375 if (request.count >= count) { 1338 if (request->count >= count) {
1376 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { 1339 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1377 if (dma->bufs[i].buf_count) { 1340 if (dma->bufs[i].buf_count) {
1378 struct drm_buf_desc __user *to = 1341 struct drm_buf_desc __user *to =
1379 &request.list[count]; 1342 &request->list[count];
1380 struct drm_buf_entry *from = &dma->bufs[i]; 1343 struct drm_buf_entry *from = &dma->bufs[i];
1381 struct drm_freelist *list = &dma->bufs[i].freelist; 1344 struct drm_freelist *list = &dma->bufs[i].freelist;
1382 if (copy_to_user(&to->count, 1345 if (copy_to_user(&to->count,
@@ -1403,10 +1366,7 @@ int drm_infobufs(struct inode *inode, struct file *filp,
1403 } 1366 }
1404 } 1367 }
1405 } 1368 }
1406 request.count = count; 1369 request->count = count;
1407
1408 if (copy_to_user(argp, &request, sizeof(request)))
1409 return -EFAULT;
1410 1370
1411 return 0; 1371 return 0;
1412} 1372}
@@ -1415,7 +1375,7 @@ int drm_infobufs(struct inode *inode, struct file *filp,
1415 * Specifies a low and high water mark for buffer allocation 1375 * Specifies a low and high water mark for buffer allocation
1416 * 1376 *
1417 * \param inode device inode. 1377 * \param inode device inode.
1418 * \param filp file pointer. 1378 * \param file_priv DRM file private.
1419 * \param cmd command. 1379 * \param cmd command.
1420 * \param arg a pointer to a drm_buf_desc structure. 1380 * \param arg a pointer to a drm_buf_desc structure.
1421 * \return zero on success or a negative number on failure. 1381 * \return zero on success or a negative number on failure.
@@ -1425,13 +1385,11 @@ int drm_infobufs(struct inode *inode, struct file *filp,
1425 * 1385 *
1426 * \note This ioctl is deprecated and mostly never used. 1386 * \note This ioctl is deprecated and mostly never used.
1427 */ 1387 */
1428int drm_markbufs(struct inode *inode, struct file *filp, 1388int drm_markbufs(struct drm_device *dev, void *data,
1429 unsigned int cmd, unsigned long arg) 1389 struct drm_file *file_priv)
1430{ 1390{
1431 struct drm_file *priv = filp->private_data;
1432 struct drm_device *dev = priv->head->dev;
1433 struct drm_device_dma *dma = dev->dma; 1391 struct drm_device_dma *dma = dev->dma;
1434 struct drm_buf_desc request; 1392 struct drm_buf_desc *request = data;
1435 int order; 1393 int order;
1436 struct drm_buf_entry *entry; 1394 struct drm_buf_entry *entry;
1437 1395
@@ -1441,24 +1399,20 @@ int drm_markbufs(struct inode *inode, struct file *filp,
1441 if (!dma) 1399 if (!dma)
1442 return -EINVAL; 1400 return -EINVAL;
1443 1401
1444 if (copy_from_user(&request,
1445 (struct drm_buf_desc __user *) arg, sizeof(request)))
1446 return -EFAULT;
1447
1448 DRM_DEBUG("%d, %d, %d\n", 1402 DRM_DEBUG("%d, %d, %d\n",
1449 request.size, request.low_mark, request.high_mark); 1403 request->size, request->low_mark, request->high_mark);
1450 order = drm_order(request.size); 1404 order = drm_order(request->size);
1451 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1405 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1452 return -EINVAL; 1406 return -EINVAL;
1453 entry = &dma->bufs[order]; 1407 entry = &dma->bufs[order];
1454 1408
1455 if (request.low_mark < 0 || request.low_mark > entry->buf_count) 1409 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1456 return -EINVAL; 1410 return -EINVAL;
1457 if (request.high_mark < 0 || request.high_mark > entry->buf_count) 1411 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1458 return -EINVAL; 1412 return -EINVAL;
1459 1413
1460 entry->freelist.low_mark = request.low_mark; 1414 entry->freelist.low_mark = request->low_mark;
1461 entry->freelist.high_mark = request.high_mark; 1415 entry->freelist.high_mark = request->high_mark;
1462 1416
1463 return 0; 1417 return 0;
1464} 1418}
@@ -1467,7 +1421,7 @@ int drm_markbufs(struct inode *inode, struct file *filp,
1467 * Unreserve the buffers in list, previously reserved using drmDMA. 1421 * Unreserve the buffers in list, previously reserved using drmDMA.
1468 * 1422 *
1469 * \param inode device inode. 1423 * \param inode device inode.
1470 * \param filp file pointer. 1424 * \param file_priv DRM file private.
1471 * \param cmd command. 1425 * \param cmd command.
1472 * \param arg pointer to a drm_buf_free structure. 1426 * \param arg pointer to a drm_buf_free structure.
1473 * \return zero on success or a negative number on failure. 1427 * \return zero on success or a negative number on failure.
@@ -1475,13 +1429,11 @@ int drm_markbufs(struct inode *inode, struct file *filp,
1475 * Calls free_buffer() for each used buffer. 1429 * Calls free_buffer() for each used buffer.
1476 * This function is primarily used for debugging. 1430 * This function is primarily used for debugging.
1477 */ 1431 */
1478int drm_freebufs(struct inode *inode, struct file *filp, 1432int drm_freebufs(struct drm_device *dev, void *data,
1479 unsigned int cmd, unsigned long arg) 1433 struct drm_file *file_priv)
1480{ 1434{
1481 struct drm_file *priv = filp->private_data;
1482 struct drm_device *dev = priv->head->dev;
1483 struct drm_device_dma *dma = dev->dma; 1435 struct drm_device_dma *dma = dev->dma;
1484 struct drm_buf_free request; 1436 struct drm_buf_free *request = data;
1485 int i; 1437 int i;
1486 int idx; 1438 int idx;
1487 struct drm_buf *buf; 1439 struct drm_buf *buf;
@@ -1492,13 +1444,9 @@ int drm_freebufs(struct inode *inode, struct file *filp,
1492 if (!dma) 1444 if (!dma)
1493 return -EINVAL; 1445 return -EINVAL;
1494 1446
1495 if (copy_from_user(&request, 1447 DRM_DEBUG("%d\n", request->count);
1496 (struct drm_buf_free __user *) arg, sizeof(request))) 1448 for (i = 0; i < request->count; i++) {
1497 return -EFAULT; 1449 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1498
1499 DRM_DEBUG("%d\n", request.count);
1500 for (i = 0; i < request.count; i++) {
1501 if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
1502 return -EFAULT; 1450 return -EFAULT;
1503 if (idx < 0 || idx >= dma->buf_count) { 1451 if (idx < 0 || idx >= dma->buf_count) {
1504 DRM_ERROR("Index %d (of %d max)\n", 1452 DRM_ERROR("Index %d (of %d max)\n",
@@ -1506,7 +1454,7 @@ int drm_freebufs(struct inode *inode, struct file *filp,
1506 return -EINVAL; 1454 return -EINVAL;
1507 } 1455 }
1508 buf = dma->buflist[idx]; 1456 buf = dma->buflist[idx];
1509 if (buf->filp != filp) { 1457 if (buf->file_priv != file_priv) {
1510 DRM_ERROR("Process %d freeing buffer not owned\n", 1458 DRM_ERROR("Process %d freeing buffer not owned\n",
1511 current->pid); 1459 current->pid);
1512 return -EINVAL; 1460 return -EINVAL;
@@ -1521,7 +1469,7 @@ int drm_freebufs(struct inode *inode, struct file *filp,
1521 * Maps all of the DMA buffers into client-virtual space (ioctl). 1469 * Maps all of the DMA buffers into client-virtual space (ioctl).
1522 * 1470 *
1523 * \param inode device inode. 1471 * \param inode device inode.
1524 * \param filp file pointer. 1472 * \param file_priv DRM file private.
1525 * \param cmd command. 1473 * \param cmd command.
1526 * \param arg pointer to a drm_buf_map structure. 1474 * \param arg pointer to a drm_buf_map structure.
1527 * \return zero on success or a negative number on failure. 1475 * \return zero on success or a negative number on failure.
@@ -1531,18 +1479,15 @@ int drm_freebufs(struct inode *inode, struct file *filp,
1531 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls 1479 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
1532 * drm_mmap_dma(). 1480 * drm_mmap_dma().
1533 */ 1481 */
1534int drm_mapbufs(struct inode *inode, struct file *filp, 1482int drm_mapbufs(struct drm_device *dev, void *data,
1535 unsigned int cmd, unsigned long arg) 1483 struct drm_file *file_priv)
1536{ 1484{
1537 struct drm_file *priv = filp->private_data;
1538 struct drm_device *dev = priv->head->dev;
1539 struct drm_device_dma *dma = dev->dma; 1485 struct drm_device_dma *dma = dev->dma;
1540 struct drm_buf_map __user *argp = (void __user *)arg;
1541 int retcode = 0; 1486 int retcode = 0;
1542 const int zero = 0; 1487 const int zero = 0;
1543 unsigned long virtual; 1488 unsigned long virtual;
1544 unsigned long address; 1489 unsigned long address;
1545 struct drm_buf_map request; 1490 struct drm_buf_map *request = data;
1546 int i; 1491 int i;
1547 1492
1548 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1493 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1559,10 +1504,7 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
1559 dev->buf_use++; /* Can't allocate more after this call */ 1504 dev->buf_use++; /* Can't allocate more after this call */
1560 spin_unlock(&dev->count_lock); 1505 spin_unlock(&dev->count_lock);
1561 1506
1562 if (copy_from_user(&request, argp, sizeof(request))) 1507 if (request->count >= dma->buf_count) {
1563 return -EFAULT;
1564
1565 if (request.count >= dma->buf_count) {
1566 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) 1508 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1567 || (drm_core_check_feature(dev, DRIVER_SG) 1509 || (drm_core_check_feature(dev, DRIVER_SG)
1568 && (dma->flags & _DRM_DMA_USE_SG)) 1510 && (dma->flags & _DRM_DMA_USE_SG))
@@ -1575,15 +1517,15 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
1575 retcode = -EINVAL; 1517 retcode = -EINVAL;
1576 goto done; 1518 goto done;
1577 } 1519 }
1578
1579 down_write(&current->mm->mmap_sem); 1520 down_write(&current->mm->mmap_sem);
1580 virtual = do_mmap(filp, 0, map->size, 1521 virtual = do_mmap(file_priv->filp, 0, map->size,
1581 PROT_READ | PROT_WRITE, 1522 PROT_READ | PROT_WRITE,
1582 MAP_SHARED, token); 1523 MAP_SHARED,
1524 token);
1583 up_write(&current->mm->mmap_sem); 1525 up_write(&current->mm->mmap_sem);
1584 } else { 1526 } else {
1585 down_write(&current->mm->mmap_sem); 1527 down_write(&current->mm->mmap_sem);
1586 virtual = do_mmap(filp, 0, dma->byte_count, 1528 virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
1587 PROT_READ | PROT_WRITE, 1529 PROT_READ | PROT_WRITE,
1588 MAP_SHARED, 0); 1530 MAP_SHARED, 0);
1589 up_write(&current->mm->mmap_sem); 1531 up_write(&current->mm->mmap_sem);
@@ -1593,28 +1535,28 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
1593 retcode = (signed long)virtual; 1535 retcode = (signed long)virtual;
1594 goto done; 1536 goto done;
1595 } 1537 }
1596 request.virtual = (void __user *)virtual; 1538 request->virtual = (void __user *)virtual;
1597 1539
1598 for (i = 0; i < dma->buf_count; i++) { 1540 for (i = 0; i < dma->buf_count; i++) {
1599 if (copy_to_user(&request.list[i].idx, 1541 if (copy_to_user(&request->list[i].idx,
1600 &dma->buflist[i]->idx, 1542 &dma->buflist[i]->idx,
1601 sizeof(request.list[0].idx))) { 1543 sizeof(request->list[0].idx))) {
1602 retcode = -EFAULT; 1544 retcode = -EFAULT;
1603 goto done; 1545 goto done;
1604 } 1546 }
1605 if (copy_to_user(&request.list[i].total, 1547 if (copy_to_user(&request->list[i].total,
1606 &dma->buflist[i]->total, 1548 &dma->buflist[i]->total,
1607 sizeof(request.list[0].total))) { 1549 sizeof(request->list[0].total))) {
1608 retcode = -EFAULT; 1550 retcode = -EFAULT;
1609 goto done; 1551 goto done;
1610 } 1552 }
1611 if (copy_to_user(&request.list[i].used, 1553 if (copy_to_user(&request->list[i].used,
1612 &zero, sizeof(zero))) { 1554 &zero, sizeof(zero))) {
1613 retcode = -EFAULT; 1555 retcode = -EFAULT;
1614 goto done; 1556 goto done;
1615 } 1557 }
1616 address = virtual + dma->buflist[i]->offset; /* *** */ 1558 address = virtual + dma->buflist[i]->offset; /* *** */
1617 if (copy_to_user(&request.list[i].address, 1559 if (copy_to_user(&request->list[i].address,
1618 &address, sizeof(address))) { 1560 &address, sizeof(address))) {
1619 retcode = -EFAULT; 1561 retcode = -EFAULT;
1620 goto done; 1562 goto done;
@@ -1622,11 +1564,8 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
1622 } 1564 }
1623 } 1565 }
1624 done: 1566 done:
1625 request.count = dma->buf_count; 1567 request->count = dma->buf_count;
1626 DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); 1568 DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1627
1628 if (copy_to_user(argp, &request, sizeof(request)))
1629 return -EFAULT;
1630 1569
1631 return retcode; 1570 return retcode;
1632} 1571}
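Throughout drm_bufs.c the per-buffer owner field changes from buf->filp to buf->file_priv, and drm_freebufs() keeps rejecting attempts to free a buffer owned by another client. A small stand-alone model of that ownership test (the types are stand-ins, not the kernel's):

/* Minimal model of the ownership check drm_freebufs() performs: a buffer
 * may only be freed by the drm_file that owns it. */
#include <stddef.h>
#include <stdio.h>

struct drm_file { int id; };
struct drm_buf  { const struct drm_file *owner; int busy; };

static int free_buf(struct drm_buf *bufs, int nbufs, int idx,
		    const struct drm_file *caller)
{
	if (idx < 0 || idx >= nbufs)
		return -1;		/* -EINVAL: bad index */
	if (bufs[idx].owner != caller)
		return -1;		/* -EINVAL: not the owner */
	bufs[idx].busy = 0;
	bufs[idx].owner = NULL;		/* analogue of buf->file_priv = NULL */
	return 0;
}

int main(void)
{
	struct drm_file a = { 1 }, b = { 2 };
	struct drm_buf bufs[2] = { { &a, 1 }, { &b, 1 } };

	printf("%d %d\n", free_buf(bufs, 2, 0, &a),	/* accepted */
			  free_buf(bufs, 2, 1, &a));	/* rejected */
	return 0;
}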
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index 61ad986baa8d..17fe69e7bfc1 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -131,7 +131,7 @@ void drm_ctxbitmap_cleanup(struct drm_device * dev)
131 * Get per-context SAREA. 131 * Get per-context SAREA.
132 * 132 *
133 * \param inode device inode. 133 * \param inode device inode.
134 * \param filp file pointer. 134 * \param file_priv DRM file private.
135 * \param cmd command. 135 * \param cmd command.
136 * \param arg user argument pointing to a drm_ctx_priv_map structure. 136 * \param arg user argument pointing to a drm_ctx_priv_map structure.
137 * \return zero on success or a negative number on failure. 137 * \return zero on success or a negative number on failure.
@@ -139,22 +139,16 @@ void drm_ctxbitmap_cleanup(struct drm_device * dev)
139 * Gets the map from drm_device::ctx_idr with the handle specified and 139 * Gets the map from drm_device::ctx_idr with the handle specified and
140 * returns its handle. 140 * returns its handle.
141 */ 141 */
142int drm_getsareactx(struct inode *inode, struct file *filp, 142int drm_getsareactx(struct drm_device *dev, void *data,
143 unsigned int cmd, unsigned long arg) 143 struct drm_file *file_priv)
144{ 144{
145 struct drm_file *priv = filp->private_data; 145 struct drm_ctx_priv_map *request = data;
146 struct drm_device *dev = priv->head->dev;
147 struct drm_ctx_priv_map __user *argp = (void __user *)arg;
148 struct drm_ctx_priv_map request;
149 struct drm_map *map; 146 struct drm_map *map;
150 struct drm_map_list *_entry; 147 struct drm_map_list *_entry;
151 148
152 if (copy_from_user(&request, argp, sizeof(request)))
153 return -EFAULT;
154
155 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
156 150
157 map = idr_find(&dev->ctx_idr, request.ctx_id); 151 map = idr_find(&dev->ctx_idr, request->ctx_id);
158 if (!map) { 152 if (!map) {
159 mutex_unlock(&dev->struct_mutex); 153 mutex_unlock(&dev->struct_mutex);
160 return -EINVAL; 154 return -EINVAL;
@@ -162,19 +156,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
162 156
163 mutex_unlock(&dev->struct_mutex); 157 mutex_unlock(&dev->struct_mutex);
164 158
165 request.handle = NULL; 159 request->handle = NULL;
166 list_for_each_entry(_entry, &dev->maplist, head) { 160 list_for_each_entry(_entry, &dev->maplist, head) {
167 if (_entry->map == map) { 161 if (_entry->map == map) {
168 request.handle = 162 request->handle =
169 (void *)(unsigned long)_entry->user_token; 163 (void *)(unsigned long)_entry->user_token;
170 break; 164 break;
171 } 165 }
172 } 166 }
173 if (request.handle == NULL) 167 if (request->handle == NULL)
174 return -EINVAL; 168 return -EINVAL;
175 169
176 if (copy_to_user(argp, &request, sizeof(request)))
177 return -EFAULT;
178 return 0; 170 return 0;
179} 171}
180 172
@@ -182,7 +174,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
182 * Set per-context SAREA. 174 * Set per-context SAREA.
183 * 175 *
184 * \param inode device inode. 176 * \param inode device inode.
185 * \param filp file pointer. 177 * \param file_priv DRM file private.
186 * \param cmd command. 178 * \param cmd command.
187 * \param arg user argument pointing to a drm_ctx_priv_map structure. 179 * \param arg user argument pointing to a drm_ctx_priv_map structure.
188 * \return zero on success or a negative number on failure. 180 * \return zero on success or a negative number on failure.
@@ -190,24 +182,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
190 * Searches the mapping specified in \p arg and update the entry in 182 * Searches the mapping specified in \p arg and update the entry in
191 * drm_device::ctx_idr with it. 183 * drm_device::ctx_idr with it.
192 */ 184 */
193int drm_setsareactx(struct inode *inode, struct file *filp, 185int drm_setsareactx(struct drm_device *dev, void *data,
194 unsigned int cmd, unsigned long arg) 186 struct drm_file *file_priv)
195{ 187{
196 struct drm_file *priv = filp->private_data; 188 struct drm_ctx_priv_map *request = data;
197 struct drm_device *dev = priv->head->dev;
198 struct drm_ctx_priv_map request;
199 struct drm_map *map = NULL; 189 struct drm_map *map = NULL;
200 struct drm_map_list *r_list = NULL; 190 struct drm_map_list *r_list = NULL;
201 191
202 if (copy_from_user(&request,
203 (struct drm_ctx_priv_map __user *) arg,
204 sizeof(request)))
205 return -EFAULT;
206
207 mutex_lock(&dev->struct_mutex); 192 mutex_lock(&dev->struct_mutex);
208 list_for_each_entry(r_list, &dev->maplist, head) { 193 list_for_each_entry(r_list, &dev->maplist, head) {
209 if (r_list->map 194 if (r_list->map
210 && r_list->user_token == (unsigned long)request.handle) 195 && r_list->user_token == (unsigned long) request->handle)
211 goto found; 196 goto found;
212 } 197 }
213 bad: 198 bad:
@@ -219,10 +204,11 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
219 if (!map) 204 if (!map)
220 goto bad; 205 goto bad;
221 206
222 if (IS_ERR(idr_replace(&dev->ctx_idr, map, request.ctx_id))) 207 if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
223 goto bad; 208 goto bad;
224 209
225 mutex_unlock(&dev->struct_mutex); 210 mutex_unlock(&dev->struct_mutex);
211
226 return 0; 212 return 0;
227} 213}
228 214
@@ -292,34 +278,28 @@ static int drm_context_switch_complete(struct drm_device * dev, int new)
292 * Reserve contexts. 278 * Reserve contexts.
293 * 279 *
294 * \param inode device inode. 280 * \param inode device inode.
295 * \param filp file pointer. 281 * \param file_priv DRM file private.
296 * \param cmd command. 282 * \param cmd command.
297 * \param arg user argument pointing to a drm_ctx_res structure. 283 * \param arg user argument pointing to a drm_ctx_res structure.
298 * \return zero on success or a negative number on failure. 284 * \return zero on success or a negative number on failure.
299 */ 285 */
300int drm_resctx(struct inode *inode, struct file *filp, 286int drm_resctx(struct drm_device *dev, void *data,
301 unsigned int cmd, unsigned long arg) 287 struct drm_file *file_priv)
302{ 288{
303 struct drm_ctx_res res; 289 struct drm_ctx_res *res = data;
304 struct drm_ctx_res __user *argp = (void __user *)arg;
305 struct drm_ctx ctx; 290 struct drm_ctx ctx;
306 int i; 291 int i;
307 292
308 if (copy_from_user(&res, argp, sizeof(res))) 293 if (res->count >= DRM_RESERVED_CONTEXTS) {
309 return -EFAULT;
310
311 if (res.count >= DRM_RESERVED_CONTEXTS) {
312 memset(&ctx, 0, sizeof(ctx)); 294 memset(&ctx, 0, sizeof(ctx));
313 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { 295 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
314 ctx.handle = i; 296 ctx.handle = i;
315 if (copy_to_user(&res.contexts[i], &ctx, sizeof(ctx))) 297 if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
316 return -EFAULT; 298 return -EFAULT;
317 } 299 }
318 } 300 }
319 res.count = DRM_RESERVED_CONTEXTS; 301 res->count = DRM_RESERVED_CONTEXTS;
320 302
321 if (copy_to_user(argp, &res, sizeof(res)))
322 return -EFAULT;
323 return 0; 303 return 0;
324} 304}
325 305
@@ -327,40 +307,34 @@ int drm_resctx(struct inode *inode, struct file *filp,
327 * Add context. 307 * Add context.
328 * 308 *
329 * \param inode device inode. 309 * \param inode device inode.
330 * \param filp file pointer. 310 * \param file_priv DRM file private.
331 * \param cmd command. 311 * \param cmd command.
332 * \param arg user argument pointing to a drm_ctx structure. 312 * \param arg user argument pointing to a drm_ctx structure.
333 * \return zero on success or a negative number on failure. 313 * \return zero on success or a negative number on failure.
334 * 314 *
335 * Get a new handle for the context and copy to userspace. 315 * Get a new handle for the context and copy to userspace.
336 */ 316 */
337int drm_addctx(struct inode *inode, struct file *filp, 317int drm_addctx(struct drm_device *dev, void *data,
338 unsigned int cmd, unsigned long arg) 318 struct drm_file *file_priv)
339{ 319{
340 struct drm_file *priv = filp->private_data;
341 struct drm_device *dev = priv->head->dev;
342 struct drm_ctx_list *ctx_entry; 320 struct drm_ctx_list *ctx_entry;
343 struct drm_ctx __user *argp = (void __user *)arg; 321 struct drm_ctx *ctx = data;
344 struct drm_ctx ctx;
345
346 if (copy_from_user(&ctx, argp, sizeof(ctx)))
347 return -EFAULT;
348 322
349 ctx.handle = drm_ctxbitmap_next(dev); 323 ctx->handle = drm_ctxbitmap_next(dev);
350 if (ctx.handle == DRM_KERNEL_CONTEXT) { 324 if (ctx->handle == DRM_KERNEL_CONTEXT) {
351 /* Skip kernel's context and get a new one. */ 325 /* Skip kernel's context and get a new one. */
352 ctx.handle = drm_ctxbitmap_next(dev); 326 ctx->handle = drm_ctxbitmap_next(dev);
353 } 327 }
354 DRM_DEBUG("%d\n", ctx.handle); 328 DRM_DEBUG("%d\n", ctx->handle);
355 if (ctx.handle == -1) { 329 if (ctx->handle == -1) {
356 DRM_DEBUG("Not enough free contexts.\n"); 330 DRM_DEBUG("Not enough free contexts.\n");
357 /* Should this return -EBUSY instead? */ 331 /* Should this return -EBUSY instead? */
358 return -ENOMEM; 332 return -ENOMEM;
359 } 333 }
360 334
361 if (ctx.handle != DRM_KERNEL_CONTEXT) { 335 if (ctx->handle != DRM_KERNEL_CONTEXT) {
362 if (dev->driver->context_ctor) 336 if (dev->driver->context_ctor)
363 if (!dev->driver->context_ctor(dev, ctx.handle)) { 337 if (!dev->driver->context_ctor(dev, ctx->handle)) {
364 DRM_DEBUG("Running out of ctxs or memory.\n"); 338 DRM_DEBUG("Running out of ctxs or memory.\n");
365 return -ENOMEM; 339 return -ENOMEM;
366 } 340 }
@@ -373,21 +347,18 @@ int drm_addctx(struct inode *inode, struct file *filp,
373 } 347 }
374 348
375 INIT_LIST_HEAD(&ctx_entry->head); 349 INIT_LIST_HEAD(&ctx_entry->head);
376 ctx_entry->handle = ctx.handle; 350 ctx_entry->handle = ctx->handle;
377 ctx_entry->tag = priv; 351 ctx_entry->tag = file_priv;
378 352
379 mutex_lock(&dev->ctxlist_mutex); 353 mutex_lock(&dev->ctxlist_mutex);
380 list_add(&ctx_entry->head, &dev->ctxlist); 354 list_add(&ctx_entry->head, &dev->ctxlist);
381 ++dev->ctx_count; 355 ++dev->ctx_count;
382 mutex_unlock(&dev->ctxlist_mutex); 356 mutex_unlock(&dev->ctxlist_mutex);
383 357
384 if (copy_to_user(argp, &ctx, sizeof(ctx)))
385 return -EFAULT;
386 return 0; 358 return 0;
387} 359}
388 360
389int drm_modctx(struct inode *inode, struct file *filp, 361int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
390 unsigned int cmd, unsigned long arg)
391{ 362{
392 /* This does nothing */ 363 /* This does nothing */
393 return 0; 364 return 0;
@@ -397,25 +368,18 @@ int drm_modctx(struct inode *inode, struct file *filp,
  * Get context.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg user argument pointing to a drm_ctx structure.
  * \return zero on success or a negative number on failure.
  */
-int drm_getctx(struct inode *inode, struct file *filp,
-	       unsigned int cmd, unsigned long arg)
+int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
-	struct drm_ctx __user *argp = (void __user *)arg;
-	struct drm_ctx ctx;
-
-	if (copy_from_user(&ctx, argp, sizeof(ctx)))
-		return -EFAULT;
+	struct drm_ctx *ctx = data;
 
 	/* This is 0, because we don't handle any context flags */
-	ctx.flags = 0;
+	ctx->flags = 0;
 
-	if (copy_to_user(argp, &ctx, sizeof(ctx)))
-		return -EFAULT;
 	return 0;
 }
 
@@ -423,50 +387,40 @@ int drm_getctx(struct inode *inode, struct file *filp,
  * Switch context.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg user argument pointing to a drm_ctx structure.
  * \return zero on success or a negative number on failure.
  *
  * Calls context_switch().
  */
-int drm_switchctx(struct inode *inode, struct file *filp,
-		  unsigned int cmd, unsigned long arg)
+int drm_switchctx(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
-	struct drm_ctx ctx;
+	struct drm_ctx *ctx = data;
 
-	if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx)))
-		return -EFAULT;
-
-	DRM_DEBUG("%d\n", ctx.handle);
-	return drm_context_switch(dev, dev->last_context, ctx.handle);
+	DRM_DEBUG("%d\n", ctx->handle);
+	return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
 
 /**
  * New context.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg user argument pointing to a drm_ctx structure.
  * \return zero on success or a negative number on failure.
  *
  * Calls context_switch_complete().
  */
-int drm_newctx(struct inode *inode, struct file *filp,
-	       unsigned int cmd, unsigned long arg)
+int drm_newctx(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->head->dev;
-	struct drm_ctx ctx;
+	struct drm_ctx *ctx = data;
 
-	if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx)))
-		return -EFAULT;
-
-	DRM_DEBUG("%d\n", ctx.handle);
-	drm_context_switch_complete(dev, ctx.handle);
+	DRM_DEBUG("%d\n", ctx->handle);
+	drm_context_switch_complete(dev, ctx->handle);
 
 	return 0;
 }
@@ -475,31 +429,26 @@ int drm_newctx(struct inode *inode, struct file *filp,
475 * Remove context. 429 * Remove context.
476 * 430 *
477 * \param inode device inode. 431 * \param inode device inode.
478 * \param filp file pointer. 432 * \param file_priv DRM file private.
479 * \param cmd command. 433 * \param cmd command.
480 * \param arg user argument pointing to a drm_ctx structure. 434 * \param arg user argument pointing to a drm_ctx structure.
481 * \return zero on success or a negative number on failure. 435 * \return zero on success or a negative number on failure.
482 * 436 *
483 * If not the special kernel context, calls ctxbitmap_free() to free the specified context. 437 * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
484 */ 438 */
485int drm_rmctx(struct inode *inode, struct file *filp, 439int drm_rmctx(struct drm_device *dev, void *data,
486 unsigned int cmd, unsigned long arg) 440 struct drm_file *file_priv)
487{ 441{
488 struct drm_file *priv = filp->private_data; 442 struct drm_ctx *ctx = data;
489 struct drm_device *dev = priv->head->dev;
490 struct drm_ctx ctx;
491
492 if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx)))
493 return -EFAULT;
494 443
495 DRM_DEBUG("%d\n", ctx.handle); 444 DRM_DEBUG("%d\n", ctx->handle);
496 if (ctx.handle == DRM_KERNEL_CONTEXT + 1) { 445 if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
497 priv->remove_auth_on_close = 1; 446 file_priv->remove_auth_on_close = 1;
498 } 447 }
499 if (ctx.handle != DRM_KERNEL_CONTEXT) { 448 if (ctx->handle != DRM_KERNEL_CONTEXT) {
500 if (dev->driver->context_dtor) 449 if (dev->driver->context_dtor)
501 dev->driver->context_dtor(dev, ctx.handle); 450 dev->driver->context_dtor(dev, ctx->handle);
502 drm_ctxbitmap_free(dev, ctx.handle); 451 drm_ctxbitmap_free(dev, ctx->handle);
503 } 452 }
504 453
505 mutex_lock(&dev->ctxlist_mutex); 454 mutex_lock(&dev->ctxlist_mutex);
@@ -507,7 +456,7 @@ int drm_rmctx(struct inode *inode, struct file *filp,
507 struct drm_ctx_list *pos, *n; 456 struct drm_ctx_list *pos, *n;
508 457
509 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { 458 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
510 if (pos->handle == ctx.handle) { 459 if (pos->handle == ctx->handle) {
511 list_del(&pos->head); 460 list_del(&pos->head);
512 drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); 461 drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
513 --dev->ctx_count; 462 --dev->ctx_count;
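The context ioctls keep the old allocation rules: handle 0 is DRM_KERNEL_CONTEXT and drm_addctx() skips it by asking for another handle. A simplified stand-in for that allocation step (the real drm_ctxbitmap_next() is idr-backed, not this bitmap):

/* Simplified stand-in for the handle allocation drm_addctx() relies on:
 * handle 0 is the kernel context and is never handed to a client. */
#include <stdio.h>

#define KERNEL_CONTEXT 0
#define MAX_CONTEXTS   32

static unsigned int ctx_bitmap;		/* bit n set => handle n in use */

static int ctx_next(void)
{
	for (int i = 0; i < MAX_CONTEXTS; i++) {
		if (!(ctx_bitmap & (1u << i))) {
			ctx_bitmap |= 1u << i;
			return i;
		}
	}
	return -1;			/* out of contexts */
}

static int addctx(void)
{
	int handle = ctx_next();

	if (handle == KERNEL_CONTEXT)	/* skip it and take the next one */
		handle = ctx_next();
	return handle;			/* -1 maps to -ENOMEM in the kernel */
}

int main(void)
{
	printf("%d %d\n", addctx(), addctx());	/* prints "1 2"; 0 stays reserved */
	return 0;
}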
diff --git a/drivers/char/drm/drm_dma.c b/drivers/char/drm/drm_dma.c
index 802fbdbfe1b3..7a8e2fba4678 100644
--- a/drivers/char/drm/drm_dma.c
+++ b/drivers/char/drm/drm_dma.c
@@ -136,7 +136,7 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
136 136
137 buf->waiting = 0; 137 buf->waiting = 0;
138 buf->pending = 0; 138 buf->pending = 0;
139 buf->filp = NULL; 139 buf->file_priv = NULL;
140 buf->used = 0; 140 buf->used = 0;
141 141
142 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) 142 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
@@ -148,11 +148,12 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
148/** 148/**
149 * Reclaim the buffers. 149 * Reclaim the buffers.
150 * 150 *
151 * \param filp file pointer. 151 * \param file_priv DRM file private.
152 * 152 *
153 * Frees each buffer associated with \p filp not already on the hardware. 153 * Frees each buffer associated with \p file_priv not already on the hardware.
154 */ 154 */
155void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp) 155void drm_core_reclaim_buffers(struct drm_device *dev,
156 struct drm_file *file_priv)
156{ 157{
157 struct drm_device_dma *dma = dev->dma; 158 struct drm_device_dma *dma = dev->dma;
158 int i; 159 int i;
@@ -160,7 +161,7 @@ void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp)
160 if (!dma) 161 if (!dma)
161 return; 162 return;
162 for (i = 0; i < dma->buf_count; i++) { 163 for (i = 0; i < dma->buf_count; i++) {
163 if (dma->buflist[i]->filp == filp) { 164 if (dma->buflist[i]->file_priv == file_priv) {
164 switch (dma->buflist[i]->list) { 165 switch (dma->buflist[i]->list) {
165 case DRM_LIST_NONE: 166 case DRM_LIST_NONE:
166 drm_free_buffer(dev, dma->buflist[i]); 167 drm_free_buffer(dev, dma->buflist[i]);
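drm_core_reclaim_buffers() now matches buffers against the departing drm_file instead of the struct file. A compact model of that walk, with stand-in types:

/* Sketch of the reclaim walk: on close, every buffer still tagged with the
 * departing drm_file is released.  Illustrative types only. */
#include <stddef.h>
#include <stdio.h>

struct drm_file { int id; };
struct drm_buf  { const struct drm_file *file_priv; };

static void reclaim_buffers(struct drm_buf *bufs, int nbufs,
			    const struct drm_file *file_priv)
{
	for (int i = 0; i < nbufs; i++)
		if (bufs[i].file_priv == file_priv)
			bufs[i].file_priv = NULL;	/* drm_free_buffer() analogue */
}

int main(void)
{
	struct drm_file f = { 7 };
	struct drm_buf bufs[3] = { { &f }, { NULL }, { &f } };

	reclaim_buffers(bufs, 3, &f);
	printf("%p %p\n", (void *)bufs[0].file_priv, (void *)bufs[2].file_priv);
	return 0;
}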
diff --git a/drivers/char/drm/drm_drawable.c b/drivers/char/drm/drm_drawable.c
index d6cdba5644e2..1839c57663c5 100644
--- a/drivers/char/drm/drm_drawable.c
+++ b/drivers/char/drm/drm_drawable.c
@@ -40,11 +40,10 @@
 /**
  * Allocate drawable ID and memory to store information about it.
  */
-int drm_adddraw(DRM_IOCTL_ARGS)
+int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
-	DRM_DEVICE;
 	unsigned long irqflags;
-	struct drm_draw draw;
+	struct drm_draw *draw = data;
 	int new_id = 0;
 	int ret;
 
@@ -63,11 +62,9 @@ again:
 
 	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
 
-	draw.handle = new_id;
+	draw->handle = new_id;
 
-	DRM_DEBUG("%d\n", draw.handle);
-
-	DRM_COPY_TO_USER_IOCTL((struct drm_draw __user *)data, draw, sizeof(draw));
+	DRM_DEBUG("%d\n", draw->handle);
 
 	return 0;
 }
@@ -75,72 +72,64 @@ again:
75/** 72/**
76 * Free drawable ID and memory to store information about it. 73 * Free drawable ID and memory to store information about it.
77 */ 74 */
78int drm_rmdraw(DRM_IOCTL_ARGS) 75int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
79{ 76{
80 DRM_DEVICE; 77 struct drm_draw *draw = data;
81 struct drm_draw draw;
82 unsigned long irqflags; 78 unsigned long irqflags;
83 79
84 DRM_COPY_FROM_USER_IOCTL(draw, (struct drm_draw __user *) data,
85 sizeof(draw));
86
87 spin_lock_irqsave(&dev->drw_lock, irqflags); 80 spin_lock_irqsave(&dev->drw_lock, irqflags);
88 81
89 drm_free(drm_get_drawable_info(dev, draw.handle), 82 drm_free(drm_get_drawable_info(dev, draw->handle),
90 sizeof(struct drm_drawable_info), DRM_MEM_BUFS); 83 sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
91 84
92 idr_remove(&dev->drw_idr, draw.handle); 85 idr_remove(&dev->drw_idr, draw->handle);
93 86
94 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 87 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
95 DRM_DEBUG("%d\n", draw.handle); 88 DRM_DEBUG("%d\n", draw->handle);
96 return 0; 89 return 0;
97} 90}
98 91
99int drm_update_drawable_info(DRM_IOCTL_ARGS) 92int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
100{ 93{
101 DRM_DEVICE; 94 struct drm_update_draw *update = data;
102 struct drm_update_draw update;
103 unsigned long irqflags; 95 unsigned long irqflags;
104 struct drm_clip_rect *rects; 96 struct drm_clip_rect *rects;
105 struct drm_drawable_info *info; 97 struct drm_drawable_info *info;
106 int err; 98 int err;
107 99
108 DRM_COPY_FROM_USER_IOCTL(update, (struct drm_update_draw __user *) data, 100 info = idr_find(&dev->drw_idr, update->handle);
109 sizeof(update));
110
111 info = idr_find(&dev->drw_idr, update.handle);
112 if (!info) { 101 if (!info) {
113 info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); 102 info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
114 if (!info) 103 if (!info)
115 return -ENOMEM; 104 return -ENOMEM;
116 if (IS_ERR(idr_replace(&dev->drw_idr, info, update.handle))) { 105 if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
117 DRM_ERROR("No such drawable %d\n", update.handle); 106 DRM_ERROR("No such drawable %d\n", update->handle);
118 drm_free(info, sizeof(*info), DRM_MEM_BUFS); 107 drm_free(info, sizeof(*info), DRM_MEM_BUFS);
119 return -EINVAL; 108 return -EINVAL;
120 } 109 }
121 } 110 }
122 111
123 switch (update.type) { 112 switch (update->type) {
124 case DRM_DRAWABLE_CLIPRECTS: 113 case DRM_DRAWABLE_CLIPRECTS:
125 if (update.num != info->num_rects) { 114 if (update->num != info->num_rects) {
126 rects = drm_alloc(update.num * sizeof(struct drm_clip_rect), 115 rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
127 DRM_MEM_BUFS); 116 DRM_MEM_BUFS);
128 } else 117 } else
129 rects = info->rects; 118 rects = info->rects;
130 119
131 if (update.num && !rects) { 120 if (update->num && !rects) {
132 DRM_ERROR("Failed to allocate cliprect memory\n"); 121 DRM_ERROR("Failed to allocate cliprect memory\n");
133 err = DRM_ERR(ENOMEM); 122 err = -ENOMEM;
134 goto error; 123 goto error;
135 } 124 }
136 125
137 if (update.num && DRM_COPY_FROM_USER(rects, 126 if (update->num && DRM_COPY_FROM_USER(rects,
138 (struct drm_clip_rect __user *) 127 (struct drm_clip_rect __user *)
139 (unsigned long)update.data, 128 (unsigned long)update->data,
140 update.num * 129 update->num *
141 sizeof(*rects))) { 130 sizeof(*rects))) {
142 DRM_ERROR("Failed to copy cliprects from userspace\n"); 131 DRM_ERROR("Failed to copy cliprects from userspace\n");
143 err = DRM_ERR(EFAULT); 132 err = -EFAULT;
144 goto error; 133 goto error;
145 } 134 }
146 135
@@ -152,23 +141,23 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS)
152 } 141 }
153 142
154 info->rects = rects; 143 info->rects = rects;
155 info->num_rects = update.num; 144 info->num_rects = update->num;
156 145
157 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 146 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
158 147
159 DRM_DEBUG("Updated %d cliprects for drawable %d\n", 148 DRM_DEBUG("Updated %d cliprects for drawable %d\n",
160 info->num_rects, update.handle); 149 info->num_rects, update->handle);
161 break; 150 break;
162 default: 151 default:
163 DRM_ERROR("Invalid update type %d\n", update.type); 152 DRM_ERROR("Invalid update type %d\n", update->type);
164 return DRM_ERR(EINVAL); 153 return -EINVAL;
165 } 154 }
166 155
167 return 0; 156 return 0;
168 157
169error: 158error:
170 if (rects != info->rects) 159 if (rects != info->rects)
171 drm_free(rects, update.num * sizeof(struct drm_clip_rect), 160 drm_free(rects, update->num * sizeof(struct drm_clip_rect),
172 DRM_MEM_BUFS); 161 DRM_MEM_BUFS);
173 162
174 return err; 163 return err;
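drm_update_drawable_info() allocates a fresh cliprect array only when the count changes and then copies the rectangles in from user space. A user-space approximation of that update step, with memcpy() standing in for DRM_COPY_FROM_USER() (illustrative only):

/* User-space model of the cliprect update path: reallocate only when the
 * count changes; memcpy() stands in for the copy from user space. */
#include <stdlib.h>
#include <string.h>

struct clip_rect { short x1, y1, x2, y2; };
struct drawable  { struct clip_rect *rects; unsigned int num_rects; };

static int update_cliprects(struct drawable *d, const struct clip_rect *src,
			    unsigned int num)
{
	struct clip_rect *rects;

	if (num != d->num_rects)
		rects = malloc(num * sizeof(*rects));
	else
		rects = d->rects;
	if (num && !rects)
		return -1;				/* -ENOMEM */

	if (num)
		memcpy(rects, src, num * sizeof(*rects));

	if (rects != d->rects)
		free(d->rects);				/* drop the old array */
	d->rects = rects;
	d->num_rects = num;
	return 0;
}

int main(void)
{
	struct drawable d = { NULL, 0 };
	struct clip_rect r[2] = { { 0, 0, 10, 10 }, { 5, 5, 20, 20 } };

	return update_cliprects(&d, r, 2);
}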
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 19994cd865de..72668b15e5ce 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -49,73 +49,74 @@
 #include "drmP.h"
 #include "drm_core.h"
 
-static int drm_version(struct inode *inode, struct file *filp,
-		       unsigned int cmd, unsigned long arg);
+static int drm_version(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
 
 /** Ioctl table */
-static drm_ioctl_desc_t drm_ioctls[] = {
-	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH},
-
-	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH},
-
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH},
-	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH},
+static struct drm_ioctl_desc drm_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
83 83 DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
84 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 84
85 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 85 DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
86 86 DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
87 [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH}, 87
88 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH}, 88 DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
89 89 DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
90 [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH}, 90
91 91 DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
92 [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 92
93 [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 93 DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
94 [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH}, 94 DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
95 [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH}, 95 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
96 [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH}, 96 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
97 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
97 /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ 98 /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
98 [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH}, 99 DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
99 100
100 [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 101 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
101 102
102#if __OS_HAS_AGP 103#if __OS_HAS_AGP
103 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 104 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
104 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 105 DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
105 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 106 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
106 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH}, 107 DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
107 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 108 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
108 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 109 DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
109 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 110 DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
110 [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 111 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
111#endif 112#endif
112 113
113 [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 114 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
114 [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 115 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
115 116
116 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, 117 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
117 118
118 [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 119 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
119}; 120};
120 121
121#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 122#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
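
The table rewrite replaces the open-coded designated initializers with a DRM_IOCTL_DEF() helper that packs the ioctl command, the handler and its permission flags into one descriptor. The helper and struct drm_ioctl_desc are defined in drmP.h, outside this hunk, so the following is only an approximate sketch of their shape:

    /* Approximate shape; the authoritative definitions live in drmP.h. */
    struct drm_ioctl_desc {
            unsigned int cmd;       /* the ioctl command this entry serves */
            drm_ioctl_t *func;      /* int (*)(struct drm_device *, void *, struct drm_file *) */
            int flags;              /* DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY */
    };

    #define DRM_IOCTL_DEF(ioctl, _func, _flags)                      \
            [DRM_IOCTL_NR(ioctl)] = { .cmd = ioctl, .func = _func,   \
                                      .flags = _flags }

Two non-mechanical details ride along in this hunk: DRM_IOCTL_RM_CTX picks up DRM_MASTER, and DRM_IOCTL_SG_ALLOC now points at drm_sg_alloc_ioctl, matching the split in drm_scatter.c further down.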
@@ -224,7 +225,7 @@ int drm_lastclose(struct drm_device * dev)
224 225
225 if (dev->lock.hw_lock) { 226 if (dev->lock.hw_lock) {
226 dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ 227 dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
227 dev->lock.filp = NULL; 228 dev->lock.file_priv = NULL;
228 wake_up_interruptible(&dev->lock.lock_queue); 229 wake_up_interruptible(&dev->lock.lock_queue);
229 } 230 }
230 mutex_unlock(&dev->struct_mutex); 231 mutex_unlock(&dev->struct_mutex);
@@ -418,27 +419,19 @@ module_exit(drm_core_exit);
418 * 419 *
419 * Fills in the version information in \p arg. 420 * Fills in the version information in \p arg.
420 */ 421 */
421static int drm_version(struct inode *inode, struct file *filp, 422static int drm_version(struct drm_device *dev, void *data,
422 unsigned int cmd, unsigned long arg) 423 struct drm_file *file_priv)
423{ 424{
424 struct drm_file *priv = filp->private_data; 425 struct drm_version *version = data;
425 struct drm_device *dev = priv->head->dev;
426 struct drm_version __user *argp = (void __user *)arg;
427 struct drm_version version;
428 int len; 426 int len;
429 427
430 if (copy_from_user(&version, argp, sizeof(version))) 428 version->version_major = dev->driver->major;
431 return -EFAULT; 429 version->version_minor = dev->driver->minor;
430 version->version_patchlevel = dev->driver->patchlevel;
431 DRM_COPY(version->name, dev->driver->name);
432 DRM_COPY(version->date, dev->driver->date);
433 DRM_COPY(version->desc, dev->driver->desc);
432 434
433 version.version_major = dev->driver->major;
434 version.version_minor = dev->driver->minor;
435 version.version_patchlevel = dev->driver->patchlevel;
436 DRM_COPY(version.name, dev->driver->name);
437 DRM_COPY(version.date, dev->driver->date);
438 DRM_COPY(version.desc, dev->driver->desc);
439
440 if (copy_to_user(argp, &version, sizeof(version)))
441 return -EFAULT;
442 return 0; 435 return 0;
443} 436}
444 437
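
drm_version() is an easy converted handler to exercise from user space, and it shows both directions of the new generic copy: the structure goes in with whatever buffer lengths the caller supplies and comes back out with the lengths and version numbers filled in, so the usual pattern is one call for sizes and a second for the strings. A small test program; the <drm/drm.h> header path (libdrm) and the /dev/dri/card0 node are assumptions, and error handling is minimal:

    #include <drm/drm.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct drm_version v;
            int fd = open("/dev/dri/card0", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&v, 0, sizeof(v));               /* pass 1: lengths only */
            if (ioctl(fd, DRM_IOCTL_VERSION, &v))
                    return 1;

            v.name = malloc(v.name_len + 1);
            v.date = malloc(v.date_len + 1);
            v.desc = malloc(v.desc_len + 1);
            if (!v.name || !v.date || !v.desc)
                    return 1;

            if (ioctl(fd, DRM_IOCTL_VERSION, &v))   /* pass 2: copy the strings */
                    return 1;
            v.name[v.name_len] = v.date[v.date_len] = v.desc[v.desc_len] = '\0';

            printf("%s %d.%d.%d (%s)\n", v.name, v.version_major,
                   v.version_minor, v.version_patchlevel, v.date);
            close(fd);
            return 0;
    }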
@@ -446,7 +439,7 @@ static int drm_version(struct inode *inode, struct file *filp,
446 * Called whenever a process performs an ioctl on /dev/drm. 439 * Called whenever a process performs an ioctl on /dev/drm.
447 * 440 *
448 * \param inode device inode. 441 * \param inode device inode.
449 * \param filp file pointer. 442 * \param file_priv DRM file private.
450 * \param cmd command. 443 * \param cmd command.
451 * \param arg user argument. 444 * \param arg user argument.
452 * \return zero on success or negative number on failure. 445 * \return zero on success or negative number on failure.
@@ -457,21 +450,22 @@ static int drm_version(struct inode *inode, struct file *filp,
457int drm_ioctl(struct inode *inode, struct file *filp, 450int drm_ioctl(struct inode *inode, struct file *filp,
458 unsigned int cmd, unsigned long arg) 451 unsigned int cmd, unsigned long arg)
459{ 452{
460 struct drm_file *priv = filp->private_data; 453 struct drm_file *file_priv = filp->private_data;
461 struct drm_device *dev = priv->head->dev; 454 struct drm_device *dev = file_priv->head->dev;
462 drm_ioctl_desc_t *ioctl; 455 struct drm_ioctl_desc *ioctl;
463 drm_ioctl_t *func; 456 drm_ioctl_t *func;
464 unsigned int nr = DRM_IOCTL_NR(cmd); 457 unsigned int nr = DRM_IOCTL_NR(cmd);
465 int retcode = -EINVAL; 458 int retcode = -EINVAL;
459 char *kdata = NULL;
466 460
467 atomic_inc(&dev->ioctl_count); 461 atomic_inc(&dev->ioctl_count);
468 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); 462 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
469 ++priv->ioctl_count; 463 ++file_priv->ioctl_count;
470 464
471 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", 465 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
472 current->pid, cmd, nr, 466 current->pid, cmd, nr,
473 (long)old_encode_dev(priv->head->device), 467 (long)old_encode_dev(file_priv->head->device),
474 priv->authenticated); 468 file_priv->authenticated);
475 469
476 if ((nr >= DRM_CORE_IOCTL_COUNT) && 470 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
477 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) 471 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
@@ -489,18 +483,40 @@ int drm_ioctl(struct inode *inode, struct file *filp,
489 if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) 483 if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
490 func = dev->driver->dma_ioctl; 484 func = dev->driver->dma_ioctl;
491 485
486
492 if (!func) { 487 if (!func) {
493 DRM_DEBUG("no function\n"); 488 DRM_DEBUG("no function\n");
494 retcode = -EINVAL; 489 retcode = -EINVAL;
495 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || 490 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
496 ((ioctl->flags & DRM_AUTH) && !priv->authenticated) || 491 ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
497 ((ioctl->flags & DRM_MASTER) && !priv->master)) { 492 ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
498 retcode = -EACCES; 493 retcode = -EACCES;
499 } else { 494 } else {
500 retcode = func(inode, filp, cmd, arg); 495 if (cmd & (IOC_IN | IOC_OUT)) {
496 kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
497 if (!kdata)
498 return -ENOMEM;
499 }
500
501 if (cmd & IOC_IN) {
502 if (copy_from_user(kdata, (void __user *)arg,
503 _IOC_SIZE(cmd)) != 0) {
504 retcode = -EACCES;
505 goto err_i1;
506 }
507 }
508 retcode = func(dev, kdata, file_priv);
509
510 if (cmd & IOC_OUT) {
511 if (copy_to_user((void __user *)arg, kdata,
512 _IOC_SIZE(cmd)) != 0)
513 retcode = -EACCES;
514 }
501 } 515 }
502 516
503 err_i1: 517 err_i1:
518 if (kdata)
519 kfree(kdata);
504 atomic_dec(&dev->ioctl_count); 520 atomic_dec(&dev->ioctl_count);
505 if (retcode) 521 if (retcode)
506 DRM_DEBUG("ret = %x\n", retcode); 522 DRM_DEBUG("ret = %x\n", retcode);
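
This marshalling block is the heart of the patch: drm_ioctl() now reads the transfer size and direction that were always encoded in the ioctl command, allocates a kernel buffer of _IOC_SIZE(cmd) bytes, copies the user data in for IOC_IN commands, calls the handler with the kernel copy, and copies the result back for IOC_OUT commands, which is what lets the per-handler copy code elsewhere go away. (One quirk worth noting: the failed-copy paths here return -EACCES rather than the more customary -EFAULT.) The encoding it relies on can be inspected entirely from user space, since the same _IOC_* macros are exported; demo_args and DEMO_IOCTL below are made up for the demonstration:

    #include <linux/ioctl.h>
    #include <stdio.h>

    struct demo_args { int handle; int value; };
    #define DEMO_IOCTL _IOWR('d', 0x42, struct demo_args)

    int main(void)
    {
            unsigned int cmd = DEMO_IOCTL;

            printf("nr=0x%02x size=%u bytes\n", _IOC_NR(cmd), _IOC_SIZE(cmd));
            printf("copy in  (user -> kernel): %s\n",
                   (_IOC_DIR(cmd) & _IOC_WRITE) ? "yes" : "no");
            printf("copy out (kernel -> user): %s\n",
                   (_IOC_DIR(cmd) & _IOC_READ) ? "yes" : "no");
            return 0;
    }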
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 7bc51bac450d..f383fc37190c 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -242,6 +242,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
242 242
243 memset(priv, 0, sizeof(*priv)); 243 memset(priv, 0, sizeof(*priv));
244 filp->private_data = priv; 244 filp->private_data = priv;
245 priv->filp = filp;
245 priv->uid = current->euid; 246 priv->uid = current->euid;
246 priv->pid = current->pid; 247 priv->pid = current->pid;
247 priv->minor = minor; 248 priv->minor = minor;
@@ -312,7 +313,7 @@ EXPORT_SYMBOL(drm_fasync);
312 * Release file. 313 * Release file.
313 * 314 *
314 * \param inode device inode 315 * \param inode device inode
315 * \param filp file pointer. 316 * \param file_priv DRM file private.
316 * \return zero on success or a negative number on failure. 317 * \return zero on success or a negative number on failure.
317 * 318 *
318 * If the hardware lock is held then free it, and take it again for the kernel 319 * If the hardware lock is held then free it, and take it again for the kernel
@@ -322,29 +323,28 @@ EXPORT_SYMBOL(drm_fasync);
322 */ 323 */
323int drm_release(struct inode *inode, struct file *filp) 324int drm_release(struct inode *inode, struct file *filp)
324{ 325{
325 struct drm_file *priv = filp->private_data; 326 struct drm_file *file_priv = filp->private_data;
326 struct drm_device *dev; 327 struct drm_device *dev = file_priv->head->dev;
327 int retcode = 0; 328 int retcode = 0;
328 329
329 lock_kernel(); 330 lock_kernel();
330 dev = priv->head->dev;
331 331
332 DRM_DEBUG("open_count = %d\n", dev->open_count); 332 DRM_DEBUG("open_count = %d\n", dev->open_count);
333 333
334 if (dev->driver->preclose) 334 if (dev->driver->preclose)
335 dev->driver->preclose(dev, filp); 335 dev->driver->preclose(dev, file_priv);
336 336
337 /* ======================================================== 337 /* ========================================================
338 * Begin inline drm_release 338 * Begin inline drm_release
339 */ 339 */
340 340
341 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", 341 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
342 current->pid, (long)old_encode_dev(priv->head->device), 342 current->pid, (long)old_encode_dev(file_priv->head->device),
343 dev->open_count); 343 dev->open_count);
344 344
345 if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { 345 if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
346 if (drm_i_have_hw_lock(filp)) { 346 if (drm_i_have_hw_lock(dev, file_priv)) {
347 dev->driver->reclaim_buffers_locked(dev, filp); 347 dev->driver->reclaim_buffers_locked(dev, file_priv);
348 } else { 348 } else {
349 unsigned long _end=jiffies + 3*DRM_HZ; 349 unsigned long _end=jiffies + 3*DRM_HZ;
350 int locked = 0; 350 int locked = 0;
@@ -370,7 +370,7 @@ int drm_release(struct inode *inode, struct file *filp)
370 "\tI will go on reclaiming the buffers anyway.\n"); 370 "\tI will go on reclaiming the buffers anyway.\n");
371 } 371 }
372 372
373 dev->driver->reclaim_buffers_locked(dev, filp); 373 dev->driver->reclaim_buffers_locked(dev, file_priv);
374 drm_idlelock_release(&dev->lock); 374 drm_idlelock_release(&dev->lock);
375 } 375 }
376 } 376 }
@@ -378,12 +378,12 @@ int drm_release(struct inode *inode, struct file *filp)
378 if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) { 378 if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
379 379
380 drm_idlelock_take(&dev->lock); 380 drm_idlelock_take(&dev->lock);
381 dev->driver->reclaim_buffers_idlelocked(dev, filp); 381 dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
382 drm_idlelock_release(&dev->lock); 382 drm_idlelock_release(&dev->lock);
383 383
384 } 384 }
385 385
386 if (drm_i_have_hw_lock(filp)) { 386 if (drm_i_have_hw_lock(dev, file_priv)) {
387 DRM_DEBUG("File %p released, freeing lock for context %d\n", 387 DRM_DEBUG("File %p released, freeing lock for context %d\n",
388 filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); 388 filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
389 389
@@ -394,7 +394,7 @@ int drm_release(struct inode *inode, struct file *filp)
394 394
395 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && 395 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
396 !dev->driver->reclaim_buffers_locked) { 396 !dev->driver->reclaim_buffers_locked) {
397 dev->driver->reclaim_buffers(dev, filp); 397 dev->driver->reclaim_buffers(dev, file_priv);
398 } 398 }
399 399
400 drm_fasync(-1, filp, 0); 400 drm_fasync(-1, filp, 0);
@@ -404,7 +404,7 @@ int drm_release(struct inode *inode, struct file *filp)
404 struct drm_ctx_list *pos, *n; 404 struct drm_ctx_list *pos, *n;
405 405
406 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { 406 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
407 if (pos->tag == priv && 407 if (pos->tag == file_priv &&
408 pos->handle != DRM_KERNEL_CONTEXT) { 408 pos->handle != DRM_KERNEL_CONTEXT) {
409 if (dev->driver->context_dtor) 409 if (dev->driver->context_dtor)
410 dev->driver->context_dtor(dev, 410 dev->driver->context_dtor(dev,
@@ -421,18 +421,18 @@ int drm_release(struct inode *inode, struct file *filp)
421 mutex_unlock(&dev->ctxlist_mutex); 421 mutex_unlock(&dev->ctxlist_mutex);
422 422
423 mutex_lock(&dev->struct_mutex); 423 mutex_lock(&dev->struct_mutex);
424 if (priv->remove_auth_on_close == 1) { 424 if (file_priv->remove_auth_on_close == 1) {
425 struct drm_file *temp; 425 struct drm_file *temp;
426 426
427 list_for_each_entry(temp, &dev->filelist, lhead) 427 list_for_each_entry(temp, &dev->filelist, lhead)
428 temp->authenticated = 0; 428 temp->authenticated = 0;
429 } 429 }
430 list_del(&priv->lhead); 430 list_del(&file_priv->lhead);
431 mutex_unlock(&dev->struct_mutex); 431 mutex_unlock(&dev->struct_mutex);
432 432
433 if (dev->driver->postclose) 433 if (dev->driver->postclose)
434 dev->driver->postclose(dev, priv); 434 dev->driver->postclose(dev, file_priv);
435 drm_free(priv, sizeof(*priv), DRM_MEM_FILES); 435 drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES);
436 436
437 /* ======================================================== 437 /* ========================================================
438 * End inline drm_release 438 * End inline drm_release
diff --git a/drivers/char/drm/drm_ioc32.c b/drivers/char/drm/drm_ioc32.c
index 462f46f2049a..2286f3312c5c 100644
--- a/drivers/char/drm/drm_ioc32.c
+++ b/drivers/char/drm/drm_ioc32.c
@@ -1040,7 +1040,7 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
1040 * Called whenever a 32-bit process running under a 64-bit kernel 1040 * Called whenever a 32-bit process running under a 64-bit kernel
1041 * performs an ioctl on /dev/drm. 1041 * performs an ioctl on /dev/drm.
1042 * 1042 *
1043 * \param filp file pointer. 1043 * \param file_priv DRM file private.
1044 * \param cmd command. 1044 * \param cmd command.
1045 * \param arg user argument. 1045 * \param arg user argument.
1046 * \return zero on success or negative number on failure. 1046 * \return zero on success or negative number on failure.
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index b195e102e737..d9be14624526 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -42,30 +42,24 @@
42 * Get the bus id. 42 * Get the bus id.
43 * 43 *
44 * \param inode device inode. 44 * \param inode device inode.
45 * \param filp file pointer. 45 * \param file_priv DRM file private.
46 * \param cmd command. 46 * \param cmd command.
47 * \param arg user argument, pointing to a drm_unique structure. 47 * \param arg user argument, pointing to a drm_unique structure.
48 * \return zero on success or a negative number on failure. 48 * \return zero on success or a negative number on failure.
49 * 49 *
50 * Copies the bus id from drm_device::unique into user space. 50 * Copies the bus id from drm_device::unique into user space.
51 */ 51 */
52int drm_getunique(struct inode *inode, struct file *filp, 52int drm_getunique(struct drm_device *dev, void *data,
53 unsigned int cmd, unsigned long arg) 53 struct drm_file *file_priv)
54{ 54{
55 struct drm_file *priv = filp->private_data; 55 struct drm_unique *u = data;
56 struct drm_device *dev = priv->head->dev;
57 struct drm_unique __user *argp = (void __user *)arg;
58 struct drm_unique u;
59 56
60 if (copy_from_user(&u, argp, sizeof(u))) 57 if (u->unique_len >= dev->unique_len) {
61 return -EFAULT; 58 if (copy_to_user(u->unique, dev->unique, dev->unique_len))
62 if (u.unique_len >= dev->unique_len) {
63 if (copy_to_user(u.unique, dev->unique, dev->unique_len))
64 return -EFAULT; 59 return -EFAULT;
65 } 60 }
66 u.unique_len = dev->unique_len; 61 u->unique_len = dev->unique_len;
67 if (copy_to_user(argp, &u, sizeof(u))) 62
68 return -EFAULT;
69 return 0; 63 return 0;
70} 64}
71 65
@@ -73,7 +67,7 @@ int drm_getunique(struct inode *inode, struct file *filp,
73 * Set the bus id. 67 * Set the bus id.
74 * 68 *
75 * \param inode device inode. 69 * \param inode device inode.
76 * \param filp file pointer. 70 * \param file_priv DRM file private.
77 * \param cmd command. 71 * \param cmd command.
78 * \param arg user argument, pointing to a drm_unique structure. 72 * \param arg user argument, pointing to a drm_unique structure.
79 * \return zero on success or a negative number on failure. 73 * \return zero on success or a negative number on failure.
@@ -83,28 +77,23 @@ int drm_getunique(struct inode *inode, struct file *filp,
83 * in interface version 1.1 and will return EBUSY when setversion has requested 77 * in interface version 1.1 and will return EBUSY when setversion has requested
84 * version 1.1 or greater. 78 * version 1.1 or greater.
85 */ 79 */
86int drm_setunique(struct inode *inode, struct file *filp, 80int drm_setunique(struct drm_device *dev, void *data,
87 unsigned int cmd, unsigned long arg) 81 struct drm_file *file_priv)
88{ 82{
89 struct drm_file *priv = filp->private_data; 83 struct drm_unique *u = data;
90 struct drm_device *dev = priv->head->dev;
91 struct drm_unique u;
92 int domain, bus, slot, func, ret; 84 int domain, bus, slot, func, ret;
93 85
94 if (dev->unique_len || dev->unique) 86 if (dev->unique_len || dev->unique)
95 return -EBUSY; 87 return -EBUSY;
96 88
97 if (copy_from_user(&u, (struct drm_unique __user *) arg, sizeof(u))) 89 if (!u->unique_len || u->unique_len > 1024)
98 return -EFAULT;
99
100 if (!u.unique_len || u.unique_len > 1024)
101 return -EINVAL; 90 return -EINVAL;
102 91
103 dev->unique_len = u.unique_len; 92 dev->unique_len = u->unique_len;
104 dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER); 93 dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
105 if (!dev->unique) 94 if (!dev->unique)
106 return -ENOMEM; 95 return -ENOMEM;
107 if (copy_from_user(dev->unique, u.unique, dev->unique_len)) 96 if (copy_from_user(dev->unique, u->unique, dev->unique_len))
108 return -EFAULT; 97 return -EFAULT;
109 98
110 dev->unique[dev->unique_len] = '\0'; 99 dev->unique[dev->unique_len] = '\0';
@@ -123,7 +112,7 @@ int drm_setunique(struct inode *inode, struct file *filp,
123 */ 112 */
124 ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); 113 ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
125 if (ret != 3) 114 if (ret != 3)
126 return DRM_ERR(EINVAL); 115 return -EINVAL;
127 domain = bus >> 8; 116 domain = bus >> 8;
128 bus &= 0xff; 117 bus &= 0xff;
129 118
@@ -172,7 +161,7 @@ static int drm_set_busid(struct drm_device * dev)
172 * Get a mapping information. 161 * Get a mapping information.
173 * 162 *
174 * \param inode device inode. 163 * \param inode device inode.
175 * \param filp file pointer. 164 * \param file_priv DRM file private.
176 * \param cmd command. 165 * \param cmd command.
177 * \param arg user argument, pointing to a drm_map structure. 166 * \param arg user argument, pointing to a drm_map structure.
178 * 167 *
@@ -181,21 +170,16 @@ static int drm_set_busid(struct drm_device * dev)
181 * Searches for the mapping with the specified offset and copies its information 170 * Searches for the mapping with the specified offset and copies its information
182 * into userspace 171 * into userspace
183 */ 172 */
184int drm_getmap(struct inode *inode, struct file *filp, 173int drm_getmap(struct drm_device *dev, void *data,
185 unsigned int cmd, unsigned long arg) 174 struct drm_file *file_priv)
186{ 175{
187 struct drm_file *priv = filp->private_data; 176 struct drm_map *map = data;
188 struct drm_device *dev = priv->head->dev;
189 struct drm_map __user *argp = (void __user *)arg;
190 struct drm_map map;
191 struct drm_map_list *r_list = NULL; 177 struct drm_map_list *r_list = NULL;
192 struct list_head *list; 178 struct list_head *list;
193 int idx; 179 int idx;
194 int i; 180 int i;
195 181
196 if (copy_from_user(&map, argp, sizeof(map))) 182 idx = map->offset;
197 return -EFAULT;
198 idx = map.offset;
199 183
200 mutex_lock(&dev->struct_mutex); 184 mutex_lock(&dev->struct_mutex);
201 if (idx < 0) { 185 if (idx < 0) {
@@ -216,16 +200,14 @@ int drm_getmap(struct inode *inode, struct file *filp,
216 return -EINVAL; 200 return -EINVAL;
217 } 201 }
218 202
219 map.offset = r_list->map->offset; 203 map->offset = r_list->map->offset;
220 map.size = r_list->map->size; 204 map->size = r_list->map->size;
221 map.type = r_list->map->type; 205 map->type = r_list->map->type;
222 map.flags = r_list->map->flags; 206 map->flags = r_list->map->flags;
223 map.handle = (void *)(unsigned long)r_list->user_token; 207 map->handle = (void *)(unsigned long) r_list->user_token;
224 map.mtrr = r_list->map->mtrr; 208 map->mtrr = r_list->map->mtrr;
225 mutex_unlock(&dev->struct_mutex); 209 mutex_unlock(&dev->struct_mutex);
226 210
227 if (copy_to_user(argp, &map, sizeof(map)))
228 return -EFAULT;
229 return 0; 211 return 0;
230} 212}
231 213
@@ -233,7 +215,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
233 * Get client information. 215 * Get client information.
234 * 216 *
235 * \param inode device inode. 217 * \param inode device inode.
236 * \param filp file pointer. 218 * \param file_priv DRM file private.
237 * \param cmd command. 219 * \param cmd command.
238 * \param arg user argument, pointing to a drm_client structure. 220 * \param arg user argument, pointing to a drm_client structure.
239 * 221 *
@@ -242,20 +224,15 @@ int drm_getmap(struct inode *inode, struct file *filp,
242 * Searches for the client with the specified index and copies its information 224 * Searches for the client with the specified index and copies its information
243 * into userspace 225 * into userspace
244 */ 226 */
245int drm_getclient(struct inode *inode, struct file *filp, 227int drm_getclient(struct drm_device *dev, void *data,
246 unsigned int cmd, unsigned long arg) 228 struct drm_file *file_priv)
247{ 229{
248 struct drm_file *priv = filp->private_data; 230 struct drm_client *client = data;
249 struct drm_device *dev = priv->head->dev;
250 struct drm_client __user *argp = (struct drm_client __user *)arg;
251 struct drm_client client;
252 struct drm_file *pt; 231 struct drm_file *pt;
253 int idx; 232 int idx;
254 int i; 233 int i;
255 234
256 if (copy_from_user(&client, argp, sizeof(client))) 235 idx = client->idx;
257 return -EFAULT;
258 idx = client.idx;
259 mutex_lock(&dev->struct_mutex); 236 mutex_lock(&dev->struct_mutex);
260 237
261 if (list_empty(&dev->filelist)) { 238 if (list_empty(&dev->filelist)) {
@@ -269,15 +246,13 @@ int drm_getclient(struct inode *inode, struct file *filp,
269 break; 246 break;
270 } 247 }
271 248
272 client.auth = pt->authenticated; 249 client->auth = pt->authenticated;
273 client.pid = pt->pid; 250 client->pid = pt->pid;
274 client.uid = pt->uid; 251 client->uid = pt->uid;
275 client.magic = pt->magic; 252 client->magic = pt->magic;
276 client.iocs = pt->ioctl_count; 253 client->iocs = pt->ioctl_count;
277 mutex_unlock(&dev->struct_mutex); 254 mutex_unlock(&dev->struct_mutex);
278 255
279 if (copy_to_user(argp, &client, sizeof(client)))
280 return -EFAULT;
281 return 0; 256 return 0;
282} 257}
283 258
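
drm_getclient() keeps its user-space contract after the conversion: callers set idx and read back one client record per call, walking idx upward until the ioctl fails. A quick enumeration loop, again assuming libdrm's <drm/drm.h> and /dev/dri/card0:

    #include <drm/drm.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct drm_client c;
            int fd = open("/dev/dri/card0", O_RDWR);
            int idx;

            if (fd < 0)
                    return 1;
            for (idx = 0; ; idx++) {
                    c = (struct drm_client){ .idx = idx };
                    if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &c))
                            break;  /* fails once idx runs past the client list */
                    printf("client %d: pid %lu uid %lu auth %d ioctls %lu\n",
                           idx, c.pid, c.uid, c.auth, c.iocs);
            }
            close(fd);
            return 0;
    }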
@@ -285,39 +260,35 @@ int drm_getclient(struct inode *inode, struct file *filp,
285 * Get statistics information. 260 * Get statistics information.
286 * 261 *
287 * \param inode device inode. 262 * \param inode device inode.
288 * \param filp file pointer. 263 * \param file_priv DRM file private.
289 * \param cmd command. 264 * \param cmd command.
290 * \param arg user argument, pointing to a drm_stats structure. 265 * \param arg user argument, pointing to a drm_stats structure.
291 * 266 *
292 * \return zero on success or a negative number on failure. 267 * \return zero on success or a negative number on failure.
293 */ 268 */
294int drm_getstats(struct inode *inode, struct file *filp, 269int drm_getstats(struct drm_device *dev, void *data,
295 unsigned int cmd, unsigned long arg) 270 struct drm_file *file_priv)
296{ 271{
297 struct drm_file *priv = filp->private_data; 272 struct drm_stats *stats = data;
298 struct drm_device *dev = priv->head->dev;
299 struct drm_stats stats;
300 int i; 273 int i;
301 274
302 memset(&stats, 0, sizeof(stats)); 275 memset(stats, 0, sizeof(stats));
303 276
304 mutex_lock(&dev->struct_mutex); 277 mutex_lock(&dev->struct_mutex);
305 278
306 for (i = 0; i < dev->counters; i++) { 279 for (i = 0; i < dev->counters; i++) {
307 if (dev->types[i] == _DRM_STAT_LOCK) 280 if (dev->types[i] == _DRM_STAT_LOCK)
308 stats.data[i].value 281 stats->data[i].value =
309 = (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); 282 (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
310 else 283 else
311 stats.data[i].value = atomic_read(&dev->counts[i]); 284 stats->data[i].value = atomic_read(&dev->counts[i]);
312 stats.data[i].type = dev->types[i]; 285 stats->data[i].type = dev->types[i];
313 } 286 }
314 287
315 stats.count = dev->counters; 288 stats->count = dev->counters;
316 289
317 mutex_unlock(&dev->struct_mutex); 290 mutex_unlock(&dev->struct_mutex);
318 291
319 if (copy_to_user((struct drm_stats __user *) arg, &stats, sizeof(stats)))
320 return -EFAULT;
321 return 0; 292 return 0;
322} 293}
323 294
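
One detail worth flagging in the drm_getstats() conversion above: stats is now a pointer, so memset(stats, 0, sizeof(stats)) clears only sizeof(void *) bytes rather than the whole structure; sizeof(*stats) would clear it all. A stand-alone illustration, using a made-up stand-in for the real drm_stats layout:

    #include <stdio.h>

    struct stats_like { unsigned long count; unsigned long data[15][2]; };

    int main(void)
    {
            struct stats_like s, *p = &s;

            printf("sizeof(p)  = %zu\n", sizeof(p));    /* pointer size only  */
            printf("sizeof(*p) = %zu\n", sizeof(*p));   /* the full structure */
            return 0;
    }

The loop below the memset fills in only dev->counters entries, so anything past that in the buffer is left as whatever the allocation contained; sizeof(*stats) would have preserved the old fully-zeroed behaviour.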
@@ -325,64 +296,59 @@ int drm_getstats(struct inode *inode, struct file *filp,
325 * Setversion ioctl. 296 * Setversion ioctl.
326 * 297 *
327 * \param inode device inode. 298 * \param inode device inode.
328 * \param filp file pointer. 299 * \param file_priv DRM file private.
329 * \param cmd command. 300 * \param cmd command.
330 * \param arg user argument, pointing to a drm_lock structure. 301 * \param arg user argument, pointing to a drm_lock structure.
331 * \return zero on success or negative number on failure. 302 * \return zero on success or negative number on failure.
332 * 303 *
333 * Sets the requested interface version 304 * Sets the requested interface version
334 */ 305 */
335int drm_setversion(DRM_IOCTL_ARGS) 306int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
336{ 307{
337 DRM_DEVICE; 308 struct drm_set_version *sv = data;
338 struct drm_set_version sv; 309 int if_version, retcode = 0;
339 struct drm_set_version retv; 310
340 int if_version; 311 if (sv->drm_di_major != -1) {
341 struct drm_set_version __user *argp = (void __user *)data; 312 if (sv->drm_di_major != DRM_IF_MAJOR ||
342 int ret; 313 sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
343 314 retcode = -EINVAL;
344 if (copy_from_user(&sv, argp, sizeof(sv))) 315 goto done;
345 return -EFAULT; 316 }
346 317 if_version = DRM_IF_VERSION(sv->drm_di_major,
347 retv.drm_di_major = DRM_IF_MAJOR; 318 sv->drm_di_minor);
348 retv.drm_di_minor = DRM_IF_MINOR;
349 retv.drm_dd_major = dev->driver->major;
350 retv.drm_dd_minor = dev->driver->minor;
351
352 if (copy_to_user(argp, &retv, sizeof(retv)))
353 return -EFAULT;
354
355 if (sv.drm_di_major != -1) {
356 if (sv.drm_di_major != DRM_IF_MAJOR ||
357 sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
358 return -EINVAL;
359 if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
360 dev->if_version = max(if_version, dev->if_version); 319 dev->if_version = max(if_version, dev->if_version);
361 if (sv.drm_di_minor >= 1) { 320 if (sv->drm_di_minor >= 1) {
362 /* 321 /*
363 * Version 1.1 includes tying of DRM to specific device 322 * Version 1.1 includes tying of DRM to specific device
364 */ 323 */
365 ret = drm_set_busid(dev); 324 drm_set_busid(dev);
366 if (ret)
367 return ret;
368 } 325 }
369 } 326 }
370 327
371 if (sv.drm_dd_major != -1) { 328 if (sv->drm_dd_major != -1) {
372 if (sv.drm_dd_major != dev->driver->major || 329 if (sv->drm_dd_major != dev->driver->major ||
373 sv.drm_dd_minor < 0 330 sv->drm_dd_minor < 0 || sv->drm_dd_minor >
374 || sv.drm_dd_minor > dev->driver->minor) 331 dev->driver->minor) {
375 return -EINVAL; 332 retcode = -EINVAL;
333 goto done;
334 }
376 335
377 if (dev->driver->set_version) 336 if (dev->driver->set_version)
378 dev->driver->set_version(dev, &sv); 337 dev->driver->set_version(dev, sv);
379 } 338 }
380 return 0; 339
340done:
341 sv->drm_di_major = DRM_IF_MAJOR;
342 sv->drm_di_minor = DRM_IF_MINOR;
343 sv->drm_dd_major = dev->driver->major;
344 sv->drm_dd_minor = dev->driver->minor;
345
346 return retcode;
381} 347}
382 348
383/** No-op ioctl. */ 349/** No-op ioctl. */
384int drm_noop(struct inode *inode, struct file *filp, unsigned int cmd, 350int drm_noop(struct drm_device *dev, void *data,
385 unsigned long arg) 351 struct drm_file *file_priv)
386{ 352{
387 DRM_DEBUG("\n"); 353 DRM_DEBUG("\n");
388 return 0; 354 return 0;
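
Besides the signature change, drm_setversion() now reports the supported core and driver interface versions through the done: label on every exit path, and it no longer propagates a failure from drm_set_busid(). From user space the negotiation looks like this; the header path and device node are assumptions, and the call must be made by the DRM master (the ioctl carries DRM_MASTER|DRM_ROOT_ONLY in the table above):

    #include <drm/drm.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct drm_set_version sv = {
                    .drm_di_major = 1, .drm_di_minor = 1,   /* request core 1.1 */
                    .drm_dd_major = -1, .drm_dd_minor = -1, /* leave the driver version alone */
            };
            int fd = open("/dev/dri/card0", O_RDWR);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv))
                    perror("DRM_IOCTL_SET_VERSION");
            /* Whatever happened, the kernel reports what it supports. */
            printf("core %d.%d, driver %d.%d\n", sv.drm_di_major,
                   sv.drm_di_minor, sv.drm_dd_major, sv.drm_dd_minor);
            close(fd);
            return 0;
    }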
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 871d2fde09b3..05eae63f85ba 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -41,7 +41,7 @@
41 * Get interrupt from bus id. 41 * Get interrupt from bus id.
42 * 42 *
43 * \param inode device inode. 43 * \param inode device inode.
44 * \param filp file pointer. 44 * \param file_priv DRM file private.
45 * \param cmd command. 45 * \param cmd command.
46 * \param arg user argument, pointing to a drm_irq_busid structure. 46 * \param arg user argument, pointing to a drm_irq_busid structure.
47 * \return zero on success or a negative number on failure. 47 * \return zero on success or a negative number on failure.
@@ -50,30 +50,24 @@
50 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal 50 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
51 * to that of the device that this DRM instance attached to. 51 * to that of the device that this DRM instance attached to.
52 */ 52 */
53int drm_irq_by_busid(struct inode *inode, struct file *filp, 53int drm_irq_by_busid(struct drm_device *dev, void *data,
54 unsigned int cmd, unsigned long arg) 54 struct drm_file *file_priv)
55{ 55{
56 struct drm_file *priv = filp->private_data; 56 struct drm_irq_busid *p = data;
57 struct drm_device *dev = priv->head->dev;
58 struct drm_irq_busid __user *argp = (void __user *)arg;
59 struct drm_irq_busid p;
60 57
61 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 58 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
62 return -EINVAL; 59 return -EINVAL;
63 60
64 if (copy_from_user(&p, argp, sizeof(p))) 61 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
65 return -EFAULT; 62 (p->busnum & 0xff) != dev->pdev->bus->number ||
66 63 p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
67 if ((p.busnum >> 8) != drm_get_pci_domain(dev) ||
68 (p.busnum & 0xff) != dev->pdev->bus->number ||
69 p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn))
70 return -EINVAL; 64 return -EINVAL;
71 65
72 p.irq = dev->irq; 66 p->irq = dev->irq;
67
68 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
69 p->irq);
73 70
74 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq);
75 if (copy_to_user(argp, &p, sizeof(p)))
76 return -EFAULT;
77 return 0; 71 return 0;
78} 72}
79 73
@@ -187,31 +181,27 @@ EXPORT_SYMBOL(drm_irq_uninstall);
187 * IRQ control ioctl. 181 * IRQ control ioctl.
188 * 182 *
189 * \param inode device inode. 183 * \param inode device inode.
190 * \param filp file pointer. 184 * \param file_priv DRM file private.
191 * \param cmd command. 185 * \param cmd command.
192 * \param arg user argument, pointing to a drm_control structure. 186 * \param arg user argument, pointing to a drm_control structure.
193 * \return zero on success or a negative number on failure. 187 * \return zero on success or a negative number on failure.
194 * 188 *
195 * Calls irq_install() or irq_uninstall() according to \p arg. 189 * Calls irq_install() or irq_uninstall() according to \p arg.
196 */ 190 */
197int drm_control(struct inode *inode, struct file *filp, 191int drm_control(struct drm_device *dev, void *data,
198 unsigned int cmd, unsigned long arg) 192 struct drm_file *file_priv)
199{ 193{
200 struct drm_file *priv = filp->private_data; 194 struct drm_control *ctl = data;
201 struct drm_device *dev = priv->head->dev;
202 struct drm_control ctl;
203 195
204 /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ 196 /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
205 197
206 if (copy_from_user(&ctl, (struct drm_control __user *) arg, sizeof(ctl)))
207 return -EFAULT;
208 198
209 switch (ctl.func) { 199 switch (ctl->func) {
210 case DRM_INST_HANDLER: 200 case DRM_INST_HANDLER:
211 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 201 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
212 return 0; 202 return 0;
213 if (dev->if_version < DRM_IF_VERSION(1, 2) && 203 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
214 ctl.irq != dev->irq) 204 ctl->irq != dev->irq)
215 return -EINVAL; 205 return -EINVAL;
216 return drm_irq_install(dev); 206 return drm_irq_install(dev);
217 case DRM_UNINST_HANDLER: 207 case DRM_UNINST_HANDLER:
@@ -227,7 +217,7 @@ int drm_control(struct inode *inode, struct file *filp,
227 * Wait for VBLANK. 217 * Wait for VBLANK.
228 * 218 *
229 * \param inode device inode. 219 * \param inode device inode.
230 * \param filp file pointer. 220 * \param file_priv DRM file private.
231 * \param cmd command. 221 * \param cmd command.
232 * \param data user argument, pointing to a drm_wait_vblank structure. 222 * \param data user argument, pointing to a drm_wait_vblank structure.
233 * \return zero on success or a negative number on failure. 223 * \return zero on success or a negative number on failure.
@@ -242,31 +232,25 @@ int drm_control(struct inode *inode, struct file *filp,
242 * 232 *
243 * If a signal is not requested, then calls vblank_wait(). 233 * If a signal is not requested, then calls vblank_wait().
244 */ 234 */
245int drm_wait_vblank(DRM_IOCTL_ARGS) 235int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
246{ 236{
247 struct drm_file *priv = filp->private_data; 237 union drm_wait_vblank *vblwait = data;
248 struct drm_device *dev = priv->head->dev;
249 union drm_wait_vblank __user *argp = (void __user *)data;
250 union drm_wait_vblank vblwait;
251 struct timeval now; 238 struct timeval now;
252 int ret = 0; 239 int ret = 0;
253 unsigned int flags, seq; 240 unsigned int flags, seq;
254 241
255 if (!dev->irq) 242 if ((!dev->irq) || (!dev->irq_enabled))
256 return -EINVAL; 243 return -EINVAL;
257 244
258 if (copy_from_user(&vblwait, argp, sizeof(vblwait))) 245 if (vblwait->request.type &
259 return -EFAULT;
260
261 if (vblwait.request.type &
262 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { 246 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
263 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", 247 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
264 vblwait.request.type, 248 vblwait->request.type,
265 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); 249 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
266 return -EINVAL; 250 return -EINVAL;
267 } 251 }
268 252
269 flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; 253 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
270 254
271 if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ? 255 if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
272 DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL)) 256 DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
@@ -275,10 +259,10 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
275 seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2 259 seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
276 : &dev->vbl_received); 260 : &dev->vbl_received);
277 261
278 switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) { 262 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
279 case _DRM_VBLANK_RELATIVE: 263 case _DRM_VBLANK_RELATIVE:
280 vblwait.request.sequence += seq; 264 vblwait->request.sequence += seq;
281 vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; 265 vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
282 case _DRM_VBLANK_ABSOLUTE: 266 case _DRM_VBLANK_ABSOLUTE:
283 break; 267 break;
284 default: 268 default:
@@ -286,8 +270,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
286 } 270 }
287 271
288 if ((flags & _DRM_VBLANK_NEXTONMISS) && 272 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
289 (seq - vblwait.request.sequence) <= (1<<23)) { 273 (seq - vblwait->request.sequence) <= (1<<23)) {
290 vblwait.request.sequence = seq + 1; 274 vblwait->request.sequence = seq + 1;
291 } 275 }
292 276
293 if (flags & _DRM_VBLANK_SIGNAL) { 277 if (flags & _DRM_VBLANK_SIGNAL) {
@@ -303,12 +287,13 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
303 * that case 287 * that case
304 */ 288 */
305 list_for_each_entry(vbl_sig, vbl_sigs, head) { 289 list_for_each_entry(vbl_sig, vbl_sigs, head) {
306 if (vbl_sig->sequence == vblwait.request.sequence 290 if (vbl_sig->sequence == vblwait->request.sequence
307 && vbl_sig->info.si_signo == vblwait.request.signal 291 && vbl_sig->info.si_signo ==
292 vblwait->request.signal
308 && vbl_sig->task == current) { 293 && vbl_sig->task == current) {
309 spin_unlock_irqrestore(&dev->vbl_lock, 294 spin_unlock_irqrestore(&dev->vbl_lock,
310 irqflags); 295 irqflags);
311 vblwait.reply.sequence = seq; 296 vblwait->reply.sequence = seq;
312 goto done; 297 goto done;
313 } 298 }
314 } 299 }
@@ -330,8 +315,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
330 315
331 memset((void *)vbl_sig, 0, sizeof(*vbl_sig)); 316 memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
332 317
333 vbl_sig->sequence = vblwait.request.sequence; 318 vbl_sig->sequence = vblwait->request.sequence;
334 vbl_sig->info.si_signo = vblwait.request.signal; 319 vbl_sig->info.si_signo = vblwait->request.signal;
335 vbl_sig->task = current; 320 vbl_sig->task = current;
336 321
337 spin_lock_irqsave(&dev->vbl_lock, irqflags); 322 spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -340,25 +325,22 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
340 325
341 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 326 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
342 327
343 vblwait.reply.sequence = seq; 328 vblwait->reply.sequence = seq;
344 } else { 329 } else {
345 if (flags & _DRM_VBLANK_SECONDARY) { 330 if (flags & _DRM_VBLANK_SECONDARY) {
346 if (dev->driver->vblank_wait2) 331 if (dev->driver->vblank_wait2)
347 ret = dev->driver->vblank_wait2(dev, &vblwait.request.sequence); 332 ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
348 } else if (dev->driver->vblank_wait) 333 } else if (dev->driver->vblank_wait)
349 ret = 334 ret =
350 dev->driver->vblank_wait(dev, 335 dev->driver->vblank_wait(dev,
351 &vblwait.request.sequence); 336 &vblwait->request.sequence);
352 337
353 do_gettimeofday(&now); 338 do_gettimeofday(&now);
354 vblwait.reply.tval_sec = now.tv_sec; 339 vblwait->reply.tval_sec = now.tv_sec;
355 vblwait.reply.tval_usec = now.tv_usec; 340 vblwait->reply.tval_usec = now.tv_usec;
356 } 341 }
357 342
358 done: 343 done:
359 if (copy_to_user(argp, &vblwait, sizeof(vblwait)))
360 return -EFAULT;
361
362 return ret; 344 return ret;
363} 345}
364 346
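
drm_wait_vblank() follows the same conversion and also gains an explicit dev->irq_enabled check next to the old dev->irq one. The structure and flag names below are the ones visible in the hunk (union drm_wait_vblank, _DRM_VBLANK_RELATIVE, the reply timestamps); the header path and device node are assumptions, and the program only succeeds on a driver with vblank interrupt support:

    #include <drm/drm.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            union drm_wait_vblank vbl;
            int fd = open("/dev/dri/card0", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&vbl, 0, sizeof(vbl));
            vbl.request.type = _DRM_VBLANK_RELATIVE;
            vbl.request.sequence = 1;               /* the next vblank from now */

            if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl)) {
                    perror("DRM_IOCTL_WAIT_VBLANK"); /* e.g. EINVAL with no IRQ installed */
                    close(fd);
                    return 1;
            }
            printf("vblank %u at %ld.%06lds\n", vbl.reply.sequence,
                   vbl.reply.tval_sec, vbl.reply.tval_usec);
            close(fd);
            return 0;
    }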
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index c0534b5a8b78..c6b73e744d67 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -41,39 +41,33 @@ static int drm_notifier(void *priv);
41 * Lock ioctl. 41 * Lock ioctl.
42 * 42 *
43 * \param inode device inode. 43 * \param inode device inode.
44 * \param filp file pointer. 44 * \param file_priv DRM file private.
45 * \param cmd command. 45 * \param cmd command.
46 * \param arg user argument, pointing to a drm_lock structure. 46 * \param arg user argument, pointing to a drm_lock structure.
47 * \return zero on success or negative number on failure. 47 * \return zero on success or negative number on failure.
48 * 48 *
49 * Add the current task to the lock wait queue, and attempt to take to lock. 49 * Add the current task to the lock wait queue, and attempt to take to lock.
50 */ 50 */
51int drm_lock(struct inode *inode, struct file *filp, 51int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
52 unsigned int cmd, unsigned long arg)
53{ 52{
54 struct drm_file *priv = filp->private_data;
55 struct drm_device *dev = priv->head->dev;
56 DECLARE_WAITQUEUE(entry, current); 53 DECLARE_WAITQUEUE(entry, current);
57 struct drm_lock lock; 54 struct drm_lock *lock = data;
58 int ret = 0; 55 int ret = 0;
59 56
60 ++priv->lock_count; 57 ++file_priv->lock_count;
61 58
62 if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) 59 if (lock->context == DRM_KERNEL_CONTEXT) {
63 return -EFAULT;
64
65 if (lock.context == DRM_KERNEL_CONTEXT) {
66 DRM_ERROR("Process %d using kernel context %d\n", 60 DRM_ERROR("Process %d using kernel context %d\n",
67 current->pid, lock.context); 61 current->pid, lock->context);
68 return -EINVAL; 62 return -EINVAL;
69 } 63 }
70 64
71 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", 65 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
72 lock.context, current->pid, 66 lock->context, current->pid,
73 dev->lock.hw_lock->lock, lock.flags); 67 dev->lock.hw_lock->lock, lock->flags);
74 68
75 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) 69 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
76 if (lock.context < 0) 70 if (lock->context < 0)
77 return -EINVAL; 71 return -EINVAL;
78 72
79 add_wait_queue(&dev->lock.lock_queue, &entry); 73 add_wait_queue(&dev->lock.lock_queue, &entry);
@@ -87,8 +81,8 @@ int drm_lock(struct inode *inode, struct file *filp,
87 ret = -EINTR; 81 ret = -EINTR;
88 break; 82 break;
89 } 83 }
90 if (drm_lock_take(&dev->lock, lock.context)) { 84 if (drm_lock_take(&dev->lock, lock->context)) {
91 dev->lock.filp = filp; 85 dev->lock.file_priv = file_priv;
92 dev->lock.lock_time = jiffies; 86 dev->lock.lock_time = jiffies;
93 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); 87 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
94 break; /* Got lock */ 88 break; /* Got lock */
@@ -107,7 +101,8 @@ int drm_lock(struct inode *inode, struct file *filp,
107 __set_current_state(TASK_RUNNING); 101 __set_current_state(TASK_RUNNING);
108 remove_wait_queue(&dev->lock.lock_queue, &entry); 102 remove_wait_queue(&dev->lock.lock_queue, &entry);
109 103
110 DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" ); 104 DRM_DEBUG("%d %s\n", lock->context,
105 ret ? "interrupted" : "has lock");
111 if (ret) return ret; 106 if (ret) return ret;
112 107
113 sigemptyset(&dev->sigmask); 108 sigemptyset(&dev->sigmask);
@@ -115,24 +110,26 @@ int drm_lock(struct inode *inode, struct file *filp,
115 sigaddset(&dev->sigmask, SIGTSTP); 110 sigaddset(&dev->sigmask, SIGTSTP);
116 sigaddset(&dev->sigmask, SIGTTIN); 111 sigaddset(&dev->sigmask, SIGTTIN);
117 sigaddset(&dev->sigmask, SIGTTOU); 112 sigaddset(&dev->sigmask, SIGTTOU);
118 dev->sigdata.context = lock.context; 113 dev->sigdata.context = lock->context;
119 dev->sigdata.lock = dev->lock.hw_lock; 114 dev->sigdata.lock = dev->lock.hw_lock;
120 block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); 115 block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
121 116
122 if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY)) 117 if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
123 dev->driver->dma_ready(dev); 118 dev->driver->dma_ready(dev);
124 119
125 if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { 120 if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
121 {
126 if (dev->driver->dma_quiescent(dev)) { 122 if (dev->driver->dma_quiescent(dev)) {
127 DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context); 123 DRM_DEBUG("%d waiting for DMA quiescent\n",
128 return DRM_ERR(EBUSY); 124 lock->context);
125 return -EBUSY;
129 } 126 }
130 } 127 }
131 128
132 if (dev->driver->kernel_context_switch && 129 if (dev->driver->kernel_context_switch &&
133 dev->last_context != lock.context) { 130 dev->last_context != lock->context) {
134 dev->driver->kernel_context_switch(dev, dev->last_context, 131 dev->driver->kernel_context_switch(dev, dev->last_context,
135 lock.context); 132 lock->context);
136 } 133 }
137 134
138 return 0; 135 return 0;
@@ -142,27 +139,21 @@ int drm_lock(struct inode *inode, struct file *filp,
142 * Unlock ioctl. 139 * Unlock ioctl.
143 * 140 *
144 * \param inode device inode. 141 * \param inode device inode.
145 * \param filp file pointer. 142 * \param file_priv DRM file private.
146 * \param cmd command. 143 * \param cmd command.
147 * \param arg user argument, pointing to a drm_lock structure. 144 * \param arg user argument, pointing to a drm_lock structure.
148 * \return zero on success or negative number on failure. 145 * \return zero on success or negative number on failure.
149 * 146 *
150 * Transfer and free the lock. 147 * Transfer and free the lock.
151 */ 148 */
152int drm_unlock(struct inode *inode, struct file *filp, 149int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
153 unsigned int cmd, unsigned long arg)
154{ 150{
155 struct drm_file *priv = filp->private_data; 151 struct drm_lock *lock = data;
156 struct drm_device *dev = priv->head->dev;
157 struct drm_lock lock;
158 unsigned long irqflags; 152 unsigned long irqflags;
159 153
160 if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) 154 if (lock->context == DRM_KERNEL_CONTEXT) {
161 return -EFAULT;
162
163 if (lock.context == DRM_KERNEL_CONTEXT) {
164 DRM_ERROR("Process %d using kernel context %d\n", 155 DRM_ERROR("Process %d using kernel context %d\n",
165 current->pid, lock.context); 156 current->pid, lock->context);
166 return -EINVAL; 157 return -EINVAL;
167 } 158 }
168 159
@@ -184,7 +175,7 @@ int drm_unlock(struct inode *inode, struct file *filp,
184 if (dev->driver->kernel_context_switch_unlock) 175 if (dev->driver->kernel_context_switch_unlock)
185 dev->driver->kernel_context_switch_unlock(dev); 176 dev->driver->kernel_context_switch_unlock(dev);
186 else { 177 else {
187 if (drm_lock_free(&dev->lock,lock.context)) { 178 if (drm_lock_free(&dev->lock,lock->context)) {
188 /* FIXME: Should really bail out here. */ 179 /* FIXME: Should really bail out here. */
189 } 180 }
190 } 181 }
@@ -257,7 +248,7 @@ static int drm_lock_transfer(struct drm_lock_data *lock_data,
257 unsigned int old, new, prev; 248 unsigned int old, new, prev;
258 volatile unsigned int *lock = &lock_data->hw_lock->lock; 249 volatile unsigned int *lock = &lock_data->hw_lock->lock;
259 250
260 lock_data->filp = NULL; 251 lock_data->file_priv = NULL;
261 do { 252 do {
262 old = *lock; 253 old = *lock;
263 new = context | _DRM_LOCK_HELD; 254 new = context | _DRM_LOCK_HELD;
@@ -390,13 +381,11 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
390EXPORT_SYMBOL(drm_idlelock_release); 381EXPORT_SYMBOL(drm_idlelock_release);
391 382
392 383
393int drm_i_have_hw_lock(struct file *filp) 384int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
394{ 385{
395 DRM_DEVICE; 386 return (file_priv->lock_count && dev->lock.hw_lock &&
396
397 return (priv->lock_count && dev->lock.hw_lock &&
398 _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && 387 _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
399 dev->lock.filp == filp); 388 dev->lock.file_priv == file_priv);
400} 389}
401 390
402EXPORT_SYMBOL(drm_i_have_hw_lock); 391EXPORT_SYMBOL(drm_i_have_hw_lock);
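
The locking changes are bookkeeping rather than protocol: ownership of the heavyweight lock is now recorded as dev->lock.file_priv instead of a struct file pointer, and drm_i_have_hw_lock() takes the device plus the drm_file it is asked about. The lock word itself keeps the old scheme, a context id in the low bits with a held flag in the top bit, claimed with a compare-and-swap loop as in drm_lock_take()/drm_lock_transfer() above. That part can be modelled in isolation; the constant below mirrors _DRM_LOCK_HELD but is restated here rather than pulled from drm.h, and the real drm_lock_take() additionally records contention instead of giving up:

    #include <stdio.h>

    #define LOCK_HELD 0x80000000U   /* stand-in for _DRM_LOCK_HELD */

    static unsigned int hw_lock;    /* stands in for dev->lock.hw_lock->lock */

    static int lock_take(unsigned int *lock, unsigned int context)
    {
            unsigned int old, new;

            do {
                    old = *lock;
                    if (old & LOCK_HELD)
                            return 0;               /* somebody else holds it */
                    new = context | LOCK_HELD;
            } while (!__sync_bool_compare_and_swap(lock, old, new));
            return 1;                               /* lock acquired */
    }

    int main(void)
    {
            printf("context 3 takes the lock: %d\n", lock_take(&hw_lock, 3));
            printf("context 5 takes the lock: %d\n", lock_take(&hw_lock, 5));
            return 0;
    }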
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h
index 0b8d3433386d..114e54e0f61b 100644
--- a/drivers/char/drm/drm_os_linux.h
+++ b/drivers/char/drm/drm_os_linux.h
@@ -6,11 +6,6 @@
6#include <linux/interrupt.h> /* For task queue support */ 6#include <linux/interrupt.h> /* For task queue support */
7#include <linux/delay.h> 7#include <linux/delay.h>
8 8
9/** File pointer type */
10#define DRMFILE struct file *
11/** Ioctl arguments */
12#define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data
13#define DRM_ERR(d) -(d)
14/** Current process ID */ 9/** Current process ID */
15#define DRM_CURRENTPID current->pid 10#define DRM_CURRENTPID current->pid
16#define DRM_SUSER(p) capable(CAP_SYS_ADMIN) 11#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
@@ -33,9 +28,6 @@
33#define DRM_WRITEMEMORYBARRIER() wmb() 28#define DRM_WRITEMEMORYBARRIER() wmb()
34/** Read/write memory barrier */ 29/** Read/write memory barrier */
35#define DRM_MEMORYBARRIER() mb() 30#define DRM_MEMORYBARRIER() mb()
36/** DRM device local declaration */
37#define DRM_DEVICE struct drm_file *priv = filp->private_data; \
38 struct drm_device *dev = priv->head->dev
39 31
40/** IRQ handler arguments and return type and values */ 32/** IRQ handler arguments and return type and values */
41#define DRM_IRQ_ARGS int irq, void *arg 33#define DRM_IRQ_ARGS int irq, void *arg
@@ -94,8 +86,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
94#define DRM_GET_USER_UNCHECKED(val, uaddr) \ 86#define DRM_GET_USER_UNCHECKED(val, uaddr) \
95 __get_user(val, uaddr) 87 __get_user(val, uaddr)
96 88
97#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
98
99#define DRM_HZ HZ 89#define DRM_HZ HZ
100 90
101#define DRM_WAIT_ON( ret, queue, timeout, condition ) \ 91#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 30b200b01314..f3593974496c 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -236,10 +236,8 @@
236 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 236 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
237 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ 237 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
238 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 238 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
239 {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
240 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 239 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
241 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 240 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
242 {0x1106, 0x3304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
243 {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 241 {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
244 {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 242 {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
245 {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \ 243 {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index 067d25daaf17..eb7fa437355e 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -62,13 +62,8 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
62# define ScatterHandle(x) (unsigned int)(x) 62# define ScatterHandle(x) (unsigned int)(x)
63#endif 63#endif
64 64
65int drm_sg_alloc(struct inode *inode, struct file *filp, 65int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
66 unsigned int cmd, unsigned long arg)
67{ 66{
68 struct drm_file *priv = filp->private_data;
69 struct drm_device *dev = priv->head->dev;
70 struct drm_scatter_gather __user *argp = (void __user *)arg;
71 struct drm_scatter_gather request;
72 struct drm_sg_mem *entry; 67 struct drm_sg_mem *entry;
73 unsigned long pages, i, j; 68 unsigned long pages, i, j;
74 69
@@ -80,17 +75,13 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
80 if (dev->sg) 75 if (dev->sg)
81 return -EINVAL; 76 return -EINVAL;
82 77
83 if (copy_from_user(&request, argp, sizeof(request)))
84 return -EFAULT;
85
86 entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS); 78 entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
87 if (!entry) 79 if (!entry)
88 return -ENOMEM; 80 return -ENOMEM;
89 81
90 memset(entry, 0, sizeof(*entry)); 82 memset(entry, 0, sizeof(*entry));
91 83 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
92 pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; 84 DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages);
93 DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages);
94 85
95 entry->pages = pages; 86 entry->pages = pages;
96 entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist), 87 entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
@@ -142,12 +133,7 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
142 SetPageReserved(entry->pagelist[j]); 133 SetPageReserved(entry->pagelist[j]);
143 } 134 }
144 135
145 request.handle = entry->handle; 136 request->handle = entry->handle;
146
147 if (copy_to_user(argp, &request, sizeof(request))) {
148 drm_sg_cleanup(entry);
149 return -EFAULT;
150 }
151 137
152 dev->sg = entry; 138 dev->sg = entry;
153 139
@@ -197,27 +183,31 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
197 drm_sg_cleanup(entry); 183 drm_sg_cleanup(entry);
198 return -ENOMEM; 184 return -ENOMEM;
199} 185}
186EXPORT_SYMBOL(drm_sg_alloc);
187
200 188
201int drm_sg_free(struct inode *inode, struct file *filp, 189int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
202 unsigned int cmd, unsigned long arg) 190 struct drm_file *file_priv)
203{ 191{
204 struct drm_file *priv = filp->private_data; 192 struct drm_scatter_gather *request = data;
205 struct drm_device *dev = priv->head->dev; 193
206 struct drm_scatter_gather request; 194 return drm_sg_alloc(dev, request);
195
196}
197
198int drm_sg_free(struct drm_device *dev, void *data,
199 struct drm_file *file_priv)
200{
201 struct drm_scatter_gather *request = data;
207 struct drm_sg_mem *entry; 202 struct drm_sg_mem *entry;
208 203
209 if (!drm_core_check_feature(dev, DRIVER_SG)) 204 if (!drm_core_check_feature(dev, DRIVER_SG))
210 return -EINVAL; 205 return -EINVAL;
211 206
212 if (copy_from_user(&request,
213 (struct drm_scatter_gather __user *) arg,
214 sizeof(request)))
215 return -EFAULT;
216
217 entry = dev->sg; 207 entry = dev->sg;
218 dev->sg = NULL; 208 dev->sg = NULL;
219 209
220 if (!entry || entry->handle != request.handle) 210 if (!entry || entry->handle != request->handle)
221 return -EINVAL; 211 return -EINVAL;
222 212
223 DRM_DEBUG("sg free virtual = %p\n", entry->virtual); 213 DRM_DEBUG("sg free virtual = %p\n", entry->virtual);
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 68e36e51ba0c..e8d50af58201 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -463,7 +463,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
463/** 463/**
464 * mmap DMA memory. 464 * mmap DMA memory.
465 * 465 *
466 * \param filp file pointer. 466 * \param file_priv DRM file private.
467 * \param vma virtual memory area. 467 * \param vma virtual memory area.
468 * \return zero on success or a negative number on failure. 468 * \return zero on success or a negative number on failure.
469 * 469 *
@@ -533,7 +533,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
533/** 533/**
534 * mmap DMA memory. 534 * mmap DMA memory.
535 * 535 *
536 * \param filp file pointer. 536 * \param file_priv DRM file private.
537 * \param vma virtual memory area. 537 * \param vma virtual memory area.
538 * \return zero on success or a negative number on failure. 538 * \return zero on success or a negative number on failure.
539 * 539 *
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index cb449999d0ef..8e841bdee6dc 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -120,10 +120,9 @@ static const struct file_operations i810_buffer_fops = {
120 .fasync = drm_fasync, 120 .fasync = drm_fasync,
121}; 121};
122 122
123static int i810_map_buffer(struct drm_buf * buf, struct file *filp) 123static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
124{ 124{
125 struct drm_file *priv = filp->private_data; 125 struct drm_device *dev = file_priv->head->dev;
126 struct drm_device *dev = priv->head->dev;
127 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 126 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
128 drm_i810_private_t *dev_priv = dev->dev_private; 127 drm_i810_private_t *dev_priv = dev->dev_private;
129 const struct file_operations *old_fops; 128 const struct file_operations *old_fops;
@@ -133,14 +132,14 @@ static int i810_map_buffer(struct drm_buf * buf, struct file *filp)
133 return -EINVAL; 132 return -EINVAL;
134 133
135 down_write(&current->mm->mmap_sem); 134 down_write(&current->mm->mmap_sem);
136 old_fops = filp->f_op; 135 old_fops = file_priv->filp->f_op;
137 filp->f_op = &i810_buffer_fops; 136 file_priv->filp->f_op = &i810_buffer_fops;
138 dev_priv->mmap_buffer = buf; 137 dev_priv->mmap_buffer = buf;
139 buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, 138 buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
140 PROT_READ | PROT_WRITE, 139 PROT_READ | PROT_WRITE,
141 MAP_SHARED, buf->bus_address); 140 MAP_SHARED, buf->bus_address);
142 dev_priv->mmap_buffer = NULL; 141 dev_priv->mmap_buffer = NULL;
143 filp->f_op = old_fops; 142 file_priv->filp->f_op = old_fops;
144 if (IS_ERR(buf_priv->virtual)) { 143 if (IS_ERR(buf_priv->virtual)) {
145 /* Real error */ 144 /* Real error */
146 DRM_ERROR("mmap error\n"); 145 DRM_ERROR("mmap error\n");
@@ -173,7 +172,7 @@ static int i810_unmap_buffer(struct drm_buf * buf)
173} 172}
174 173
175static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, 174static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
176 struct file *filp) 175 struct drm_file *file_priv)
177{ 176{
178 struct drm_buf *buf; 177 struct drm_buf *buf;
179 drm_i810_buf_priv_t *buf_priv; 178 drm_i810_buf_priv_t *buf_priv;
@@ -186,13 +185,13 @@ static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
186 return retcode; 185 return retcode;
187 } 186 }
188 187
189 retcode = i810_map_buffer(buf, filp); 188 retcode = i810_map_buffer(buf, file_priv);
190 if (retcode) { 189 if (retcode) {
191 i810_freelist_put(dev, buf); 190 i810_freelist_put(dev, buf);
192 DRM_ERROR("mapbuf failed, retcode %d\n", retcode); 191 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
193 return retcode; 192 return retcode;
194 } 193 }
195 buf->filp = filp; 194 buf->file_priv = file_priv;
196 buf_priv = buf->dev_private; 195 buf_priv = buf->dev_private;
197 d->granted = 1; 196 d->granted = 1;
198 d->request_idx = buf->idx; 197 d->request_idx = buf->idx;
@@ -380,7 +379,7 @@ static int i810_dma_initialize(struct drm_device * dev,
380 i810_dma_cleanup(dev); 379 i810_dma_cleanup(dev);
381 DRM_ERROR("can not ioremap virtual address for" 380 DRM_ERROR("can not ioremap virtual address for"
382 " ring buffer\n"); 381 " ring buffer\n");
383 return DRM_ERR(ENOMEM); 382 return -ENOMEM;
384 } 383 }
385 384
386 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 385 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -430,99 +429,29 @@ static int i810_dma_initialize(struct drm_device * dev,
430 return 0; 429 return 0;
431} 430}
432 431
433/* i810 DRM version 1.1 used a smaller init structure with different 432static int i810_dma_init(struct drm_device *dev, void *data,
434 * ordering of values than is currently used (drm >= 1.2). There is 433 struct drm_file *file_priv)
435 * no defined way to detect the XFree version to correct this problem,
436 * however by checking using this procedure we can detect the correct
437 * thing to do.
438 *
439 * #1 Read the Smaller init structure from user-space
440 * #2 Verify the overlay_physical is a valid physical address, or NULL
441 * If it isn't then we have a v1.1 client. Fix up params.
442 * If it is, then we have a 1.2 client... get the rest of the data.
443 */
444static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg)
445{ 434{
446
447 /* Get v1.1 init data */
448 if (copy_from_user(init, (drm_i810_pre12_init_t __user *) arg,
449 sizeof(drm_i810_pre12_init_t))) {
450 return -EFAULT;
451 }
452
453 if ((!init->overlay_physical) || (init->overlay_physical > 4096)) {
454
455 /* This is a v1.2 client, just get the v1.2 init data */
456 DRM_INFO("Using POST v1.2 init.\n");
457 if (copy_from_user(init, (drm_i810_init_t __user *) arg,
458 sizeof(drm_i810_init_t))) {
459 return -EFAULT;
460 }
461 } else {
462
463 /* This is a v1.1 client, fix the params */
464 DRM_INFO("Using PRE v1.2 init.\n");
465 init->pitch_bits = init->h;
466 init->pitch = init->w;
467 init->h = init->overlay_physical;
468 init->w = init->overlay_offset;
469 init->overlay_physical = 0;
470 init->overlay_offset = 0;
471 }
472
473 return 0;
474}
475
476static int i810_dma_init(struct inode *inode, struct file *filp,
477 unsigned int cmd, unsigned long arg)
478{
479 struct drm_file *priv = filp->private_data;
480 struct drm_device *dev = priv->head->dev;
481 drm_i810_private_t *dev_priv; 435 drm_i810_private_t *dev_priv;
482 drm_i810_init_t init; 436 drm_i810_init_t *init = data;
483 int retcode = 0; 437 int retcode = 0;
484 438
485 /* Get only the init func */ 439 switch (init->func) {
486 if (copy_from_user
487 (&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
488 return -EFAULT;
489
490 switch (init.func) {
491 case I810_INIT_DMA:
492 /* This case is for backward compatibility. It
493 * handles XFree 4.1.0 and 4.2.0, and has to
494 * do some parameter checking as described below.
495 * It will someday go away.
496 */
497 retcode = i810_dma_init_compat(&init, arg);
498 if (retcode)
499 return retcode;
500
501 dev_priv = drm_alloc(sizeof(drm_i810_private_t),
502 DRM_MEM_DRIVER);
503 if (dev_priv == NULL)
504 return -ENOMEM;
505 retcode = i810_dma_initialize(dev, dev_priv, &init);
506 break;
507
508 default:
509 case I810_INIT_DMA_1_4: 440 case I810_INIT_DMA_1_4:
510 DRM_INFO("Using v1.4 init.\n"); 441 DRM_INFO("Using v1.4 init.\n");
511 if (copy_from_user(&init, (drm_i810_init_t __user *) arg,
512 sizeof(drm_i810_init_t))) {
513 return -EFAULT;
514 }
515 dev_priv = drm_alloc(sizeof(drm_i810_private_t), 442 dev_priv = drm_alloc(sizeof(drm_i810_private_t),
516 DRM_MEM_DRIVER); 443 DRM_MEM_DRIVER);
517 if (dev_priv == NULL) 444 if (dev_priv == NULL)
518 return -ENOMEM; 445 return -ENOMEM;
519 retcode = i810_dma_initialize(dev, dev_priv, &init); 446 retcode = i810_dma_initialize(dev, dev_priv, init);
520 break; 447 break;
521 448
522 case I810_CLEANUP_DMA: 449 case I810_CLEANUP_DMA:
523 DRM_INFO("DMA Cleanup\n"); 450 DRM_INFO("DMA Cleanup\n");
524 retcode = i810_dma_cleanup(dev); 451 retcode = i810_dma_cleanup(dev);
525 break; 452 break;
453 default:
454 return -EINVAL;
526 } 455 }
527 456
528 return retcode; 457 return retcode;
@@ -968,7 +897,8 @@ static int i810_flush_queue(struct drm_device * dev)
968} 897}
969 898
970/* Must be called with the lock held */ 899/* Must be called with the lock held */
971static void i810_reclaim_buffers(struct drm_device * dev, struct file *filp) 900static void i810_reclaim_buffers(struct drm_device * dev,
901 struct drm_file *file_priv)
972{ 902{
973 struct drm_device_dma *dma = dev->dma; 903 struct drm_device_dma *dma = dev->dma;
974 int i; 904 int i;
@@ -986,7 +916,7 @@ static void i810_reclaim_buffers(struct drm_device * dev, struct file *filp)
986 struct drm_buf *buf = dma->buflist[i]; 916 struct drm_buf *buf = dma->buflist[i];
987 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 917 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
988 918
989 if (buf->filp == filp && buf_priv) { 919 if (buf->file_priv == file_priv && buf_priv) {
990 int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, 920 int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
991 I810_BUF_FREE); 921 I810_BUF_FREE);
992 922
@@ -998,47 +928,38 @@ static void i810_reclaim_buffers(struct drm_device * dev, struct file *filp)
998 } 928 }
999} 929}
1000 930
1001static int i810_flush_ioctl(struct inode *inode, struct file *filp, 931static int i810_flush_ioctl(struct drm_device *dev, void *data,
1002 unsigned int cmd, unsigned long arg) 932 struct drm_file *file_priv)
1003{ 933{
1004 struct drm_file *priv = filp->private_data; 934 LOCK_TEST_WITH_RETURN(dev, file_priv);
1005 struct drm_device *dev = priv->head->dev;
1006
1007 LOCK_TEST_WITH_RETURN(dev, filp);
1008 935
1009 i810_flush_queue(dev); 936 i810_flush_queue(dev);
1010 return 0; 937 return 0;
1011} 938}
1012 939
1013static int i810_dma_vertex(struct inode *inode, struct file *filp, 940static int i810_dma_vertex(struct drm_device *dev, void *data,
1014 unsigned int cmd, unsigned long arg) 941 struct drm_file *file_priv)
1015{ 942{
1016 struct drm_file *priv = filp->private_data;
1017 struct drm_device *dev = priv->head->dev;
1018 struct drm_device_dma *dma = dev->dma; 943 struct drm_device_dma *dma = dev->dma;
1019 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 944 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1020 u32 *hw_status = dev_priv->hw_status_page; 945 u32 *hw_status = dev_priv->hw_status_page;
1021 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 946 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1022 dev_priv->sarea_priv; 947 dev_priv->sarea_priv;
1023 drm_i810_vertex_t vertex; 948 drm_i810_vertex_t *vertex = data;
1024
1025 if (copy_from_user
1026 (&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex)))
1027 return -EFAULT;
1028 949
1029 LOCK_TEST_WITH_RETURN(dev, filp); 950 LOCK_TEST_WITH_RETURN(dev, file_priv);
1030 951
1031 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", 952 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
1032 vertex.idx, vertex.used, vertex.discard); 953 vertex->idx, vertex->used, vertex->discard);
1033 954
1034 if (vertex.idx < 0 || vertex.idx > dma->buf_count) 955 if (vertex->idx < 0 || vertex->idx > dma->buf_count)
1035 return -EINVAL; 956 return -EINVAL;
1036 957
1037 i810_dma_dispatch_vertex(dev, 958 i810_dma_dispatch_vertex(dev,
1038 dma->buflist[vertex.idx], 959 dma->buflist[vertex->idx],
1039 vertex.discard, vertex.used); 960 vertex->discard, vertex->used);
1040 961
1041 atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]); 962 atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
1042 atomic_inc(&dev->counts[_DRM_STAT_DMA]); 963 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1043 sarea_priv->last_enqueue = dev_priv->counter - 1; 964 sarea_priv->last_enqueue = dev_priv->counter - 1;
1044 sarea_priv->last_dispatch = (int)hw_status[5]; 965 sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1046,48 +967,37 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp,
1046 return 0; 967 return 0;
1047} 968}
1048 969
1049static int i810_clear_bufs(struct inode *inode, struct file *filp, 970static int i810_clear_bufs(struct drm_device *dev, void *data,
1050 unsigned int cmd, unsigned long arg) 971 struct drm_file *file_priv)
1051{ 972{
1052 struct drm_file *priv = filp->private_data; 973 drm_i810_clear_t *clear = data;
1053 struct drm_device *dev = priv->head->dev;
1054 drm_i810_clear_t clear;
1055 974
1056 if (copy_from_user 975 LOCK_TEST_WITH_RETURN(dev, file_priv);
1057 (&clear, (drm_i810_clear_t __user *) arg, sizeof(clear)))
1058 return -EFAULT;
1059
1060 LOCK_TEST_WITH_RETURN(dev, filp);
1061 976
1062 /* GH: Someone's doing nasty things... */ 977 /* GH: Someone's doing nasty things... */
1063 if (!dev->dev_private) { 978 if (!dev->dev_private) {
1064 return -EINVAL; 979 return -EINVAL;
1065 } 980 }
1066 981
1067 i810_dma_dispatch_clear(dev, clear.flags, 982 i810_dma_dispatch_clear(dev, clear->flags,
1068 clear.clear_color, clear.clear_depth); 983 clear->clear_color, clear->clear_depth);
1069 return 0; 984 return 0;
1070} 985}
1071 986
1072static int i810_swap_bufs(struct inode *inode, struct file *filp, 987static int i810_swap_bufs(struct drm_device *dev, void *data,
1073 unsigned int cmd, unsigned long arg) 988 struct drm_file *file_priv)
1074{ 989{
1075 struct drm_file *priv = filp->private_data;
1076 struct drm_device *dev = priv->head->dev;
1077
1078 DRM_DEBUG("i810_swap_bufs\n"); 990 DRM_DEBUG("i810_swap_bufs\n");
1079 991
1080 LOCK_TEST_WITH_RETURN(dev, filp); 992 LOCK_TEST_WITH_RETURN(dev, file_priv);
1081 993
1082 i810_dma_dispatch_swap(dev); 994 i810_dma_dispatch_swap(dev);
1083 return 0; 995 return 0;
1084} 996}
1085 997
1086static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, 998static int i810_getage(struct drm_device *dev, void *data,
1087 unsigned long arg) 999 struct drm_file *file_priv)
1088{ 1000{
1089 struct drm_file *priv = filp->private_data;
1090 struct drm_device *dev = priv->head->dev;
1091 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1001 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1092 u32 *hw_status = dev_priv->hw_status_page; 1002 u32 *hw_status = dev_priv->hw_status_page;
1093 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1003 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
@@ -1097,46 +1007,39 @@ static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1097 return 0; 1007 return 0;
1098} 1008}
1099 1009
1100static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, 1010static int i810_getbuf(struct drm_device *dev, void *data,
1101 unsigned long arg) 1011 struct drm_file *file_priv)
1102{ 1012{
1103 struct drm_file *priv = filp->private_data;
1104 struct drm_device *dev = priv->head->dev;
1105 int retcode = 0; 1013 int retcode = 0;
1106 drm_i810_dma_t d; 1014 drm_i810_dma_t *d = data;
1107 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1015 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1108 u32 *hw_status = dev_priv->hw_status_page; 1016 u32 *hw_status = dev_priv->hw_status_page;
1109 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1017 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1110 dev_priv->sarea_priv; 1018 dev_priv->sarea_priv;
1111 1019
1112 if (copy_from_user(&d, (drm_i810_dma_t __user *) arg, sizeof(d))) 1020 LOCK_TEST_WITH_RETURN(dev, file_priv);
1113 return -EFAULT;
1114
1115 LOCK_TEST_WITH_RETURN(dev, filp);
1116 1021
1117 d.granted = 0; 1022 d->granted = 0;
1118 1023
1119 retcode = i810_dma_get_buffer(dev, &d, filp); 1024 retcode = i810_dma_get_buffer(dev, d, file_priv);
1120 1025
1121 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", 1026 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1122 current->pid, retcode, d.granted); 1027 current->pid, retcode, d->granted);
1123 1028
1124 if (copy_to_user((void __user *) arg, &d, sizeof(d)))
1125 return -EFAULT;
1126 sarea_priv->last_dispatch = (int)hw_status[5]; 1029 sarea_priv->last_dispatch = (int)hw_status[5];
1127 1030
1128 return retcode; 1031 return retcode;
1129} 1032}
1130 1033
1131static int i810_copybuf(struct inode *inode, 1034static int i810_copybuf(struct drm_device *dev, void *data,
1132 struct file *filp, unsigned int cmd, unsigned long arg) 1035 struct drm_file *file_priv)
1133{ 1036{
1134 /* Never copy - 2.4.x doesn't need it */ 1037 /* Never copy - 2.4.x doesn't need it */
1135 return 0; 1038 return 0;
1136} 1039}
1137 1040
1138static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, 1041static int i810_docopy(struct drm_device *dev, void *data,
1139 unsigned long arg) 1042 struct drm_file *file_priv)
1140{ 1043{
1141 /* Never copy - 2.4.x doesn't need it */ 1044 /* Never copy - 2.4.x doesn't need it */
1142 return 0; 1045 return 0;
@@ -1202,30 +1105,25 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,
1202 ADVANCE_LP_RING(); 1105 ADVANCE_LP_RING();
1203} 1106}
1204 1107
1205static int i810_dma_mc(struct inode *inode, struct file *filp, 1108static int i810_dma_mc(struct drm_device *dev, void *data,
1206 unsigned int cmd, unsigned long arg) 1109 struct drm_file *file_priv)
1207{ 1110{
1208 struct drm_file *priv = filp->private_data;
1209 struct drm_device *dev = priv->head->dev;
1210 struct drm_device_dma *dma = dev->dma; 1111 struct drm_device_dma *dma = dev->dma;
1211 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1112 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1212 u32 *hw_status = dev_priv->hw_status_page; 1113 u32 *hw_status = dev_priv->hw_status_page;
1213 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1114 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1214 dev_priv->sarea_priv; 1115 dev_priv->sarea_priv;
1215 drm_i810_mc_t mc; 1116 drm_i810_mc_t *mc = data;
1216
1217 if (copy_from_user(&mc, (drm_i810_mc_t __user *) arg, sizeof(mc)))
1218 return -EFAULT;
1219 1117
1220 LOCK_TEST_WITH_RETURN(dev, filp); 1118 LOCK_TEST_WITH_RETURN(dev, file_priv);
1221 1119
1222 if (mc.idx >= dma->buf_count || mc.idx < 0) 1120 if (mc->idx >= dma->buf_count || mc->idx < 0)
1223 return -EINVAL; 1121 return -EINVAL;
1224 1122
1225 i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used, 1123 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
1226 mc.last_render); 1124 mc->last_render);
1227 1125
1228 atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]); 1126 atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
1229 atomic_inc(&dev->counts[_DRM_STAT_DMA]); 1127 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1230 sarea_priv->last_enqueue = dev_priv->counter - 1; 1128 sarea_priv->last_enqueue = dev_priv->counter - 1;
1231 sarea_priv->last_dispatch = (int)hw_status[5]; 1129 sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1233,52 +1131,41 @@ static int i810_dma_mc(struct inode *inode, struct file *filp,
1233 return 0; 1131 return 0;
1234} 1132}
1235 1133
1236static int i810_rstatus(struct inode *inode, struct file *filp, 1134static int i810_rstatus(struct drm_device *dev, void *data,
1237 unsigned int cmd, unsigned long arg) 1135 struct drm_file *file_priv)
1238{ 1136{
1239 struct drm_file *priv = filp->private_data;
1240 struct drm_device *dev = priv->head->dev;
1241 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1137 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1242 1138
1243 return (int)(((u32 *) (dev_priv->hw_status_page))[4]); 1139 return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
1244} 1140}
1245 1141
1246static int i810_ov0_info(struct inode *inode, struct file *filp, 1142static int i810_ov0_info(struct drm_device *dev, void *data,
1247 unsigned int cmd, unsigned long arg) 1143 struct drm_file *file_priv)
1248{ 1144{
1249 struct drm_file *priv = filp->private_data;
1250 struct drm_device *dev = priv->head->dev;
1251 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1145 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1252 drm_i810_overlay_t data; 1146 drm_i810_overlay_t *ov = data;
1147
1148 ov->offset = dev_priv->overlay_offset;
1149 ov->physical = dev_priv->overlay_physical;
1253 1150
1254 data.offset = dev_priv->overlay_offset;
1255 data.physical = dev_priv->overlay_physical;
1256 if (copy_to_user
1257 ((drm_i810_overlay_t __user *) arg, &data, sizeof(data)))
1258 return -EFAULT;
1259 return 0; 1151 return 0;
1260} 1152}
1261 1153
1262static int i810_fstatus(struct inode *inode, struct file *filp, 1154static int i810_fstatus(struct drm_device *dev, void *data,
1263 unsigned int cmd, unsigned long arg) 1155 struct drm_file *file_priv)
1264{ 1156{
1265 struct drm_file *priv = filp->private_data;
1266 struct drm_device *dev = priv->head->dev;
1267 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1157 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1268 1158
1269 LOCK_TEST_WITH_RETURN(dev, filp); 1159 LOCK_TEST_WITH_RETURN(dev, file_priv);
1270
1271 return I810_READ(0x30008); 1160 return I810_READ(0x30008);
1272} 1161}
1273 1162
1274static int i810_ov0_flip(struct inode *inode, struct file *filp, 1163static int i810_ov0_flip(struct drm_device *dev, void *data,
1275 unsigned int cmd, unsigned long arg) 1164 struct drm_file *file_priv)
1276{ 1165{
1277 struct drm_file *priv = filp->private_data;
1278 struct drm_device *dev = priv->head->dev;
1279 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1166 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1280 1167
1281 LOCK_TEST_WITH_RETURN(dev, filp); 1168 LOCK_TEST_WITH_RETURN(dev, file_priv);
1282 1169
1283 //Tell the overlay to update 1170 //Tell the overlay to update
1284 I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000); 1171 I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
@@ -1310,16 +1197,14 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev)
1310 return 0; 1197 return 0;
1311} 1198}
1312 1199
1313static int i810_flip_bufs(struct inode *inode, struct file *filp, 1200static int i810_flip_bufs(struct drm_device *dev, void *data,
1314 unsigned int cmd, unsigned long arg) 1201 struct drm_file *file_priv)
1315{ 1202{
1316 struct drm_file *priv = filp->private_data;
1317 struct drm_device *dev = priv->head->dev;
1318 drm_i810_private_t *dev_priv = dev->dev_private; 1203 drm_i810_private_t *dev_priv = dev->dev_private;
1319 1204
1320 DRM_DEBUG("%s\n", __FUNCTION__); 1205 DRM_DEBUG("%s\n", __FUNCTION__);
1321 1206
1322 LOCK_TEST_WITH_RETURN(dev, filp); 1207 LOCK_TEST_WITH_RETURN(dev, file_priv);
1323 1208
1324 if (!dev_priv->page_flipping) 1209 if (!dev_priv->page_flipping)
1325 i810_do_init_pageflip(dev); 1210 i810_do_init_pageflip(dev);
@@ -1345,7 +1230,7 @@ void i810_driver_lastclose(struct drm_device * dev)
1345 i810_dma_cleanup(dev); 1230 i810_dma_cleanup(dev);
1346} 1231}
1347 1232
1348void i810_driver_preclose(struct drm_device * dev, DRMFILE filp) 1233void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1349{ 1234{
1350 if (dev->dev_private) { 1235 if (dev->dev_private) {
1351 drm_i810_private_t *dev_priv = dev->dev_private; 1236 drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1355,9 +1240,10 @@ void i810_driver_preclose(struct drm_device * dev, DRMFILE filp)
1355 } 1240 }
1356} 1241}
1357 1242
1358void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) 1243void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
1244 struct drm_file *file_priv)
1359{ 1245{
1360 i810_reclaim_buffers(dev, filp); 1246 i810_reclaim_buffers(dev, file_priv);
1361} 1247}
1362 1248
1363int i810_driver_dma_quiescent(struct drm_device * dev) 1249int i810_driver_dma_quiescent(struct drm_device * dev)
@@ -1366,22 +1252,22 @@ int i810_driver_dma_quiescent(struct drm_device * dev)
1366 return 0; 1252 return 0;
1367} 1253}
1368 1254
1369drm_ioctl_desc_t i810_ioctls[] = { 1255struct drm_ioctl_desc i810_ioctls[] = {
1370 [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1256 DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1371 [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH}, 1257 DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
1372 [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH}, 1258 DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
1373 [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH}, 1259 DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
1374 [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH}, 1260 DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
1375 [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH}, 1261 DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
1376 [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH}, 1262 DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
1377 [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH}, 1263 DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
1378 [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH}, 1264 DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
1379 [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH}, 1265 DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
1380 [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH}, 1266 DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
1381 [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH}, 1267 DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
1382 [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1268 DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1383 [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH}, 1269 DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
1384 [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH} 1270 DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
1385}; 1271};
1386 1272
1387int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); 1273int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
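
The table rewrite above replaces open-coded [DRM_IOCTL_NR(x)] = {...}
initializers with DRM_IOCTL_DEF(), which is assumed to expand to the same
designated initializer while also recording the ioctl number and flags in the
new struct drm_ioctl_desc. Adding an entry therefore stays a one-liner, and the
count exported next to the table is what the core checks against. A sketch
reusing two names from the table above:

    struct drm_ioctl_desc example_ioctls[] = {
            /* DRM_AUTH = authenticated clients only, as used above */
            DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
    };
    int example_max_ioctl = DRM_ARRAY_SIZE(example_ioctls);
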
diff --git a/drivers/char/drm/i810_drm.h b/drivers/char/drm/i810_drm.h
index 614977dbce45..7a10bb6f2c0f 100644
--- a/drivers/char/drm/i810_drm.h
+++ b/drivers/char/drm/i810_drm.h
@@ -102,13 +102,8 @@ typedef enum _drm_i810_init_func {
102/* This is the init structure after v1.2 */ 102/* This is the init structure after v1.2 */
103typedef struct _drm_i810_init { 103typedef struct _drm_i810_init {
104 drm_i810_init_func_t func; 104 drm_i810_init_func_t func;
105#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
106 int ring_map_idx;
107 int buffer_map_idx;
108#else
109 unsigned int mmio_offset; 105 unsigned int mmio_offset;
110 unsigned int buffers_offset; 106 unsigned int buffers_offset;
111#endif
112 int sarea_priv_offset; 107 int sarea_priv_offset;
113 unsigned int ring_start; 108 unsigned int ring_start;
114 unsigned int ring_end; 109 unsigned int ring_end;
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index 648833844c7f..0af45872f67e 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -117,15 +117,16 @@ typedef struct drm_i810_private {
117 /* i810_dma.c */ 117 /* i810_dma.c */
118extern int i810_driver_dma_quiescent(struct drm_device * dev); 118extern int i810_driver_dma_quiescent(struct drm_device * dev);
119extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, 119extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
120 struct file *filp); 120 struct drm_file *file_priv);
121extern int i810_driver_load(struct drm_device *, unsigned long flags); 121extern int i810_driver_load(struct drm_device *, unsigned long flags);
122extern void i810_driver_lastclose(struct drm_device * dev); 122extern void i810_driver_lastclose(struct drm_device * dev);
123extern void i810_driver_preclose(struct drm_device * dev, DRMFILE filp); 123extern void i810_driver_preclose(struct drm_device * dev,
124 struct drm_file *file_priv);
124extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, 125extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
125 struct file *filp); 126 struct drm_file *file_priv);
126extern int i810_driver_device_is_agp(struct drm_device * dev); 127extern int i810_driver_device_is_agp(struct drm_device * dev);
127 128
128extern drm_ioctl_desc_t i810_ioctls[]; 129extern struct drm_ioctl_desc i810_ioctls[];
129extern int i810_max_ioctl; 130extern int i810_max_ioctl;
130 131
131#define I810_BASE(reg) ((unsigned long) \ 132#define I810_BASE(reg) ((unsigned long) \
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index dc20c1a7834e..43a1f78712d6 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -122,10 +122,9 @@ static const struct file_operations i830_buffer_fops = {
122 .fasync = drm_fasync, 122 .fasync = drm_fasync,
123}; 123};
124 124
125static int i830_map_buffer(struct drm_buf * buf, struct file *filp) 125static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
126{ 126{
127 struct drm_file *priv = filp->private_data; 127 struct drm_device *dev = file_priv->head->dev;
128 struct drm_device *dev = priv->head->dev;
129 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 128 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
130 drm_i830_private_t *dev_priv = dev->dev_private; 129 drm_i830_private_t *dev_priv = dev->dev_private;
131 const struct file_operations *old_fops; 130 const struct file_operations *old_fops;
@@ -136,13 +135,13 @@ static int i830_map_buffer(struct drm_buf * buf, struct file *filp)
136 return -EINVAL; 135 return -EINVAL;
137 136
138 down_write(&current->mm->mmap_sem); 137 down_write(&current->mm->mmap_sem);
139 old_fops = filp->f_op; 138 old_fops = file_priv->filp->f_op;
140 filp->f_op = &i830_buffer_fops; 139 file_priv->filp->f_op = &i830_buffer_fops;
141 dev_priv->mmap_buffer = buf; 140 dev_priv->mmap_buffer = buf;
142 virtual = do_mmap(filp, 0, buf->total, PROT_READ | PROT_WRITE, 141 virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE,
143 MAP_SHARED, buf->bus_address); 142 MAP_SHARED, buf->bus_address);
144 dev_priv->mmap_buffer = NULL; 143 dev_priv->mmap_buffer = NULL;
145 filp->f_op = old_fops; 144 file_priv->filp->f_op = old_fops;
146 if (IS_ERR((void *)virtual)) { /* ugh */ 145 if (IS_ERR((void *)virtual)) { /* ugh */
147 /* Real error */ 146 /* Real error */
148 DRM_ERROR("mmap error\n"); 147 DRM_ERROR("mmap error\n");
@@ -177,7 +176,7 @@ static int i830_unmap_buffer(struct drm_buf * buf)
177} 176}
178 177
179static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d, 178static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
180 struct file *filp) 179 struct drm_file *file_priv)
181{ 180{
182 struct drm_buf *buf; 181 struct drm_buf *buf;
183 drm_i830_buf_priv_t *buf_priv; 182 drm_i830_buf_priv_t *buf_priv;
@@ -190,13 +189,13 @@ static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
190 return retcode; 189 return retcode;
191 } 190 }
192 191
193 retcode = i830_map_buffer(buf, filp); 192 retcode = i830_map_buffer(buf, file_priv);
194 if (retcode) { 193 if (retcode) {
195 i830_freelist_put(dev, buf); 194 i830_freelist_put(dev, buf);
196 DRM_ERROR("mapbuf failed, retcode %d\n", retcode); 195 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
197 return retcode; 196 return retcode;
198 } 197 }
199 buf->filp = filp; 198 buf->file_priv = file_priv;
200 buf_priv = buf->dev_private; 199 buf_priv = buf->dev_private;
201 d->granted = 1; 200 d->granted = 1;
202 d->request_idx = buf->idx; 201 d->request_idx = buf->idx;
@@ -389,7 +388,7 @@ static int i830_dma_initialize(struct drm_device * dev,
389 i830_dma_cleanup(dev); 388 i830_dma_cleanup(dev);
390 DRM_ERROR("can not ioremap virtual address for" 389 DRM_ERROR("can not ioremap virtual address for"
391 " ring buffer\n"); 390 " ring buffer\n");
392 return DRM_ERR(ENOMEM); 391 return -ENOMEM;
393 } 392 }
394 393
395 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 394 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -451,25 +450,20 @@ static int i830_dma_initialize(struct drm_device * dev,
451 return 0; 450 return 0;
452} 451}
453 452
454static int i830_dma_init(struct inode *inode, struct file *filp, 453static int i830_dma_init(struct drm_device *dev, void *data,
455 unsigned int cmd, unsigned long arg) 454 struct drm_file *file_priv)
456{ 455{
457 struct drm_file *priv = filp->private_data;
458 struct drm_device *dev = priv->head->dev;
459 drm_i830_private_t *dev_priv; 456 drm_i830_private_t *dev_priv;
460 drm_i830_init_t init; 457 drm_i830_init_t *init = data;
461 int retcode = 0; 458 int retcode = 0;
462 459
463 if (copy_from_user(&init, (void *__user)arg, sizeof(init))) 460 switch (init->func) {
464 return -EFAULT;
465
466 switch (init.func) {
467 case I830_INIT_DMA: 461 case I830_INIT_DMA:
468 dev_priv = drm_alloc(sizeof(drm_i830_private_t), 462 dev_priv = drm_alloc(sizeof(drm_i830_private_t),
469 DRM_MEM_DRIVER); 463 DRM_MEM_DRIVER);
470 if (dev_priv == NULL) 464 if (dev_priv == NULL)
471 return -ENOMEM; 465 return -ENOMEM;
472 retcode = i830_dma_initialize(dev, dev_priv, &init); 466 retcode = i830_dma_initialize(dev, dev_priv, init);
473 break; 467 break;
474 case I830_CLEANUP_DMA: 468 case I830_CLEANUP_DMA:
475 retcode = i830_dma_cleanup(dev); 469 retcode = i830_dma_cleanup(dev);
@@ -1248,7 +1242,7 @@ static int i830_flush_queue(struct drm_device * dev)
1248} 1242}
1249 1243
1250/* Must be called with the lock held */ 1244/* Must be called with the lock held */
1251static void i830_reclaim_buffers(struct drm_device * dev, struct file *filp) 1245static void i830_reclaim_buffers(struct drm_device * dev, struct drm_file *file_priv)
1252{ 1246{
1253 struct drm_device_dma *dma = dev->dma; 1247 struct drm_device_dma *dma = dev->dma;
1254 int i; 1248 int i;
@@ -1266,7 +1260,7 @@ static void i830_reclaim_buffers(struct drm_device * dev, struct file *filp)
1266 struct drm_buf *buf = dma->buflist[i]; 1260 struct drm_buf *buf = dma->buflist[i];
1267 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 1261 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
1268 1262
1269 if (buf->filp == filp && buf_priv) { 1263 if (buf->file_priv == file_priv && buf_priv) {
1270 int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, 1264 int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
1271 I830_BUF_FREE); 1265 I830_BUF_FREE);
1272 1266
@@ -1278,45 +1272,36 @@ static void i830_reclaim_buffers(struct drm_device * dev, struct file *filp)
1278 } 1272 }
1279} 1273}
1280 1274
1281static int i830_flush_ioctl(struct inode *inode, struct file *filp, 1275static int i830_flush_ioctl(struct drm_device *dev, void *data,
1282 unsigned int cmd, unsigned long arg) 1276 struct drm_file *file_priv)
1283{ 1277{
1284 struct drm_file *priv = filp->private_data; 1278 LOCK_TEST_WITH_RETURN(dev, file_priv);
1285 struct drm_device *dev = priv->head->dev;
1286
1287 LOCK_TEST_WITH_RETURN(dev, filp);
1288 1279
1289 i830_flush_queue(dev); 1280 i830_flush_queue(dev);
1290 return 0; 1281 return 0;
1291} 1282}
1292 1283
1293static int i830_dma_vertex(struct inode *inode, struct file *filp, 1284static int i830_dma_vertex(struct drm_device *dev, void *data,
1294 unsigned int cmd, unsigned long arg) 1285 struct drm_file *file_priv)
1295{ 1286{
1296 struct drm_file *priv = filp->private_data;
1297 struct drm_device *dev = priv->head->dev;
1298 struct drm_device_dma *dma = dev->dma; 1287 struct drm_device_dma *dma = dev->dma;
1299 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1288 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1300 u32 *hw_status = dev_priv->hw_status_page; 1289 u32 *hw_status = dev_priv->hw_status_page;
1301 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) 1290 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1302 dev_priv->sarea_priv; 1291 dev_priv->sarea_priv;
1303 drm_i830_vertex_t vertex; 1292 drm_i830_vertex_t *vertex = data;
1304
1305 if (copy_from_user
1306 (&vertex, (drm_i830_vertex_t __user *) arg, sizeof(vertex)))
1307 return -EFAULT;
1308 1293
1309 LOCK_TEST_WITH_RETURN(dev, filp); 1294 LOCK_TEST_WITH_RETURN(dev, file_priv);
1310 1295
1311 DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n", 1296 DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
1312 vertex.idx, vertex.used, vertex.discard); 1297 vertex->idx, vertex->used, vertex->discard);
1313 1298
1314 if (vertex.idx < 0 || vertex.idx > dma->buf_count) 1299 if (vertex->idx < 0 || vertex->idx > dma->buf_count)
1315 return -EINVAL; 1300 return -EINVAL;
1316 1301
1317 i830_dma_dispatch_vertex(dev, 1302 i830_dma_dispatch_vertex(dev,
1318 dma->buflist[vertex.idx], 1303 dma->buflist[vertex->idx],
1319 vertex.discard, vertex.used); 1304 vertex->discard, vertex->used);
1320 1305
1321 sarea_priv->last_enqueue = dev_priv->counter - 1; 1306 sarea_priv->last_enqueue = dev_priv->counter - 1;
1322 sarea_priv->last_dispatch = (int)hw_status[5]; 1307 sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1324,39 +1309,30 @@ static int i830_dma_vertex(struct inode *inode, struct file *filp,
1324 return 0; 1309 return 0;
1325} 1310}
1326 1311
1327static int i830_clear_bufs(struct inode *inode, struct file *filp, 1312static int i830_clear_bufs(struct drm_device *dev, void *data,
1328 unsigned int cmd, unsigned long arg) 1313 struct drm_file *file_priv)
1329{ 1314{
1330 struct drm_file *priv = filp->private_data; 1315 drm_i830_clear_t *clear = data;
1331 struct drm_device *dev = priv->head->dev;
1332 drm_i830_clear_t clear;
1333
1334 if (copy_from_user
1335 (&clear, (drm_i830_clear_t __user *) arg, sizeof(clear)))
1336 return -EFAULT;
1337 1316
1338 LOCK_TEST_WITH_RETURN(dev, filp); 1317 LOCK_TEST_WITH_RETURN(dev, file_priv);
1339 1318
1340 /* GH: Someone's doing nasty things... */ 1319 /* GH: Someone's doing nasty things... */
1341 if (!dev->dev_private) { 1320 if (!dev->dev_private) {
1342 return -EINVAL; 1321 return -EINVAL;
1343 } 1322 }
1344 1323
1345 i830_dma_dispatch_clear(dev, clear.flags, 1324 i830_dma_dispatch_clear(dev, clear->flags,
1346 clear.clear_color, 1325 clear->clear_color,
1347 clear.clear_depth, clear.clear_depthmask); 1326 clear->clear_depth, clear->clear_depthmask);
1348 return 0; 1327 return 0;
1349} 1328}
1350 1329
1351static int i830_swap_bufs(struct inode *inode, struct file *filp, 1330static int i830_swap_bufs(struct drm_device *dev, void *data,
1352 unsigned int cmd, unsigned long arg) 1331 struct drm_file *file_priv)
1353{ 1332{
1354 struct drm_file *priv = filp->private_data;
1355 struct drm_device *dev = priv->head->dev;
1356
1357 DRM_DEBUG("i830_swap_bufs\n"); 1333 DRM_DEBUG("i830_swap_bufs\n");
1358 1334
1359 LOCK_TEST_WITH_RETURN(dev, filp); 1335 LOCK_TEST_WITH_RETURN(dev, file_priv);
1360 1336
1361 i830_dma_dispatch_swap(dev); 1337 i830_dma_dispatch_swap(dev);
1362 return 0; 1338 return 0;
@@ -1386,16 +1362,14 @@ static int i830_do_cleanup_pageflip(struct drm_device * dev)
1386 return 0; 1362 return 0;
1387} 1363}
1388 1364
1389static int i830_flip_bufs(struct inode *inode, struct file *filp, 1365static int i830_flip_bufs(struct drm_device *dev, void *data,
1390 unsigned int cmd, unsigned long arg) 1366 struct drm_file *file_priv)
1391{ 1367{
1392 struct drm_file *priv = filp->private_data;
1393 struct drm_device *dev = priv->head->dev;
1394 drm_i830_private_t *dev_priv = dev->dev_private; 1368 drm_i830_private_t *dev_priv = dev->dev_private;
1395 1369
1396 DRM_DEBUG("%s\n", __FUNCTION__); 1370 DRM_DEBUG("%s\n", __FUNCTION__);
1397 1371
1398 LOCK_TEST_WITH_RETURN(dev, filp); 1372 LOCK_TEST_WITH_RETURN(dev, file_priv);
1399 1373
1400 if (!dev_priv->page_flipping) 1374 if (!dev_priv->page_flipping)
1401 i830_do_init_pageflip(dev); 1375 i830_do_init_pageflip(dev);
@@ -1404,11 +1378,9 @@ static int i830_flip_bufs(struct inode *inode, struct file *filp,
1404 return 0; 1378 return 0;
1405} 1379}
1406 1380
1407static int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd, 1381static int i830_getage(struct drm_device *dev, void *data,
1408 unsigned long arg) 1382 struct drm_file *file_priv)
1409{ 1383{
1410 struct drm_file *priv = filp->private_data;
1411 struct drm_device *dev = priv->head->dev;
1412 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1384 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1413 u32 *hw_status = dev_priv->hw_status_page; 1385 u32 *hw_status = dev_priv->hw_status_page;
1414 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) 1386 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
@@ -1418,58 +1390,50 @@ static int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1418 return 0; 1390 return 0;
1419} 1391}
1420 1392
1421static int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, 1393static int i830_getbuf(struct drm_device *dev, void *data,
1422 unsigned long arg) 1394 struct drm_file *file_priv)
1423{ 1395{
1424 struct drm_file *priv = filp->private_data;
1425 struct drm_device *dev = priv->head->dev;
1426 int retcode = 0; 1396 int retcode = 0;
1427 drm_i830_dma_t d; 1397 drm_i830_dma_t *d = data;
1428 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1398 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1429 u32 *hw_status = dev_priv->hw_status_page; 1399 u32 *hw_status = dev_priv->hw_status_page;
1430 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) 1400 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1431 dev_priv->sarea_priv; 1401 dev_priv->sarea_priv;
1432 1402
1433 DRM_DEBUG("getbuf\n"); 1403 DRM_DEBUG("getbuf\n");
1434 if (copy_from_user(&d, (drm_i830_dma_t __user *) arg, sizeof(d)))
1435 return -EFAULT;
1436 1404
1437 LOCK_TEST_WITH_RETURN(dev, filp); 1405 LOCK_TEST_WITH_RETURN(dev, file_priv);
1438 1406
1439 d.granted = 0; 1407 d->granted = 0;
1440 1408
1441 retcode = i830_dma_get_buffer(dev, &d, filp); 1409 retcode = i830_dma_get_buffer(dev, d, file_priv);
1442 1410
1443 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n", 1411 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
1444 current->pid, retcode, d.granted); 1412 current->pid, retcode, d->granted);
1445 1413
1446 if (copy_to_user((void __user *) arg, &d, sizeof(d)))
1447 return -EFAULT;
1448 sarea_priv->last_dispatch = (int)hw_status[5]; 1414 sarea_priv->last_dispatch = (int)hw_status[5];
1449 1415
1450 return retcode; 1416 return retcode;
1451} 1417}
1452 1418
1453static int i830_copybuf(struct inode *inode, 1419static int i830_copybuf(struct drm_device *dev, void *data,
1454 struct file *filp, unsigned int cmd, unsigned long arg) 1420 struct drm_file *file_priv)
1455{ 1421{
1456 /* Never copy - 2.4.x doesn't need it */ 1422 /* Never copy - 2.4.x doesn't need it */
1457 return 0; 1423 return 0;
1458} 1424}
1459 1425
1460static int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd, 1426static int i830_docopy(struct drm_device *dev, void *data,
1461 unsigned long arg) 1427 struct drm_file *file_priv)
1462{ 1428{
1463 return 0; 1429 return 0;
1464} 1430}
1465 1431
1466static int i830_getparam(struct inode *inode, struct file *filp, 1432static int i830_getparam(struct drm_device *dev, void *data,
1467 unsigned int cmd, unsigned long arg) 1433 struct drm_file *file_priv)
1468{ 1434{
1469 struct drm_file *priv = filp->private_data;
1470 struct drm_device *dev = priv->head->dev;
1471 drm_i830_private_t *dev_priv = dev->dev_private; 1435 drm_i830_private_t *dev_priv = dev->dev_private;
1472 drm_i830_getparam_t param; 1436 drm_i830_getparam_t *param = data;
1473 int value; 1437 int value;
1474 1438
1475 if (!dev_priv) { 1439 if (!dev_priv) {
@@ -1477,11 +1441,7 @@ static int i830_getparam(struct inode *inode, struct file *filp,
1477 return -EINVAL; 1441 return -EINVAL;
1478 } 1442 }
1479 1443
1480 if (copy_from_user 1444 switch (param->param) {
1481 (&param, (drm_i830_getparam_t __user *) arg, sizeof(param)))
1482 return -EFAULT;
1483
1484 switch (param.param) {
1485 case I830_PARAM_IRQ_ACTIVE: 1445 case I830_PARAM_IRQ_ACTIVE:
1486 value = dev->irq_enabled; 1446 value = dev->irq_enabled;
1487 break; 1447 break;
@@ -1489,7 +1449,7 @@ static int i830_getparam(struct inode *inode, struct file *filp,
1489 return -EINVAL; 1449 return -EINVAL;
1490 } 1450 }
1491 1451
1492 if (copy_to_user(param.value, &value, sizeof(int))) { 1452 if (copy_to_user(param->value, &value, sizeof(int))) {
1493 DRM_ERROR("copy_to_user\n"); 1453 DRM_ERROR("copy_to_user\n");
1494 return -EFAULT; 1454 return -EFAULT;
1495 } 1455 }
@@ -1497,26 +1457,20 @@ static int i830_getparam(struct inode *inode, struct file *filp,
1497 return 0; 1457 return 0;
1498} 1458}
1499 1459
1500static int i830_setparam(struct inode *inode, struct file *filp, 1460static int i830_setparam(struct drm_device *dev, void *data,
1501 unsigned int cmd, unsigned long arg) 1461 struct drm_file *file_priv)
1502{ 1462{
1503 struct drm_file *priv = filp->private_data;
1504 struct drm_device *dev = priv->head->dev;
1505 drm_i830_private_t *dev_priv = dev->dev_private; 1463 drm_i830_private_t *dev_priv = dev->dev_private;
1506 drm_i830_setparam_t param; 1464 drm_i830_setparam_t *param = data;
1507 1465
1508 if (!dev_priv) { 1466 if (!dev_priv) {
1509 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1467 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1510 return -EINVAL; 1468 return -EINVAL;
1511 } 1469 }
1512 1470
1513 if (copy_from_user 1471 switch (param->param) {
1514 (&param, (drm_i830_setparam_t __user *) arg, sizeof(param)))
1515 return -EFAULT;
1516
1517 switch (param.param) {
1518 case I830_SETPARAM_USE_MI_BATCHBUFFER_START: 1472 case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
1519 dev_priv->use_mi_batchbuffer_start = param.value; 1473 dev_priv->use_mi_batchbuffer_start = param->value;
1520 break; 1474 break;
1521 default: 1475 default:
1522 return -EINVAL; 1476 return -EINVAL;
@@ -1542,7 +1496,7 @@ void i830_driver_lastclose(struct drm_device * dev)
1542 i830_dma_cleanup(dev); 1496 i830_dma_cleanup(dev);
1543} 1497}
1544 1498
1545void i830_driver_preclose(struct drm_device * dev, DRMFILE filp) 1499void i830_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1546{ 1500{
1547 if (dev->dev_private) { 1501 if (dev->dev_private) {
1548 drm_i830_private_t *dev_priv = dev->dev_private; 1502 drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1552,9 +1506,9 @@ void i830_driver_preclose(struct drm_device * dev, DRMFILE filp)
1552 } 1506 }
1553} 1507}
1554 1508
1555void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) 1509void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct drm_file *file_priv)
1556{ 1510{
1557 i830_reclaim_buffers(dev, filp); 1511 i830_reclaim_buffers(dev, file_priv);
1558} 1512}
1559 1513
1560int i830_driver_dma_quiescent(struct drm_device * dev) 1514int i830_driver_dma_quiescent(struct drm_device * dev)
@@ -1563,21 +1517,21 @@ int i830_driver_dma_quiescent(struct drm_device * dev)
1563 return 0; 1517 return 0;
1564} 1518}
1565 1519
1566drm_ioctl_desc_t i830_ioctls[] = { 1520struct drm_ioctl_desc i830_ioctls[] = {
1567 [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1521 DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1568 [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, DRM_AUTH}, 1522 DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH),
1569 [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, DRM_AUTH}, 1523 DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH),
1570 [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, DRM_AUTH}, 1524 DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH),
1571 [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, DRM_AUTH}, 1525 DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH),
1572 [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, DRM_AUTH}, 1526 DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH),
1573 [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, DRM_AUTH}, 1527 DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH),
1574 [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, DRM_AUTH}, 1528 DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH),
1575 [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, DRM_AUTH}, 1529 DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH),
1576 [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, DRM_AUTH}, 1530 DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH),
1577 [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, DRM_AUTH}, 1531 DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH),
1578 [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, DRM_AUTH}, 1532 DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH),
1579 [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, DRM_AUTH}, 1533 DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH),
1580 [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, DRM_AUTH} 1534 DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH)
1581}; 1535};
1582 1536
1583int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); 1537int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
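
Even with the top-level argument marshalled by the core, pointers embedded
inside it still refer to user memory; i830_getparam() above keeps its
copy_to_user() for exactly that reason, and i830_irq_emit() further down does
the same for emit->irq_seq. A pared-down version of the pattern, with an
invented handler name and drm_i830_getparam_t as used above:

    static int example_getparam(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
    {
            drm_i830_getparam_t *param = data;      /* kernel copy */
            int value = 0;                          /* value to report */

            /* param->value is still a user-space pointer. */
            if (copy_to_user(param->value, &value, sizeof(int)))
                    return -EFAULT;
            return 0;
    }
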
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index ddda67956dea..db3a9fa83960 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -122,24 +122,25 @@ typedef struct drm_i830_private {
122 122
123} drm_i830_private_t; 123} drm_i830_private_t;
124 124
125extern drm_ioctl_desc_t i830_ioctls[]; 125extern struct drm_ioctl_desc i830_ioctls[];
126extern int i830_max_ioctl; 126extern int i830_max_ioctl;
127 127
128/* i830_irq.c */ 128/* i830_irq.c */
129extern int i830_irq_emit(struct inode *inode, struct file *filp, 129extern int i830_irq_emit(struct drm_device *dev, void *data,
130 unsigned int cmd, unsigned long arg); 130 struct drm_file *file_priv);
131extern int i830_irq_wait(struct inode *inode, struct file *filp, 131extern int i830_irq_wait(struct drm_device *dev, void *data,
132 unsigned int cmd, unsigned long arg); 132 struct drm_file *file_priv);
133 133
134extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS); 134extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
135extern void i830_driver_irq_preinstall(struct drm_device * dev); 135extern void i830_driver_irq_preinstall(struct drm_device * dev);
136extern void i830_driver_irq_postinstall(struct drm_device * dev); 136extern void i830_driver_irq_postinstall(struct drm_device * dev);
137extern void i830_driver_irq_uninstall(struct drm_device * dev); 137extern void i830_driver_irq_uninstall(struct drm_device * dev);
138extern int i830_driver_load(struct drm_device *, unsigned long flags); 138extern int i830_driver_load(struct drm_device *, unsigned long flags);
139extern void i830_driver_preclose(struct drm_device * dev, DRMFILE filp); 139extern void i830_driver_preclose(struct drm_device * dev,
140 struct drm_file *file_priv);
140extern void i830_driver_lastclose(struct drm_device * dev); 141extern void i830_driver_lastclose(struct drm_device * dev);
141extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev, 142extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev,
142 struct file *filp); 143 struct drm_file *file_priv);
143extern int i830_driver_dma_quiescent(struct drm_device * dev); 144extern int i830_driver_dma_quiescent(struct drm_device * dev);
144extern int i830_driver_device_is_agp(struct drm_device * dev); 145extern int i830_driver_device_is_agp(struct drm_device * dev);
145 146
diff --git a/drivers/char/drm/i830_irq.c b/drivers/char/drm/i830_irq.c
index a1b5c63c3c3e..76403f4b6200 100644
--- a/drivers/char/drm/i830_irq.c
+++ b/drivers/char/drm/i830_irq.c
@@ -114,29 +114,23 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
114 114
115/* Needs the lock as it touches the ring. 115/* Needs the lock as it touches the ring.
116 */ 116 */
117int i830_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd, 117int i830_irq_emit(struct drm_device *dev, void *data,
118 unsigned long arg) 118 struct drm_file *file_priv)
119{ 119{
120 struct drm_file *priv = filp->private_data;
121 struct drm_device *dev = priv->head->dev;
122 drm_i830_private_t *dev_priv = dev->dev_private; 120 drm_i830_private_t *dev_priv = dev->dev_private;
123 drm_i830_irq_emit_t emit; 121 drm_i830_irq_emit_t *emit = data;
124 int result; 122 int result;
125 123
126 LOCK_TEST_WITH_RETURN(dev, filp); 124 LOCK_TEST_WITH_RETURN(dev, file_priv);
127 125
128 if (!dev_priv) { 126 if (!dev_priv) {
129 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 127 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
130 return -EINVAL; 128 return -EINVAL;
131 } 129 }
132 130
133 if (copy_from_user
134 (&emit, (drm_i830_irq_emit_t __user *) arg, sizeof(emit)))
135 return -EFAULT;
136
137 result = i830_emit_irq(dev); 131 result = i830_emit_irq(dev);
138 132
139 if (copy_to_user(emit.irq_seq, &result, sizeof(int))) { 133 if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
140 DRM_ERROR("copy_to_user\n"); 134 DRM_ERROR("copy_to_user\n");
141 return -EFAULT; 135 return -EFAULT;
142 } 136 }
@@ -146,24 +140,18 @@ int i830_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd,
146 140
147/* Doesn't need the hardware lock. 141/* Doesn't need the hardware lock.
148 */ 142 */
149int i830_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd, 143int i830_irq_wait(struct drm_device *dev, void *data,
150 unsigned long arg) 144 struct drm_file *file_priv)
151{ 145{
152 struct drm_file *priv = filp->private_data;
153 struct drm_device *dev = priv->head->dev;
154 drm_i830_private_t *dev_priv = dev->dev_private; 146 drm_i830_private_t *dev_priv = dev->dev_private;
155 drm_i830_irq_wait_t irqwait; 147 drm_i830_irq_wait_t *irqwait = data;
156 148
157 if (!dev_priv) { 149 if (!dev_priv) {
158 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 150 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
159 return -EINVAL; 151 return -EINVAL;
160 } 152 }
161 153
162 if (copy_from_user(&irqwait, (drm_i830_irq_wait_t __user *) arg, 154 return i830_wait_irq(dev, irqwait->irq_seq);
163 sizeof(irqwait)))
164 return -EFAULT;
165
166 return i830_wait_irq(dev, irqwait.irq_seq);
167} 155}
168 156
169/* drm_dma.h hooks 157/* drm_dma.h hooks
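
The i830 hunks above show the conversion pattern repeated through the rest of this patch: handlers no longer recover the device from filp->private_data or copy their argument structure from user space themselves; the DRM core hands them the device, a kernel copy of the argument, and the caller's drm_file, and errors come back as plain negative errno values rather than DRM_ERR(). As a rough sketch only (foo_ioctl, drm_foo_t and its fields are hypothetical, not taken from the patch), a converted handler has this shape:

/* Illustrative sketch of the new ioctl calling convention; not part of
 * the patch.  The core copies the user's argument struct into 'data'
 * before the call and copies it back out afterwards.
 */
typedef struct drm_foo {
	int input;
	int output;
} drm_foo_t;

static int foo_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_foo_t *arg = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);	/* only if the hardware lock is required */

	if (!dev->dev_private)
		return -EINVAL;			/* plain negative errno, no DRM_ERR() */

	arg->output = arg->input + 1;		/* results land in *arg; no copy_to_user needed */
	return 0;
}
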
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index 8e7d713a5a15..e61a43e5b3ac 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -70,7 +70,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
70 last_head = ring->head; 70 last_head = ring->head;
71 } 71 }
72 72
73 return DRM_ERR(EBUSY); 73 return -EBUSY;
74} 74}
75 75
76void i915_kernel_lost_context(struct drm_device * dev) 76void i915_kernel_lost_context(struct drm_device * dev)
@@ -137,7 +137,7 @@ static int i915_initialize(struct drm_device * dev,
137 DRM_ERROR("can not find sarea!\n"); 137 DRM_ERROR("can not find sarea!\n");
138 dev->dev_private = (void *)dev_priv; 138 dev->dev_private = (void *)dev_priv;
139 i915_dma_cleanup(dev); 139 i915_dma_cleanup(dev);
140 return DRM_ERR(EINVAL); 140 return -EINVAL;
141 } 141 }
142 142
143 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); 143 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
@@ -145,7 +145,7 @@ static int i915_initialize(struct drm_device * dev,
145 dev->dev_private = (void *)dev_priv; 145 dev->dev_private = (void *)dev_priv;
146 i915_dma_cleanup(dev); 146 i915_dma_cleanup(dev);
147 DRM_ERROR("can not find mmio map!\n"); 147 DRM_ERROR("can not find mmio map!\n");
148 return DRM_ERR(EINVAL); 148 return -EINVAL;
149 } 149 }
150 150
151 dev_priv->sarea_priv = (drm_i915_sarea_t *) 151 dev_priv->sarea_priv = (drm_i915_sarea_t *)
@@ -169,7 +169,7 @@ static int i915_initialize(struct drm_device * dev,
169 i915_dma_cleanup(dev); 169 i915_dma_cleanup(dev);
170 DRM_ERROR("can not ioremap virtual address for" 170 DRM_ERROR("can not ioremap virtual address for"
171 " ring buffer\n"); 171 " ring buffer\n");
172 return DRM_ERR(ENOMEM); 172 return -ENOMEM;
173 } 173 }
174 174
175 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 175 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -200,7 +200,7 @@ static int i915_initialize(struct drm_device * dev,
200 dev->dev_private = (void *)dev_priv; 200 dev->dev_private = (void *)dev_priv;
201 i915_dma_cleanup(dev); 201 i915_dma_cleanup(dev);
202 DRM_ERROR("Can not allocate hardware status page\n"); 202 DRM_ERROR("Can not allocate hardware status page\n");
203 return DRM_ERR(ENOMEM); 203 return -ENOMEM;
204 } 204 }
205 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; 205 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
206 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; 206 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
@@ -221,24 +221,24 @@ static int i915_dma_resume(struct drm_device * dev)
221 221
222 if (!dev_priv->sarea) { 222 if (!dev_priv->sarea) {
223 DRM_ERROR("can not find sarea!\n"); 223 DRM_ERROR("can not find sarea!\n");
224 return DRM_ERR(EINVAL); 224 return -EINVAL;
225 } 225 }
226 226
227 if (!dev_priv->mmio_map) { 227 if (!dev_priv->mmio_map) {
228 DRM_ERROR("can not find mmio map!\n"); 228 DRM_ERROR("can not find mmio map!\n");
229 return DRM_ERR(EINVAL); 229 return -EINVAL;
230 } 230 }
231 231
232 if (dev_priv->ring.map.handle == NULL) { 232 if (dev_priv->ring.map.handle == NULL) {
233 DRM_ERROR("can not ioremap virtual address for" 233 DRM_ERROR("can not ioremap virtual address for"
234 " ring buffer\n"); 234 " ring buffer\n");
235 return DRM_ERR(ENOMEM); 235 return -ENOMEM;
236 } 236 }
237 237
238 /* Program Hardware Status Page */ 238 /* Program Hardware Status Page */
239 if (!dev_priv->hw_status_page) { 239 if (!dev_priv->hw_status_page) {
240 DRM_ERROR("Can not find hardware status page\n"); 240 DRM_ERROR("Can not find hardware status page\n");
241 return DRM_ERR(EINVAL); 241 return -EINVAL;
242 } 242 }
243 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 243 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
244 244
@@ -251,23 +251,20 @@ static int i915_dma_resume(struct drm_device * dev)
251 return 0; 251 return 0;
252} 252}
253 253
254static int i915_dma_init(DRM_IOCTL_ARGS) 254static int i915_dma_init(struct drm_device *dev, void *data,
255 struct drm_file *file_priv)
255{ 256{
256 DRM_DEVICE;
257 drm_i915_private_t *dev_priv; 257 drm_i915_private_t *dev_priv;
258 drm_i915_init_t init; 258 drm_i915_init_t *init = data;
259 int retcode = 0; 259 int retcode = 0;
260 260
261 DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data, 261 switch (init->func) {
262 sizeof(init));
263
264 switch (init.func) {
265 case I915_INIT_DMA: 262 case I915_INIT_DMA:
266 dev_priv = drm_alloc(sizeof(drm_i915_private_t), 263 dev_priv = drm_alloc(sizeof(drm_i915_private_t),
267 DRM_MEM_DRIVER); 264 DRM_MEM_DRIVER);
268 if (dev_priv == NULL) 265 if (dev_priv == NULL)
269 return DRM_ERR(ENOMEM); 266 return -ENOMEM;
270 retcode = i915_initialize(dev, dev_priv, &init); 267 retcode = i915_initialize(dev, dev_priv, init);
271 break; 268 break;
272 case I915_CLEANUP_DMA: 269 case I915_CLEANUP_DMA:
273 retcode = i915_dma_cleanup(dev); 270 retcode = i915_dma_cleanup(dev);
@@ -276,7 +273,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS)
276 retcode = i915_dma_resume(dev); 273 retcode = i915_dma_resume(dev);
277 break; 274 break;
278 default: 275 default:
279 retcode = DRM_ERR(EINVAL); 276 retcode = -EINVAL;
280 break; 277 break;
281 } 278 }
282 279
@@ -366,7 +363,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
366 RING_LOCALS; 363 RING_LOCALS;
367 364
368 if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) 365 if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
369 return DRM_ERR(EINVAL); 366 return -EINVAL;
370 367
371 BEGIN_LP_RING((dwords+1)&~1); 368 BEGIN_LP_RING((dwords+1)&~1);
372 369
@@ -374,17 +371,17 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
374 int cmd, sz; 371 int cmd, sz;
375 372
376 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) 373 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
377 return DRM_ERR(EINVAL); 374 return -EINVAL;
378 375
379 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) 376 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
380 return DRM_ERR(EINVAL); 377 return -EINVAL;
381 378
382 OUT_RING(cmd); 379 OUT_RING(cmd);
383 380
384 while (++i, --sz) { 381 while (++i, --sz) {
385 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], 382 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
386 sizeof(cmd))) { 383 sizeof(cmd))) {
387 return DRM_ERR(EINVAL); 384 return -EINVAL;
388 } 385 }
389 OUT_RING(cmd); 386 OUT_RING(cmd);
390 } 387 }
@@ -407,13 +404,13 @@ static int i915_emit_box(struct drm_device * dev,
407 RING_LOCALS; 404 RING_LOCALS;
408 405
409 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { 406 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
410 return DRM_ERR(EFAULT); 407 return -EFAULT;
411 } 408 }
412 409
413 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { 410 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
414 DRM_ERROR("Bad box %d,%d..%d,%d\n", 411 DRM_ERROR("Bad box %d,%d..%d,%d\n",
415 box.x1, box.y1, box.x2, box.y2); 412 box.x1, box.y1, box.x2, box.y2);
416 return DRM_ERR(EINVAL); 413 return -EINVAL;
417 } 414 }
418 415
419 if (IS_I965G(dev)) { 416 if (IS_I965G(dev)) {
@@ -467,7 +464,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
467 464
468 if (cmd->sz & 0x3) { 465 if (cmd->sz & 0x3) {
469 DRM_ERROR("alignment"); 466 DRM_ERROR("alignment");
470 return DRM_ERR(EINVAL); 467 return -EINVAL;
471 } 468 }
472 469
473 i915_kernel_lost_context(dev); 470 i915_kernel_lost_context(dev);
@@ -502,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
502 499
503 if ((batch->start | batch->used) & 0x7) { 500 if ((batch->start | batch->used) & 0x7) {
504 DRM_ERROR("alignment"); 501 DRM_ERROR("alignment");
505 return DRM_ERR(EINVAL); 502 return -EINVAL;
506 } 503 }
507 504
508 i915_kernel_lost_context(dev); 505 i915_kernel_lost_context(dev);
@@ -598,76 +595,69 @@ static int i915_quiescent(struct drm_device * dev)
598 return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); 595 return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
599} 596}
600 597
601static int i915_flush_ioctl(DRM_IOCTL_ARGS) 598static int i915_flush_ioctl(struct drm_device *dev, void *data,
599 struct drm_file *file_priv)
602{ 600{
603 DRM_DEVICE; 601 LOCK_TEST_WITH_RETURN(dev, file_priv);
604
605 LOCK_TEST_WITH_RETURN(dev, filp);
606 602
607 return i915_quiescent(dev); 603 return i915_quiescent(dev);
608} 604}
609 605
610static int i915_batchbuffer(DRM_IOCTL_ARGS) 606static int i915_batchbuffer(struct drm_device *dev, void *data,
607 struct drm_file *file_priv)
611{ 608{
612 DRM_DEVICE;
613 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 609 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
614 u32 *hw_status = dev_priv->hw_status_page; 610 u32 *hw_status = dev_priv->hw_status_page;
615 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 611 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
616 dev_priv->sarea_priv; 612 dev_priv->sarea_priv;
617 drm_i915_batchbuffer_t batch; 613 drm_i915_batchbuffer_t *batch = data;
618 int ret; 614 int ret;
619 615
620 if (!dev_priv->allow_batchbuffer) { 616 if (!dev_priv->allow_batchbuffer) {
621 DRM_ERROR("Batchbuffer ioctl disabled\n"); 617 DRM_ERROR("Batchbuffer ioctl disabled\n");
622 return DRM_ERR(EINVAL); 618 return -EINVAL;
623 } 619 }
624 620
625 DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
626 sizeof(batch));
627
628 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", 621 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
629 batch.start, batch.used, batch.num_cliprects); 622 batch->start, batch->used, batch->num_cliprects);
630 623
631 LOCK_TEST_WITH_RETURN(dev, filp); 624 LOCK_TEST_WITH_RETURN(dev, file_priv);
632 625
633 if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, 626 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
634 batch.num_cliprects * 627 batch->num_cliprects *
635 sizeof(struct drm_clip_rect))) 628 sizeof(struct drm_clip_rect)))
636 return DRM_ERR(EFAULT); 629 return -EFAULT;
637 630
638 ret = i915_dispatch_batchbuffer(dev, &batch); 631 ret = i915_dispatch_batchbuffer(dev, batch);
639 632
640 sarea_priv->last_dispatch = (int)hw_status[5]; 633 sarea_priv->last_dispatch = (int)hw_status[5];
641 return ret; 634 return ret;
642} 635}
643 636
644static int i915_cmdbuffer(DRM_IOCTL_ARGS) 637static int i915_cmdbuffer(struct drm_device *dev, void *data,
638 struct drm_file *file_priv)
645{ 639{
646 DRM_DEVICE;
647 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 640 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
648 u32 *hw_status = dev_priv->hw_status_page; 641 u32 *hw_status = dev_priv->hw_status_page;
649 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 642 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
650 dev_priv->sarea_priv; 643 dev_priv->sarea_priv;
651 drm_i915_cmdbuffer_t cmdbuf; 644 drm_i915_cmdbuffer_t *cmdbuf = data;
652 int ret; 645 int ret;
653 646
654 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
655 sizeof(cmdbuf));
656
657 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 647 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
658 cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects); 648 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
659 649
660 LOCK_TEST_WITH_RETURN(dev, filp); 650 LOCK_TEST_WITH_RETURN(dev, file_priv);
661 651
662 if (cmdbuf.num_cliprects && 652 if (cmdbuf->num_cliprects &&
663 DRM_VERIFYAREA_READ(cmdbuf.cliprects, 653 DRM_VERIFYAREA_READ(cmdbuf->cliprects,
664 cmdbuf.num_cliprects * 654 cmdbuf->num_cliprects *
665 sizeof(struct drm_clip_rect))) { 655 sizeof(struct drm_clip_rect))) {
666 DRM_ERROR("Fault accessing cliprects\n"); 656 DRM_ERROR("Fault accessing cliprects\n");
667 return DRM_ERR(EFAULT); 657 return -EFAULT;
668 } 658 }
669 659
670 ret = i915_dispatch_cmdbuffer(dev, &cmdbuf); 660 ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
671 if (ret) { 661 if (ret) {
672 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 662 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
673 return ret; 663 return ret;
@@ -677,33 +667,29 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS)
677 return 0; 667 return 0;
678} 668}
679 669
680static int i915_flip_bufs(DRM_IOCTL_ARGS) 670static int i915_flip_bufs(struct drm_device *dev, void *data,
671 struct drm_file *file_priv)
681{ 672{
682 DRM_DEVICE;
683
684 DRM_DEBUG("%s\n", __FUNCTION__); 673 DRM_DEBUG("%s\n", __FUNCTION__);
685 674
686 LOCK_TEST_WITH_RETURN(dev, filp); 675 LOCK_TEST_WITH_RETURN(dev, file_priv);
687 676
688 return i915_dispatch_flip(dev); 677 return i915_dispatch_flip(dev);
689} 678}
690 679
691static int i915_getparam(DRM_IOCTL_ARGS) 680static int i915_getparam(struct drm_device *dev, void *data,
681 struct drm_file *file_priv)
692{ 682{
693 DRM_DEVICE;
694 drm_i915_private_t *dev_priv = dev->dev_private; 683 drm_i915_private_t *dev_priv = dev->dev_private;
695 drm_i915_getparam_t param; 684 drm_i915_getparam_t *param = data;
696 int value; 685 int value;
697 686
698 if (!dev_priv) { 687 if (!dev_priv) {
699 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 688 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
700 return DRM_ERR(EINVAL); 689 return -EINVAL;
701 } 690 }
702 691
703 DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data, 692 switch (param->param) {
704 sizeof(param));
705
706 switch (param.param) {
707 case I915_PARAM_IRQ_ACTIVE: 693 case I915_PARAM_IRQ_ACTIVE:
708 value = dev->irq ? 1 : 0; 694 value = dev->irq ? 1 : 0;
709 break; 695 break;
@@ -714,68 +700,64 @@ static int i915_getparam(DRM_IOCTL_ARGS)
714 value = READ_BREADCRUMB(dev_priv); 700 value = READ_BREADCRUMB(dev_priv);
715 break; 701 break;
716 default: 702 default:
717 DRM_ERROR("Unknown parameter %d\n", param.param); 703 DRM_ERROR("Unknown parameter %d\n", param->param);
718 return DRM_ERR(EINVAL); 704 return -EINVAL;
719 } 705 }
720 706
721 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 707 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
722 DRM_ERROR("DRM_COPY_TO_USER failed\n"); 708 DRM_ERROR("DRM_COPY_TO_USER failed\n");
723 return DRM_ERR(EFAULT); 709 return -EFAULT;
724 } 710 }
725 711
726 return 0; 712 return 0;
727} 713}
728 714
729static int i915_setparam(DRM_IOCTL_ARGS) 715static int i915_setparam(struct drm_device *dev, void *data,
716 struct drm_file *file_priv)
730{ 717{
731 DRM_DEVICE;
732 drm_i915_private_t *dev_priv = dev->dev_private; 718 drm_i915_private_t *dev_priv = dev->dev_private;
733 drm_i915_setparam_t param; 719 drm_i915_setparam_t *param = data;
734 720
735 if (!dev_priv) { 721 if (!dev_priv) {
736 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 722 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
737 return DRM_ERR(EINVAL); 723 return -EINVAL;
738 } 724 }
739 725
740 DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data, 726 switch (param->param) {
741 sizeof(param));
742
743 switch (param.param) {
744 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 727 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
745 if (!IS_I965G(dev)) 728 if (!IS_I965G(dev))
746 dev_priv->use_mi_batchbuffer_start = param.value; 729 dev_priv->use_mi_batchbuffer_start = param->value;
747 break; 730 break;
748 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 731 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
749 dev_priv->tex_lru_log_granularity = param.value; 732 dev_priv->tex_lru_log_granularity = param->value;
750 break; 733 break;
751 case I915_SETPARAM_ALLOW_BATCHBUFFER: 734 case I915_SETPARAM_ALLOW_BATCHBUFFER:
752 dev_priv->allow_batchbuffer = param.value; 735 dev_priv->allow_batchbuffer = param->value;
753 break; 736 break;
754 default: 737 default:
755 DRM_ERROR("unknown parameter %d\n", param.param); 738 DRM_ERROR("unknown parameter %d\n", param->param);
756 return DRM_ERR(EINVAL); 739 return -EINVAL;
757 } 740 }
758 741
759 return 0; 742 return 0;
760} 743}
761 744
762static int i915_set_status_page(DRM_IOCTL_ARGS) 745static int i915_set_status_page(struct drm_device *dev, void *data,
746 struct drm_file *file_priv)
763{ 747{
764 DRM_DEVICE;
765 drm_i915_private_t *dev_priv = dev->dev_private; 748 drm_i915_private_t *dev_priv = dev->dev_private;
766 drm_i915_hws_addr_t hws; 749 drm_i915_hws_addr_t *hws = data;
767 750
768 if (!dev_priv) { 751 if (!dev_priv) {
769 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 752 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
770 return DRM_ERR(EINVAL); 753 return -EINVAL;
771 } 754 }
772 DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data,
773 sizeof(hws));
774 printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws.addr);
775 755
776 dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12); 756 printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);
757
758 dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
777 759
778 dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws.addr; 760 dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws->addr;
779 dev_priv->hws_map.size = 4*1024; 761 dev_priv->hws_map.size = 4*1024;
780 dev_priv->hws_map.type = 0; 762 dev_priv->hws_map.type = 0;
781 dev_priv->hws_map.flags = 0; 763 dev_priv->hws_map.flags = 0;
@@ -788,7 +770,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS)
788 dev_priv->status_gfx_addr = 0; 770 dev_priv->status_gfx_addr = 0;
789 DRM_ERROR("can not ioremap virtual address for" 771 DRM_ERROR("can not ioremap virtual address for"
790 " G33 hw status page\n"); 772 " G33 hw status page\n");
791 return DRM_ERR(ENOMEM); 773 return -ENOMEM;
792 } 774 }
793 dev_priv->hw_status_page = dev_priv->hws_map.handle; 775 dev_priv->hw_status_page = dev_priv->hws_map.handle;
794 776
@@ -821,32 +803,32 @@ void i915_driver_lastclose(struct drm_device * dev)
821 i915_dma_cleanup(dev); 803 i915_dma_cleanup(dev);
822} 804}
823 805
824void i915_driver_preclose(struct drm_device * dev, DRMFILE filp) 806void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
825{ 807{
826 if (dev->dev_private) { 808 if (dev->dev_private) {
827 drm_i915_private_t *dev_priv = dev->dev_private; 809 drm_i915_private_t *dev_priv = dev->dev_private;
828 i915_mem_release(dev, filp, dev_priv->agp_heap); 810 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
829 } 811 }
830} 812}
831 813
832drm_ioctl_desc_t i915_ioctls[] = { 814struct drm_ioctl_desc i915_ioctls[] = {
833 [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 815 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
834 [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH}, 816 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
835 [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH}, 817 DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
836 [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH}, 818 DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
837 [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH}, 819 DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
838 [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH}, 820 DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
839 [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH}, 821 DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
840 [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 822 DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
841 [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH}, 823 DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
842 [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH}, 824 DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
843 [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 825 DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
844 [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH}, 826 DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
845 [DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, 827 DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
846 [DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = { i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, 828 DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
847 [DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH }, 829 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
848 [DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH}, 830 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
849 [DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] = {i915_set_status_page, DRM_AUTH}, 831 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
850}; 832};
851 833
852int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 834int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
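
The i915 ioctl table above moves from designated initializers keyed by DRM_IOCTL_NR() to the DRM_IOCTL_DEF() helper, and the table type becomes struct drm_ioctl_desc. A hypothetical two-entry table in the new style (the entry names and handlers are made up; the macros and flags are the ones used in this patch) would read:

/* Sketch only: a driver ioctl table in the post-conversion style.
 * DRM_FOO_INIT/DRM_FOO_BLIT and the handlers are illustrative names.
 */
struct drm_ioctl_desc foo_ioctls[] = {
	DRM_IOCTL_DEF(DRM_FOO_INIT, foo_init_ioctl,
		      DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_FOO_BLIT, foo_blit_ioctl, DRM_AUTH),
};

int foo_max_ioctl = DRM_ARRAY_SIZE(foo_ioctls);
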
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 28b98733beb8..e064292e703a 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -70,7 +70,7 @@ struct mem_block {
70 struct mem_block *prev; 70 struct mem_block *prev;
71 int start; 71 int start;
72 int size; 72 int size;
73 DRMFILE filp; /* 0: free, -1: heap, other: real files */ 73 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
74}; 74};
75 75
76typedef struct _drm_i915_vbl_swap { 76typedef struct _drm_i915_vbl_swap {
@@ -116,21 +116,24 @@ typedef struct drm_i915_private {
116 unsigned int swaps_pending; 116 unsigned int swaps_pending;
117} drm_i915_private_t; 117} drm_i915_private_t;
118 118
119extern drm_ioctl_desc_t i915_ioctls[]; 119extern struct drm_ioctl_desc i915_ioctls[];
120extern int i915_max_ioctl; 120extern int i915_max_ioctl;
121 121
122 /* i915_dma.c */ 122 /* i915_dma.c */
123extern void i915_kernel_lost_context(struct drm_device * dev); 123extern void i915_kernel_lost_context(struct drm_device * dev);
124extern int i915_driver_load(struct drm_device *, unsigned long flags); 124extern int i915_driver_load(struct drm_device *, unsigned long flags);
125extern void i915_driver_lastclose(struct drm_device * dev); 125extern void i915_driver_lastclose(struct drm_device * dev);
126extern void i915_driver_preclose(struct drm_device * dev, DRMFILE filp); 126extern void i915_driver_preclose(struct drm_device *dev,
127 struct drm_file *file_priv);
127extern int i915_driver_device_is_agp(struct drm_device * dev); 128extern int i915_driver_device_is_agp(struct drm_device * dev);
128extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 129extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
129 unsigned long arg); 130 unsigned long arg);
130 131
131/* i915_irq.c */ 132/* i915_irq.c */
132extern int i915_irq_emit(DRM_IOCTL_ARGS); 133extern int i915_irq_emit(struct drm_device *dev, void *data,
133extern int i915_irq_wait(DRM_IOCTL_ARGS); 134 struct drm_file *file_priv);
135extern int i915_irq_wait(struct drm_device *dev, void *data,
136 struct drm_file *file_priv);
134 137
135extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); 138extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
136extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence); 139extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
@@ -138,18 +141,25 @@ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
138extern void i915_driver_irq_preinstall(struct drm_device * dev); 141extern void i915_driver_irq_preinstall(struct drm_device * dev);
139extern void i915_driver_irq_postinstall(struct drm_device * dev); 142extern void i915_driver_irq_postinstall(struct drm_device * dev);
140extern void i915_driver_irq_uninstall(struct drm_device * dev); 143extern void i915_driver_irq_uninstall(struct drm_device * dev);
141extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS); 144extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
142extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS); 145 struct drm_file *file_priv);
143extern int i915_vblank_swap(DRM_IOCTL_ARGS); 146extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
147 struct drm_file *file_priv);
148extern int i915_vblank_swap(struct drm_device *dev, void *data,
149 struct drm_file *file_priv);
144 150
145/* i915_mem.c */ 151/* i915_mem.c */
146extern int i915_mem_alloc(DRM_IOCTL_ARGS); 152extern int i915_mem_alloc(struct drm_device *dev, void *data,
147extern int i915_mem_free(DRM_IOCTL_ARGS); 153 struct drm_file *file_priv);
148extern int i915_mem_init_heap(DRM_IOCTL_ARGS); 154extern int i915_mem_free(struct drm_device *dev, void *data,
149extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS); 155 struct drm_file *file_priv);
156extern int i915_mem_init_heap(struct drm_device *dev, void *data,
157 struct drm_file *file_priv);
158extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
159 struct drm_file *file_priv);
150extern void i915_mem_takedown(struct mem_block **heap); 160extern void i915_mem_takedown(struct mem_block **heap);
151extern void i915_mem_release(struct drm_device * dev, 161extern void i915_mem_release(struct drm_device * dev,
152 DRMFILE filp, struct mem_block *heap); 162 struct drm_file *file_priv, struct mem_block *heap);
153 163
154#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) 164#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
155#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) 165#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index bb8e9e9c8201..a443f4a202e3 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -311,7 +311,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
311 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, 311 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
312 READ_BREADCRUMB(dev_priv) >= irq_nr); 312 READ_BREADCRUMB(dev_priv) >= irq_nr);
313 313
314 if (ret == DRM_ERR(EBUSY)) { 314 if (ret == -EBUSY) {
315 DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n", 315 DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
316 __FUNCTION__, 316 __FUNCTION__,
317 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 317 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
@@ -330,7 +330,7 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequ
330 330
331 if (!dev_priv) { 331 if (!dev_priv) {
332 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 332 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
333 return DRM_ERR(EINVAL); 333 return -EINVAL;
334 } 334 }
335 335
336 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 336 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
@@ -355,28 +355,25 @@ int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
355 355
356/* Needs the lock as it touches the ring. 356/* Needs the lock as it touches the ring.
357 */ 357 */
358int i915_irq_emit(DRM_IOCTL_ARGS) 358int i915_irq_emit(struct drm_device *dev, void *data,
359 struct drm_file *file_priv)
359{ 360{
360 DRM_DEVICE;
361 drm_i915_private_t *dev_priv = dev->dev_private; 361 drm_i915_private_t *dev_priv = dev->dev_private;
362 drm_i915_irq_emit_t emit; 362 drm_i915_irq_emit_t *emit = data;
363 int result; 363 int result;
364 364
365 LOCK_TEST_WITH_RETURN(dev, filp); 365 LOCK_TEST_WITH_RETURN(dev, file_priv);
366 366
367 if (!dev_priv) { 367 if (!dev_priv) {
368 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 368 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
369 return DRM_ERR(EINVAL); 369 return -EINVAL;
370 } 370 }
371 371
372 DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data,
373 sizeof(emit));
374
375 result = i915_emit_irq(dev); 372 result = i915_emit_irq(dev);
376 373
377 if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { 374 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
378 DRM_ERROR("copy_to_user\n"); 375 DRM_ERROR("copy_to_user\n");
379 return DRM_ERR(EFAULT); 376 return -EFAULT;
380 } 377 }
381 378
382 return 0; 379 return 0;
@@ -384,21 +381,18 @@ int i915_irq_emit(DRM_IOCTL_ARGS)
384 381
385/* Doesn't need the hardware lock. 382/* Doesn't need the hardware lock.
386 */ 383 */
387int i915_irq_wait(DRM_IOCTL_ARGS) 384int i915_irq_wait(struct drm_device *dev, void *data,
385 struct drm_file *file_priv)
388{ 386{
389 DRM_DEVICE;
390 drm_i915_private_t *dev_priv = dev->dev_private; 387 drm_i915_private_t *dev_priv = dev->dev_private;
391 drm_i915_irq_wait_t irqwait; 388 drm_i915_irq_wait_t *irqwait = data;
392 389
393 if (!dev_priv) { 390 if (!dev_priv) {
394 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 391 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
395 return DRM_ERR(EINVAL); 392 return -EINVAL;
396 } 393 }
397 394
398 DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data, 395 return i915_wait_irq(dev, irqwait->irq_seq);
399 sizeof(irqwait));
400
401 return i915_wait_irq(dev, irqwait.irq_seq);
402} 396}
403 397
404static void i915_enable_interrupt (struct drm_device *dev) 398static void i915_enable_interrupt (struct drm_device *dev)
@@ -417,64 +411,60 @@ static void i915_enable_interrupt (struct drm_device *dev)
417 411
418/* Set the vblank monitor pipe 412/* Set the vblank monitor pipe
419 */ 413 */
420int i915_vblank_pipe_set(DRM_IOCTL_ARGS) 414int i915_vblank_pipe_set(struct drm_device *dev, void *data,
415 struct drm_file *file_priv)
421{ 416{
422 DRM_DEVICE;
423 drm_i915_private_t *dev_priv = dev->dev_private; 417 drm_i915_private_t *dev_priv = dev->dev_private;
424 drm_i915_vblank_pipe_t pipe; 418 drm_i915_vblank_pipe_t *pipe = data;
425 419
426 if (!dev_priv) { 420 if (!dev_priv) {
427 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 421 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
428 return DRM_ERR(EINVAL); 422 return -EINVAL;
429 } 423 }
430 424
431 DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data, 425 if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
432 sizeof(pipe));
433
434 if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
435 DRM_ERROR("%s called with invalid pipe 0x%x\n", 426 DRM_ERROR("%s called with invalid pipe 0x%x\n",
436 __FUNCTION__, pipe.pipe); 427 __FUNCTION__, pipe->pipe);
437 return DRM_ERR(EINVAL); 428 return -EINVAL;
438 } 429 }
439 430
440 dev_priv->vblank_pipe = pipe.pipe; 431 dev_priv->vblank_pipe = pipe->pipe;
441 432
442 i915_enable_interrupt (dev); 433 i915_enable_interrupt (dev);
443 434
444 return 0; 435 return 0;
445} 436}
446 437
447int i915_vblank_pipe_get(DRM_IOCTL_ARGS) 438int i915_vblank_pipe_get(struct drm_device *dev, void *data,
439 struct drm_file *file_priv)
448{ 440{
449 DRM_DEVICE;
450 drm_i915_private_t *dev_priv = dev->dev_private; 441 drm_i915_private_t *dev_priv = dev->dev_private;
451 drm_i915_vblank_pipe_t pipe; 442 drm_i915_vblank_pipe_t *pipe = data;
452 u16 flag; 443 u16 flag;
453 444
454 if (!dev_priv) { 445 if (!dev_priv) {
455 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 446 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
456 return DRM_ERR(EINVAL); 447 return -EINVAL;
457 } 448 }
458 449
459 flag = I915_READ(I915REG_INT_ENABLE_R); 450 flag = I915_READ(I915REG_INT_ENABLE_R);
460 pipe.pipe = 0; 451 pipe->pipe = 0;
461 if (flag & VSYNC_PIPEA_FLAG) 452 if (flag & VSYNC_PIPEA_FLAG)
462 pipe.pipe |= DRM_I915_VBLANK_PIPE_A; 453 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
463 if (flag & VSYNC_PIPEB_FLAG) 454 if (flag & VSYNC_PIPEB_FLAG)
464 pipe.pipe |= DRM_I915_VBLANK_PIPE_B; 455 pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
465 DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_pipe_t __user *) data, pipe, 456
466 sizeof(pipe));
467 return 0; 457 return 0;
468} 458}
469 459
470/** 460/**
471 * Schedule buffer swap at given vertical blank. 461 * Schedule buffer swap at given vertical blank.
472 */ 462 */
473int i915_vblank_swap(DRM_IOCTL_ARGS) 463int i915_vblank_swap(struct drm_device *dev, void *data,
464 struct drm_file *file_priv)
474{ 465{
475 DRM_DEVICE;
476 drm_i915_private_t *dev_priv = dev->dev_private; 466 drm_i915_private_t *dev_priv = dev->dev_private;
477 drm_i915_vblank_swap_t swap; 467 drm_i915_vblank_swap_t *swap = data;
478 drm_i915_vbl_swap_t *vbl_swap; 468 drm_i915_vbl_swap_t *vbl_swap;
479 unsigned int pipe, seqtype, curseq; 469 unsigned int pipe, seqtype, curseq;
480 unsigned long irqflags; 470 unsigned long irqflags;
@@ -482,38 +472,35 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
482 472
483 if (!dev_priv) { 473 if (!dev_priv) {
484 DRM_ERROR("%s called with no initialization\n", __func__); 474 DRM_ERROR("%s called with no initialization\n", __func__);
485 return DRM_ERR(EINVAL); 475 return -EINVAL;
486 } 476 }
487 477
488 if (dev_priv->sarea_priv->rotation) { 478 if (dev_priv->sarea_priv->rotation) {
489 DRM_DEBUG("Rotation not supported\n"); 479 DRM_DEBUG("Rotation not supported\n");
490 return DRM_ERR(EINVAL); 480 return -EINVAL;
491 } 481 }
492 482
493 DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data, 483 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
494 sizeof(swap));
495
496 if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
497 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { 484 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
498 DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype); 485 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
499 return DRM_ERR(EINVAL); 486 return -EINVAL;
500 } 487 }
501 488
502 pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 489 pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
503 490
504 seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 491 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
505 492
506 if (!(dev_priv->vblank_pipe & (1 << pipe))) { 493 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
507 DRM_ERROR("Invalid pipe %d\n", pipe); 494 DRM_ERROR("Invalid pipe %d\n", pipe);
508 return DRM_ERR(EINVAL); 495 return -EINVAL;
509 } 496 }
510 497
511 spin_lock_irqsave(&dev->drw_lock, irqflags); 498 spin_lock_irqsave(&dev->drw_lock, irqflags);
512 499
513 if (!drm_get_drawable_info(dev, swap.drawable)) { 500 if (!drm_get_drawable_info(dev, swap->drawable)) {
514 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 501 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
515 DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); 502 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
516 return DRM_ERR(EINVAL); 503 return -EINVAL;
517 } 504 }
518 505
519 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 506 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
@@ -521,14 +508,14 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
521 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received); 508 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
522 509
523 if (seqtype == _DRM_VBLANK_RELATIVE) 510 if (seqtype == _DRM_VBLANK_RELATIVE)
524 swap.sequence += curseq; 511 swap->sequence += curseq;
525 512
526 if ((curseq - swap.sequence) <= (1<<23)) { 513 if ((curseq - swap->sequence) <= (1<<23)) {
527 if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) { 514 if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
528 swap.sequence = curseq + 1; 515 swap->sequence = curseq + 1;
529 } else { 516 } else {
530 DRM_DEBUG("Missed target sequence\n"); 517 DRM_DEBUG("Missed target sequence\n");
531 return DRM_ERR(EINVAL); 518 return -EINVAL;
532 } 519 }
533 } 520 }
534 521
@@ -537,9 +524,9 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
537 list_for_each(list, &dev_priv->vbl_swaps.head) { 524 list_for_each(list, &dev_priv->vbl_swaps.head) {
538 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); 525 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
539 526
540 if (vbl_swap->drw_id == swap.drawable && 527 if (vbl_swap->drw_id == swap->drawable &&
541 vbl_swap->pipe == pipe && 528 vbl_swap->pipe == pipe &&
542 vbl_swap->sequence == swap.sequence) { 529 vbl_swap->sequence == swap->sequence) {
543 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 530 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
544 DRM_DEBUG("Already scheduled\n"); 531 DRM_DEBUG("Already scheduled\n");
545 return 0; 532 return 0;
@@ -550,21 +537,21 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
550 537
551 if (dev_priv->swaps_pending >= 100) { 538 if (dev_priv->swaps_pending >= 100) {
552 DRM_DEBUG("Too many swaps queued\n"); 539 DRM_DEBUG("Too many swaps queued\n");
553 return DRM_ERR(EBUSY); 540 return -EBUSY;
554 } 541 }
555 542
556 vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER); 543 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
557 544
558 if (!vbl_swap) { 545 if (!vbl_swap) {
559 DRM_ERROR("Failed to allocate memory to queue swap\n"); 546 DRM_ERROR("Failed to allocate memory to queue swap\n");
560 return DRM_ERR(ENOMEM); 547 return -ENOMEM;
561 } 548 }
562 549
563 DRM_DEBUG("\n"); 550 DRM_DEBUG("\n");
564 551
565 vbl_swap->drw_id = swap.drawable; 552 vbl_swap->drw_id = swap->drawable;
566 vbl_swap->pipe = pipe; 553 vbl_swap->pipe = pipe;
567 vbl_swap->sequence = swap.sequence; 554 vbl_swap->sequence = swap->sequence;
568 555
569 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 556 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
570 557
@@ -573,9 +560,6 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
573 560
574 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 561 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
575 562
576 DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap,
577 sizeof(swap));
578
579 return 0; 563 return 0;
580} 564}
581 565
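
One detail worth noting in the i915_irq.c hunks: values stored directly in the argument struct (pipe->pipe, swap->sequence) no longer need DRM_COPY_TO_USER_IOCTL, because the core writes the whole struct back to user space, while pointers embedded inside the struct (emit->irq_seq) still need an explicit DRM_COPY_TO_USER(). A compressed sketch of that distinction (drm_foo_emit_t and its fields are hypothetical):

/* Sketch, not driver code: which copies remain explicit after the
 * conversion.  seq_ptr stands in for a user pointer carried inside the
 * ioctl argument, like irq_seq above.
 */
typedef struct drm_foo_emit {
	int __user *seq_ptr;	/* user pointer inside the struct */
	int last_seq;		/* plain field, copied back by the core */
} drm_foo_emit_t;

static int foo_emit(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_foo_emit_t *emit = data;
	int seq = 1;				/* stand-in for a real sequence number */

	emit->last_seq = seq;			/* written in place, no explicit copy */

	if (DRM_COPY_TO_USER(emit->seq_ptr, &seq, sizeof(seq)))
		return -EFAULT;			/* embedded user pointer: still explicit */

	return 0;
}
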
diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c
index 50b4bacef0e0..56fb9b30a5d7 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/char/drm/i915_mem.c
@@ -89,7 +89,7 @@ static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
89 */ 89 */
90 90
91static struct mem_block *split_block(struct mem_block *p, int start, int size, 91static struct mem_block *split_block(struct mem_block *p, int start, int size,
92 DRMFILE filp) 92 struct drm_file *file_priv)
93{ 93{
94 /* Maybe cut off the start of an existing block */ 94 /* Maybe cut off the start of an existing block */
95 if (start > p->start) { 95 if (start > p->start) {
@@ -99,7 +99,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
99 goto out; 99 goto out;
100 newblock->start = start; 100 newblock->start = start;
101 newblock->size = p->size - (start - p->start); 101 newblock->size = p->size - (start - p->start);
102 newblock->filp = NULL; 102 newblock->file_priv = NULL;
103 newblock->next = p->next; 103 newblock->next = p->next;
104 newblock->prev = p; 104 newblock->prev = p;
105 p->next->prev = newblock; 105 p->next->prev = newblock;
@@ -116,7 +116,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
116 goto out; 116 goto out;
117 newblock->start = start + size; 117 newblock->start = start + size;
118 newblock->size = p->size - size; 118 newblock->size = p->size - size;
119 newblock->filp = NULL; 119 newblock->file_priv = NULL;
120 newblock->next = p->next; 120 newblock->next = p->next;
121 newblock->prev = p; 121 newblock->prev = p;
122 p->next->prev = newblock; 122 p->next->prev = newblock;
@@ -126,20 +126,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
126 126
127 out: 127 out:
128 /* Our block is in the middle */ 128 /* Our block is in the middle */
129 p->filp = filp; 129 p->file_priv = file_priv;
130 return p; 130 return p;
131} 131}
132 132
133static struct mem_block *alloc_block(struct mem_block *heap, int size, 133static struct mem_block *alloc_block(struct mem_block *heap, int size,
134 int align2, DRMFILE filp) 134 int align2, struct drm_file *file_priv)
135{ 135{
136 struct mem_block *p; 136 struct mem_block *p;
137 int mask = (1 << align2) - 1; 137 int mask = (1 << align2) - 1;
138 138
139 for (p = heap->next; p != heap; p = p->next) { 139 for (p = heap->next; p != heap; p = p->next) {
140 int start = (p->start + mask) & ~mask; 140 int start = (p->start + mask) & ~mask;
141 if (p->filp == NULL && start + size <= p->start + p->size) 141 if (p->file_priv == NULL && start + size <= p->start + p->size)
142 return split_block(p, start, size, filp); 142 return split_block(p, start, size, file_priv);
143 } 143 }
144 144
145 return NULL; 145 return NULL;
@@ -158,12 +158,12 @@ static struct mem_block *find_block(struct mem_block *heap, int start)
158 158
159static void free_block(struct mem_block *p) 159static void free_block(struct mem_block *p)
160{ 160{
161 p->filp = NULL; 161 p->file_priv = NULL;
162 162
163 /* Assumes a single contiguous range. Needs a special filp in 163 /* Assumes a single contiguous range. Needs a special file_priv in
164 * 'heap' to stop it being subsumed. 164 * 'heap' to stop it being subsumed.
165 */ 165 */
166 if (p->next->filp == NULL) { 166 if (p->next->file_priv == NULL) {
167 struct mem_block *q = p->next; 167 struct mem_block *q = p->next;
168 p->size += q->size; 168 p->size += q->size;
169 p->next = q->next; 169 p->next = q->next;
@@ -171,7 +171,7 @@ static void free_block(struct mem_block *p)
171 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); 171 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
172 } 172 }
173 173
174 if (p->prev->filp == NULL) { 174 if (p->prev->file_priv == NULL) {
175 struct mem_block *q = p->prev; 175 struct mem_block *q = p->prev;
176 q->size += p->size; 176 q->size += p->size;
177 q->next = p->next; 177 q->next = p->next;
@@ -197,18 +197,19 @@ static int init_heap(struct mem_block **heap, int start, int size)
197 197
198 blocks->start = start; 198 blocks->start = start;
199 blocks->size = size; 199 blocks->size = size;
200 blocks->filp = NULL; 200 blocks->file_priv = NULL;
201 blocks->next = blocks->prev = *heap; 201 blocks->next = blocks->prev = *heap;
202 202
203 memset(*heap, 0, sizeof(**heap)); 203 memset(*heap, 0, sizeof(**heap));
204 (*heap)->filp = (DRMFILE) - 1; 204 (*heap)->file_priv = (struct drm_file *) - 1;
205 (*heap)->next = (*heap)->prev = blocks; 205 (*heap)->next = (*heap)->prev = blocks;
206 return 0; 206 return 0;
207} 207}
208 208
209/* Free all blocks associated with the releasing file. 209/* Free all blocks associated with the releasing file.
210 */ 210 */
211void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *heap) 211void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
212 struct mem_block *heap)
212{ 213{
213 struct mem_block *p; 214 struct mem_block *p;
214 215
@@ -216,17 +217,17 @@ void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *h
216 return; 217 return;
217 218
218 for (p = heap->next; p != heap; p = p->next) { 219 for (p = heap->next; p != heap; p = p->next) {
219 if (p->filp == filp) { 220 if (p->file_priv == file_priv) {
220 p->filp = NULL; 221 p->file_priv = NULL;
221 mark_block(dev, p, 0); 222 mark_block(dev, p, 0);
222 } 223 }
223 } 224 }
224 225
225 /* Assumes a single contiguous range. Needs a special filp in 226 /* Assumes a single contiguous range. Needs a special file_priv in
226 * 'heap' to stop it being subsumed. 227 * 'heap' to stop it being subsumed.
227 */ 228 */
228 for (p = heap->next; p != heap; p = p->next) { 229 for (p = heap->next; p != heap; p = p->next) {
229 while (p->filp == NULL && p->next->filp == NULL) { 230 while (p->file_priv == NULL && p->next->file_priv == NULL) {
230 struct mem_block *q = p->next; 231 struct mem_block *q = p->next;
231 p->size += q->size; 232 p->size += q->size;
232 p->next = q->next; 233 p->next = q->next;
@@ -267,129 +268,117 @@ static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
267 268
268/* IOCTL HANDLERS */ 269/* IOCTL HANDLERS */
269 270
270int i915_mem_alloc(DRM_IOCTL_ARGS) 271int i915_mem_alloc(struct drm_device *dev, void *data,
272 struct drm_file *file_priv)
271{ 273{
272 DRM_DEVICE;
273 drm_i915_private_t *dev_priv = dev->dev_private; 274 drm_i915_private_t *dev_priv = dev->dev_private;
274 drm_i915_mem_alloc_t alloc; 275 drm_i915_mem_alloc_t *alloc = data;
275 struct mem_block *block, **heap; 276 struct mem_block *block, **heap;
276 277
277 if (!dev_priv) { 278 if (!dev_priv) {
278 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 279 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
279 return DRM_ERR(EINVAL); 280 return -EINVAL;
280 } 281 }
281 282
282 DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data, 283 heap = get_heap(dev_priv, alloc->region);
283 sizeof(alloc));
284
285 heap = get_heap(dev_priv, alloc.region);
286 if (!heap || !*heap) 284 if (!heap || !*heap)
287 return DRM_ERR(EFAULT); 285 return -EFAULT;
288 286
289 /* Make things easier on ourselves: all allocations at least 287 /* Make things easier on ourselves: all allocations at least
290 * 4k aligned. 288 * 4k aligned.
291 */ 289 */
292 if (alloc.alignment < 12) 290 if (alloc->alignment < 12)
293 alloc.alignment = 12; 291 alloc->alignment = 12;
294 292
295 block = alloc_block(*heap, alloc.size, alloc.alignment, filp); 293 block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
296 294
297 if (!block) 295 if (!block)
298 return DRM_ERR(ENOMEM); 296 return -ENOMEM;
299 297
300 mark_block(dev, block, 1); 298 mark_block(dev, block, 1);
301 299
302 if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { 300 if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
301 sizeof(int))) {
303 DRM_ERROR("copy_to_user\n"); 302 DRM_ERROR("copy_to_user\n");
304 return DRM_ERR(EFAULT); 303 return -EFAULT;
305 } 304 }
306 305
307 return 0; 306 return 0;
308} 307}
309 308
310int i915_mem_free(DRM_IOCTL_ARGS) 309int i915_mem_free(struct drm_device *dev, void *data,
310 struct drm_file *file_priv)
311{ 311{
312 DRM_DEVICE;
313 drm_i915_private_t *dev_priv = dev->dev_private; 312 drm_i915_private_t *dev_priv = dev->dev_private;
314 drm_i915_mem_free_t memfree; 313 drm_i915_mem_free_t *memfree = data;
315 struct mem_block *block, **heap; 314 struct mem_block *block, **heap;
316 315
317 if (!dev_priv) { 316 if (!dev_priv) {
318 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 317 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
319 return DRM_ERR(EINVAL); 318 return -EINVAL;
320 } 319 }
321 320
322 DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data, 321 heap = get_heap(dev_priv, memfree->region);
323 sizeof(memfree));
324
325 heap = get_heap(dev_priv, memfree.region);
326 if (!heap || !*heap) 322 if (!heap || !*heap)
327 return DRM_ERR(EFAULT); 323 return -EFAULT;
328 324
329 block = find_block(*heap, memfree.region_offset); 325 block = find_block(*heap, memfree->region_offset);
330 if (!block) 326 if (!block)
331 return DRM_ERR(EFAULT); 327 return -EFAULT;
332 328
333 if (block->filp != filp) 329 if (block->file_priv != file_priv)
334 return DRM_ERR(EPERM); 330 return -EPERM;
335 331
336 mark_block(dev, block, 0); 332 mark_block(dev, block, 0);
337 free_block(block); 333 free_block(block);
338 return 0; 334 return 0;
339} 335}
340 336
341int i915_mem_init_heap(DRM_IOCTL_ARGS) 337int i915_mem_init_heap(struct drm_device *dev, void *data,
338 struct drm_file *file_priv)
342{ 339{
343 DRM_DEVICE;
344 drm_i915_private_t *dev_priv = dev->dev_private; 340 drm_i915_private_t *dev_priv = dev->dev_private;
345 drm_i915_mem_init_heap_t initheap; 341 drm_i915_mem_init_heap_t *initheap = data;
346 struct mem_block **heap; 342 struct mem_block **heap;
347 343
348 if (!dev_priv) { 344 if (!dev_priv) {
349 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 345 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
350 return DRM_ERR(EINVAL); 346 return -EINVAL;
351 } 347 }
352 348
353 DRM_COPY_FROM_USER_IOCTL(initheap, 349 heap = get_heap(dev_priv, initheap->region);
354 (drm_i915_mem_init_heap_t __user *) data,
355 sizeof(initheap));
356
357 heap = get_heap(dev_priv, initheap.region);
358 if (!heap) 350 if (!heap)
359 return DRM_ERR(EFAULT); 351 return -EFAULT;
360 352
361 if (*heap) { 353 if (*heap) {
362 DRM_ERROR("heap already initialized?"); 354 DRM_ERROR("heap already initialized?");
363 return DRM_ERR(EFAULT); 355 return -EFAULT;
364 } 356 }
365 357
366 return init_heap(heap, initheap.start, initheap.size); 358 return init_heap(heap, initheap->start, initheap->size);
367} 359}
368 360
369int i915_mem_destroy_heap( DRM_IOCTL_ARGS ) 361int i915_mem_destroy_heap( struct drm_device *dev, void *data,
362 struct drm_file *file_priv )
370{ 363{
371 DRM_DEVICE;
372 drm_i915_private_t *dev_priv = dev->dev_private; 364 drm_i915_private_t *dev_priv = dev->dev_private;
373 drm_i915_mem_destroy_heap_t destroyheap; 365 drm_i915_mem_destroy_heap_t *destroyheap = data;
374 struct mem_block **heap; 366 struct mem_block **heap;
375 367
376 if ( !dev_priv ) { 368 if ( !dev_priv ) {
377 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); 369 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
378 return DRM_ERR(EINVAL); 370 return -EINVAL;
379 } 371 }
380 372
381 DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data, 373 heap = get_heap( dev_priv, destroyheap->region );
382 sizeof(destroyheap) );
383
384 heap = get_heap( dev_priv, destroyheap.region );
385 if (!heap) { 374 if (!heap) {
386 DRM_ERROR("get_heap failed"); 375 DRM_ERROR("get_heap failed");
387 return DRM_ERR(EFAULT); 376 return -EFAULT;
388 } 377 }
389 378
390 if (!*heap) { 379 if (!*heap) {
391 DRM_ERROR("heap not initialized?"); 380 DRM_ERROR("heap not initialized?");
392 return DRM_ERR(EFAULT); 381 return -EFAULT;
393 } 382 }
394 383
395 i915_mem_takedown( heap ); 384 i915_mem_takedown( heap );
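
In i915_mem.c the DRMFILE cookie is replaced by the drm_file pointer itself, so block ownership becomes a direct pointer comparison (NULL for free blocks, the (struct drm_file *)-1 sentinel for the heap head, a real drm_file otherwise), and i915_mem_release() frees everything owned by a closing file. A compressed, illustrative version of that release walk (not the driver code itself):

/* Sketch of the per-file cleanup idea behind i915_mem_release():
 * blocks whose file_priv matches the closing file are marked free;
 * coalescing of adjacent free blocks is left out here.
 */
static void release_blocks(struct mem_block *heap,
			   struct drm_file *file_priv)
{
	struct mem_block *p;

	for (p = heap->next; p != heap; p = p->next)
		if (p->file_priv == file_priv)
			p->file_priv = NULL;	/* NULL marks the block free */
}
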
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index 9c73a6e3861b..c567c34cda78 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -71,7 +71,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
71 DRM_ERROR("failed!\n"); 71 DRM_ERROR("failed!\n");
72 DRM_INFO(" status=0x%08x\n", status); 72 DRM_INFO(" status=0x%08x\n", status);
73#endif 73#endif
74 return DRM_ERR(EBUSY); 74 return -EBUSY;
75} 75}
76 76
77static int mga_do_dma_reset(drm_mga_private_t * dev_priv) 77static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
@@ -256,7 +256,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
256 256
257 dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 257 dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
258 if (dev_priv->head == NULL) 258 if (dev_priv->head == NULL)
259 return DRM_ERR(ENOMEM); 259 return -ENOMEM;
260 260
261 memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); 261 memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
262 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); 262 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
@@ -267,7 +267,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
267 267
268 entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 268 entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
269 if (entry == NULL) 269 if (entry == NULL)
270 return DRM_ERR(ENOMEM); 270 return -ENOMEM;
271 271
272 memset(entry, 0, sizeof(drm_mga_freelist_t)); 272 memset(entry, 0, sizeof(drm_mga_freelist_t));
273 273
@@ -399,7 +399,7 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
399 399
400 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 400 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
401 if (!dev_priv) 401 if (!dev_priv)
402 return DRM_ERR(ENOMEM); 402 return -ENOMEM;
403 403
404 dev->dev_private = (void *)dev_priv; 404 dev->dev_private = (void *)dev_priv;
405 memset(dev_priv, 0, sizeof(drm_mga_private_t)); 405 memset(dev_priv, 0, sizeof(drm_mga_private_t));
@@ -578,7 +578,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
578 DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n", 578 DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
579 dev_priv->warp->handle, dev_priv->primary->handle, 579 dev_priv->warp->handle, dev_priv->primary->handle,
580 dev->agp_buffer_map->handle); 580 dev->agp_buffer_map->handle);
581 return DRM_ERR(ENOMEM); 581 return -ENOMEM;
582 } 582 }
583 583
584 dev_priv->dma_access = MGA_PAGPXFER; 584 dev_priv->dma_access = MGA_PAGPXFER;
@@ -622,7 +622,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
622 622
623 if (dev->dma == NULL) { 623 if (dev->dma == NULL) {
624 DRM_ERROR("dev->dma is NULL\n"); 624 DRM_ERROR("dev->dma is NULL\n");
625 return DRM_ERR(EFAULT); 625 return -EFAULT;
626 } 626 }
627 627
628 /* Make drm_addbufs happy by not trying to create a mapping for less 628 /* Make drm_addbufs happy by not trying to create a mapping for less
@@ -656,7 +656,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
656 656
657 if (err != 0) { 657 if (err != 0) {
658 DRM_ERROR("Unable to allocate primary DMA region: %d\n", err); 658 DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
659 return DRM_ERR(ENOMEM); 659 return -ENOMEM;
660 } 660 }
661 661
662 if (dev_priv->primary->size != dma_bs->primary_size) { 662 if (dev_priv->primary->size != dma_bs->primary_size) {
@@ -759,36 +759,30 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
759 return err; 759 return err;
760} 760}
761 761
762int mga_dma_bootstrap(DRM_IOCTL_ARGS) 762int mga_dma_bootstrap(struct drm_device *dev, void *data,
763 struct drm_file *file_priv)
763{ 764{
764 DRM_DEVICE; 765 drm_mga_dma_bootstrap_t *bootstrap = data;
765 drm_mga_dma_bootstrap_t bootstrap;
766 int err; 766 int err;
767 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; 767 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
768 const drm_mga_private_t *const dev_priv = 768 const drm_mga_private_t *const dev_priv =
769 (drm_mga_private_t *) dev->dev_private; 769 (drm_mga_private_t *) dev->dev_private;
770 770
771 DRM_COPY_FROM_USER_IOCTL(bootstrap, 771 err = mga_do_dma_bootstrap(dev, bootstrap);
772 (drm_mga_dma_bootstrap_t __user *) data,
773 sizeof(bootstrap));
774
775 err = mga_do_dma_bootstrap(dev, &bootstrap);
776 if (err) { 772 if (err) {
777 mga_do_cleanup_dma(dev, FULL_CLEANUP); 773 mga_do_cleanup_dma(dev, FULL_CLEANUP);
778 return err; 774 return err;
779 } 775 }
780 776
781 if (dev_priv->agp_textures != NULL) { 777 if (dev_priv->agp_textures != NULL) {
782 bootstrap.texture_handle = dev_priv->agp_textures->offset; 778 bootstrap->texture_handle = dev_priv->agp_textures->offset;
783 bootstrap.texture_size = dev_priv->agp_textures->size; 779 bootstrap->texture_size = dev_priv->agp_textures->size;
784 } else { 780 } else {
785 bootstrap.texture_handle = 0; 781 bootstrap->texture_handle = 0;
786 bootstrap.texture_size = 0; 782 bootstrap->texture_size = 0;
787 } 783 }
788 784
789 bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07]; 785 bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
790 DRM_COPY_TO_USER_IOCTL((drm_mga_dma_bootstrap_t __user *)data,
791 bootstrap, sizeof(bootstrap));
792 786
793 return err; 787 return err;
794} 788}
@@ -826,7 +820,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
826 dev_priv->sarea = drm_getsarea(dev); 820 dev_priv->sarea = drm_getsarea(dev);
827 if (!dev_priv->sarea) { 821 if (!dev_priv->sarea) {
828 DRM_ERROR("failed to find sarea!\n"); 822 DRM_ERROR("failed to find sarea!\n");
829 return DRM_ERR(EINVAL); 823 return -EINVAL;
830 } 824 }
831 825
832 if (!dev_priv->used_new_dma_init) { 826 if (!dev_priv->used_new_dma_init) {
@@ -837,29 +831,29 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
837 dev_priv->status = drm_core_findmap(dev, init->status_offset); 831 dev_priv->status = drm_core_findmap(dev, init->status_offset);
838 if (!dev_priv->status) { 832 if (!dev_priv->status) {
839 DRM_ERROR("failed to find status page!\n"); 833 DRM_ERROR("failed to find status page!\n");
840 return DRM_ERR(EINVAL); 834 return -EINVAL;
841 } 835 }
842 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 836 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
843 if (!dev_priv->mmio) { 837 if (!dev_priv->mmio) {
844 DRM_ERROR("failed to find mmio region!\n"); 838 DRM_ERROR("failed to find mmio region!\n");
845 return DRM_ERR(EINVAL); 839 return -EINVAL;
846 } 840 }
847 dev_priv->warp = drm_core_findmap(dev, init->warp_offset); 841 dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
848 if (!dev_priv->warp) { 842 if (!dev_priv->warp) {
849 DRM_ERROR("failed to find warp microcode region!\n"); 843 DRM_ERROR("failed to find warp microcode region!\n");
850 return DRM_ERR(EINVAL); 844 return -EINVAL;
851 } 845 }
852 dev_priv->primary = drm_core_findmap(dev, init->primary_offset); 846 dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
853 if (!dev_priv->primary) { 847 if (!dev_priv->primary) {
854 DRM_ERROR("failed to find primary dma region!\n"); 848 DRM_ERROR("failed to find primary dma region!\n");
855 return DRM_ERR(EINVAL); 849 return -EINVAL;
856 } 850 }
857 dev->agp_buffer_token = init->buffers_offset; 851 dev->agp_buffer_token = init->buffers_offset;
858 dev->agp_buffer_map = 852 dev->agp_buffer_map =
859 drm_core_findmap(dev, init->buffers_offset); 853 drm_core_findmap(dev, init->buffers_offset);
860 if (!dev->agp_buffer_map) { 854 if (!dev->agp_buffer_map) {
861 DRM_ERROR("failed to find dma buffer region!\n"); 855 DRM_ERROR("failed to find dma buffer region!\n");
862 return DRM_ERR(EINVAL); 856 return -EINVAL;
863 } 857 }
864 858
865 drm_core_ioremap(dev_priv->warp, dev); 859 drm_core_ioremap(dev_priv->warp, dev);
@@ -877,7 +871,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
877 ((dev->agp_buffer_map == NULL) || 871 ((dev->agp_buffer_map == NULL) ||
878 (dev->agp_buffer_map->handle == NULL)))) { 872 (dev->agp_buffer_map->handle == NULL)))) {
879 DRM_ERROR("failed to ioremap agp regions!\n"); 873 DRM_ERROR("failed to ioremap agp regions!\n");
880 return DRM_ERR(ENOMEM); 874 return -ENOMEM;
881 } 875 }
882 876
883 ret = mga_warp_install_microcode(dev_priv); 877 ret = mga_warp_install_microcode(dev_priv);
@@ -927,7 +921,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
927 921
928 if (mga_freelist_init(dev, dev_priv) < 0) { 922 if (mga_freelist_init(dev, dev_priv) < 0) {
929 DRM_ERROR("could not initialize freelist\n"); 923 DRM_ERROR("could not initialize freelist\n");
930 return DRM_ERR(ENOMEM); 924 return -ENOMEM;
931 } 925 }
932 926
933 return 0; 927 return 0;
@@ -1007,20 +1001,17 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
1007 return 0; 1001 return 0;
1008} 1002}
1009 1003
1010int mga_dma_init(DRM_IOCTL_ARGS) 1004int mga_dma_init(struct drm_device *dev, void *data,
1005 struct drm_file *file_priv)
1011{ 1006{
1012 DRM_DEVICE; 1007 drm_mga_init_t *init = data;
1013 drm_mga_init_t init;
1014 int err; 1008 int err;
1015 1009
1016 LOCK_TEST_WITH_RETURN(dev, filp); 1010 LOCK_TEST_WITH_RETURN(dev, file_priv);
1017
1018 DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
1019 sizeof(init));
1020 1011
1021 switch (init.func) { 1012 switch (init->func) {
1022 case MGA_INIT_DMA: 1013 case MGA_INIT_DMA:
1023 err = mga_do_init_dma(dev, &init); 1014 err = mga_do_init_dma(dev, init);
1024 if (err) { 1015 if (err) {
1025 (void)mga_do_cleanup_dma(dev, FULL_CLEANUP); 1016 (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
1026 } 1017 }
@@ -1029,36 +1020,33 @@ int mga_dma_init(DRM_IOCTL_ARGS)
1029 return mga_do_cleanup_dma(dev, FULL_CLEANUP); 1020 return mga_do_cleanup_dma(dev, FULL_CLEANUP);
1030 } 1021 }
1031 1022
1032 return DRM_ERR(EINVAL); 1023 return -EINVAL;
1033} 1024}
1034 1025
1035/* ================================================================ 1026/* ================================================================
1036 * Primary DMA stream management 1027 * Primary DMA stream management
1037 */ 1028 */
1038 1029
1039int mga_dma_flush(DRM_IOCTL_ARGS) 1030int mga_dma_flush(struct drm_device *dev, void *data,
1031 struct drm_file *file_priv)
1040{ 1032{
1041 DRM_DEVICE;
1042 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 1033 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1043 struct drm_lock lock; 1034 struct drm_lock *lock = data;
1044
1045 LOCK_TEST_WITH_RETURN(dev, filp);
1046 1035
1047 DRM_COPY_FROM_USER_IOCTL(lock, (struct drm_lock __user *) data, 1036 LOCK_TEST_WITH_RETURN(dev, file_priv);
1048 sizeof(lock));
1049 1037
1050 DRM_DEBUG("%s%s%s\n", 1038 DRM_DEBUG("%s%s%s\n",
1051 (lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "", 1039 (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
1052 (lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "", 1040 (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
1053 (lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : ""); 1041 (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
1054 1042
1055 WRAP_WAIT_WITH_RETURN(dev_priv); 1043 WRAP_WAIT_WITH_RETURN(dev_priv);
1056 1044
1057 if (lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { 1045 if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
1058 mga_do_dma_flush(dev_priv); 1046 mga_do_dma_flush(dev_priv);
1059 } 1047 }
1060 1048
1061 if (lock.flags & _DRM_LOCK_QUIESCENT) { 1049 if (lock->flags & _DRM_LOCK_QUIESCENT) {
1062#if MGA_DMA_DEBUG 1050#if MGA_DMA_DEBUG
1063 int ret = mga_do_wait_for_idle(dev_priv); 1051 int ret = mga_do_wait_for_idle(dev_priv);
1064 if (ret < 0) 1052 if (ret < 0)
@@ -1072,12 +1060,12 @@ int mga_dma_flush(DRM_IOCTL_ARGS)
1072 } 1060 }
1073} 1061}
1074 1062
1075int mga_dma_reset(DRM_IOCTL_ARGS) 1063int mga_dma_reset(struct drm_device *dev, void *data,
1064 struct drm_file *file_priv)
1076{ 1065{
1077 DRM_DEVICE;
1078 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 1066 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1079 1067
1080 LOCK_TEST_WITH_RETURN(dev, filp); 1068 LOCK_TEST_WITH_RETURN(dev, file_priv);
1081 1069
1082 return mga_do_dma_reset(dev_priv); 1070 return mga_do_dma_reset(dev_priv);
1083} 1071}
@@ -1086,7 +1074,8 @@ int mga_dma_reset(DRM_IOCTL_ARGS)
1086 * DMA buffer management 1074 * DMA buffer management
1087 */ 1075 */
1088 1076
1089static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) 1077static int mga_dma_get_buffers(struct drm_device * dev,
1078 struct drm_file *file_priv, struct drm_dma * d)
1090{ 1079{
1091 struct drm_buf *buf; 1080 struct drm_buf *buf;
1092 int i; 1081 int i;
@@ -1094,61 +1083,56 @@ static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm
1094 for (i = d->granted_count; i < d->request_count; i++) { 1083 for (i = d->granted_count; i < d->request_count; i++) {
1095 buf = mga_freelist_get(dev); 1084 buf = mga_freelist_get(dev);
1096 if (!buf) 1085 if (!buf)
1097 return DRM_ERR(EAGAIN); 1086 return -EAGAIN;
1098 1087
1099 buf->filp = filp; 1088 buf->file_priv = file_priv;
1100 1089
1101 if (DRM_COPY_TO_USER(&d->request_indices[i], 1090 if (DRM_COPY_TO_USER(&d->request_indices[i],
1102 &buf->idx, sizeof(buf->idx))) 1091 &buf->idx, sizeof(buf->idx)))
1103 return DRM_ERR(EFAULT); 1092 return -EFAULT;
1104 if (DRM_COPY_TO_USER(&d->request_sizes[i], 1093 if (DRM_COPY_TO_USER(&d->request_sizes[i],
1105 &buf->total, sizeof(buf->total))) 1094 &buf->total, sizeof(buf->total)))
1106 return DRM_ERR(EFAULT); 1095 return -EFAULT;
1107 1096
1108 d->granted_count++; 1097 d->granted_count++;
1109 } 1098 }
1110 return 0; 1099 return 0;
1111} 1100}
1112 1101
1113int mga_dma_buffers(DRM_IOCTL_ARGS) 1102int mga_dma_buffers(struct drm_device *dev, void *data,
1103 struct drm_file *file_priv)
1114{ 1104{
1115 DRM_DEVICE;
1116 struct drm_device_dma *dma = dev->dma; 1105 struct drm_device_dma *dma = dev->dma;
1117 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 1106 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1118 struct drm_dma __user *argp = (void __user *)data; 1107 struct drm_dma *d = data;
1119 struct drm_dma d;
1120 int ret = 0; 1108 int ret = 0;
1121 1109
1122 LOCK_TEST_WITH_RETURN(dev, filp); 1110 LOCK_TEST_WITH_RETURN(dev, file_priv);
1123
1124 DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
1125 1111
1126 /* Please don't send us buffers. 1112 /* Please don't send us buffers.
1127 */ 1113 */
1128 if (d.send_count != 0) { 1114 if (d->send_count != 0) {
1129 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 1115 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1130 DRM_CURRENTPID, d.send_count); 1116 DRM_CURRENTPID, d->send_count);
1131 return DRM_ERR(EINVAL); 1117 return -EINVAL;
1132 } 1118 }
1133 1119
1134 /* We'll send you buffers. 1120 /* We'll send you buffers.
1135 */ 1121 */
1136 if (d.request_count < 0 || d.request_count > dma->buf_count) { 1122 if (d->request_count < 0 || d->request_count > dma->buf_count) {
1137 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 1123 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1138 DRM_CURRENTPID, d.request_count, dma->buf_count); 1124 DRM_CURRENTPID, d->request_count, dma->buf_count);
1139 return DRM_ERR(EINVAL); 1125 return -EINVAL;
1140 } 1126 }
1141 1127
1142 WRAP_TEST_WITH_RETURN(dev_priv); 1128 WRAP_TEST_WITH_RETURN(dev_priv);
1143 1129
1144 d.granted_count = 0; 1130 d->granted_count = 0;
1145 1131
1146 if (d.request_count) { 1132 if (d->request_count) {
1147 ret = mga_dma_get_buffers(filp, dev, &d); 1133 ret = mga_dma_get_buffers(dev, file_priv, d);
1148 } 1134 }
1149 1135
1150 DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
1151
1152 return ret; 1136 return ret;
1153} 1137}
1154 1138
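The mga_dma.c hunks above all follow one pattern: the handler keeps its old body, but the explicit DRM_COPY_FROM_USER_IOCTL()/DRM_COPY_TO_USER_IOCTL() calls disappear because the DRM ioctl core now copies the argument block once and passes the handler a kernel-side pointer in `data`. The sketch below is a minimal user-space analogy of that dispatch shape, not the kernel implementation; all `fake_*` names and the 64-byte bounce buffer are invented for illustration.

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct fake_device { int irq; };
struct fake_file { int authenticated; };

/* New-style handler signature: (dev, data, file_priv). */
typedef int (*fake_ioctl_t)(struct fake_device *dev, void *data,
                            struct fake_file *file_priv);

struct fake_getparam {
        int param;
        int *value;               /* still a caller-side pointer in the real interface */
};

static int fake_getparam(struct fake_device *dev, void *data,
                         struct fake_file *file_priv)
{
        struct fake_getparam *p = data;   /* already copied in for us */

        (void)file_priv;
        if (p->param != 0)
                return -EINVAL;           /* plain negative errno, no DRM_ERR() wrapper */
        *p->value = dev->irq;
        return 0;
}

/* The dispatcher owns the single copy-in/copy-out around the handler call. */
static int fake_ioctl_dispatch(struct fake_device *dev, struct fake_file *fp,
                               fake_ioctl_t handler, void *user_arg,
                               size_t size)
{
        long long kbuf[8];                /* 64-byte, suitably aligned bounce buffer */
        int ret;

        if (size > sizeof(kbuf))
                return -EINVAL;
        memcpy(kbuf, user_arg, size);     /* stands in for copy_from_user() */
        ret = handler(dev, kbuf, fp);
        memcpy(user_arg, kbuf, size);     /* stands in for copy_to_user() */
        return ret;
}

int main(void)
{
        struct fake_device dev = { .irq = 16 };
        struct fake_file fp = { .authenticated = 1 };
        int value = 0;
        struct fake_getparam arg = { .param = 0, .value = &value };
        int ret = fake_ioctl_dispatch(&dev, &fp, fake_getparam, &arg,
                                      sizeof(arg));

        printf("ret=%d value=%d\n", ret, value);
        return 0;
}

With the copy handled once in the dispatcher, handlers such as mga_dma_bootstrap() and mga_dma_buffers() in the diff can write results straight into the structure behind `data` and simply return.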
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index 49253affa475..cd94c04e31c0 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -148,15 +148,20 @@ typedef struct drm_mga_private {
148 unsigned int agp_size; 148 unsigned int agp_size;
149} drm_mga_private_t; 149} drm_mga_private_t;
150 150
151extern drm_ioctl_desc_t mga_ioctls[]; 151extern struct drm_ioctl_desc mga_ioctls[];
152extern int mga_max_ioctl; 152extern int mga_max_ioctl;
153 153
154 /* mga_dma.c */ 154 /* mga_dma.c */
155extern int mga_dma_bootstrap(DRM_IOCTL_ARGS); 155extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
156extern int mga_dma_init(DRM_IOCTL_ARGS); 156 struct drm_file *file_priv);
157extern int mga_dma_flush(DRM_IOCTL_ARGS); 157extern int mga_dma_init(struct drm_device *dev, void *data,
158extern int mga_dma_reset(DRM_IOCTL_ARGS); 158 struct drm_file *file_priv);
159extern int mga_dma_buffers(DRM_IOCTL_ARGS); 159extern int mga_dma_flush(struct drm_device *dev, void *data,
160 struct drm_file *file_priv);
161extern int mga_dma_reset(struct drm_device *dev, void *data,
162 struct drm_file *file_priv);
163extern int mga_dma_buffers(struct drm_device *dev, void *data,
164 struct drm_file *file_priv);
160extern int mga_driver_load(struct drm_device *dev, unsigned long flags); 165extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
161extern int mga_driver_unload(struct drm_device * dev); 166extern int mga_driver_unload(struct drm_device * dev);
162extern void mga_driver_lastclose(struct drm_device * dev); 167extern void mga_driver_lastclose(struct drm_device * dev);
@@ -245,7 +250,7 @@ do { \
245 dev_priv->prim.high_mark ) { \ 250 dev_priv->prim.high_mark ) { \
246 if ( MGA_DMA_DEBUG ) \ 251 if ( MGA_DMA_DEBUG ) \
247 DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ 252 DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
248 return DRM_ERR(EBUSY); \ 253 return -EBUSY; \
249 } \ 254 } \
250 } \ 255 } \
251} while (0) 256} while (0)
@@ -256,7 +261,7 @@ do { \
256 if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \ 261 if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
257 if ( MGA_DMA_DEBUG ) \ 262 if ( MGA_DMA_DEBUG ) \
258 DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ 263 DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
259 return DRM_ERR(EBUSY); \ 264 return -EBUSY; \
260 } \ 265 } \
261 mga_do_dma_wrap_end( dev_priv ); \ 266 mga_do_dma_wrap_end( dev_priv ); \
262 } \ 267 } \
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index d448b0aef33c..5ec8b61c5d45 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -392,7 +392,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv)
392 ctx->dstorg, dev_priv->front_offset, 392 ctx->dstorg, dev_priv->front_offset,
393 dev_priv->back_offset); 393 dev_priv->back_offset);
394 ctx->dstorg = 0; 394 ctx->dstorg = 0;
395 return DRM_ERR(EINVAL); 395 return -EINVAL;
396 } 396 }
397 397
398 return 0; 398 return 0;
@@ -411,7 +411,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
411 if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) { 411 if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
412 DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit); 412 DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
413 tex->texorg = 0; 413 tex->texorg = 0;
414 return DRM_ERR(EINVAL); 414 return -EINVAL;
415 } 415 }
416 416
417 return 0; 417 return 0;
@@ -453,13 +453,13 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv,
453 dstorg + length > (dev_priv->texture_offset + 453 dstorg + length > (dev_priv->texture_offset +
454 dev_priv->texture_size)) { 454 dev_priv->texture_size)) {
455 DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg); 455 DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
456 return DRM_ERR(EINVAL); 456 return -EINVAL;
457 } 457 }
458 458
459 if (length & MGA_ILOAD_MASK) { 459 if (length & MGA_ILOAD_MASK) {
460 DRM_ERROR("*** bad iload length: 0x%x\n", 460 DRM_ERROR("*** bad iload length: 0x%x\n",
461 length & MGA_ILOAD_MASK); 461 length & MGA_ILOAD_MASK);
462 return DRM_ERR(EINVAL); 462 return -EINVAL;
463 } 463 }
464 464
465 return 0; 465 return 0;
@@ -471,7 +471,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
471 if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || 471 if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
472 (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) { 472 (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
473 DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg); 473 DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
474 return DRM_ERR(EINVAL); 474 return -EINVAL;
475 } 475 }
476 return 0; 476 return 0;
477} 477}
@@ -828,24 +828,20 @@ static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit
828 * 828 *
829 */ 829 */
830 830
831static int mga_dma_clear(DRM_IOCTL_ARGS) 831static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
832{ 832{
833 DRM_DEVICE;
834 drm_mga_private_t *dev_priv = dev->dev_private; 833 drm_mga_private_t *dev_priv = dev->dev_private;
835 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 834 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
836 drm_mga_clear_t clear; 835 drm_mga_clear_t *clear = data;
837 836
838 LOCK_TEST_WITH_RETURN(dev, filp); 837 LOCK_TEST_WITH_RETURN(dev, file_priv);
839
840 DRM_COPY_FROM_USER_IOCTL(clear, (drm_mga_clear_t __user *) data,
841 sizeof(clear));
842 838
843 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) 839 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
844 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; 840 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
845 841
846 WRAP_TEST_WITH_RETURN(dev_priv); 842 WRAP_TEST_WITH_RETURN(dev_priv);
847 843
848 mga_dma_dispatch_clear(dev, &clear); 844 mga_dma_dispatch_clear(dev, clear);
849 845
850 /* Make sure we restore the 3D state next time. 846 /* Make sure we restore the 3D state next time.
851 */ 847 */
@@ -854,13 +850,12 @@ static int mga_dma_clear(DRM_IOCTL_ARGS)
854 return 0; 850 return 0;
855} 851}
856 852
857static int mga_dma_swap(DRM_IOCTL_ARGS) 853static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
858{ 854{
859 DRM_DEVICE;
860 drm_mga_private_t *dev_priv = dev->dev_private; 855 drm_mga_private_t *dev_priv = dev->dev_private;
861 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 856 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
862 857
863 LOCK_TEST_WITH_RETURN(dev, filp); 858 LOCK_TEST_WITH_RETURN(dev, file_priv);
864 859
865 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) 860 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
866 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; 861 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
@@ -876,37 +871,32 @@ static int mga_dma_swap(DRM_IOCTL_ARGS)
876 return 0; 871 return 0;
877} 872}
878 873
879static int mga_dma_vertex(DRM_IOCTL_ARGS) 874static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
880{ 875{
881 DRM_DEVICE;
882 drm_mga_private_t *dev_priv = dev->dev_private; 876 drm_mga_private_t *dev_priv = dev->dev_private;
883 struct drm_device_dma *dma = dev->dma; 877 struct drm_device_dma *dma = dev->dma;
884 struct drm_buf *buf; 878 struct drm_buf *buf;
885 drm_mga_buf_priv_t *buf_priv; 879 drm_mga_buf_priv_t *buf_priv;
886 drm_mga_vertex_t vertex; 880 drm_mga_vertex_t *vertex = data;
887
888 LOCK_TEST_WITH_RETURN(dev, filp);
889 881
890 DRM_COPY_FROM_USER_IOCTL(vertex, 882 LOCK_TEST_WITH_RETURN(dev, file_priv);
891 (drm_mga_vertex_t __user *) data,
892 sizeof(vertex));
893 883
894 if (vertex.idx < 0 || vertex.idx > dma->buf_count) 884 if (vertex->idx < 0 || vertex->idx > dma->buf_count)
895 return DRM_ERR(EINVAL); 885 return -EINVAL;
896 buf = dma->buflist[vertex.idx]; 886 buf = dma->buflist[vertex->idx];
897 buf_priv = buf->dev_private; 887 buf_priv = buf->dev_private;
898 888
899 buf->used = vertex.used; 889 buf->used = vertex->used;
900 buf_priv->discard = vertex.discard; 890 buf_priv->discard = vertex->discard;
901 891
902 if (!mga_verify_state(dev_priv)) { 892 if (!mga_verify_state(dev_priv)) {
903 if (vertex.discard) { 893 if (vertex->discard) {
904 if (buf_priv->dispatched == 1) 894 if (buf_priv->dispatched == 1)
905 AGE_BUFFER(buf_priv); 895 AGE_BUFFER(buf_priv);
906 buf_priv->dispatched = 0; 896 buf_priv->dispatched = 0;
907 mga_freelist_put(dev, buf); 897 mga_freelist_put(dev, buf);
908 } 898 }
909 return DRM_ERR(EINVAL); 899 return -EINVAL;
910 } 900 }
911 901
912 WRAP_TEST_WITH_RETURN(dev_priv); 902 WRAP_TEST_WITH_RETURN(dev_priv);
@@ -916,82 +906,73 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS)
916 return 0; 906 return 0;
917} 907}
918 908
919static int mga_dma_indices(DRM_IOCTL_ARGS) 909static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
920{ 910{
921 DRM_DEVICE;
922 drm_mga_private_t *dev_priv = dev->dev_private; 911 drm_mga_private_t *dev_priv = dev->dev_private;
923 struct drm_device_dma *dma = dev->dma; 912 struct drm_device_dma *dma = dev->dma;
924 struct drm_buf *buf; 913 struct drm_buf *buf;
925 drm_mga_buf_priv_t *buf_priv; 914 drm_mga_buf_priv_t *buf_priv;
926 drm_mga_indices_t indices; 915 drm_mga_indices_t *indices = data;
927 916
928 LOCK_TEST_WITH_RETURN(dev, filp); 917 LOCK_TEST_WITH_RETURN(dev, file_priv);
929 918
930 DRM_COPY_FROM_USER_IOCTL(indices, 919 if (indices->idx < 0 || indices->idx > dma->buf_count)
931 (drm_mga_indices_t __user *) data, 920 return -EINVAL;
932 sizeof(indices));
933 921
934 if (indices.idx < 0 || indices.idx > dma->buf_count) 922 buf = dma->buflist[indices->idx];
935 return DRM_ERR(EINVAL);
936
937 buf = dma->buflist[indices.idx];
938 buf_priv = buf->dev_private; 923 buf_priv = buf->dev_private;
939 924
940 buf_priv->discard = indices.discard; 925 buf_priv->discard = indices->discard;
941 926
942 if (!mga_verify_state(dev_priv)) { 927 if (!mga_verify_state(dev_priv)) {
943 if (indices.discard) { 928 if (indices->discard) {
944 if (buf_priv->dispatched == 1) 929 if (buf_priv->dispatched == 1)
945 AGE_BUFFER(buf_priv); 930 AGE_BUFFER(buf_priv);
946 buf_priv->dispatched = 0; 931 buf_priv->dispatched = 0;
947 mga_freelist_put(dev, buf); 932 mga_freelist_put(dev, buf);
948 } 933 }
949 return DRM_ERR(EINVAL); 934 return -EINVAL;
950 } 935 }
951 936
952 WRAP_TEST_WITH_RETURN(dev_priv); 937 WRAP_TEST_WITH_RETURN(dev_priv);
953 938
954 mga_dma_dispatch_indices(dev, buf, indices.start, indices.end); 939 mga_dma_dispatch_indices(dev, buf, indices->start, indices->end);
955 940
956 return 0; 941 return 0;
957} 942}
958 943
959static int mga_dma_iload(DRM_IOCTL_ARGS) 944static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv)
960{ 945{
961 DRM_DEVICE;
962 struct drm_device_dma *dma = dev->dma; 946 struct drm_device_dma *dma = dev->dma;
963 drm_mga_private_t *dev_priv = dev->dev_private; 947 drm_mga_private_t *dev_priv = dev->dev_private;
964 struct drm_buf *buf; 948 struct drm_buf *buf;
965 drm_mga_buf_priv_t *buf_priv; 949 drm_mga_buf_priv_t *buf_priv;
966 drm_mga_iload_t iload; 950 drm_mga_iload_t *iload = data;
967 DRM_DEBUG("\n"); 951 DRM_DEBUG("\n");
968 952
969 LOCK_TEST_WITH_RETURN(dev, filp); 953 LOCK_TEST_WITH_RETURN(dev, file_priv);
970
971 DRM_COPY_FROM_USER_IOCTL(iload, (drm_mga_iload_t __user *) data,
972 sizeof(iload));
973 954
974#if 0 955#if 0
975 if (mga_do_wait_for_idle(dev_priv) < 0) { 956 if (mga_do_wait_for_idle(dev_priv) < 0) {
976 if (MGA_DMA_DEBUG) 957 if (MGA_DMA_DEBUG)
977 DRM_INFO("%s: -EBUSY\n", __FUNCTION__); 958 DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
978 return DRM_ERR(EBUSY); 959 return -EBUSY;
979 } 960 }
980#endif 961#endif
981 if (iload.idx < 0 || iload.idx > dma->buf_count) 962 if (iload->idx < 0 || iload->idx > dma->buf_count)
982 return DRM_ERR(EINVAL); 963 return -EINVAL;
983 964
984 buf = dma->buflist[iload.idx]; 965 buf = dma->buflist[iload->idx];
985 buf_priv = buf->dev_private; 966 buf_priv = buf->dev_private;
986 967
987 if (mga_verify_iload(dev_priv, iload.dstorg, iload.length)) { 968 if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
988 mga_freelist_put(dev, buf); 969 mga_freelist_put(dev, buf);
989 return DRM_ERR(EINVAL); 970 return -EINVAL;
990 } 971 }
991 972
992 WRAP_TEST_WITH_RETURN(dev_priv); 973 WRAP_TEST_WITH_RETURN(dev_priv);
993 974
994 mga_dma_dispatch_iload(dev, buf, iload.dstorg, iload.length); 975 mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length);
995 976
996 /* Make sure we restore the 3D state next time. 977 /* Make sure we restore the 3D state next time.
997 */ 978 */
@@ -1000,28 +981,24 @@ static int mga_dma_iload(DRM_IOCTL_ARGS)
1000 return 0; 981 return 0;
1001} 982}
1002 983
1003static int mga_dma_blit(DRM_IOCTL_ARGS) 984static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
1004{ 985{
1005 DRM_DEVICE;
1006 drm_mga_private_t *dev_priv = dev->dev_private; 986 drm_mga_private_t *dev_priv = dev->dev_private;
1007 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 987 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
1008 drm_mga_blit_t blit; 988 drm_mga_blit_t *blit = data;
1009 DRM_DEBUG("\n"); 989 DRM_DEBUG("\n");
1010 990
1011 LOCK_TEST_WITH_RETURN(dev, filp); 991 LOCK_TEST_WITH_RETURN(dev, file_priv);
1012
1013 DRM_COPY_FROM_USER_IOCTL(blit, (drm_mga_blit_t __user *) data,
1014 sizeof(blit));
1015 992
1016 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) 993 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
1017 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; 994 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
1018 995
1019 if (mga_verify_blit(dev_priv, blit.srcorg, blit.dstorg)) 996 if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
1020 return DRM_ERR(EINVAL); 997 return -EINVAL;
1021 998
1022 WRAP_TEST_WITH_RETURN(dev_priv); 999 WRAP_TEST_WITH_RETURN(dev_priv);
1023 1000
1024 mga_dma_dispatch_blit(dev, &blit); 1001 mga_dma_dispatch_blit(dev, blit);
1025 1002
1026 /* Make sure we restore the 3D state next time. 1003 /* Make sure we restore the 3D state next time.
1027 */ 1004 */
@@ -1030,24 +1007,20 @@ static int mga_dma_blit(DRM_IOCTL_ARGS)
1030 return 0; 1007 return 0;
1031} 1008}
1032 1009
1033static int mga_getparam(DRM_IOCTL_ARGS) 1010static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
1034{ 1011{
1035 DRM_DEVICE;
1036 drm_mga_private_t *dev_priv = dev->dev_private; 1012 drm_mga_private_t *dev_priv = dev->dev_private;
1037 drm_mga_getparam_t param; 1013 drm_mga_getparam_t *param = data;
1038 int value; 1014 int value;
1039 1015
1040 if (!dev_priv) { 1016 if (!dev_priv) {
1041 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1017 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1042 return DRM_ERR(EINVAL); 1018 return -EINVAL;
1043 } 1019 }
1044 1020
1045 DRM_COPY_FROM_USER_IOCTL(param, (drm_mga_getparam_t __user *) data,
1046 sizeof(param));
1047
1048 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1021 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1049 1022
1050 switch (param.param) { 1023 switch (param->param) {
1051 case MGA_PARAM_IRQ_NR: 1024 case MGA_PARAM_IRQ_NR:
1052 value = dev->irq; 1025 value = dev->irq;
1053 break; 1026 break;
@@ -1055,36 +1028,35 @@ static int mga_getparam(DRM_IOCTL_ARGS)
1055 value = dev_priv->chipset; 1028 value = dev_priv->chipset;
1056 break; 1029 break;
1057 default: 1030 default:
1058 return DRM_ERR(EINVAL); 1031 return -EINVAL;
1059 } 1032 }
1060 1033
1061 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 1034 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1062 DRM_ERROR("copy_to_user\n"); 1035 DRM_ERROR("copy_to_user\n");
1063 return DRM_ERR(EFAULT); 1036 return -EFAULT;
1064 } 1037 }
1065 1038
1066 return 0; 1039 return 0;
1067} 1040}
1068 1041
1069static int mga_set_fence(DRM_IOCTL_ARGS) 1042static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
1070{ 1043{
1071 DRM_DEVICE;
1072 drm_mga_private_t *dev_priv = dev->dev_private; 1044 drm_mga_private_t *dev_priv = dev->dev_private;
1073 u32 temp; 1045 u32 *fence = data;
1074 DMA_LOCALS; 1046 DMA_LOCALS;
1075 1047
1076 if (!dev_priv) { 1048 if (!dev_priv) {
1077 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1049 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1078 return DRM_ERR(EINVAL); 1050 return -EINVAL;
1079 } 1051 }
1080 1052
1081 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1053 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1082 1054
1083 /* I would normally do this assignment in the declaration of temp, 1055 /* I would normally do this assignment in the declaration of fence,
1084 * but dev_priv may be NULL. 1056 * but dev_priv may be NULL.
1085 */ 1057 */
1086 1058
1087 temp = dev_priv->next_fence_to_post; 1059 *fence = dev_priv->next_fence_to_post;
1088 dev_priv->next_fence_to_post++; 1060 dev_priv->next_fence_to_post++;
1089 1061
1090 BEGIN_DMA(1); 1062 BEGIN_DMA(1);
@@ -1093,53 +1065,40 @@ static int mga_set_fence(DRM_IOCTL_ARGS)
1093 MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000); 1065 MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000);
1094 ADVANCE_DMA(); 1066 ADVANCE_DMA();
1095 1067
1096 if (DRM_COPY_TO_USER((u32 __user *) data, &temp, sizeof(u32))) {
1097 DRM_ERROR("copy_to_user\n");
1098 return DRM_ERR(EFAULT);
1099 }
1100
1101 return 0; 1068 return 0;
1102} 1069}
1103 1070
1104static int mga_wait_fence(DRM_IOCTL_ARGS) 1071static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *
1072file_priv)
1105{ 1073{
1106 DRM_DEVICE;
1107 drm_mga_private_t *dev_priv = dev->dev_private; 1074 drm_mga_private_t *dev_priv = dev->dev_private;
1108 u32 fence; 1075 u32 *fence = data;
1109 1076
1110 if (!dev_priv) { 1077 if (!dev_priv) {
1111 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1078 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1112 return DRM_ERR(EINVAL); 1079 return -EINVAL;
1113 } 1080 }
1114 1081
1115 DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32));
1116
1117 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1082 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1118 1083
1119 mga_driver_fence_wait(dev, &fence); 1084 mga_driver_fence_wait(dev, fence);
1120
1121 if (DRM_COPY_TO_USER((u32 __user *) data, &fence, sizeof(u32))) {
1122 DRM_ERROR("copy_to_user\n");
1123 return DRM_ERR(EFAULT);
1124 }
1125
1126 return 0; 1085 return 0;
1127} 1086}
1128 1087
1129drm_ioctl_desc_t mga_ioctls[] = { 1088struct drm_ioctl_desc mga_ioctls[] = {
1130 [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1089 DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1131 [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH}, 1090 DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
1132 [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH}, 1091 DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
1133 [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, DRM_AUTH}, 1092 DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
1134 [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, DRM_AUTH}, 1093 DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
1135 [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, DRM_AUTH}, 1094 DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
1136 [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, DRM_AUTH}, 1095 DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
1137 [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, DRM_AUTH}, 1096 DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
1138 [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, DRM_AUTH}, 1097 DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
1139 [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, DRM_AUTH}, 1098 DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
1140 [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, DRM_AUTH}, 1099 DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
1141 [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, DRM_AUTH}, 1100 DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
1142 [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1101 DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1143}; 1102};
1144 1103
1145int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); 1104int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
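The mga_ioctls[] rewrite just above replaces hand-written array-index initializers with DRM_IOCTL_DEF() entries that bundle the command number, handler, and permission flags per slot. The stand-alone sketch below uses a hypothetical SKETCH_IOCTL_DEF() macro in the same spirit; it is not the real drmP.h definition, and all SKETCH_* names are invented.

#include <stdio.h>

#define SKETCH_NR(cmd)   ((cmd) & 0xff)   /* low byte selects the table slot */
#define SKETCH_AUTH      0x1
#define SKETCH_MASTER    0x2

struct sketch_ioctl_desc {
        unsigned int cmd;
        int (*func)(void *dev, void *data, void *file_priv);
        unsigned int flags;
};

/* One macro fills the slot and records the command it belongs to. */
#define SKETCH_IOCTL_DEF(ioctl, _func, _flags)                          \
        [SKETCH_NR(ioctl)] = { .cmd = (ioctl), .func = (_func),         \
                               .flags = (_flags) }

#define SKETCH_CMD_FLUSH 0x01
#define SKETCH_CMD_RESET 0x02

static int sketch_flush(void *dev, void *data, void *file_priv)
{
        (void)dev; (void)data; (void)file_priv;
        return 0;
}

static int sketch_reset(void *dev, void *data, void *file_priv)
{
        (void)dev; (void)data; (void)file_priv;
        return 0;
}

static const struct sketch_ioctl_desc sketch_ioctls[] = {
        SKETCH_IOCTL_DEF(SKETCH_CMD_FLUSH, sketch_flush, SKETCH_AUTH),
        SKETCH_IOCTL_DEF(SKETCH_CMD_RESET, sketch_reset, SKETCH_AUTH | SKETCH_MASTER),
};

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(sketch_ioctls) / sizeof(sketch_ioctls[0]); i++)
                printf("slot %u: cmd 0x%02x flags 0x%x\n",
                       i, sketch_ioctls[i].cmd, sketch_ioctls[i].flags);
        return 0;
}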
diff --git a/drivers/char/drm/mga_warp.c b/drivers/char/drm/mga_warp.c
index d67f4925fbac..651b93c8ab5d 100644
--- a/drivers/char/drm/mga_warp.c
+++ b/drivers/char/drm/mga_warp.c
@@ -141,7 +141,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
141 if (size > dev_priv->warp->size) { 141 if (size > dev_priv->warp->size) {
142 DRM_ERROR("microcode too large! (%u > %lu)\n", 142 DRM_ERROR("microcode too large! (%u > %lu)\n",
143 size, dev_priv->warp->size); 143 size, dev_priv->warp->size);
144 return DRM_ERR(ENOMEM); 144 return -ENOMEM;
145 } 145 }
146 146
147 switch (dev_priv->chipset) { 147 switch (dev_priv->chipset) {
@@ -151,7 +151,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
151 case MGA_CARD_TYPE_G200: 151 case MGA_CARD_TYPE_G200:
152 return mga_warp_install_g200_microcode(dev_priv); 152 return mga_warp_install_g200_microcode(dev_priv);
153 default: 153 default:
154 return DRM_ERR(EINVAL); 154 return -EINVAL;
155 } 155 }
156} 156}
157 157
@@ -177,7 +177,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv)
177 MGA_WRITE(MGA_WVRTXSZ, 7); 177 MGA_WRITE(MGA_WVRTXSZ, 7);
178 break; 178 break;
179 default: 179 default:
180 return DRM_ERR(EINVAL); 180 return -EINVAL;
181 } 181 }
182 182
183 MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | 183 MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
@@ -186,7 +186,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv)
186 if (wmisc != WMISC_EXPECTED) { 186 if (wmisc != WMISC_EXPECTED) {
187 DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", 187 DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
188 wmisc, WMISC_EXPECTED); 188 wmisc, WMISC_EXPECTED);
189 return DRM_ERR(EINVAL); 189 return -EINVAL;
190 } 190 }
191 191
192 return 0; 192 return 0;
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index b163ed09bd81..7d550aba165e 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -129,7 +129,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
129#if R128_FIFO_DEBUG 129#if R128_FIFO_DEBUG
130 DRM_ERROR("failed!\n"); 130 DRM_ERROR("failed!\n");
131#endif 131#endif
132 return DRM_ERR(EBUSY); 132 return -EBUSY;
133} 133}
134 134
135static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) 135static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
@@ -146,7 +146,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
146#if R128_FIFO_DEBUG 146#if R128_FIFO_DEBUG
147 DRM_ERROR("failed!\n"); 147 DRM_ERROR("failed!\n");
148#endif 148#endif
149 return DRM_ERR(EBUSY); 149 return -EBUSY;
150} 150}
151 151
152static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) 152static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
@@ -168,7 +168,7 @@ static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
168#if R128_FIFO_DEBUG 168#if R128_FIFO_DEBUG
169 DRM_ERROR("failed!\n"); 169 DRM_ERROR("failed!\n");
170#endif 170#endif
171 return DRM_ERR(EBUSY); 171 return -EBUSY;
172} 172}
173 173
174/* ================================================================ 174/* ================================================================
@@ -227,7 +227,7 @@ int r128_do_cce_idle(drm_r128_private_t * dev_priv)
227 DRM_ERROR("failed!\n"); 227 DRM_ERROR("failed!\n");
228 r128_status(dev_priv); 228 r128_status(dev_priv);
229#endif 229#endif
230 return DRM_ERR(EBUSY); 230 return -EBUSY;
231} 231}
232 232
233/* Start the Concurrent Command Engine. 233/* Start the Concurrent Command Engine.
@@ -355,7 +355,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
355 355
356 dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); 356 dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
357 if (dev_priv == NULL) 357 if (dev_priv == NULL)
358 return DRM_ERR(ENOMEM); 358 return -ENOMEM;
359 359
360 memset(dev_priv, 0, sizeof(drm_r128_private_t)); 360 memset(dev_priv, 0, sizeof(drm_r128_private_t));
361 361
@@ -365,7 +365,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
365 DRM_ERROR("PCI GART memory not allocated!\n"); 365 DRM_ERROR("PCI GART memory not allocated!\n");
366 dev->dev_private = (void *)dev_priv; 366 dev->dev_private = (void *)dev_priv;
367 r128_do_cleanup_cce(dev); 367 r128_do_cleanup_cce(dev);
368 return DRM_ERR(EINVAL); 368 return -EINVAL;
369 } 369 }
370 370
371 dev_priv->usec_timeout = init->usec_timeout; 371 dev_priv->usec_timeout = init->usec_timeout;
@@ -374,7 +374,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
374 DRM_DEBUG("TIMEOUT problem!\n"); 374 DRM_DEBUG("TIMEOUT problem!\n");
375 dev->dev_private = (void *)dev_priv; 375 dev->dev_private = (void *)dev_priv;
376 r128_do_cleanup_cce(dev); 376 r128_do_cleanup_cce(dev);
377 return DRM_ERR(EINVAL); 377 return -EINVAL;
378 } 378 }
379 379
380 dev_priv->cce_mode = init->cce_mode; 380 dev_priv->cce_mode = init->cce_mode;
@@ -394,7 +394,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
394 DRM_DEBUG("Bad cce_mode!\n"); 394 DRM_DEBUG("Bad cce_mode!\n");
395 dev->dev_private = (void *)dev_priv; 395 dev->dev_private = (void *)dev_priv;
396 r128_do_cleanup_cce(dev); 396 r128_do_cleanup_cce(dev);
397 return DRM_ERR(EINVAL); 397 return -EINVAL;
398 } 398 }
399 399
400 switch (init->cce_mode) { 400 switch (init->cce_mode) {
@@ -461,7 +461,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
461 DRM_ERROR("could not find sarea!\n"); 461 DRM_ERROR("could not find sarea!\n");
462 dev->dev_private = (void *)dev_priv; 462 dev->dev_private = (void *)dev_priv;
463 r128_do_cleanup_cce(dev); 463 r128_do_cleanup_cce(dev);
464 return DRM_ERR(EINVAL); 464 return -EINVAL;
465 } 465 }
466 466
467 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 467 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
@@ -469,21 +469,21 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
469 DRM_ERROR("could not find mmio region!\n"); 469 DRM_ERROR("could not find mmio region!\n");
470 dev->dev_private = (void *)dev_priv; 470 dev->dev_private = (void *)dev_priv;
471 r128_do_cleanup_cce(dev); 471 r128_do_cleanup_cce(dev);
472 return DRM_ERR(EINVAL); 472 return -EINVAL;
473 } 473 }
474 dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset); 474 dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
475 if (!dev_priv->cce_ring) { 475 if (!dev_priv->cce_ring) {
476 DRM_ERROR("could not find cce ring region!\n"); 476 DRM_ERROR("could not find cce ring region!\n");
477 dev->dev_private = (void *)dev_priv; 477 dev->dev_private = (void *)dev_priv;
478 r128_do_cleanup_cce(dev); 478 r128_do_cleanup_cce(dev);
479 return DRM_ERR(EINVAL); 479 return -EINVAL;
480 } 480 }
481 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 481 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
482 if (!dev_priv->ring_rptr) { 482 if (!dev_priv->ring_rptr) {
483 DRM_ERROR("could not find ring read pointer!\n"); 483 DRM_ERROR("could not find ring read pointer!\n");
484 dev->dev_private = (void *)dev_priv; 484 dev->dev_private = (void *)dev_priv;
485 r128_do_cleanup_cce(dev); 485 r128_do_cleanup_cce(dev);
486 return DRM_ERR(EINVAL); 486 return -EINVAL;
487 } 487 }
488 dev->agp_buffer_token = init->buffers_offset; 488 dev->agp_buffer_token = init->buffers_offset;
489 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 489 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
@@ -491,7 +491,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
491 DRM_ERROR("could not find dma buffer region!\n"); 491 DRM_ERROR("could not find dma buffer region!\n");
492 dev->dev_private = (void *)dev_priv; 492 dev->dev_private = (void *)dev_priv;
493 r128_do_cleanup_cce(dev); 493 r128_do_cleanup_cce(dev);
494 return DRM_ERR(EINVAL); 494 return -EINVAL;
495 } 495 }
496 496
497 if (!dev_priv->is_pci) { 497 if (!dev_priv->is_pci) {
@@ -501,7 +501,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
501 DRM_ERROR("could not find agp texture region!\n"); 501 DRM_ERROR("could not find agp texture region!\n");
502 dev->dev_private = (void *)dev_priv; 502 dev->dev_private = (void *)dev_priv;
503 r128_do_cleanup_cce(dev); 503 r128_do_cleanup_cce(dev);
504 return DRM_ERR(EINVAL); 504 return -EINVAL;
505 } 505 }
506 } 506 }
507 507
@@ -520,7 +520,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
520 DRM_ERROR("Could not ioremap agp regions!\n"); 520 DRM_ERROR("Could not ioremap agp regions!\n");
521 dev->dev_private = (void *)dev_priv; 521 dev->dev_private = (void *)dev_priv;
522 r128_do_cleanup_cce(dev); 522 r128_do_cleanup_cce(dev);
523 return DRM_ERR(ENOMEM); 523 return -ENOMEM;
524 } 524 }
525 } else 525 } else
526#endif 526#endif
@@ -567,7 +567,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
567 DRM_ERROR("failed to init PCI GART!\n"); 567 DRM_ERROR("failed to init PCI GART!\n");
568 dev->dev_private = (void *)dev_priv; 568 dev->dev_private = (void *)dev_priv;
569 r128_do_cleanup_cce(dev); 569 r128_do_cleanup_cce(dev);
570 return DRM_ERR(ENOMEM); 570 return -ENOMEM;
571 } 571 }
572 R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr); 572 R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
573#if __OS_HAS_AGP 573#if __OS_HAS_AGP
@@ -625,35 +625,30 @@ int r128_do_cleanup_cce(struct drm_device * dev)
625 return 0; 625 return 0;
626} 626}
627 627
628int r128_cce_init(DRM_IOCTL_ARGS) 628int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
629{ 629{
630 DRM_DEVICE; 630 drm_r128_init_t *init = data;
631 drm_r128_init_t init;
632 631
633 DRM_DEBUG("\n"); 632 DRM_DEBUG("\n");
634 633
635 LOCK_TEST_WITH_RETURN(dev, filp); 634 LOCK_TEST_WITH_RETURN(dev, file_priv);
636 635
637 DRM_COPY_FROM_USER_IOCTL(init, (drm_r128_init_t __user *) data, 636 switch (init->func) {
638 sizeof(init));
639
640 switch (init.func) {
641 case R128_INIT_CCE: 637 case R128_INIT_CCE:
642 return r128_do_init_cce(dev, &init); 638 return r128_do_init_cce(dev, init);
643 case R128_CLEANUP_CCE: 639 case R128_CLEANUP_CCE:
644 return r128_do_cleanup_cce(dev); 640 return r128_do_cleanup_cce(dev);
645 } 641 }
646 642
647 return DRM_ERR(EINVAL); 643 return -EINVAL;
648} 644}
649 645
650int r128_cce_start(DRM_IOCTL_ARGS) 646int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
651{ 647{
652 DRM_DEVICE;
653 drm_r128_private_t *dev_priv = dev->dev_private; 648 drm_r128_private_t *dev_priv = dev->dev_private;
654 DRM_DEBUG("\n"); 649 DRM_DEBUG("\n");
655 650
656 LOCK_TEST_WITH_RETURN(dev, filp); 651 LOCK_TEST_WITH_RETURN(dev, file_priv);
657 652
658 if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) { 653 if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
659 DRM_DEBUG("%s while CCE running\n", __FUNCTION__); 654 DRM_DEBUG("%s while CCE running\n", __FUNCTION__);
@@ -668,30 +663,26 @@ int r128_cce_start(DRM_IOCTL_ARGS)
668/* Stop the CCE. The engine must have been idled before calling this 663/* Stop the CCE. The engine must have been idled before calling this
669 * routine. 664 * routine.
670 */ 665 */
671int r128_cce_stop(DRM_IOCTL_ARGS) 666int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
672{ 667{
673 DRM_DEVICE;
674 drm_r128_private_t *dev_priv = dev->dev_private; 668 drm_r128_private_t *dev_priv = dev->dev_private;
675 drm_r128_cce_stop_t stop; 669 drm_r128_cce_stop_t *stop = data;
676 int ret; 670 int ret;
677 DRM_DEBUG("\n"); 671 DRM_DEBUG("\n");
678 672
679 LOCK_TEST_WITH_RETURN(dev, filp); 673 LOCK_TEST_WITH_RETURN(dev, file_priv);
680
681 DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *) data,
682 sizeof(stop));
683 674
684 /* Flush any pending CCE commands. This ensures any outstanding 675 /* Flush any pending CCE commands. This ensures any outstanding
685 * commands are executed by the engine before we turn it off. 676 * commands are executed by the engine before we turn it off.
686 */ 677 */
687 if (stop.flush) { 678 if (stop->flush) {
688 r128_do_cce_flush(dev_priv); 679 r128_do_cce_flush(dev_priv);
689 } 680 }
690 681
691 /* If we fail to make the engine go idle, we return an error 682 /* If we fail to make the engine go idle, we return an error
692 * code so that the DRM ioctl wrapper can try again. 683 * code so that the DRM ioctl wrapper can try again.
693 */ 684 */
694 if (stop.idle) { 685 if (stop->idle) {
695 ret = r128_do_cce_idle(dev_priv); 686 ret = r128_do_cce_idle(dev_priv);
696 if (ret) 687 if (ret)
697 return ret; 688 return ret;
@@ -711,17 +702,16 @@ int r128_cce_stop(DRM_IOCTL_ARGS)
711 702
712/* Just reset the CCE ring. Called as part of an X Server engine reset. 703/* Just reset the CCE ring. Called as part of an X Server engine reset.
713 */ 704 */
714int r128_cce_reset(DRM_IOCTL_ARGS) 705int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
715{ 706{
716 DRM_DEVICE;
717 drm_r128_private_t *dev_priv = dev->dev_private; 707 drm_r128_private_t *dev_priv = dev->dev_private;
718 DRM_DEBUG("\n"); 708 DRM_DEBUG("\n");
719 709
720 LOCK_TEST_WITH_RETURN(dev, filp); 710 LOCK_TEST_WITH_RETURN(dev, file_priv);
721 711
722 if (!dev_priv) { 712 if (!dev_priv) {
723 DRM_DEBUG("%s called before init done\n", __FUNCTION__); 713 DRM_DEBUG("%s called before init done\n", __FUNCTION__);
724 return DRM_ERR(EINVAL); 714 return -EINVAL;
725 } 715 }
726 716
727 r128_do_cce_reset(dev_priv); 717 r128_do_cce_reset(dev_priv);
@@ -732,13 +722,12 @@ int r128_cce_reset(DRM_IOCTL_ARGS)
732 return 0; 722 return 0;
733} 723}
734 724
735int r128_cce_idle(DRM_IOCTL_ARGS) 725int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
736{ 726{
737 DRM_DEVICE;
738 drm_r128_private_t *dev_priv = dev->dev_private; 727 drm_r128_private_t *dev_priv = dev->dev_private;
739 DRM_DEBUG("\n"); 728 DRM_DEBUG("\n");
740 729
741 LOCK_TEST_WITH_RETURN(dev, filp); 730 LOCK_TEST_WITH_RETURN(dev, file_priv);
742 731
743 if (dev_priv->cce_running) { 732 if (dev_priv->cce_running) {
744 r128_do_cce_flush(dev_priv); 733 r128_do_cce_flush(dev_priv);
@@ -747,19 +736,18 @@ int r128_cce_idle(DRM_IOCTL_ARGS)
747 return r128_do_cce_idle(dev_priv); 736 return r128_do_cce_idle(dev_priv);
748} 737}
749 738
750int r128_engine_reset(DRM_IOCTL_ARGS) 739int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
751{ 740{
752 DRM_DEVICE;
753 DRM_DEBUG("\n"); 741 DRM_DEBUG("\n");
754 742
755 LOCK_TEST_WITH_RETURN(dev, filp); 743 LOCK_TEST_WITH_RETURN(dev, file_priv);
756 744
757 return r128_do_engine_reset(dev); 745 return r128_do_engine_reset(dev);
758} 746}
759 747
760int r128_fullscreen(DRM_IOCTL_ARGS) 748int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
761{ 749{
762 return DRM_ERR(EINVAL); 750 return -EINVAL;
763} 751}
764 752
765/* ================================================================ 753/* ================================================================
@@ -780,7 +768,7 @@ static int r128_freelist_init(struct drm_device * dev)
780 768
781 dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 769 dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
782 if (dev_priv->head == NULL) 770 if (dev_priv->head == NULL)
783 return DRM_ERR(ENOMEM); 771 return -ENOMEM;
784 772
785 memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t)); 773 memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
786 dev_priv->head->age = R128_BUFFER_USED; 774 dev_priv->head->age = R128_BUFFER_USED;
@@ -791,7 +779,7 @@ static int r128_freelist_init(struct drm_device * dev)
791 779
792 entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 780 entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
793 if (!entry) 781 if (!entry)
794 return DRM_ERR(ENOMEM); 782 return -ENOMEM;
795 783
796 entry->age = R128_BUFFER_FREE; 784 entry->age = R128_BUFFER_FREE;
797 entry->buf = buf; 785 entry->buf = buf;
@@ -828,7 +816,7 @@ static struct drm_buf *r128_freelist_get(struct drm_device * dev)
828 for (i = 0; i < dma->buf_count; i++) { 816 for (i = 0; i < dma->buf_count; i++) {
829 buf = dma->buflist[i]; 817 buf = dma->buflist[i];
830 buf_priv = buf->dev_private; 818 buf_priv = buf->dev_private;
831 if (buf->filp == 0) 819 if (buf->file_priv == 0)
832 return buf; 820 return buf;
833 } 821 }
834 822
@@ -883,10 +871,12 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
883 871
884 /* FIXME: This is being ignored... */ 872 /* FIXME: This is being ignored... */
885 DRM_ERROR("failed!\n"); 873 DRM_ERROR("failed!\n");
886 return DRM_ERR(EBUSY); 874 return -EBUSY;
887} 875}
888 876
889static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) 877static int r128_cce_get_buffers(struct drm_device * dev,
878 struct drm_file *file_priv,
879 struct drm_dma * d)
890{ 880{
891 int i; 881 int i;
892 struct drm_buf *buf; 882 struct drm_buf *buf;
@@ -894,57 +884,51 @@ static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct dr
894 for (i = d->granted_count; i < d->request_count; i++) { 884 for (i = d->granted_count; i < d->request_count; i++) {
895 buf = r128_freelist_get(dev); 885 buf = r128_freelist_get(dev);
896 if (!buf) 886 if (!buf)
897 return DRM_ERR(EAGAIN); 887 return -EAGAIN;
898 888
899 buf->filp = filp; 889 buf->file_priv = file_priv;
900 890
901 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, 891 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
902 sizeof(buf->idx))) 892 sizeof(buf->idx)))
903 return DRM_ERR(EFAULT); 893 return -EFAULT;
904 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, 894 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
905 sizeof(buf->total))) 895 sizeof(buf->total)))
906 return DRM_ERR(EFAULT); 896 return -EFAULT;
907 897
908 d->granted_count++; 898 d->granted_count++;
909 } 899 }
910 return 0; 900 return 0;
911} 901}
912 902
913int r128_cce_buffers(DRM_IOCTL_ARGS) 903int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
914{ 904{
915 DRM_DEVICE;
916 struct drm_device_dma *dma = dev->dma; 905 struct drm_device_dma *dma = dev->dma;
917 int ret = 0; 906 int ret = 0;
918 struct drm_dma __user *argp = (void __user *)data; 907 struct drm_dma *d = data;
919 struct drm_dma d;
920 908
921 LOCK_TEST_WITH_RETURN(dev, filp); 909 LOCK_TEST_WITH_RETURN(dev, file_priv);
922
923 DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
924 910
925 /* Please don't send us buffers. 911 /* Please don't send us buffers.
926 */ 912 */
927 if (d.send_count != 0) { 913 if (d->send_count != 0) {
928 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 914 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
929 DRM_CURRENTPID, d.send_count); 915 DRM_CURRENTPID, d->send_count);
930 return DRM_ERR(EINVAL); 916 return -EINVAL;
931 } 917 }
932 918
933 /* We'll send you buffers. 919 /* We'll send you buffers.
934 */ 920 */
935 if (d.request_count < 0 || d.request_count > dma->buf_count) { 921 if (d->request_count < 0 || d->request_count > dma->buf_count) {
936 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 922 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
937 DRM_CURRENTPID, d.request_count, dma->buf_count); 923 DRM_CURRENTPID, d->request_count, dma->buf_count);
938 return DRM_ERR(EINVAL); 924 return -EINVAL;
939 } 925 }
940 926
941 d.granted_count = 0; 927 d->granted_count = 0;
942 928
943 if (d.request_count) { 929 if (d->request_count) {
944 ret = r128_cce_get_buffers(filp, dev, &d); 930 ret = r128_cce_get_buffers(dev, file_priv, d);
945 } 931 }
946 932
947 DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
948
949 return ret; 933 return ret;
950} 934}
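One detail of r128_cce_buffers()/r128_cce_get_buffers() above (and the matching mga code earlier) is that only the struct drm_dma block itself is now marshalled by the ioctl core; its request_indices and request_sizes members are still pointers into the caller's address space, which is why the per-buffer DRM_COPY_TO_USER() calls survive the conversion. The sketch below is a plain user-space analogy with invented names, not the kernel code.

#include <stdio.h>
#include <string.h>

struct outer {
        int request_count;
        int granted_count;
        int *slots;               /* caller-owned array, like request_indices */
};

/* Works on the copied outer block, but writes results through its pointer. */
static int grant_buffers(struct outer *d)
{
        int i;

        for (i = d->granted_count; i < d->request_count; i++) {
                int idx = 100 + i;                        /* pretend freelist hit */
                memcpy(&d->slots[i], &idx, sizeof(idx));  /* stands in for DRM_COPY_TO_USER */
                d->granted_count++;
        }
        return 0;
}

int main(void)
{
        int caller_slots[4] = { 0 };
        struct outer arg = { .request_count = 4, .granted_count = 0,
                             .slots = caller_slots };
        struct outer copy = arg;      /* the "core" copies the outer block */

        grant_buffers(&copy);
        printf("granted %d: %d %d %d %d\n", copy.granted_count,
               caller_slots[0], caller_slots[1],
               caller_slots[2], caller_slots[3]);
        return 0;
}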
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
index e94a39c6e327..8d8878b55f55 100644
--- a/drivers/char/drm/r128_drm.h
+++ b/drivers/char/drm/r128_drm.h
@@ -222,11 +222,7 @@ typedef struct drm_r128_init {
222 R128_INIT_CCE = 0x01, 222 R128_INIT_CCE = 0x01,
223 R128_CLEANUP_CCE = 0x02 223 R128_CLEANUP_CCE = 0x02
224 } func; 224 } func;
225#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
226 int sarea_priv_offset;
227#else
228 unsigned long sarea_priv_offset; 225 unsigned long sarea_priv_offset;
229#endif
230 int is_pci; 226 int is_pci;
231 int cce_mode; 227 int cce_mode;
232 int cce_secure; 228 int cce_secure;
@@ -240,21 +236,12 @@ typedef struct drm_r128_init {
240 unsigned int depth_offset, depth_pitch; 236 unsigned int depth_offset, depth_pitch;
241 unsigned int span_offset; 237 unsigned int span_offset;
242 238
243#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
244 unsigned int fb_offset;
245 unsigned int mmio_offset;
246 unsigned int ring_offset;
247 unsigned int ring_rptr_offset;
248 unsigned int buffers_offset;
249 unsigned int agp_textures_offset;
250#else
251 unsigned long fb_offset; 239 unsigned long fb_offset;
252 unsigned long mmio_offset; 240 unsigned long mmio_offset;
253 unsigned long ring_offset; 241 unsigned long ring_offset;
254 unsigned long ring_rptr_offset; 242 unsigned long ring_rptr_offset;
255 unsigned long buffers_offset; 243 unsigned long buffers_offset;
256 unsigned long agp_textures_offset; 244 unsigned long agp_textures_offset;
257#endif
258} drm_r128_init_t; 245} drm_r128_init_t;
259 246
260typedef struct drm_r128_cce_stop { 247typedef struct drm_r128_cce_stop {
@@ -264,15 +251,10 @@ typedef struct drm_r128_cce_stop {
264 251
265typedef struct drm_r128_clear { 252typedef struct drm_r128_clear {
266 unsigned int flags; 253 unsigned int flags;
267#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
268 int x, y, w, h;
269#endif
270 unsigned int clear_color; 254 unsigned int clear_color;
271 unsigned int clear_depth; 255 unsigned int clear_depth;
272#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
273 unsigned int color_mask; 256 unsigned int color_mask;
274 unsigned int depth_mask; 257 unsigned int depth_mask;
275#endif
276} drm_r128_clear_t; 258} drm_r128_clear_t;
277 259
278typedef struct drm_r128_vertex { 260typedef struct drm_r128_vertex {
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index 72249fb2fd1c..250d2aa46581 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -129,18 +129,18 @@ typedef struct drm_r128_buf_priv {
129 drm_r128_freelist_t *list_entry; 129 drm_r128_freelist_t *list_entry;
130} drm_r128_buf_priv_t; 130} drm_r128_buf_priv_t;
131 131
132extern drm_ioctl_desc_t r128_ioctls[]; 132extern struct drm_ioctl_desc r128_ioctls[];
133extern int r128_max_ioctl; 133extern int r128_max_ioctl;
134 134
135 /* r128_cce.c */ 135 /* r128_cce.c */
136extern int r128_cce_init(DRM_IOCTL_ARGS); 136extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
137extern int r128_cce_start(DRM_IOCTL_ARGS); 137extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
138extern int r128_cce_stop(DRM_IOCTL_ARGS); 138extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
139extern int r128_cce_reset(DRM_IOCTL_ARGS); 139extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
140extern int r128_cce_idle(DRM_IOCTL_ARGS); 140extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
141extern int r128_engine_reset(DRM_IOCTL_ARGS); 141extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
142extern int r128_fullscreen(DRM_IOCTL_ARGS); 142extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
143extern int r128_cce_buffers(DRM_IOCTL_ARGS); 143extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
144 144
145extern void r128_freelist_reset(struct drm_device * dev); 145extern void r128_freelist_reset(struct drm_device * dev);
146 146
@@ -156,7 +156,8 @@ extern void r128_driver_irq_preinstall(struct drm_device * dev);
156extern void r128_driver_irq_postinstall(struct drm_device * dev); 156extern void r128_driver_irq_postinstall(struct drm_device * dev);
157extern void r128_driver_irq_uninstall(struct drm_device * dev); 157extern void r128_driver_irq_uninstall(struct drm_device * dev);
158extern void r128_driver_lastclose(struct drm_device * dev); 158extern void r128_driver_lastclose(struct drm_device * dev);
159extern void r128_driver_preclose(struct drm_device * dev, DRMFILE filp); 159extern void r128_driver_preclose(struct drm_device * dev,
160 struct drm_file *file_priv);
160 161
161extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, 162extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
162 unsigned long arg); 163 unsigned long arg);
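
The prototype changes in this header all follow one pattern: the DRM_IOCTL_ARGS and DRMFILE macros are replaced by explicit parameters, so each handler now names its device, its already-copied argument block, and the calling client. An illustrative before/after, using r128_cce_idle as the example:

	/* old: device and file handle were hidden behind macros */
	extern int r128_cce_idle(DRM_IOCTL_ARGS);

	/* new: everything the handler needs is passed explicitly */
	extern int r128_cce_idle(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);

The same three-argument signature is applied to every handler touched by this patch.
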
@@ -428,7 +429,7 @@ do { \
428 DRM_UDELAY(1); \ 429 DRM_UDELAY(1); \
429 } \ 430 } \
430 DRM_ERROR( "ring space check failed!\n" ); \ 431 DRM_ERROR( "ring space check failed!\n" ); \
431 return DRM_ERR(EBUSY); \ 432 return -EBUSY; \
432 } \ 433 } \
433 __ring_space_done: \ 434 __ring_space_done: \
434 ; \ 435 ; \
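
The ring-space macro hunk also shows the error convention changing: Linux-only code stops going through the DRM_ERR() OS-abstraction wrapper and returns negative errno values directly. A minimal, self-contained sketch of the idea (simplified; the real code is the macro above, and DRM_ERR() is assumed to have produced the corresponding negative value on Linux):

	/* Sketch only: how a wait loop reports failure after the change. */
	static int example_wait_for_space(int have, int need)
	{
		if (have < need)
			return -EBUSY;	/* was: return DRM_ERR(EBUSY); */
		return 0;
	}
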
diff --git a/drivers/char/drm/r128_state.c b/drivers/char/drm/r128_state.c
index 7b334fb7d649..b7f483cac6d4 100644
--- a/drivers/char/drm/r128_state.c
+++ b/drivers/char/drm/r128_state.c
@@ -776,8 +776,9 @@ static void r128_cce_dispatch_indices(struct drm_device * dev,
776 sarea_priv->nbox = 0; 776 sarea_priv->nbox = 0;
777} 777}
778 778
779static int r128_cce_dispatch_blit(DRMFILE filp, 779static int r128_cce_dispatch_blit(struct drm_device * dev,
780 struct drm_device * dev, drm_r128_blit_t * blit) 780 struct drm_file *file_priv,
781 drm_r128_blit_t * blit)
781{ 782{
782 drm_r128_private_t *dev_priv = dev->dev_private; 783 drm_r128_private_t *dev_priv = dev->dev_private;
783 struct drm_device_dma *dma = dev->dma; 784 struct drm_device_dma *dma = dev->dma;
@@ -809,7 +810,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
809 break; 810 break;
810 default: 811 default:
811 DRM_ERROR("invalid blit format %d\n", blit->format); 812 DRM_ERROR("invalid blit format %d\n", blit->format);
812 return DRM_ERR(EINVAL); 813 return -EINVAL;
813 } 814 }
814 815
815 /* Flush the pixel cache, and mark the contents as Read Invalid. 816 /* Flush the pixel cache, and mark the contents as Read Invalid.
@@ -829,14 +830,14 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
829 buf = dma->buflist[blit->idx]; 830 buf = dma->buflist[blit->idx];
830 buf_priv = buf->dev_private; 831 buf_priv = buf->dev_private;
831 832
832 if (buf->filp != filp) { 833 if (buf->file_priv != file_priv) {
833 DRM_ERROR("process %d using buffer owned by %p\n", 834 DRM_ERROR("process %d using buffer owned by %p\n",
834 DRM_CURRENTPID, buf->filp); 835 DRM_CURRENTPID, buf->file_priv);
835 return DRM_ERR(EINVAL); 836 return -EINVAL;
836 } 837 }
837 if (buf->pending) { 838 if (buf->pending) {
838 DRM_ERROR("sending pending buffer %d\n", blit->idx); 839 DRM_ERROR("sending pending buffer %d\n", blit->idx);
839 return DRM_ERR(EINVAL); 840 return -EINVAL;
840 } 841 }
841 842
842 buf_priv->discard = 1; 843 buf_priv->discard = 1;
@@ -900,22 +901,22 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
900 901
901 count = depth->n; 902 count = depth->n;
902 if (count > 4096 || count <= 0) 903 if (count > 4096 || count <= 0)
903 return DRM_ERR(EMSGSIZE); 904 return -EMSGSIZE;
904 905
905 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { 906 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
906 return DRM_ERR(EFAULT); 907 return -EFAULT;
907 } 908 }
908 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { 909 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
909 return DRM_ERR(EFAULT); 910 return -EFAULT;
910 } 911 }
911 912
912 buffer_size = depth->n * sizeof(u32); 913 buffer_size = depth->n * sizeof(u32);
913 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); 914 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
914 if (buffer == NULL) 915 if (buffer == NULL)
915 return DRM_ERR(ENOMEM); 916 return -ENOMEM;
916 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 917 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
917 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 918 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
918 return DRM_ERR(EFAULT); 919 return -EFAULT;
919 } 920 }
920 921
921 mask_size = depth->n * sizeof(u8); 922 mask_size = depth->n * sizeof(u8);
@@ -923,12 +924,12 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
923 mask = drm_alloc(mask_size, DRM_MEM_BUFS); 924 mask = drm_alloc(mask_size, DRM_MEM_BUFS);
924 if (mask == NULL) { 925 if (mask == NULL) {
925 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 926 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
926 return DRM_ERR(ENOMEM); 927 return -ENOMEM;
927 } 928 }
928 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 929 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
929 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 930 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
930 drm_free(mask, mask_size, DRM_MEM_BUFS); 931 drm_free(mask, mask_size, DRM_MEM_BUFS);
931 return DRM_ERR(EFAULT); 932 return -EFAULT;
932 } 933 }
933 934
934 for (i = 0; i < count; i++, x++) { 935 for (i = 0; i < count; i++, x++) {
@@ -996,28 +997,28 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
996 997
997 count = depth->n; 998 count = depth->n;
998 if (count > 4096 || count <= 0) 999 if (count > 4096 || count <= 0)
999 return DRM_ERR(EMSGSIZE); 1000 return -EMSGSIZE;
1000 1001
1001 xbuf_size = count * sizeof(*x); 1002 xbuf_size = count * sizeof(*x);
1002 ybuf_size = count * sizeof(*y); 1003 ybuf_size = count * sizeof(*y);
1003 x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1004 x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
1004 if (x == NULL) { 1005 if (x == NULL) {
1005 return DRM_ERR(ENOMEM); 1006 return -ENOMEM;
1006 } 1007 }
1007 y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1008 y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
1008 if (y == NULL) { 1009 if (y == NULL) {
1009 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1010 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1010 return DRM_ERR(ENOMEM); 1011 return -ENOMEM;
1011 } 1012 }
1012 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1013 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
1013 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1014 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1014 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1015 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1015 return DRM_ERR(EFAULT); 1016 return -EFAULT;
1016 } 1017 }
1017 if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { 1018 if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
1018 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1019 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1019 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1020 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1020 return DRM_ERR(EFAULT); 1021 return -EFAULT;
1021 } 1022 }
1022 1023
1023 buffer_size = depth->n * sizeof(u32); 1024 buffer_size = depth->n * sizeof(u32);
@@ -1025,13 +1026,13 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1025 if (buffer == NULL) { 1026 if (buffer == NULL) {
1026 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1027 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1027 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1028 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1028 return DRM_ERR(ENOMEM); 1029 return -ENOMEM;
1029 } 1030 }
1030 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 1031 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
1031 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1032 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1032 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1033 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1033 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1034 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1034 return DRM_ERR(EFAULT); 1035 return -EFAULT;
1035 } 1036 }
1036 1037
1037 if (depth->mask) { 1038 if (depth->mask) {
@@ -1041,14 +1042,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1041 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1042 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1042 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1043 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1043 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1044 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1044 return DRM_ERR(ENOMEM); 1045 return -ENOMEM;
1045 } 1046 }
1046 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 1047 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
1047 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1048 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1048 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1049 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1049 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1050 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1050 drm_free(mask, mask_size, DRM_MEM_BUFS); 1051 drm_free(mask, mask_size, DRM_MEM_BUFS);
1051 return DRM_ERR(EFAULT); 1052 return -EFAULT;
1052 } 1053 }
1053 1054
1054 for (i = 0; i < count; i++) { 1055 for (i = 0; i < count; i++) {
@@ -1115,13 +1116,13 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev,
1115 1116
1116 count = depth->n; 1117 count = depth->n;
1117 if (count > 4096 || count <= 0) 1118 if (count > 4096 || count <= 0)
1118 return DRM_ERR(EMSGSIZE); 1119 return -EMSGSIZE;
1119 1120
1120 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { 1121 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
1121 return DRM_ERR(EFAULT); 1122 return -EFAULT;
1122 } 1123 }
1123 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { 1124 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
1124 return DRM_ERR(EFAULT); 1125 return -EFAULT;
1125 } 1126 }
1126 1127
1127 BEGIN_RING(7); 1128 BEGIN_RING(7);
@@ -1159,7 +1160,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
1159 1160
1160 count = depth->n; 1161 count = depth->n;
1161 if (count > 4096 || count <= 0) 1162 if (count > 4096 || count <= 0)
1162 return DRM_ERR(EMSGSIZE); 1163 return -EMSGSIZE;
1163 1164
1164 if (count > dev_priv->depth_pitch) { 1165 if (count > dev_priv->depth_pitch) {
1165 count = dev_priv->depth_pitch; 1166 count = dev_priv->depth_pitch;
@@ -1169,22 +1170,22 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
1169 ybuf_size = count * sizeof(*y); 1170 ybuf_size = count * sizeof(*y);
1170 x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1171 x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
1171 if (x == NULL) { 1172 if (x == NULL) {
1172 return DRM_ERR(ENOMEM); 1173 return -ENOMEM;
1173 } 1174 }
1174 y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1175 y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
1175 if (y == NULL) { 1176 if (y == NULL) {
1176 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1177 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1177 return DRM_ERR(ENOMEM); 1178 return -ENOMEM;
1178 } 1179 }
1179 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1180 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
1180 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1181 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1181 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1182 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1182 return DRM_ERR(EFAULT); 1183 return -EFAULT;
1183 } 1184 }
1184 if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { 1185 if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
1185 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1186 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1186 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1187 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1187 return DRM_ERR(EFAULT); 1188 return -EFAULT;
1188 } 1189 }
1189 1190
1190 for (i = 0; i < count; i++) { 1191 for (i = 0; i < count; i++) {
@@ -1241,25 +1242,21 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
1241 * IOCTL functions 1242 * IOCTL functions
1242 */ 1243 */
1243 1244
1244static int r128_cce_clear(DRM_IOCTL_ARGS) 1245static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
1245{ 1246{
1246 DRM_DEVICE;
1247 drm_r128_private_t *dev_priv = dev->dev_private; 1247 drm_r128_private_t *dev_priv = dev->dev_private;
1248 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 1248 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
1249 drm_r128_clear_t clear; 1249 drm_r128_clear_t *clear = data;
1250 DRM_DEBUG("\n"); 1250 DRM_DEBUG("\n");
1251 1251
1252 LOCK_TEST_WITH_RETURN(dev, filp); 1252 LOCK_TEST_WITH_RETURN(dev, file_priv);
1253
1254 DRM_COPY_FROM_USER_IOCTL(clear, (drm_r128_clear_t __user *) data,
1255 sizeof(clear));
1256 1253
1257 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1254 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1258 1255
1259 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) 1256 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
1260 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; 1257 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
1261 1258
1262 r128_cce_dispatch_clear(dev, &clear); 1259 r128_cce_dispatch_clear(dev, clear);
1263 COMMIT_RING(); 1260 COMMIT_RING();
1264 1261
1265 /* Make sure we restore the 3D state next time. 1262 /* Make sure we restore the 3D state next time.
@@ -1309,13 +1306,12 @@ static int r128_do_cleanup_pageflip(struct drm_device * dev)
1309 * They can & should be intermixed to support multiple 3d windows. 1306 * They can & should be intermixed to support multiple 3d windows.
1310 */ 1307 */
1311 1308
1312static int r128_cce_flip(DRM_IOCTL_ARGS) 1309static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
1313{ 1310{
1314 DRM_DEVICE;
1315 drm_r128_private_t *dev_priv = dev->dev_private; 1311 drm_r128_private_t *dev_priv = dev->dev_private;
1316 DRM_DEBUG("%s\n", __FUNCTION__); 1312 DRM_DEBUG("%s\n", __FUNCTION__);
1317 1313
1318 LOCK_TEST_WITH_RETURN(dev, filp); 1314 LOCK_TEST_WITH_RETURN(dev, file_priv);
1319 1315
1320 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1316 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1321 1317
@@ -1328,14 +1324,13 @@ static int r128_cce_flip(DRM_IOCTL_ARGS)
1328 return 0; 1324 return 0;
1329} 1325}
1330 1326
1331static int r128_cce_swap(DRM_IOCTL_ARGS) 1327static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
1332{ 1328{
1333 DRM_DEVICE;
1334 drm_r128_private_t *dev_priv = dev->dev_private; 1329 drm_r128_private_t *dev_priv = dev->dev_private;
1335 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 1330 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
1336 DRM_DEBUG("%s\n", __FUNCTION__); 1331 DRM_DEBUG("%s\n", __FUNCTION__);
1337 1332
1338 LOCK_TEST_WITH_RETURN(dev, filp); 1333 LOCK_TEST_WITH_RETURN(dev, file_priv);
1339 1334
1340 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1335 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1341 1336
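
Every handler in these files still begins with LOCK_TEST_WITH_RETURN(); the only change is that the lock owner is now identified by its struct drm_file rather than a DRMFILE handle. The real macro lives in the DRM core headers; the guard sketched below is an assumption for illustration only (the EXAMPLE_ name is not from the patch):

	/* Assumed behaviour: bail out with -EINVAL unless this client
	 * currently holds the hardware lock. */
	#define EXAMPLE_LOCK_TEST_WITH_RETURN(dev, file_priv)		\
	do {								\
		if (!_DRM_LOCK_IS_HELD((dev)->lock.hw_lock->lock) ||	\
		    (dev)->lock.file_priv != (file_priv))		\
			return -EINVAL;					\
	} while (0)
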
@@ -1350,58 +1345,54 @@ static int r128_cce_swap(DRM_IOCTL_ARGS)
1350 return 0; 1345 return 0;
1351} 1346}
1352 1347
1353static int r128_cce_vertex(DRM_IOCTL_ARGS) 1348static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
1354{ 1349{
1355 DRM_DEVICE;
1356 drm_r128_private_t *dev_priv = dev->dev_private; 1350 drm_r128_private_t *dev_priv = dev->dev_private;
1357 struct drm_device_dma *dma = dev->dma; 1351 struct drm_device_dma *dma = dev->dma;
1358 struct drm_buf *buf; 1352 struct drm_buf *buf;
1359 drm_r128_buf_priv_t *buf_priv; 1353 drm_r128_buf_priv_t *buf_priv;
1360 drm_r128_vertex_t vertex; 1354 drm_r128_vertex_t *vertex = data;
1361 1355
1362 LOCK_TEST_WITH_RETURN(dev, filp); 1356 LOCK_TEST_WITH_RETURN(dev, file_priv);
1363 1357
1364 if (!dev_priv) { 1358 if (!dev_priv) {
1365 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1359 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1366 return DRM_ERR(EINVAL); 1360 return -EINVAL;
1367 } 1361 }
1368 1362
1369 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data,
1370 sizeof(vertex));
1371
1372 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", 1363 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
1373 DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard); 1364 DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
1374 1365
1375 if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { 1366 if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
1376 DRM_ERROR("buffer index %d (of %d max)\n", 1367 DRM_ERROR("buffer index %d (of %d max)\n",
1377 vertex.idx, dma->buf_count - 1); 1368 vertex->idx, dma->buf_count - 1);
1378 return DRM_ERR(EINVAL); 1369 return -EINVAL;
1379 } 1370 }
1380 if (vertex.prim < 0 || 1371 if (vertex->prim < 0 ||
1381 vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { 1372 vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
1382 DRM_ERROR("buffer prim %d\n", vertex.prim); 1373 DRM_ERROR("buffer prim %d\n", vertex->prim);
1383 return DRM_ERR(EINVAL); 1374 return -EINVAL;
1384 } 1375 }
1385 1376
1386 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1377 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1387 VB_AGE_TEST_WITH_RETURN(dev_priv); 1378 VB_AGE_TEST_WITH_RETURN(dev_priv);
1388 1379
1389 buf = dma->buflist[vertex.idx]; 1380 buf = dma->buflist[vertex->idx];
1390 buf_priv = buf->dev_private; 1381 buf_priv = buf->dev_private;
1391 1382
1392 if (buf->filp != filp) { 1383 if (buf->file_priv != file_priv) {
1393 DRM_ERROR("process %d using buffer owned by %p\n", 1384 DRM_ERROR("process %d using buffer owned by %p\n",
1394 DRM_CURRENTPID, buf->filp); 1385 DRM_CURRENTPID, buf->file_priv);
1395 return DRM_ERR(EINVAL); 1386 return -EINVAL;
1396 } 1387 }
1397 if (buf->pending) { 1388 if (buf->pending) {
1398 DRM_ERROR("sending pending buffer %d\n", vertex.idx); 1389 DRM_ERROR("sending pending buffer %d\n", vertex->idx);
1399 return DRM_ERR(EINVAL); 1390 return -EINVAL;
1400 } 1391 }
1401 1392
1402 buf->used = vertex.count; 1393 buf->used = vertex->count;
1403 buf_priv->prim = vertex.prim; 1394 buf_priv->prim = vertex->prim;
1404 buf_priv->discard = vertex.discard; 1395 buf_priv->discard = vertex->discard;
1405 1396
1406 r128_cce_dispatch_vertex(dev, buf); 1397 r128_cce_dispatch_vertex(dev, buf);
1407 1398
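
As the r128_cce_vertex() conversion above illustrates, handlers no longer call DRM_COPY_FROM_USER_IOCTL themselves: the argument block is expected to be copied in by the core ioctl dispatcher, so data already points at a kernel-space copy. A hypothetical handler showing the new access pattern (example_vertex_ioctl is not part of the patch):

	static int example_vertex_ioctl(struct drm_device *dev, void *data,
					struct drm_file *file_priv)
	{
		drm_r128_vertex_t *vertex = data;	/* no copy_from_user here */

		if (vertex->idx < 0)
			return -EINVAL;
		/* ... validate vertex->prim, then dispatch ... */
		return 0;
	}
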
@@ -1409,134 +1400,123 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS)
1409 return 0; 1400 return 0;
1410} 1401}
1411 1402
1412static int r128_cce_indices(DRM_IOCTL_ARGS) 1403static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
1413{ 1404{
1414 DRM_DEVICE;
1415 drm_r128_private_t *dev_priv = dev->dev_private; 1405 drm_r128_private_t *dev_priv = dev->dev_private;
1416 struct drm_device_dma *dma = dev->dma; 1406 struct drm_device_dma *dma = dev->dma;
1417 struct drm_buf *buf; 1407 struct drm_buf *buf;
1418 drm_r128_buf_priv_t *buf_priv; 1408 drm_r128_buf_priv_t *buf_priv;
1419 drm_r128_indices_t elts; 1409 drm_r128_indices_t *elts = data;
1420 int count; 1410 int count;
1421 1411
1422 LOCK_TEST_WITH_RETURN(dev, filp); 1412 LOCK_TEST_WITH_RETURN(dev, file_priv);
1423 1413
1424 if (!dev_priv) { 1414 if (!dev_priv) {
1425 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1415 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1426 return DRM_ERR(EINVAL); 1416 return -EINVAL;
1427 } 1417 }
1428 1418
1429 DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data,
1430 sizeof(elts));
1431
1432 DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID, 1419 DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
1433 elts.idx, elts.start, elts.end, elts.discard); 1420 elts->idx, elts->start, elts->end, elts->discard);
1434 1421
1435 if (elts.idx < 0 || elts.idx >= dma->buf_count) { 1422 if (elts->idx < 0 || elts->idx >= dma->buf_count) {
1436 DRM_ERROR("buffer index %d (of %d max)\n", 1423 DRM_ERROR("buffer index %d (of %d max)\n",
1437 elts.idx, dma->buf_count - 1); 1424 elts->idx, dma->buf_count - 1);
1438 return DRM_ERR(EINVAL); 1425 return -EINVAL;
1439 } 1426 }
1440 if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { 1427 if (elts->prim < 0 ||
1441 DRM_ERROR("buffer prim %d\n", elts.prim); 1428 elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
1442 return DRM_ERR(EINVAL); 1429 DRM_ERROR("buffer prim %d\n", elts->prim);
1430 return -EINVAL;
1443 } 1431 }
1444 1432
1445 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1433 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1446 VB_AGE_TEST_WITH_RETURN(dev_priv); 1434 VB_AGE_TEST_WITH_RETURN(dev_priv);
1447 1435
1448 buf = dma->buflist[elts.idx]; 1436 buf = dma->buflist[elts->idx];
1449 buf_priv = buf->dev_private; 1437 buf_priv = buf->dev_private;
1450 1438
1451 if (buf->filp != filp) { 1439 if (buf->file_priv != file_priv) {
1452 DRM_ERROR("process %d using buffer owned by %p\n", 1440 DRM_ERROR("process %d using buffer owned by %p\n",
1453 DRM_CURRENTPID, buf->filp); 1441 DRM_CURRENTPID, buf->file_priv);
1454 return DRM_ERR(EINVAL); 1442 return -EINVAL;
1455 } 1443 }
1456 if (buf->pending) { 1444 if (buf->pending) {
1457 DRM_ERROR("sending pending buffer %d\n", elts.idx); 1445 DRM_ERROR("sending pending buffer %d\n", elts->idx);
1458 return DRM_ERR(EINVAL); 1446 return -EINVAL;
1459 } 1447 }
1460 1448
1461 count = (elts.end - elts.start) / sizeof(u16); 1449 count = (elts->end - elts->start) / sizeof(u16);
1462 elts.start -= R128_INDEX_PRIM_OFFSET; 1450 elts->start -= R128_INDEX_PRIM_OFFSET;
1463 1451
1464 if (elts.start & 0x7) { 1452 if (elts->start & 0x7) {
1465 DRM_ERROR("misaligned buffer 0x%x\n", elts.start); 1453 DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
1466 return DRM_ERR(EINVAL); 1454 return -EINVAL;
1467 } 1455 }
1468 if (elts.start < buf->used) { 1456 if (elts->start < buf->used) {
1469 DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); 1457 DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
1470 return DRM_ERR(EINVAL); 1458 return -EINVAL;
1471 } 1459 }
1472 1460
1473 buf->used = elts.end; 1461 buf->used = elts->end;
1474 buf_priv->prim = elts.prim; 1462 buf_priv->prim = elts->prim;
1475 buf_priv->discard = elts.discard; 1463 buf_priv->discard = elts->discard;
1476 1464
1477 r128_cce_dispatch_indices(dev, buf, elts.start, elts.end, count); 1465 r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);
1478 1466
1479 COMMIT_RING(); 1467 COMMIT_RING();
1480 return 0; 1468 return 0;
1481} 1469}
1482 1470
1483static int r128_cce_blit(DRM_IOCTL_ARGS) 1471static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
1484{ 1472{
1485 DRM_DEVICE;
1486 struct drm_device_dma *dma = dev->dma; 1473 struct drm_device_dma *dma = dev->dma;
1487 drm_r128_private_t *dev_priv = dev->dev_private; 1474 drm_r128_private_t *dev_priv = dev->dev_private;
1488 drm_r128_blit_t blit; 1475 drm_r128_blit_t *blit = data;
1489 int ret; 1476 int ret;
1490 1477
1491 LOCK_TEST_WITH_RETURN(dev, filp); 1478 LOCK_TEST_WITH_RETURN(dev, file_priv);
1492
1493 DRM_COPY_FROM_USER_IOCTL(blit, (drm_r128_blit_t __user *) data,
1494 sizeof(blit));
1495 1479
1496 DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit.idx); 1480 DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);
1497 1481
1498 if (blit.idx < 0 || blit.idx >= dma->buf_count) { 1482 if (blit->idx < 0 || blit->idx >= dma->buf_count) {
1499 DRM_ERROR("buffer index %d (of %d max)\n", 1483 DRM_ERROR("buffer index %d (of %d max)\n",
1500 blit.idx, dma->buf_count - 1); 1484 blit->idx, dma->buf_count - 1);
1501 return DRM_ERR(EINVAL); 1485 return -EINVAL;
1502 } 1486 }
1503 1487
1504 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1488 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1505 VB_AGE_TEST_WITH_RETURN(dev_priv); 1489 VB_AGE_TEST_WITH_RETURN(dev_priv);
1506 1490
1507 ret = r128_cce_dispatch_blit(filp, dev, &blit); 1491 ret = r128_cce_dispatch_blit(dev, file_priv, blit);
1508 1492
1509 COMMIT_RING(); 1493 COMMIT_RING();
1510 return ret; 1494 return ret;
1511} 1495}
1512 1496
1513static int r128_cce_depth(DRM_IOCTL_ARGS) 1497static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
1514{ 1498{
1515 DRM_DEVICE;
1516 drm_r128_private_t *dev_priv = dev->dev_private; 1499 drm_r128_private_t *dev_priv = dev->dev_private;
1517 drm_r128_depth_t depth; 1500 drm_r128_depth_t *depth = data;
1518 int ret; 1501 int ret;
1519 1502
1520 LOCK_TEST_WITH_RETURN(dev, filp); 1503 LOCK_TEST_WITH_RETURN(dev, file_priv);
1521
1522 DRM_COPY_FROM_USER_IOCTL(depth, (drm_r128_depth_t __user *) data,
1523 sizeof(depth));
1524 1504
1525 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1505 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1526 1506
1527 ret = DRM_ERR(EINVAL); 1507 ret = -EINVAL;
1528 switch (depth.func) { 1508 switch (depth->func) {
1529 case R128_WRITE_SPAN: 1509 case R128_WRITE_SPAN:
1530 ret = r128_cce_dispatch_write_span(dev, &depth); 1510 ret = r128_cce_dispatch_write_span(dev, depth);
1531 break; 1511 break;
1532 case R128_WRITE_PIXELS: 1512 case R128_WRITE_PIXELS:
1533 ret = r128_cce_dispatch_write_pixels(dev, &depth); 1513 ret = r128_cce_dispatch_write_pixels(dev, depth);
1534 break; 1514 break;
1535 case R128_READ_SPAN: 1515 case R128_READ_SPAN:
1536 ret = r128_cce_dispatch_read_span(dev, &depth); 1516 ret = r128_cce_dispatch_read_span(dev, depth);
1537 break; 1517 break;
1538 case R128_READ_PIXELS: 1518 case R128_READ_PIXELS:
1539 ret = r128_cce_dispatch_read_pixels(dev, &depth); 1519 ret = r128_cce_dispatch_read_pixels(dev, depth);
1540 break; 1520 break;
1541 } 1521 }
1542 1522
@@ -1544,20 +1524,16 @@ static int r128_cce_depth(DRM_IOCTL_ARGS)
1544 return ret; 1524 return ret;
1545} 1525}
1546 1526
1547static int r128_cce_stipple(DRM_IOCTL_ARGS) 1527static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
1548{ 1528{
1549 DRM_DEVICE;
1550 drm_r128_private_t *dev_priv = dev->dev_private; 1529 drm_r128_private_t *dev_priv = dev->dev_private;
1551 drm_r128_stipple_t stipple; 1530 drm_r128_stipple_t *stipple = data;
1552 u32 mask[32]; 1531 u32 mask[32];
1553 1532
1554 LOCK_TEST_WITH_RETURN(dev, filp); 1533 LOCK_TEST_WITH_RETURN(dev, file_priv);
1555
1556 DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data,
1557 sizeof(stipple));
1558 1534
1559 if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) 1535 if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
1560 return DRM_ERR(EFAULT); 1536 return -EFAULT;
1561 1537
1562 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1538 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1563 1539
@@ -1567,61 +1543,58 @@ static int r128_cce_stipple(DRM_IOCTL_ARGS)
1567 return 0; 1543 return 0;
1568} 1544}
1569 1545
1570static int r128_cce_indirect(DRM_IOCTL_ARGS) 1546static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
1571{ 1547{
1572 DRM_DEVICE;
1573 drm_r128_private_t *dev_priv = dev->dev_private; 1548 drm_r128_private_t *dev_priv = dev->dev_private;
1574 struct drm_device_dma *dma = dev->dma; 1549 struct drm_device_dma *dma = dev->dma;
1575 struct drm_buf *buf; 1550 struct drm_buf *buf;
1576 drm_r128_buf_priv_t *buf_priv; 1551 drm_r128_buf_priv_t *buf_priv;
1577 drm_r128_indirect_t indirect; 1552 drm_r128_indirect_t *indirect = data;
1578#if 0 1553#if 0
1579 RING_LOCALS; 1554 RING_LOCALS;
1580#endif 1555#endif
1581 1556
1582 LOCK_TEST_WITH_RETURN(dev, filp); 1557 LOCK_TEST_WITH_RETURN(dev, file_priv);
1583 1558
1584 if (!dev_priv) { 1559 if (!dev_priv) {
1585 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1560 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1586 return DRM_ERR(EINVAL); 1561 return -EINVAL;
1587 } 1562 }
1588 1563
1589 DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data,
1590 sizeof(indirect));
1591
1592 DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n", 1564 DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
1593 indirect.idx, indirect.start, indirect.end, indirect.discard); 1565 indirect->idx, indirect->start, indirect->end,
1566 indirect->discard);
1594 1567
1595 if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { 1568 if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
1596 DRM_ERROR("buffer index %d (of %d max)\n", 1569 DRM_ERROR("buffer index %d (of %d max)\n",
1597 indirect.idx, dma->buf_count - 1); 1570 indirect->idx, dma->buf_count - 1);
1598 return DRM_ERR(EINVAL); 1571 return -EINVAL;
1599 } 1572 }
1600 1573
1601 buf = dma->buflist[indirect.idx]; 1574 buf = dma->buflist[indirect->idx];
1602 buf_priv = buf->dev_private; 1575 buf_priv = buf->dev_private;
1603 1576
1604 if (buf->filp != filp) { 1577 if (buf->file_priv != file_priv) {
1605 DRM_ERROR("process %d using buffer owned by %p\n", 1578 DRM_ERROR("process %d using buffer owned by %p\n",
1606 DRM_CURRENTPID, buf->filp); 1579 DRM_CURRENTPID, buf->file_priv);
1607 return DRM_ERR(EINVAL); 1580 return -EINVAL;
1608 } 1581 }
1609 if (buf->pending) { 1582 if (buf->pending) {
1610 DRM_ERROR("sending pending buffer %d\n", indirect.idx); 1583 DRM_ERROR("sending pending buffer %d\n", indirect->idx);
1611 return DRM_ERR(EINVAL); 1584 return -EINVAL;
1612 } 1585 }
1613 1586
1614 if (indirect.start < buf->used) { 1587 if (indirect->start < buf->used) {
1615 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", 1588 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
1616 indirect.start, buf->used); 1589 indirect->start, buf->used);
1617 return DRM_ERR(EINVAL); 1590 return -EINVAL;
1618 } 1591 }
1619 1592
1620 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1593 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1621 VB_AGE_TEST_WITH_RETURN(dev_priv); 1594 VB_AGE_TEST_WITH_RETURN(dev_priv);
1622 1595
1623 buf->used = indirect.end; 1596 buf->used = indirect->end;
1624 buf_priv->discard = indirect.discard; 1597 buf_priv->discard = indirect->discard;
1625 1598
1626#if 0 1599#if 0
1627 /* Wait for the 3D stream to idle before the indirect buffer 1600 /* Wait for the 3D stream to idle before the indirect buffer
@@ -1636,46 +1609,42 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS)
1636 * X server. This is insecure and is thus only available to 1609 * X server. This is insecure and is thus only available to
1637 * privileged clients. 1610 * privileged clients.
1638 */ 1611 */
1639 r128_cce_dispatch_indirect(dev, buf, indirect.start, indirect.end); 1612 r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);
1640 1613
1641 COMMIT_RING(); 1614 COMMIT_RING();
1642 return 0; 1615 return 0;
1643} 1616}
1644 1617
1645static int r128_getparam(DRM_IOCTL_ARGS) 1618static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
1646{ 1619{
1647 DRM_DEVICE;
1648 drm_r128_private_t *dev_priv = dev->dev_private; 1620 drm_r128_private_t *dev_priv = dev->dev_private;
1649 drm_r128_getparam_t param; 1621 drm_r128_getparam_t *param = data;
1650 int value; 1622 int value;
1651 1623
1652 if (!dev_priv) { 1624 if (!dev_priv) {
1653 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1625 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1654 return DRM_ERR(EINVAL); 1626 return -EINVAL;
1655 } 1627 }
1656 1628
1657 DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data,
1658 sizeof(param));
1659
1660 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1629 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1661 1630
1662 switch (param.param) { 1631 switch (param->param) {
1663 case R128_PARAM_IRQ_NR: 1632 case R128_PARAM_IRQ_NR:
1664 value = dev->irq; 1633 value = dev->irq;
1665 break; 1634 break;
1666 default: 1635 default:
1667 return DRM_ERR(EINVAL); 1636 return -EINVAL;
1668 } 1637 }
1669 1638
1670 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 1639 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1671 DRM_ERROR("copy_to_user\n"); 1640 DRM_ERROR("copy_to_user\n");
1672 return DRM_ERR(EFAULT); 1641 return -EFAULT;
1673 } 1642 }
1674 1643
1675 return 0; 1644 return 0;
1676} 1645}
1677 1646
1678void r128_driver_preclose(struct drm_device * dev, DRMFILE filp) 1647void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1679{ 1648{
1680 if (dev->dev_private) { 1649 if (dev->dev_private) {
1681 drm_r128_private_t *dev_priv = dev->dev_private; 1650 drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1690,24 +1659,24 @@ void r128_driver_lastclose(struct drm_device * dev)
1690 r128_do_cleanup_cce(dev); 1659 r128_do_cleanup_cce(dev);
1691} 1660}
1692 1661
1693drm_ioctl_desc_t r128_ioctls[] = { 1662struct drm_ioctl_desc r128_ioctls[] = {
1694 [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1663 DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1695 [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1664 DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1696 [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1665 DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1697 [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1666 DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1698 [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH}, 1667 DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
1699 [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH}, 1668 DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
1700 [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH}, 1669 DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
1701 [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH}, 1670 DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
1702 [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH}, 1671 DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
1703 [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH}, 1672 DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
1704 [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH}, 1673 DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
1705 [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH}, 1674 DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
1706 [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH}, 1675 DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
1707 [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH}, 1676 DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
1708 [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH}, 1677 DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
1709 [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1678 DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1710 [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH}, 1679 DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
1711}; 1680};
1712 1681
1713int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); 1682int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
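
The ioctl table itself switches from open-coded designated initializers to the DRM_IOCTL_DEF() helper, which by the look of the new table bundles the ioctl number, the handler, and its permission flags into one entry. The real macro is defined in drmP.h; the expansion sketched here is an assumption for illustration, not taken from this patch:

	/* Assumed shape of the helper -- illustrative only. */
	#define DRM_IOCTL_DEF(ioctl, _func, _flags)			\
		[DRM_IOCTL_NR(ioctl)] = {				\
			.cmd = ioctl, .func = _func, .flags = _flags }

	/* Usage, exactly as in the table above: */
	/* DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH) */
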
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 4e5aca6ba59a..59b2944811c5 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
74 if (DRM_COPY_FROM_USER_UNCHECKED 74 if (DRM_COPY_FROM_USER_UNCHECKED
75 (&box, &cmdbuf->boxes[n + i], sizeof(box))) { 75 (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
76 DRM_ERROR("copy cliprect faulted\n"); 76 DRM_ERROR("copy cliprect faulted\n");
77 return DRM_ERR(EFAULT); 77 return -EFAULT;
78 } 78 }
79 79
80 box.x1 = 80 box.x1 =
@@ -263,7 +263,7 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
263 DRM_ERROR 263 DRM_ERROR
264 ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", 264 ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
265 reg, sz); 265 reg, sz);
266 return DRM_ERR(EINVAL); 266 return -EINVAL;
267 } 267 }
268 for (i = 0; i < sz; i++) { 268 for (i = 0; i < sz; i++) {
269 values[i] = ((int *)cmdbuf->buf)[i]; 269 values[i] = ((int *)cmdbuf->buf)[i];
@@ -275,13 +275,13 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
275 DRM_ERROR 275 DRM_ERROR
276 ("Offset failed range check (reg=%04x sz=%d)\n", 276 ("Offset failed range check (reg=%04x sz=%d)\n",
277 reg, sz); 277 reg, sz);
278 return DRM_ERR(EINVAL); 278 return -EINVAL;
279 } 279 }
280 break; 280 break;
281 default: 281 default:
282 DRM_ERROR("Register %04x failed check as flag=%02x\n", 282 DRM_ERROR("Register %04x failed check as flag=%02x\n",
283 reg + i * 4, r300_reg_flags[(reg >> 2) + i]); 283 reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
284 return DRM_ERR(EINVAL); 284 return -EINVAL;
285 } 285 }
286 } 286 }
287 287
@@ -317,12 +317,12 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
317 return 0; 317 return 0;
318 318
319 if (sz * 4 > cmdbuf->bufsz) 319 if (sz * 4 > cmdbuf->bufsz)
320 return DRM_ERR(EINVAL); 320 return -EINVAL;
321 321
322 if (reg + sz * 4 >= 0x10000) { 322 if (reg + sz * 4 >= 0x10000) {
323 DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, 323 DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
324 sz); 324 sz);
325 return DRM_ERR(EINVAL); 325 return -EINVAL;
326 } 326 }
327 327
328 if (r300_check_range(reg, sz)) { 328 if (r300_check_range(reg, sz)) {
@@ -362,7 +362,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
362 if (!sz) 362 if (!sz)
363 return 0; 363 return 0;
364 if (sz * 16 > cmdbuf->bufsz) 364 if (sz * 16 > cmdbuf->bufsz)
365 return DRM_ERR(EINVAL); 365 return -EINVAL;
366 366
367 BEGIN_RING(5 + sz * 4); 367 BEGIN_RING(5 + sz * 4);
368 /* Wait for VAP to come to senses.. */ 368 /* Wait for VAP to come to senses.. */
@@ -391,7 +391,7 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
391 RING_LOCALS; 391 RING_LOCALS;
392 392
393 if (8 * 4 > cmdbuf->bufsz) 393 if (8 * 4 > cmdbuf->bufsz)
394 return DRM_ERR(EINVAL); 394 return -EINVAL;
395 395
396 BEGIN_RING(10); 396 BEGIN_RING(10);
397 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); 397 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
@@ -421,7 +421,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
421 if ((count + 1) > MAX_ARRAY_PACKET) { 421 if ((count + 1) > MAX_ARRAY_PACKET) {
422 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 422 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
423 count); 423 count);
424 return DRM_ERR(EINVAL); 424 return -EINVAL;
425 } 425 }
426 memset(payload, 0, MAX_ARRAY_PACKET * 4); 426 memset(payload, 0, MAX_ARRAY_PACKET * 4);
427 memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4); 427 memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
@@ -437,7 +437,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
437 DRM_ERROR 437 DRM_ERROR
438 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 438 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
439 k, i); 439 k, i);
440 return DRM_ERR(EINVAL); 440 return -EINVAL;
441 } 441 }
442 k++; 442 k++;
443 i++; 443 i++;
@@ -448,7 +448,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
448 DRM_ERROR 448 DRM_ERROR
449 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 449 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
450 k, i); 450 k, i);
451 return DRM_ERR(EINVAL); 451 return -EINVAL;
452 } 452 }
453 k++; 453 k++;
454 i++; 454 i++;
@@ -458,7 +458,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
458 DRM_ERROR 458 DRM_ERROR
459 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", 459 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
460 k, i, narrays, count + 1); 460 k, i, narrays, count + 1);
461 return DRM_ERR(EINVAL); 461 return -EINVAL;
462 } 462 }
463 463
464 /* all clear, output packet */ 464 /* all clear, output packet */
@@ -492,7 +492,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
492 ret = !radeon_check_offset(dev_priv, offset); 492 ret = !radeon_check_offset(dev_priv, offset);
493 if (ret) { 493 if (ret) {
494 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); 494 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
495 return DRM_ERR(EINVAL); 495 return -EINVAL;
496 } 496 }
497 } 497 }
498 498
@@ -502,7 +502,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
502 ret = !radeon_check_offset(dev_priv, offset); 502 ret = !radeon_check_offset(dev_priv, offset);
503 if (ret) { 503 if (ret) {
504 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); 504 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
505 return DRM_ERR(EINVAL); 505 return -EINVAL;
506 } 506 }
507 507
508 } 508 }
@@ -530,12 +530,12 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
530 530
531 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 531 if ((cmd[1] & 0x8000ffff) != 0x80000810) {
532 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 532 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
533 return DRM_ERR(EINVAL); 533 return -EINVAL;
534 } 534 }
535 ret = !radeon_check_offset(dev_priv, cmd[2]); 535 ret = !radeon_check_offset(dev_priv, cmd[2]);
536 if (ret) { 536 if (ret) {
537 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 537 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
538 return DRM_ERR(EINVAL); 538 return -EINVAL;
539 } 539 }
540 540
541 BEGIN_RING(count+2); 541 BEGIN_RING(count+2);
@@ -557,7 +557,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
557 RING_LOCALS; 557 RING_LOCALS;
558 558
559 if (4 > cmdbuf->bufsz) 559 if (4 > cmdbuf->bufsz)
560 return DRM_ERR(EINVAL); 560 return -EINVAL;
561 561
562 /* Fixme !! This simply emits a packet without much checking. 562 /* Fixme !! This simply emits a packet without much checking.
563 We need to be smarter. */ 563 We need to be smarter. */
@@ -568,7 +568,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
568 /* Is it packet 3 ? */ 568 /* Is it packet 3 ? */
569 if ((header >> 30) != 0x3) { 569 if ((header >> 30) != 0x3) {
570 DRM_ERROR("Not a packet3 header (0x%08x)\n", header); 570 DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
571 return DRM_ERR(EINVAL); 571 return -EINVAL;
572 } 572 }
573 573
574 count = (header >> 16) & 0x3fff; 574 count = (header >> 16) & 0x3fff;
@@ -578,7 +578,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
578 DRM_ERROR 578 DRM_ERROR
579 ("Expected packet3 of length %d but have only %d bytes left\n", 579 ("Expected packet3 of length %d but have only %d bytes left\n",
580 (count + 2) * 4, cmdbuf->bufsz); 580 (count + 2) * 4, cmdbuf->bufsz);
581 return DRM_ERR(EINVAL); 581 return -EINVAL;
582 } 582 }
583 583
584 /* Is it a packet type we know about ? */ 584 /* Is it a packet type we know about ? */
@@ -600,7 +600,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
600 break; 600 break;
601 default: 601 default:
602 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); 602 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
603 return DRM_ERR(EINVAL); 603 return -EINVAL;
604 } 604 }
605 605
606 BEGIN_RING(count + 2); 606 BEGIN_RING(count + 2);
@@ -664,7 +664,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
664 DRM_ERROR("bad packet3 type %i at %p\n", 664 DRM_ERROR("bad packet3 type %i at %p\n",
665 header.packet3.packet, 665 header.packet3.packet,
666 cmdbuf->buf - sizeof(header)); 666 cmdbuf->buf - sizeof(header));
667 return DRM_ERR(EINVAL); 667 return -EINVAL;
668 } 668 }
669 669
670 n += R300_SIMULTANEOUS_CLIPRECTS; 670 n += R300_SIMULTANEOUS_CLIPRECTS;
@@ -726,11 +726,11 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
726 726
727 if (cmdbuf->bufsz < 727 if (cmdbuf->bufsz <
728 (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { 728 (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
729 return DRM_ERR(EINVAL); 729 return -EINVAL;
730 } 730 }
731 731
732 if (header.scratch.reg >= 5) { 732 if (header.scratch.reg >= 5) {
733 return DRM_ERR(EINVAL); 733 return -EINVAL;
734 } 734 }
735 735
736 dev_priv->scratch_ages[header.scratch.reg]++; 736 dev_priv->scratch_ages[header.scratch.reg]++;
@@ -745,21 +745,21 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
745 buf_idx *= 2; /* 8 bytes per buf */ 745 buf_idx *= 2; /* 8 bytes per buf */
746 746
747 if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { 747 if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
748 return DRM_ERR(EINVAL); 748 return -EINVAL;
749 } 749 }
750 750
751 if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { 751 if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
752 return DRM_ERR(EINVAL); 752 return -EINVAL;
753 } 753 }
754 754
755 if (h_pending == 0) { 755 if (h_pending == 0) {
756 return DRM_ERR(EINVAL); 756 return -EINVAL;
757 } 757 }
758 758
759 h_pending--; 759 h_pending--;
760 760
761 if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { 761 if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
762 return DRM_ERR(EINVAL); 762 return -EINVAL;
763 } 763 }
764 764
765 cmdbuf->buf += sizeof(buf_idx); 765 cmdbuf->buf += sizeof(buf_idx);
@@ -780,8 +780,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
780 * Called by the ioctl handler function radeon_cp_cmdbuf. 780 * Called by the ioctl handler function radeon_cp_cmdbuf.
781 */ 781 */
782int r300_do_cp_cmdbuf(struct drm_device *dev, 782int r300_do_cp_cmdbuf(struct drm_device *dev,
783 DRMFILE filp, 783 struct drm_file *file_priv,
784 struct drm_file *filp_priv,
785 drm_radeon_kcmd_buffer_t *cmdbuf) 784 drm_radeon_kcmd_buffer_t *cmdbuf)
786{ 785{
787 drm_radeon_private_t *dev_priv = dev->dev_private; 786 drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -879,15 +878,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
879 if (idx < 0 || idx >= dma->buf_count) { 878 if (idx < 0 || idx >= dma->buf_count) {
880 DRM_ERROR("buffer index %d (of %d max)\n", 879 DRM_ERROR("buffer index %d (of %d max)\n",
881 idx, dma->buf_count - 1); 880 idx, dma->buf_count - 1);
882 ret = DRM_ERR(EINVAL); 881 ret = -EINVAL;
883 goto cleanup; 882 goto cleanup;
884 } 883 }
885 884
886 buf = dma->buflist[idx]; 885 buf = dma->buflist[idx];
887 if (buf->filp != filp || buf->pending) { 886 if (buf->file_priv != file_priv || buf->pending) {
888 DRM_ERROR("bad buffer %p %p %d\n", 887 DRM_ERROR("bad buffer %p %p %d\n",
889 buf->filp, filp, buf->pending); 888 buf->file_priv, file_priv,
890 ret = DRM_ERR(EINVAL); 889 buf->pending);
890 ret = -EINVAL;
891 goto cleanup; 891 goto cleanup;
892 } 892 }
893 893
@@ -924,7 +924,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
924 DRM_ERROR("bad cmd_type %i at %p\n", 924 DRM_ERROR("bad cmd_type %i at %p\n",
925 header.header.cmd_type, 925 header.header.cmd_type,
926 cmdbuf->buf - sizeof(header)); 926 cmdbuf->buf - sizeof(header));
927 ret = DRM_ERR(EINVAL); 927 ret = -EINVAL;
928 goto cleanup; 928 goto cleanup;
929 } 929 }
930 } 930 }
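
r300_do_cp_cmdbuf() keeps the same safety rule, now keyed on struct drm_file rather than the raw DRMFILE handle: a DMA buffer may only be dispatched on behalf of the client that owns it, and must not already be pending. A condensed sketch of that test (the helper name is hypothetical; the real check is inline in the hunk above):

	static int check_buf_owner(struct drm_buf *buf,
				   struct drm_file *file_priv)
	{
		if (buf->file_priv != file_priv || buf->pending)
			return -EINVAL;	/* not ours, or still queued */
		return 0;
	}
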
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index af5790f8fd53..335423c5c186 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -889,7 +889,7 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
889 DRM_ERROR("failed!\n"); 889 DRM_ERROR("failed!\n");
890 radeon_status(dev_priv); 890 radeon_status(dev_priv);
891#endif 891#endif
892 return DRM_ERR(EBUSY); 892 return -EBUSY;
893} 893}
894 894
895static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) 895static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
@@ -910,7 +910,7 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
910 DRM_ERROR("failed!\n"); 910 DRM_ERROR("failed!\n");
911 radeon_status(dev_priv); 911 radeon_status(dev_priv);
912#endif 912#endif
913 return DRM_ERR(EBUSY); 913 return -EBUSY;
914} 914}
915 915
916static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) 916static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
@@ -936,7 +936,7 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
936 DRM_ERROR("failed!\n"); 936 DRM_ERROR("failed!\n");
937 radeon_status(dev_priv); 937 radeon_status(dev_priv);
938#endif 938#endif
939 return DRM_ERR(EBUSY); 939 return -EBUSY;
940} 940}
941 941
942/* ================================================================ 942/* ================================================================
@@ -1394,7 +1394,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1394 if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { 1394 if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
1395 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); 1395 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
1396 radeon_do_cleanup_cp(dev); 1396 radeon_do_cleanup_cp(dev);
1397 return DRM_ERR(EINVAL); 1397 return -EINVAL;
1398 } 1398 }
1399 1399
1400 if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { 1400 if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
@@ -1409,7 +1409,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1409 if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { 1409 if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
1410 DRM_ERROR("PCI GART memory not allocated!\n"); 1410 DRM_ERROR("PCI GART memory not allocated!\n");
1411 radeon_do_cleanup_cp(dev); 1411 radeon_do_cleanup_cp(dev);
1412 return DRM_ERR(EINVAL); 1412 return -EINVAL;
1413 } 1413 }
1414 1414
1415 dev_priv->usec_timeout = init->usec_timeout; 1415 dev_priv->usec_timeout = init->usec_timeout;
@@ -1417,7 +1417,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1417 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { 1417 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
1418 DRM_DEBUG("TIMEOUT problem!\n"); 1418 DRM_DEBUG("TIMEOUT problem!\n");
1419 radeon_do_cleanup_cp(dev); 1419 radeon_do_cleanup_cp(dev);
1420 return DRM_ERR(EINVAL); 1420 return -EINVAL;
1421 } 1421 }
1422 1422
1423 /* Enable vblank on CRTC1 for older X servers 1423 /* Enable vblank on CRTC1 for older X servers
@@ -1446,7 +1446,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1446 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { 1446 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
1447 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); 1447 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
1448 radeon_do_cleanup_cp(dev); 1448 radeon_do_cleanup_cp(dev);
1449 return DRM_ERR(EINVAL); 1449 return -EINVAL;
1450 } 1450 }
1451 1451
1452 switch (init->fb_bpp) { 1452 switch (init->fb_bpp) {
@@ -1515,27 +1515,27 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1515 if (!dev_priv->sarea) { 1515 if (!dev_priv->sarea) {
1516 DRM_ERROR("could not find sarea!\n"); 1516 DRM_ERROR("could not find sarea!\n");
1517 radeon_do_cleanup_cp(dev); 1517 radeon_do_cleanup_cp(dev);
1518 return DRM_ERR(EINVAL); 1518 return -EINVAL;
1519 } 1519 }
1520 1520
1521 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); 1521 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
1522 if (!dev_priv->cp_ring) { 1522 if (!dev_priv->cp_ring) {
1523 DRM_ERROR("could not find cp ring region!\n"); 1523 DRM_ERROR("could not find cp ring region!\n");
1524 radeon_do_cleanup_cp(dev); 1524 radeon_do_cleanup_cp(dev);
1525 return DRM_ERR(EINVAL); 1525 return -EINVAL;
1526 } 1526 }
1527 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 1527 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
1528 if (!dev_priv->ring_rptr) { 1528 if (!dev_priv->ring_rptr) {
1529 DRM_ERROR("could not find ring read pointer!\n"); 1529 DRM_ERROR("could not find ring read pointer!\n");
1530 radeon_do_cleanup_cp(dev); 1530 radeon_do_cleanup_cp(dev);
1531 return DRM_ERR(EINVAL); 1531 return -EINVAL;
1532 } 1532 }
1533 dev->agp_buffer_token = init->buffers_offset; 1533 dev->agp_buffer_token = init->buffers_offset;
1534 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 1534 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
1535 if (!dev->agp_buffer_map) { 1535 if (!dev->agp_buffer_map) {
1536 DRM_ERROR("could not find dma buffer region!\n"); 1536 DRM_ERROR("could not find dma buffer region!\n");
1537 radeon_do_cleanup_cp(dev); 1537 radeon_do_cleanup_cp(dev);
1538 return DRM_ERR(EINVAL); 1538 return -EINVAL;
1539 } 1539 }
1540 1540
1541 if (init->gart_textures_offset) { 1541 if (init->gart_textures_offset) {
@@ -1544,7 +1544,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1544 if (!dev_priv->gart_textures) { 1544 if (!dev_priv->gart_textures) {
1545 DRM_ERROR("could not find GART texture region!\n"); 1545 DRM_ERROR("could not find GART texture region!\n");
1546 radeon_do_cleanup_cp(dev); 1546 radeon_do_cleanup_cp(dev);
1547 return DRM_ERR(EINVAL); 1547 return -EINVAL;
1548 } 1548 }
1549 } 1549 }
1550 1550
@@ -1562,7 +1562,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1562 !dev->agp_buffer_map->handle) { 1562 !dev->agp_buffer_map->handle) {
1563 DRM_ERROR("could not find ioremap agp regions!\n"); 1563 DRM_ERROR("could not find ioremap agp regions!\n");
1564 radeon_do_cleanup_cp(dev); 1564 radeon_do_cleanup_cp(dev);
1565 return DRM_ERR(EINVAL); 1565 return -EINVAL;
1566 } 1566 }
1567 } else 1567 } else
1568#endif 1568#endif
@@ -1710,14 +1710,14 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1710 DRM_ERROR 1710 DRM_ERROR
1711 ("Cannot use PCI Express without GART in FB memory\n"); 1711 ("Cannot use PCI Express without GART in FB memory\n");
1712 radeon_do_cleanup_cp(dev); 1712 radeon_do_cleanup_cp(dev);
1713 return DRM_ERR(EINVAL); 1713 return -EINVAL;
1714 } 1714 }
1715 } 1715 }
1716 1716
1717 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { 1717 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
1718 DRM_ERROR("failed to init PCI GART!\n"); 1718 DRM_ERROR("failed to init PCI GART!\n");
1719 radeon_do_cleanup_cp(dev); 1719 radeon_do_cleanup_cp(dev);
1720 return DRM_ERR(ENOMEM); 1720 return -ENOMEM;
1721 } 1721 }
1722 1722
1723 /* Turn on PCI GART */ 1723 /* Turn on PCI GART */
@@ -1797,7 +1797,7 @@ static int radeon_do_resume_cp(struct drm_device * dev)
1797 1797
1798 if (!dev_priv) { 1798 if (!dev_priv) {
1799 DRM_ERROR("Called with no initialization\n"); 1799 DRM_ERROR("Called with no initialization\n");
1800 return DRM_ERR(EINVAL); 1800 return -EINVAL;
1801 } 1801 }
1802 1802
1803 DRM_DEBUG("Starting radeon_do_resume_cp()\n"); 1803 DRM_DEBUG("Starting radeon_do_resume_cp()\n");
@@ -1823,38 +1823,33 @@ static int radeon_do_resume_cp(struct drm_device * dev)
1823 return 0; 1823 return 0;
1824} 1824}
1825 1825
1826int radeon_cp_init(DRM_IOCTL_ARGS) 1826int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
1827{ 1827{
1828 DRM_DEVICE; 1828 drm_radeon_init_t *init = data;
1829 drm_radeon_init_t init;
1830 1829
1831 LOCK_TEST_WITH_RETURN(dev, filp); 1830 LOCK_TEST_WITH_RETURN(dev, file_priv);
1832 1831
1833 DRM_COPY_FROM_USER_IOCTL(init, (drm_radeon_init_t __user *) data, 1832 if (init->func == RADEON_INIT_R300_CP)
1834 sizeof(init));
1835
1836 if (init.func == RADEON_INIT_R300_CP)
1837 r300_init_reg_flags(); 1833 r300_init_reg_flags();
1838 1834
1839 switch (init.func) { 1835 switch (init->func) {
1840 case RADEON_INIT_CP: 1836 case RADEON_INIT_CP:
1841 case RADEON_INIT_R200_CP: 1837 case RADEON_INIT_R200_CP:
1842 case RADEON_INIT_R300_CP: 1838 case RADEON_INIT_R300_CP:
1843 return radeon_do_init_cp(dev, &init); 1839 return radeon_do_init_cp(dev, init);
1844 case RADEON_CLEANUP_CP: 1840 case RADEON_CLEANUP_CP:
1845 return radeon_do_cleanup_cp(dev); 1841 return radeon_do_cleanup_cp(dev);
1846 } 1842 }
1847 1843
1848 return DRM_ERR(EINVAL); 1844 return -EINVAL;
1849} 1845}
1850 1846
1851int radeon_cp_start(DRM_IOCTL_ARGS) 1847int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
1852{ 1848{
1853 DRM_DEVICE;
1854 drm_radeon_private_t *dev_priv = dev->dev_private; 1849 drm_radeon_private_t *dev_priv = dev->dev_private;
1855 DRM_DEBUG("\n"); 1850 DRM_DEBUG("\n");
1856 1851
1857 LOCK_TEST_WITH_RETURN(dev, filp); 1852 LOCK_TEST_WITH_RETURN(dev, file_priv);
1858 1853
1859 if (dev_priv->cp_running) { 1854 if (dev_priv->cp_running) {
1860 DRM_DEBUG("%s while CP running\n", __FUNCTION__); 1855 DRM_DEBUG("%s while CP running\n", __FUNCTION__);
@@ -1874,18 +1869,14 @@ int radeon_cp_start(DRM_IOCTL_ARGS)
1874/* Stop the CP. The engine must have been idled before calling this 1869/* Stop the CP. The engine must have been idled before calling this
1875 * routine. 1870 * routine.
1876 */ 1871 */
1877int radeon_cp_stop(DRM_IOCTL_ARGS) 1872int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
1878{ 1873{
1879 DRM_DEVICE;
1880 drm_radeon_private_t *dev_priv = dev->dev_private; 1874 drm_radeon_private_t *dev_priv = dev->dev_private;
1881 drm_radeon_cp_stop_t stop; 1875 drm_radeon_cp_stop_t *stop = data;
1882 int ret; 1876 int ret;
1883 DRM_DEBUG("\n"); 1877 DRM_DEBUG("\n");
1884 1878
1885 LOCK_TEST_WITH_RETURN(dev, filp); 1879 LOCK_TEST_WITH_RETURN(dev, file_priv);
1886
1887 DRM_COPY_FROM_USER_IOCTL(stop, (drm_radeon_cp_stop_t __user *) data,
1888 sizeof(stop));
1889 1880
1890 if (!dev_priv->cp_running) 1881 if (!dev_priv->cp_running)
1891 return 0; 1882 return 0;
@@ -1893,14 +1884,14 @@ int radeon_cp_stop(DRM_IOCTL_ARGS)
1893 /* Flush any pending CP commands. This ensures any outstanding 1884 /* Flush any pending CP commands. This ensures any outstanding
 1894 * commands are executed by the engine before we turn it off. 1885 * commands are executed by the engine before we turn it off.
1895 */ 1886 */
1896 if (stop.flush) { 1887 if (stop->flush) {
1897 radeon_do_cp_flush(dev_priv); 1888 radeon_do_cp_flush(dev_priv);
1898 } 1889 }
1899 1890
1900 /* If we fail to make the engine go idle, we return an error 1891 /* If we fail to make the engine go idle, we return an error
1901 * code so that the DRM ioctl wrapper can try again. 1892 * code so that the DRM ioctl wrapper can try again.
1902 */ 1893 */
1903 if (stop.idle) { 1894 if (stop->idle) {
1904 ret = radeon_do_cp_idle(dev_priv); 1895 ret = radeon_do_cp_idle(dev_priv);
1905 if (ret) 1896 if (ret)
1906 return ret; 1897 return ret;
@@ -1963,17 +1954,16 @@ void radeon_do_release(struct drm_device * dev)
1963 1954
1964/* Just reset the CP ring. Called as part of an X Server engine reset. 1955/* Just reset the CP ring. Called as part of an X Server engine reset.
1965 */ 1956 */
1966int radeon_cp_reset(DRM_IOCTL_ARGS) 1957int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
1967{ 1958{
1968 DRM_DEVICE;
1969 drm_radeon_private_t *dev_priv = dev->dev_private; 1959 drm_radeon_private_t *dev_priv = dev->dev_private;
1970 DRM_DEBUG("\n"); 1960 DRM_DEBUG("\n");
1971 1961
1972 LOCK_TEST_WITH_RETURN(dev, filp); 1962 LOCK_TEST_WITH_RETURN(dev, file_priv);
1973 1963
1974 if (!dev_priv) { 1964 if (!dev_priv) {
1975 DRM_DEBUG("%s called before init done\n", __FUNCTION__); 1965 DRM_DEBUG("%s called before init done\n", __FUNCTION__);
1976 return DRM_ERR(EINVAL); 1966 return -EINVAL;
1977 } 1967 }
1978 1968
1979 radeon_do_cp_reset(dev_priv); 1969 radeon_do_cp_reset(dev_priv);
@@ -1984,32 +1974,29 @@ int radeon_cp_reset(DRM_IOCTL_ARGS)
1984 return 0; 1974 return 0;
1985} 1975}
1986 1976
1987int radeon_cp_idle(DRM_IOCTL_ARGS) 1977int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
1988{ 1978{
1989 DRM_DEVICE;
1990 drm_radeon_private_t *dev_priv = dev->dev_private; 1979 drm_radeon_private_t *dev_priv = dev->dev_private;
1991 DRM_DEBUG("\n"); 1980 DRM_DEBUG("\n");
1992 1981
1993 LOCK_TEST_WITH_RETURN(dev, filp); 1982 LOCK_TEST_WITH_RETURN(dev, file_priv);
1994 1983
1995 return radeon_do_cp_idle(dev_priv); 1984 return radeon_do_cp_idle(dev_priv);
1996} 1985}
1997 1986
1998/* Added by Charl P. Botha to call radeon_do_resume_cp(). 1987/* Added by Charl P. Botha to call radeon_do_resume_cp().
1999 */ 1988 */
2000int radeon_cp_resume(DRM_IOCTL_ARGS) 1989int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
2001{ 1990{
2002 DRM_DEVICE;
2003 1991
2004 return radeon_do_resume_cp(dev); 1992 return radeon_do_resume_cp(dev);
2005} 1993}
2006 1994
2007int radeon_engine_reset(DRM_IOCTL_ARGS) 1995int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
2008{ 1996{
2009 DRM_DEVICE;
2010 DRM_DEBUG("\n"); 1997 DRM_DEBUG("\n");
2011 1998
2012 LOCK_TEST_WITH_RETURN(dev, filp); 1999 LOCK_TEST_WITH_RETURN(dev, file_priv);
2013 2000
2014 return radeon_do_engine_reset(dev); 2001 return radeon_do_engine_reset(dev);
2015} 2002}
@@ -2020,7 +2007,7 @@ int radeon_engine_reset(DRM_IOCTL_ARGS)
2020 2007
2021/* KW: Deprecated to say the least: 2008/* KW: Deprecated to say the least:
2022 */ 2009 */
2023int radeon_fullscreen(DRM_IOCTL_ARGS) 2010int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
2024{ 2011{
2025 return 0; 2012 return 0;
2026} 2013}
@@ -2066,8 +2053,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
2066 for (i = start; i < dma->buf_count; i++) { 2053 for (i = start; i < dma->buf_count; i++) {
2067 buf = dma->buflist[i]; 2054 buf = dma->buflist[i];
2068 buf_priv = buf->dev_private; 2055 buf_priv = buf->dev_private;
2069 if (buf->filp == 0 || (buf->pending && 2056 if (buf->file_priv == NULL || (buf->pending &&
2070 buf_priv->age <= done_age)) { 2057 buf_priv->age <=
2058 done_age)) {
2071 dev_priv->stats.requested_bufs++; 2059 dev_priv->stats.requested_bufs++;
2072 buf->pending = 0; 2060 buf->pending = 0;
2073 return buf; 2061 return buf;
@@ -2106,8 +2094,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
2106 for (i = start; i < dma->buf_count; i++) { 2094 for (i = start; i < dma->buf_count; i++) {
2107 buf = dma->buflist[i]; 2095 buf = dma->buflist[i];
2108 buf_priv = buf->dev_private; 2096 buf_priv = buf->dev_private;
2109 if (buf->filp == 0 || (buf->pending && 2097 if (buf->file_priv == 0 || (buf->pending &&
2110 buf_priv->age <= done_age)) { 2098 buf_priv->age <=
2099 done_age)) {
2111 dev_priv->stats.requested_bufs++; 2100 dev_priv->stats.requested_bufs++;
2112 buf->pending = 0; 2101 buf->pending = 0;
2113 return buf; 2102 return buf;
@@ -2167,10 +2156,11 @@ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
2167 radeon_status(dev_priv); 2156 radeon_status(dev_priv);
2168 DRM_ERROR("failed!\n"); 2157 DRM_ERROR("failed!\n");
2169#endif 2158#endif
2170 return DRM_ERR(EBUSY); 2159 return -EBUSY;
2171} 2160}
2172 2161
2173static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, 2162static int radeon_cp_get_buffers(struct drm_device *dev,
2163 struct drm_file *file_priv,
2174 struct drm_dma * d) 2164 struct drm_dma * d)
2175{ 2165{
2176 int i; 2166 int i;
@@ -2179,58 +2169,52 @@ static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev,
2179 for (i = d->granted_count; i < d->request_count; i++) { 2169 for (i = d->granted_count; i < d->request_count; i++) {
2180 buf = radeon_freelist_get(dev); 2170 buf = radeon_freelist_get(dev);
2181 if (!buf) 2171 if (!buf)
2182 return DRM_ERR(EBUSY); /* NOTE: broken client */ 2172 return -EBUSY; /* NOTE: broken client */
2183 2173
2184 buf->filp = filp; 2174 buf->file_priv = file_priv;
2185 2175
2186 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, 2176 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
2187 sizeof(buf->idx))) 2177 sizeof(buf->idx)))
2188 return DRM_ERR(EFAULT); 2178 return -EFAULT;
2189 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, 2179 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
2190 sizeof(buf->total))) 2180 sizeof(buf->total)))
2191 return DRM_ERR(EFAULT); 2181 return -EFAULT;
2192 2182
2193 d->granted_count++; 2183 d->granted_count++;
2194 } 2184 }
2195 return 0; 2185 return 0;
2196} 2186}
2197 2187
2198int radeon_cp_buffers(DRM_IOCTL_ARGS) 2188int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
2199{ 2189{
2200 DRM_DEVICE;
2201 struct drm_device_dma *dma = dev->dma; 2190 struct drm_device_dma *dma = dev->dma;
2202 int ret = 0; 2191 int ret = 0;
2203 struct drm_dma __user *argp = (void __user *)data; 2192 struct drm_dma *d = data;
2204 struct drm_dma d;
2205 2193
2206 LOCK_TEST_WITH_RETURN(dev, filp); 2194 LOCK_TEST_WITH_RETURN(dev, file_priv);
2207
2208 DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
2209 2195
2210 /* Please don't send us buffers. 2196 /* Please don't send us buffers.
2211 */ 2197 */
2212 if (d.send_count != 0) { 2198 if (d->send_count != 0) {
2213 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 2199 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
2214 DRM_CURRENTPID, d.send_count); 2200 DRM_CURRENTPID, d->send_count);
2215 return DRM_ERR(EINVAL); 2201 return -EINVAL;
2216 } 2202 }
2217 2203
2218 /* We'll send you buffers. 2204 /* We'll send you buffers.
2219 */ 2205 */
2220 if (d.request_count < 0 || d.request_count > dma->buf_count) { 2206 if (d->request_count < 0 || d->request_count > dma->buf_count) {
2221 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 2207 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
2222 DRM_CURRENTPID, d.request_count, dma->buf_count); 2208 DRM_CURRENTPID, d->request_count, dma->buf_count);
2223 return DRM_ERR(EINVAL); 2209 return -EINVAL;
2224 } 2210 }
2225 2211
2226 d.granted_count = 0; 2212 d->granted_count = 0;
2227 2213
2228 if (d.request_count) { 2214 if (d->request_count) {
2229 ret = radeon_cp_get_buffers(filp, dev, &d); 2215 ret = radeon_cp_get_buffers(dev, file_priv, d);
2230 } 2216 }
2231 2217
2232 DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
2233
2234 return ret; 2218 return ret;
2235} 2219}
2236 2220
@@ -2241,7 +2225,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
2241 2225
2242 dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); 2226 dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
2243 if (dev_priv == NULL) 2227 if (dev_priv == NULL)
2244 return DRM_ERR(ENOMEM); 2228 return -ENOMEM;
2245 2229
2246 memset(dev_priv, 0, sizeof(drm_radeon_private_t)); 2230 memset(dev_priv, 0, sizeof(drm_radeon_private_t));
2247 dev->dev_private = (void *)dev_priv; 2231 dev->dev_private = (void *)dev_priv;
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 3b3d9357201c..e4077bc212b3 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -188,7 +188,7 @@ struct mem_block {
188 struct mem_block *prev; 188 struct mem_block *prev;
189 int start; 189 int start;
190 int size; 190 int size;
191 DRMFILE filp; /* 0: free, -1: heap, other: real files */ 191 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
192}; 192};
193 193
194struct radeon_surface { 194struct radeon_surface {
@@ -203,7 +203,7 @@ struct radeon_virt_surface {
203 u32 lower; 203 u32 lower;
204 u32 upper; 204 u32 upper;
205 u32 flags; 205 u32 flags;
206 DRMFILE filp; 206 struct drm_file *file_priv;
207}; 207};
208 208
209typedef struct drm_radeon_private { 209typedef struct drm_radeon_private {
@@ -307,7 +307,7 @@ typedef struct drm_radeon_kcmd_buffer {
307} drm_radeon_kcmd_buffer_t; 307} drm_radeon_kcmd_buffer_t;
308 308
309extern int radeon_no_wb; 309extern int radeon_no_wb;
310extern drm_ioctl_desc_t radeon_ioctls[]; 310extern struct drm_ioctl_desc radeon_ioctls[];
311extern int radeon_max_ioctl; 311extern int radeon_max_ioctl;
312 312
313/* Check whether the given hardware address is inside the framebuffer or the 313/* Check whether the given hardware address is inside the framebuffer or the
@@ -326,15 +326,15 @@ static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
326} 326}
327 327
328 /* radeon_cp.c */ 328 /* radeon_cp.c */
329extern int radeon_cp_init(DRM_IOCTL_ARGS); 329extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
330extern int radeon_cp_start(DRM_IOCTL_ARGS); 330extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
331extern int radeon_cp_stop(DRM_IOCTL_ARGS); 331extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
332extern int radeon_cp_reset(DRM_IOCTL_ARGS); 332extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
333extern int radeon_cp_idle(DRM_IOCTL_ARGS); 333extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
334extern int radeon_cp_resume(DRM_IOCTL_ARGS); 334extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
335extern int radeon_engine_reset(DRM_IOCTL_ARGS); 335extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
336extern int radeon_fullscreen(DRM_IOCTL_ARGS); 336extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
337extern int radeon_cp_buffers(DRM_IOCTL_ARGS); 337extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
338 338
339extern void radeon_freelist_reset(struct drm_device * dev); 339extern void radeon_freelist_reset(struct drm_device * dev);
340extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); 340extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
@@ -347,15 +347,16 @@ extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
347extern int radeon_presetup(struct drm_device *dev); 347extern int radeon_presetup(struct drm_device *dev);
348extern int radeon_driver_postcleanup(struct drm_device *dev); 348extern int radeon_driver_postcleanup(struct drm_device *dev);
349 349
350extern int radeon_mem_alloc(DRM_IOCTL_ARGS); 350extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
351extern int radeon_mem_free(DRM_IOCTL_ARGS); 351extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
352extern int radeon_mem_init_heap(DRM_IOCTL_ARGS); 352extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
353extern void radeon_mem_takedown(struct mem_block **heap); 353extern void radeon_mem_takedown(struct mem_block **heap);
354extern void radeon_mem_release(DRMFILE filp, struct mem_block *heap); 354extern void radeon_mem_release(struct drm_file *file_priv,
355 struct mem_block *heap);
355 356
356 /* radeon_irq.c */ 357 /* radeon_irq.c */
357extern int radeon_irq_emit(DRM_IOCTL_ARGS); 358extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
358extern int radeon_irq_wait(DRM_IOCTL_ARGS); 359extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
359 360
360extern void radeon_do_release(struct drm_device * dev); 361extern void radeon_do_release(struct drm_device * dev);
361extern int radeon_driver_vblank_wait(struct drm_device * dev, 362extern int radeon_driver_vblank_wait(struct drm_device * dev,
@@ -372,7 +373,7 @@ extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
372extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); 373extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
373extern int radeon_driver_unload(struct drm_device *dev); 374extern int radeon_driver_unload(struct drm_device *dev);
374extern int radeon_driver_firstopen(struct drm_device *dev); 375extern int radeon_driver_firstopen(struct drm_device *dev);
375extern void radeon_driver_preclose(struct drm_device * dev, DRMFILE filp); 376extern void radeon_driver_preclose(struct drm_device * dev, struct drm_file *file_priv);
376extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp); 377extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp);
377extern void radeon_driver_lastclose(struct drm_device * dev); 378extern void radeon_driver_lastclose(struct drm_device * dev);
378extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv); 379extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv);
@@ -382,8 +383,8 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
382/* r300_cmdbuf.c */ 383/* r300_cmdbuf.c */
383extern void r300_init_reg_flags(void); 384extern void r300_init_reg_flags(void);
384 385
385extern int r300_do_cp_cmdbuf(struct drm_device * dev, DRMFILE filp, 386extern int r300_do_cp_cmdbuf(struct drm_device * dev,
386 struct drm_file * filp_priv, 387 struct drm_file *file_priv,
387 drm_radeon_kcmd_buffer_t * cmdbuf); 388 drm_radeon_kcmd_buffer_t * cmdbuf);
388 389
389/* Flags for stats.boxes 390/* Flags for stats.boxes
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/char/drm/radeon_irq.c
index ad8a0ac7182e..2b2407ee490e 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/char/drm/radeon_irq.c
@@ -144,8 +144,8 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
144 return ret; 144 return ret;
145} 145}
146 146
147int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence, 147static int radeon_driver_vblank_do_wait(struct drm_device * dev,
148 int crtc) 148 unsigned int *sequence, int crtc)
149{ 149{
150 drm_radeon_private_t *dev_priv = 150 drm_radeon_private_t *dev_priv =
151 (drm_radeon_private_t *) dev->dev_private; 151 (drm_radeon_private_t *) dev->dev_private;
@@ -155,7 +155,7 @@ int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence
155 atomic_t *counter; 155 atomic_t *counter;
156 if (!dev_priv) { 156 if (!dev_priv) {
157 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 157 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
158 return DRM_ERR(EINVAL); 158 return -EINVAL;
159 } 159 }
160 160
161 if (crtc == DRM_RADEON_VBLANK_CRTC1) { 161 if (crtc == DRM_RADEON_VBLANK_CRTC1) {
@@ -165,7 +165,7 @@ int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence
165 counter = &dev->vbl_received2; 165 counter = &dev->vbl_received2;
166 ack |= RADEON_CRTC2_VBLANK_STAT; 166 ack |= RADEON_CRTC2_VBLANK_STAT;
167 } else 167 } else
168 return DRM_ERR(EINVAL); 168 return -EINVAL;
169 169
170 radeon_acknowledge_irqs(dev_priv, ack); 170 radeon_acknowledge_irqs(dev_priv, ack);
171 171
@@ -196,28 +196,24 @@ int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
196 196
197/* Needs the lock as it touches the ring. 197/* Needs the lock as it touches the ring.
198 */ 198 */
199int radeon_irq_emit(DRM_IOCTL_ARGS) 199int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
200{ 200{
201 DRM_DEVICE;
202 drm_radeon_private_t *dev_priv = dev->dev_private; 201 drm_radeon_private_t *dev_priv = dev->dev_private;
203 drm_radeon_irq_emit_t emit; 202 drm_radeon_irq_emit_t *emit = data;
204 int result; 203 int result;
205 204
206 LOCK_TEST_WITH_RETURN(dev, filp); 205 LOCK_TEST_WITH_RETURN(dev, file_priv);
207 206
208 if (!dev_priv) { 207 if (!dev_priv) {
209 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 208 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
210 return DRM_ERR(EINVAL); 209 return -EINVAL;
211 } 210 }
212 211
213 DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data,
214 sizeof(emit));
215
216 result = radeon_emit_irq(dev); 212 result = radeon_emit_irq(dev);
217 213
218 if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { 214 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
219 DRM_ERROR("copy_to_user\n"); 215 DRM_ERROR("copy_to_user\n");
220 return DRM_ERR(EFAULT); 216 return -EFAULT;
221 } 217 }
222 218
223 return 0; 219 return 0;
@@ -225,21 +221,17 @@ int radeon_irq_emit(DRM_IOCTL_ARGS)
225 221
226/* Doesn't need the hardware lock. 222/* Doesn't need the hardware lock.
227 */ 223 */
228int radeon_irq_wait(DRM_IOCTL_ARGS) 224int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
229{ 225{
230 DRM_DEVICE;
231 drm_radeon_private_t *dev_priv = dev->dev_private; 226 drm_radeon_private_t *dev_priv = dev->dev_private;
232 drm_radeon_irq_wait_t irqwait; 227 drm_radeon_irq_wait_t *irqwait = data;
233 228
234 if (!dev_priv) { 229 if (!dev_priv) {
235 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 230 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
236 return DRM_ERR(EINVAL); 231 return -EINVAL;
237 } 232 }
238 233
239 DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data, 234 return radeon_wait_irq(dev, irqwait->irq_seq);
240 sizeof(irqwait));
241
242 return radeon_wait_irq(dev, irqwait.irq_seq);
243} 235}
244 236
245static void radeon_enable_interrupt(struct drm_device *dev) 237static void radeon_enable_interrupt(struct drm_device *dev)
@@ -320,7 +312,7 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
320 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; 312 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
321 if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { 313 if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
322 DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value); 314 DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
323 return DRM_ERR(EINVAL); 315 return -EINVAL;
324 } 316 }
325 dev_priv->vblank_crtc = (unsigned int)value; 317 dev_priv->vblank_crtc = (unsigned int)value;
326 radeon_enable_interrupt(dev); 318 radeon_enable_interrupt(dev);
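radeon_irq_emit() and radeon_irq_wait() above implement a sequence-number handshake: emit returns a fresh number and writes it back to userspace, and wait blocks until the interrupt path reports that the hardware has passed that number. The userspace model below sketches that handshake with a mutex and condition variable standing in for the driver's wait queue; it is illustrative only, and none of its names come from the driver.

/* Illustrative sequence-number fence: "emit" hands out the next number,
 * "wait" sleeps until another thread (standing in for the IRQ handler)
 * reports that the hardware reached it.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static unsigned int emitted;    /* last sequence number handed out */
static unsigned int completed;  /* last sequence number the "hardware" retired */

static unsigned int irq_emit(void)
{
        unsigned int seq;

        pthread_mutex_lock(&lock);
        seq = ++emitted;                 /* like radeon_emit_irq() returning a new seq */
        pthread_mutex_unlock(&lock);
        return seq;
}

static void irq_wait(unsigned int seq)
{
        pthread_mutex_lock(&lock);
        while (completed < seq)          /* like radeon_wait_irq() sleeping on the counter */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
}

static void *fake_hw(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        completed = emitted;             /* pretend all emitted work has retired */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;
        unsigned int seq = irq_emit();

        pthread_create(&t, NULL, fake_hw, NULL);
        irq_wait(seq);
        pthread_join(t, NULL);
        printf("sequence %u completed\n", seq);
        return 0;
}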
diff --git a/drivers/char/drm/radeon_mem.c b/drivers/char/drm/radeon_mem.c
index 517cad8b6e3a..a29acfe2f973 100644
--- a/drivers/char/drm/radeon_mem.c
+++ b/drivers/char/drm/radeon_mem.c
@@ -39,7 +39,7 @@
39 */ 39 */
40 40
41static struct mem_block *split_block(struct mem_block *p, int start, int size, 41static struct mem_block *split_block(struct mem_block *p, int start, int size,
42 DRMFILE filp) 42 struct drm_file *file_priv)
43{ 43{
44 /* Maybe cut off the start of an existing block */ 44 /* Maybe cut off the start of an existing block */
45 if (start > p->start) { 45 if (start > p->start) {
@@ -49,7 +49,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
49 goto out; 49 goto out;
50 newblock->start = start; 50 newblock->start = start;
51 newblock->size = p->size - (start - p->start); 51 newblock->size = p->size - (start - p->start);
52 newblock->filp = NULL; 52 newblock->file_priv = NULL;
53 newblock->next = p->next; 53 newblock->next = p->next;
54 newblock->prev = p; 54 newblock->prev = p;
55 p->next->prev = newblock; 55 p->next->prev = newblock;
@@ -66,7 +66,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
66 goto out; 66 goto out;
67 newblock->start = start + size; 67 newblock->start = start + size;
68 newblock->size = p->size - size; 68 newblock->size = p->size - size;
69 newblock->filp = NULL; 69 newblock->file_priv = NULL;
70 newblock->next = p->next; 70 newblock->next = p->next;
71 newblock->prev = p; 71 newblock->prev = p;
72 p->next->prev = newblock; 72 p->next->prev = newblock;
@@ -76,20 +76,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
76 76
77 out: 77 out:
78 /* Our block is in the middle */ 78 /* Our block is in the middle */
79 p->filp = filp; 79 p->file_priv = file_priv;
80 return p; 80 return p;
81} 81}
82 82
83static struct mem_block *alloc_block(struct mem_block *heap, int size, 83static struct mem_block *alloc_block(struct mem_block *heap, int size,
84 int align2, DRMFILE filp) 84 int align2, struct drm_file *file_priv)
85{ 85{
86 struct mem_block *p; 86 struct mem_block *p;
87 int mask = (1 << align2) - 1; 87 int mask = (1 << align2) - 1;
88 88
89 list_for_each(p, heap) { 89 list_for_each(p, heap) {
90 int start = (p->start + mask) & ~mask; 90 int start = (p->start + mask) & ~mask;
91 if (p->filp == 0 && start + size <= p->start + p->size) 91 if (p->file_priv == 0 && start + size <= p->start + p->size)
92 return split_block(p, start, size, filp); 92 return split_block(p, start, size, file_priv);
93 } 93 }
94 94
95 return NULL; 95 return NULL;
@@ -108,12 +108,12 @@ static struct mem_block *find_block(struct mem_block *heap, int start)
108 108
109static void free_block(struct mem_block *p) 109static void free_block(struct mem_block *p)
110{ 110{
111 p->filp = NULL; 111 p->file_priv = NULL;
112 112
113 /* Assumes a single contiguous range. Needs a special filp in 113 /* Assumes a single contiguous range. Needs a special file_priv in
114 * 'heap' to stop it being subsumed. 114 * 'heap' to stop it being subsumed.
115 */ 115 */
116 if (p->next->filp == 0) { 116 if (p->next->file_priv == 0) {
117 struct mem_block *q = p->next; 117 struct mem_block *q = p->next;
118 p->size += q->size; 118 p->size += q->size;
119 p->next = q->next; 119 p->next = q->next;
@@ -121,7 +121,7 @@ static void free_block(struct mem_block *p)
121 drm_free(q, sizeof(*q), DRM_MEM_BUFS); 121 drm_free(q, sizeof(*q), DRM_MEM_BUFS);
122 } 122 }
123 123
124 if (p->prev->filp == 0) { 124 if (p->prev->file_priv == 0) {
125 struct mem_block *q = p->prev; 125 struct mem_block *q = p->prev;
126 q->size += p->size; 126 q->size += p->size;
127 q->next = p->next; 127 q->next = p->next;
@@ -137,28 +137,28 @@ static int init_heap(struct mem_block **heap, int start, int size)
137 struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); 137 struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
138 138
139 if (!blocks) 139 if (!blocks)
140 return DRM_ERR(ENOMEM); 140 return -ENOMEM;
141 141
142 *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); 142 *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
143 if (!*heap) { 143 if (!*heap) {
144 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); 144 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
145 return DRM_ERR(ENOMEM); 145 return -ENOMEM;
146 } 146 }
147 147
148 blocks->start = start; 148 blocks->start = start;
149 blocks->size = size; 149 blocks->size = size;
150 blocks->filp = NULL; 150 blocks->file_priv = NULL;
151 blocks->next = blocks->prev = *heap; 151 blocks->next = blocks->prev = *heap;
152 152
153 memset(*heap, 0, sizeof(**heap)); 153 memset(*heap, 0, sizeof(**heap));
154 (*heap)->filp = (DRMFILE) - 1; 154 (*heap)->file_priv = (struct drm_file *) - 1;
155 (*heap)->next = (*heap)->prev = blocks; 155 (*heap)->next = (*heap)->prev = blocks;
156 return 0; 156 return 0;
157} 157}
158 158
159/* Free all blocks associated with the releasing file. 159/* Free all blocks associated with the releasing file.
160 */ 160 */
161void radeon_mem_release(DRMFILE filp, struct mem_block *heap) 161void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
162{ 162{
163 struct mem_block *p; 163 struct mem_block *p;
164 164
@@ -166,15 +166,15 @@ void radeon_mem_release(DRMFILE filp, struct mem_block *heap)
166 return; 166 return;
167 167
168 list_for_each(p, heap) { 168 list_for_each(p, heap) {
169 if (p->filp == filp) 169 if (p->file_priv == file_priv)
170 p->filp = NULL; 170 p->file_priv = NULL;
171 } 171 }
172 172
173 /* Assumes a single contiguous range. Needs a special filp in 173 /* Assumes a single contiguous range. Needs a special file_priv in
174 * 'heap' to stop it being subsumed. 174 * 'heap' to stop it being subsumed.
175 */ 175 */
176 list_for_each(p, heap) { 176 list_for_each(p, heap) {
177 while (p->filp == 0 && p->next->filp == 0) { 177 while (p->file_priv == 0 && p->next->file_priv == 0) {
178 struct mem_block *q = p->next; 178 struct mem_block *q = p->next;
179 p->size += q->size; 179 p->size += q->size;
180 p->next = q->next; 180 p->next = q->next;
@@ -217,98 +217,86 @@ static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
217 } 217 }
218} 218}
219 219
220int radeon_mem_alloc(DRM_IOCTL_ARGS) 220int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
221{ 221{
222 DRM_DEVICE;
223 drm_radeon_private_t *dev_priv = dev->dev_private; 222 drm_radeon_private_t *dev_priv = dev->dev_private;
224 drm_radeon_mem_alloc_t alloc; 223 drm_radeon_mem_alloc_t *alloc = data;
225 struct mem_block *block, **heap; 224 struct mem_block *block, **heap;
226 225
227 if (!dev_priv) { 226 if (!dev_priv) {
228 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 227 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
229 return DRM_ERR(EINVAL); 228 return -EINVAL;
230 } 229 }
231 230
232 DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data, 231 heap = get_heap(dev_priv, alloc->region);
233 sizeof(alloc));
234
235 heap = get_heap(dev_priv, alloc.region);
236 if (!heap || !*heap) 232 if (!heap || !*heap)
237 return DRM_ERR(EFAULT); 233 return -EFAULT;
238 234
239 /* Make things easier on ourselves: all allocations at least 235 /* Make things easier on ourselves: all allocations at least
240 * 4k aligned. 236 * 4k aligned.
241 */ 237 */
242 if (alloc.alignment < 12) 238 if (alloc->alignment < 12)
243 alloc.alignment = 12; 239 alloc->alignment = 12;
244 240
245 block = alloc_block(*heap, alloc.size, alloc.alignment, filp); 241 block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
246 242
247 if (!block) 243 if (!block)
248 return DRM_ERR(ENOMEM); 244 return -ENOMEM;
249 245
250 if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { 246 if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
247 sizeof(int))) {
251 DRM_ERROR("copy_to_user\n"); 248 DRM_ERROR("copy_to_user\n");
252 return DRM_ERR(EFAULT); 249 return -EFAULT;
253 } 250 }
254 251
255 return 0; 252 return 0;
256} 253}
257 254
258int radeon_mem_free(DRM_IOCTL_ARGS) 255int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
259{ 256{
260 DRM_DEVICE;
261 drm_radeon_private_t *dev_priv = dev->dev_private; 257 drm_radeon_private_t *dev_priv = dev->dev_private;
262 drm_radeon_mem_free_t memfree; 258 drm_radeon_mem_free_t *memfree = data;
263 struct mem_block *block, **heap; 259 struct mem_block *block, **heap;
264 260
265 if (!dev_priv) { 261 if (!dev_priv) {
266 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 262 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
267 return DRM_ERR(EINVAL); 263 return -EINVAL;
268 } 264 }
269 265
270 DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data, 266 heap = get_heap(dev_priv, memfree->region);
271 sizeof(memfree));
272
273 heap = get_heap(dev_priv, memfree.region);
274 if (!heap || !*heap) 267 if (!heap || !*heap)
275 return DRM_ERR(EFAULT); 268 return -EFAULT;
276 269
277 block = find_block(*heap, memfree.region_offset); 270 block = find_block(*heap, memfree->region_offset);
278 if (!block) 271 if (!block)
279 return DRM_ERR(EFAULT); 272 return -EFAULT;
280 273
281 if (block->filp != filp) 274 if (block->file_priv != file_priv)
282 return DRM_ERR(EPERM); 275 return -EPERM;
283 276
284 free_block(block); 277 free_block(block);
285 return 0; 278 return 0;
286} 279}
287 280
288int radeon_mem_init_heap(DRM_IOCTL_ARGS) 281int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
289{ 282{
290 DRM_DEVICE;
291 drm_radeon_private_t *dev_priv = dev->dev_private; 283 drm_radeon_private_t *dev_priv = dev->dev_private;
292 drm_radeon_mem_init_heap_t initheap; 284 drm_radeon_mem_init_heap_t *initheap = data;
293 struct mem_block **heap; 285 struct mem_block **heap;
294 286
295 if (!dev_priv) { 287 if (!dev_priv) {
296 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 288 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
297 return DRM_ERR(EINVAL); 289 return -EINVAL;
298 } 290 }
299 291
300 DRM_COPY_FROM_USER_IOCTL(initheap, 292 heap = get_heap(dev_priv, initheap->region);
301 (drm_radeon_mem_init_heap_t __user *) data,
302 sizeof(initheap));
303
304 heap = get_heap(dev_priv, initheap.region);
305 if (!heap) 293 if (!heap)
306 return DRM_ERR(EFAULT); 294 return -EFAULT;
307 295
308 if (*heap) { 296 if (*heap) {
309 DRM_ERROR("heap already initialized?"); 297 DRM_ERROR("heap already initialized?");
310 return DRM_ERR(EFAULT); 298 return -EFAULT;
311 } 299 }
312 300
313 return init_heap(heap, initheap.start, initheap.size); 301 return init_heap(heap, initheap->start, initheap->size);
314} 302}
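radeon_mem.c above is a small first-fit range allocator: alloc_block() walks the block list, split_block() carves the request out of a free block, and free_block() merges a freed block with its free neighbours, with file_priv doubling as the ownership tag (NULL for free, a sentinel value for the heap head). The sketch below models the same split/coalesce scheme in plain userspace C under simplified assumptions: a non-circular list, a generic owner pointer instead of struct drm_file *, and no alignment handling. It is an illustration of the technique, not the driver's code.

/* Simplified first-fit range allocator with block split and coalesce. */
#include <stdio.h>
#include <stdlib.h>

struct block {
        struct block *prev, *next;
        int start, size;
        const void *owner;       /* NULL: free, otherwise the owning client */
};

static struct block *heap_init(int start, int size)
{
        struct block *b = calloc(1, sizeof(*b));

        if (!b)
                return NULL;
        b->start = start;
        b->size = size;
        return b;                /* one free block covering the whole range */
}

static struct block *heap_alloc(struct block *heap, int size, const void *owner)
{
        struct block *p;

        for (p = heap; p; p = p->next) {
                if (p->owner || p->size < size)
                        continue;
                if (p->size > size) {            /* split off the free tail */
                        struct block *tail = calloc(1, sizeof(*tail));

                        if (!tail)
                                return NULL;
                        tail->start = p->start + size;
                        tail->size = p->size - size;
                        tail->next = p->next;
                        tail->prev = p;
                        if (p->next)
                                p->next->prev = tail;
                        p->next = tail;
                        p->size = size;
                }
                p->owner = owner;
                return p;
        }
        return NULL;                             /* no free block big enough */
}

static void heap_free(struct block *p)
{
        p->owner = NULL;
        if (p->next && !p->next->owner) {        /* merge with the next free block */
                struct block *q = p->next;

                p->size += q->size;
                p->next = q->next;
                if (q->next)
                        q->next->prev = p;
                free(q);
        }
        if (p->prev && !p->prev->owner) {        /* merge into the previous free block */
                struct block *q = p->prev;

                q->size += p->size;
                q->next = p->next;
                if (p->next)
                        p->next->prev = q;
                free(p);
        }
}

int main(void)
{
        int client = 1;
        struct block *heap = heap_init(0, 1 << 20);
        struct block *a = heap_alloc(heap, 4096, &client);
        struct block *b = heap_alloc(heap, 8192, &client);

        printf("a at %d, b at %d\n", a->start, b->start);
        heap_free(a);
        heap_free(b);
        return 0;
}

Alignment (the driver rounds the start up with a power-of-two mask) and the per-file release pass done by radeon_mem_release() are omitted to keep the model short.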
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 3ddf86f2abf0..69c9f2febf43 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -39,7 +39,7 @@
39 39
40static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * 40static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
41 dev_priv, 41 dev_priv,
42 struct drm_file * filp_priv, 42 struct drm_file * file_priv,
43 u32 *offset) 43 u32 *offset)
44{ 44{
45 u64 off = *offset; 45 u64 off = *offset;
@@ -71,7 +71,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
71 * magic offset we get from SETPARAM or calculated from fb_location 71 * magic offset we get from SETPARAM or calculated from fb_location
72 */ 72 */
73 if (off < (dev_priv->fb_size + dev_priv->gart_size)) { 73 if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
74 radeon_priv = filp_priv->driver_priv; 74 radeon_priv = file_priv->driver_priv;
75 off += radeon_priv->radeon_fb_delta; 75 off += radeon_priv->radeon_fb_delta;
76 } 76 }
77 77
@@ -85,29 +85,29 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
85 *offset = off; 85 *offset = off;
86 return 0; 86 return 0;
87 } 87 }
88 return DRM_ERR(EINVAL); 88 return -EINVAL;
89} 89}
90 90
91static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * 91static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
92 dev_priv, 92 dev_priv,
93 struct drm_file * filp_priv, 93 struct drm_file *file_priv,
94 int id, u32 *data) 94 int id, u32 *data)
95{ 95{
96 switch (id) { 96 switch (id) {
97 97
98 case RADEON_EMIT_PP_MISC: 98 case RADEON_EMIT_PP_MISC:
99 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 99 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
100 &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { 100 &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
101 DRM_ERROR("Invalid depth buffer offset\n"); 101 DRM_ERROR("Invalid depth buffer offset\n");
102 return DRM_ERR(EINVAL); 102 return -EINVAL;
103 } 103 }
104 break; 104 break;
105 105
106 case RADEON_EMIT_PP_CNTL: 106 case RADEON_EMIT_PP_CNTL:
107 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 107 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
108 &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { 108 &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
109 DRM_ERROR("Invalid colour buffer offset\n"); 109 DRM_ERROR("Invalid colour buffer offset\n");
110 return DRM_ERR(EINVAL); 110 return -EINVAL;
111 } 111 }
112 break; 112 break;
113 113
@@ -117,20 +117,20 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
117 case R200_EMIT_PP_TXOFFSET_3: 117 case R200_EMIT_PP_TXOFFSET_3:
118 case R200_EMIT_PP_TXOFFSET_4: 118 case R200_EMIT_PP_TXOFFSET_4:
119 case R200_EMIT_PP_TXOFFSET_5: 119 case R200_EMIT_PP_TXOFFSET_5:
120 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 120 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
121 &data[0])) { 121 &data[0])) {
122 DRM_ERROR("Invalid R200 texture offset\n"); 122 DRM_ERROR("Invalid R200 texture offset\n");
123 return DRM_ERR(EINVAL); 123 return -EINVAL;
124 } 124 }
125 break; 125 break;
126 126
127 case RADEON_EMIT_PP_TXFILTER_0: 127 case RADEON_EMIT_PP_TXFILTER_0:
128 case RADEON_EMIT_PP_TXFILTER_1: 128 case RADEON_EMIT_PP_TXFILTER_1:
129 case RADEON_EMIT_PP_TXFILTER_2: 129 case RADEON_EMIT_PP_TXFILTER_2:
130 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 130 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
131 &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { 131 &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
132 DRM_ERROR("Invalid R100 texture offset\n"); 132 DRM_ERROR("Invalid R100 texture offset\n");
133 return DRM_ERR(EINVAL); 133 return -EINVAL;
134 } 134 }
135 break; 135 break;
136 136
@@ -143,11 +143,11 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
143 int i; 143 int i;
144 for (i = 0; i < 5; i++) { 144 for (i = 0; i < 5; i++) {
145 if (radeon_check_and_fixup_offset(dev_priv, 145 if (radeon_check_and_fixup_offset(dev_priv,
146 filp_priv, 146 file_priv,
147 &data[i])) { 147 &data[i])) {
148 DRM_ERROR 148 DRM_ERROR
149 ("Invalid R200 cubic texture offset\n"); 149 ("Invalid R200 cubic texture offset\n");
150 return DRM_ERR(EINVAL); 150 return -EINVAL;
151 } 151 }
152 } 152 }
153 break; 153 break;
@@ -159,11 +159,11 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
159 int i; 159 int i;
160 for (i = 0; i < 5; i++) { 160 for (i = 0; i < 5; i++) {
161 if (radeon_check_and_fixup_offset(dev_priv, 161 if (radeon_check_and_fixup_offset(dev_priv,
162 filp_priv, 162 file_priv,
163 &data[i])) { 163 &data[i])) {
164 DRM_ERROR 164 DRM_ERROR
165 ("Invalid R100 cubic texture offset\n"); 165 ("Invalid R100 cubic texture offset\n");
166 return DRM_ERR(EINVAL); 166 return -EINVAL;
167 } 167 }
168 } 168 }
169 } 169 }
@@ -256,7 +256,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
256 256
257 default: 257 default:
258 DRM_ERROR("Unknown state packet ID %d\n", id); 258 DRM_ERROR("Unknown state packet ID %d\n", id);
259 return DRM_ERR(EINVAL); 259 return -EINVAL;
260 } 260 }
261 261
262 return 0; 262 return 0;
@@ -264,7 +264,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
264 264
265static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * 265static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
266 dev_priv, 266 dev_priv,
267 struct drm_file *filp_priv, 267 struct drm_file *file_priv,
268 drm_radeon_kcmd_buffer_t * 268 drm_radeon_kcmd_buffer_t *
269 cmdbuf, 269 cmdbuf,
270 unsigned int *cmdsz) 270 unsigned int *cmdsz)
@@ -277,12 +277,12 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
277 277
278 if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { 278 if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
279 DRM_ERROR("Not a type 3 packet\n"); 279 DRM_ERROR("Not a type 3 packet\n");
280 return DRM_ERR(EINVAL); 280 return -EINVAL;
281 } 281 }
282 282
283 if (4 * *cmdsz > cmdbuf->bufsz) { 283 if (4 * *cmdsz > cmdbuf->bufsz) {
284 DRM_ERROR("Packet size larger than size of data provided\n"); 284 DRM_ERROR("Packet size larger than size of data provided\n");
285 return DRM_ERR(EINVAL); 285 return -EINVAL;
286 } 286 }
287 287
288 switch(cmd[0] & 0xff00) { 288 switch(cmd[0] & 0xff00) {
@@ -307,7 +307,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
307 /* safe but r200 only */ 307 /* safe but r200 only */
308 if (dev_priv->microcode_version != UCODE_R200) { 308 if (dev_priv->microcode_version != UCODE_R200) {
309 DRM_ERROR("Invalid 3d packet for r100-class chip\n"); 309 DRM_ERROR("Invalid 3d packet for r100-class chip\n");
310 return DRM_ERR(EINVAL); 310 return -EINVAL;
311 } 311 }
312 break; 312 break;
313 313
@@ -317,7 +317,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
317 if (count > 18) { /* 12 arrays max */ 317 if (count > 18) { /* 12 arrays max */
318 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 318 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
319 count); 319 count);
320 return DRM_ERR(EINVAL); 320 return -EINVAL;
321 } 321 }
322 322
323 /* carefully check packet contents */ 323 /* carefully check packet contents */
@@ -326,22 +326,25 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
326 i = 2; 326 i = 2;
327 while ((k < narrays) && (i < (count + 2))) { 327 while ((k < narrays) && (i < (count + 2))) {
328 i++; /* skip attribute field */ 328 i++; /* skip attribute field */
329 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) { 329 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
330 &cmd[i])) {
330 DRM_ERROR 331 DRM_ERROR
331 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 332 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
332 k, i); 333 k, i);
333 return DRM_ERR(EINVAL); 334 return -EINVAL;
334 } 335 }
335 k++; 336 k++;
336 i++; 337 i++;
337 if (k == narrays) 338 if (k == narrays)
338 break; 339 break;
339 /* have one more to process, they come in pairs */ 340 /* have one more to process, they come in pairs */
340 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) { 341 if (radeon_check_and_fixup_offset(dev_priv,
342 file_priv, &cmd[i]))
343 {
341 DRM_ERROR 344 DRM_ERROR
342 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 345 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
343 k, i); 346 k, i);
344 return DRM_ERR(EINVAL); 347 return -EINVAL;
345 } 348 }
346 k++; 349 k++;
347 i++; 350 i++;
@@ -351,33 +354,33 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
351 DRM_ERROR 354 DRM_ERROR
352 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", 355 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
353 k, i, narrays, count + 1); 356 k, i, narrays, count + 1);
354 return DRM_ERR(EINVAL); 357 return -EINVAL;
355 } 358 }
356 break; 359 break;
357 360
358 case RADEON_3D_RNDR_GEN_INDX_PRIM: 361 case RADEON_3D_RNDR_GEN_INDX_PRIM:
359 if (dev_priv->microcode_version != UCODE_R100) { 362 if (dev_priv->microcode_version != UCODE_R100) {
360 DRM_ERROR("Invalid 3d packet for r200-class chip\n"); 363 DRM_ERROR("Invalid 3d packet for r200-class chip\n");
361 return DRM_ERR(EINVAL); 364 return -EINVAL;
362 } 365 }
363 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[1])) { 366 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
364 DRM_ERROR("Invalid rndr_gen_indx offset\n"); 367 DRM_ERROR("Invalid rndr_gen_indx offset\n");
365 return DRM_ERR(EINVAL); 368 return -EINVAL;
366 } 369 }
367 break; 370 break;
368 371
369 case RADEON_CP_INDX_BUFFER: 372 case RADEON_CP_INDX_BUFFER:
370 if (dev_priv->microcode_version != UCODE_R200) { 373 if (dev_priv->microcode_version != UCODE_R200) {
371 DRM_ERROR("Invalid 3d packet for r100-class chip\n"); 374 DRM_ERROR("Invalid 3d packet for r100-class chip\n");
372 return DRM_ERR(EINVAL); 375 return -EINVAL;
373 } 376 }
374 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 377 if ((cmd[1] & 0x8000ffff) != 0x80000810) {
375 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 378 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
376 return DRM_ERR(EINVAL); 379 return -EINVAL;
377 } 380 }
378 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[2])) { 381 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
379 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 382 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
380 return DRM_ERR(EINVAL); 383 return -EINVAL;
381 } 384 }
382 break; 385 break;
383 386
@@ -389,9 +392,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
389 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 392 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
390 offset = cmd[2] << 10; 393 offset = cmd[2] << 10;
391 if (radeon_check_and_fixup_offset 394 if (radeon_check_and_fixup_offset
392 (dev_priv, filp_priv, &offset)) { 395 (dev_priv, file_priv, &offset)) {
393 DRM_ERROR("Invalid first packet offset\n"); 396 DRM_ERROR("Invalid first packet offset\n");
394 return DRM_ERR(EINVAL); 397 return -EINVAL;
395 } 398 }
396 cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; 399 cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
397 } 400 }
@@ -400,9 +403,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
400 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 403 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
401 offset = cmd[3] << 10; 404 offset = cmd[3] << 10;
402 if (radeon_check_and_fixup_offset 405 if (radeon_check_and_fixup_offset
403 (dev_priv, filp_priv, &offset)) { 406 (dev_priv, file_priv, &offset)) {
404 DRM_ERROR("Invalid second packet offset\n"); 407 DRM_ERROR("Invalid second packet offset\n");
405 return DRM_ERR(EINVAL); 408 return -EINVAL;
406 } 409 }
407 cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; 410 cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
408 } 411 }
@@ -410,7 +413,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
410 413
411 default: 414 default:
412 DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); 415 DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
413 return DRM_ERR(EINVAL); 416 return -EINVAL;
414 } 417 }
415 418
416 return 0; 419 return 0;
@@ -439,7 +442,7 @@ static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
439/* Emit 1.1 state 442/* Emit 1.1 state
440 */ 443 */
441static int radeon_emit_state(drm_radeon_private_t * dev_priv, 444static int radeon_emit_state(drm_radeon_private_t * dev_priv,
442 struct drm_file * filp_priv, 445 struct drm_file *file_priv,
443 drm_radeon_context_regs_t * ctx, 446 drm_radeon_context_regs_t * ctx,
444 drm_radeon_texture_regs_t * tex, 447 drm_radeon_texture_regs_t * tex,
445 unsigned int dirty) 448 unsigned int dirty)
@@ -448,16 +451,16 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
448 DRM_DEBUG("dirty=0x%08x\n", dirty); 451 DRM_DEBUG("dirty=0x%08x\n", dirty);
449 452
450 if (dirty & RADEON_UPLOAD_CONTEXT) { 453 if (dirty & RADEON_UPLOAD_CONTEXT) {
451 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 454 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
452 &ctx->rb3d_depthoffset)) { 455 &ctx->rb3d_depthoffset)) {
453 DRM_ERROR("Invalid depth buffer offset\n"); 456 DRM_ERROR("Invalid depth buffer offset\n");
454 return DRM_ERR(EINVAL); 457 return -EINVAL;
455 } 458 }
456 459
457 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 460 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
458 &ctx->rb3d_coloroffset)) { 461 &ctx->rb3d_coloroffset)) {
459 DRM_ERROR("Invalid depth buffer offset\n"); 462 DRM_ERROR("Invalid depth buffer offset\n");
460 return DRM_ERR(EINVAL); 463 return -EINVAL;
461 } 464 }
462 465
463 BEGIN_RING(14); 466 BEGIN_RING(14);
@@ -543,10 +546,10 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
543 } 546 }
544 547
545 if (dirty & RADEON_UPLOAD_TEX0) { 548 if (dirty & RADEON_UPLOAD_TEX0) {
546 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 549 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
547 &tex[0].pp_txoffset)) { 550 &tex[0].pp_txoffset)) {
548 DRM_ERROR("Invalid texture offset for unit 0\n"); 551 DRM_ERROR("Invalid texture offset for unit 0\n");
549 return DRM_ERR(EINVAL); 552 return -EINVAL;
550 } 553 }
551 554
552 BEGIN_RING(9); 555 BEGIN_RING(9);
@@ -563,10 +566,10 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
563 } 566 }
564 567
565 if (dirty & RADEON_UPLOAD_TEX1) { 568 if (dirty & RADEON_UPLOAD_TEX1) {
566 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 569 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
567 &tex[1].pp_txoffset)) { 570 &tex[1].pp_txoffset)) {
568 DRM_ERROR("Invalid texture offset for unit 1\n"); 571 DRM_ERROR("Invalid texture offset for unit 1\n");
569 return DRM_ERR(EINVAL); 572 return -EINVAL;
570 } 573 }
571 574
572 BEGIN_RING(9); 575 BEGIN_RING(9);
@@ -583,10 +586,10 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
583 } 586 }
584 587
585 if (dirty & RADEON_UPLOAD_TEX2) { 588 if (dirty & RADEON_UPLOAD_TEX2) {
586 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 589 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
587 &tex[2].pp_txoffset)) { 590 &tex[2].pp_txoffset)) {
588 DRM_ERROR("Invalid texture offset for unit 2\n"); 591 DRM_ERROR("Invalid texture offset for unit 2\n");
589 return DRM_ERR(EINVAL); 592 return -EINVAL;
590 } 593 }
591 594
592 BEGIN_RING(9); 595 BEGIN_RING(9);
@@ -608,7 +611,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
608/* Emit 1.2 state 611/* Emit 1.2 state
609 */ 612 */
610static int radeon_emit_state2(drm_radeon_private_t * dev_priv, 613static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
611 struct drm_file * filp_priv, 614 struct drm_file *file_priv,
612 drm_radeon_state_t * state) 615 drm_radeon_state_t * state)
613{ 616{
614 RING_LOCALS; 617 RING_LOCALS;
@@ -621,7 +624,7 @@ static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
621 ADVANCE_RING(); 624 ADVANCE_RING();
622 } 625 }
623 626
624 return radeon_emit_state(dev_priv, filp_priv, &state->context, 627 return radeon_emit_state(dev_priv, file_priv, &state->context,
625 state->tex, state->dirty); 628 state->tex, state->dirty);
626} 629}
627 630
@@ -1646,13 +1649,12 @@ static void radeon_cp_dispatch_indices(struct drm_device * dev,
1646 1649
1647#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE 1650#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
1648 1651
1649static int radeon_cp_dispatch_texture(DRMFILE filp, 1652static int radeon_cp_dispatch_texture(struct drm_device * dev,
1650 struct drm_device * dev, 1653 struct drm_file *file_priv,
1651 drm_radeon_texture_t * tex, 1654 drm_radeon_texture_t * tex,
1652 drm_radeon_tex_image_t * image) 1655 drm_radeon_tex_image_t * image)
1653{ 1656{
1654 drm_radeon_private_t *dev_priv = dev->dev_private; 1657 drm_radeon_private_t *dev_priv = dev->dev_private;
1655 struct drm_file *filp_priv;
1656 struct drm_buf *buf; 1658 struct drm_buf *buf;
1657 u32 format; 1659 u32 format;
1658 u32 *buffer; 1660 u32 *buffer;
@@ -1664,11 +1666,9 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1664 u32 offset; 1666 u32 offset;
1665 RING_LOCALS; 1667 RING_LOCALS;
1666 1668
1667 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); 1669 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
1668
1669 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex->offset)) {
1670 DRM_ERROR("Invalid destination offset\n"); 1670 DRM_ERROR("Invalid destination offset\n");
1671 return DRM_ERR(EINVAL); 1671 return -EINVAL;
1672 } 1672 }
1673 1673
1674 dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; 1674 dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
@@ -1711,11 +1711,11 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1711 break; 1711 break;
1712 default: 1712 default:
1713 DRM_ERROR("invalid texture format %d\n", tex->format); 1713 DRM_ERROR("invalid texture format %d\n", tex->format);
1714 return DRM_ERR(EINVAL); 1714 return -EINVAL;
1715 } 1715 }
1716 spitch = blit_width >> 6; 1716 spitch = blit_width >> 6;
1717 if (spitch == 0 && image->height > 1) 1717 if (spitch == 0 && image->height > 1)
1718 return DRM_ERR(EINVAL); 1718 return -EINVAL;
1719 1719
1720 texpitch = tex->pitch; 1720 texpitch = tex->pitch;
1721 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { 1721 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
@@ -1760,8 +1760,8 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1760 if (!buf) { 1760 if (!buf) {
1761 DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n"); 1761 DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
1762 if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image))) 1762 if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
1763 return DRM_ERR(EFAULT); 1763 return -EFAULT;
1764 return DRM_ERR(EAGAIN); 1764 return -EAGAIN;
1765 } 1765 }
1766 1766
1767 /* Dispatch the indirect buffer. 1767 /* Dispatch the indirect buffer.
@@ -1774,7 +1774,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1774 do { \ 1774 do { \
1775 if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\ 1775 if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
1776 DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ 1776 DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
1777 return DRM_ERR(EFAULT); \ 1777 return -EFAULT; \
1778 } \ 1778 } \
1779 } while(0) 1779 } while(0)
1780 1780
@@ -1841,7 +1841,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1841 } 1841 }
1842 1842
1843#undef RADEON_COPY_MT 1843#undef RADEON_COPY_MT
1844 buf->filp = filp; 1844 buf->file_priv = file_priv;
1845 buf->used = size; 1845 buf->used = size;
1846 offset = dev_priv->gart_buffers_offset + buf->offset; 1846 offset = dev_priv->gart_buffers_offset + buf->offset;
1847 BEGIN_RING(9); 1847 BEGIN_RING(9);
@@ -1861,6 +1861,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1861 OUT_RING((image->width << 16) | height); 1861 OUT_RING((image->width << 16) | height);
1862 RADEON_WAIT_UNTIL_2D_IDLE(); 1862 RADEON_WAIT_UNTIL_2D_IDLE();
1863 ADVANCE_RING(); 1863 ADVANCE_RING();
1864 COMMIT_RING();
1864 1865
1865 radeon_cp_discard_buffer(dev, buf); 1866 radeon_cp_discard_buffer(dev, buf);
1866 1867
@@ -1878,6 +1879,8 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1878 RADEON_FLUSH_CACHE(); 1879 RADEON_FLUSH_CACHE();
1879 RADEON_WAIT_UNTIL_2D_IDLE(); 1880 RADEON_WAIT_UNTIL_2D_IDLE();
1880 ADVANCE_RING(); 1881 ADVANCE_RING();
1882 COMMIT_RING();
1883
1881 return 0; 1884 return 0;
1882} 1885}
1883 1886
@@ -1929,7 +1932,8 @@ static void radeon_apply_surface_regs(int surf_index,
1929 * not always be available. 1932 * not always be available.
1930 */ 1933 */
1931static int alloc_surface(drm_radeon_surface_alloc_t *new, 1934static int alloc_surface(drm_radeon_surface_alloc_t *new,
1932 drm_radeon_private_t *dev_priv, DRMFILE filp) 1935 drm_radeon_private_t *dev_priv,
1936 struct drm_file *file_priv)
1933{ 1937{
1934 struct radeon_virt_surface *s; 1938 struct radeon_virt_surface *s;
1935 int i; 1939 int i;
@@ -1959,7 +1963,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
1959 1963
1960 /* find a virtual surface */ 1964 /* find a virtual surface */
1961 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) 1965 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
1962 if (dev_priv->virt_surfaces[i].filp == 0) 1966 if (dev_priv->virt_surfaces[i].file_priv == 0)
1963 break; 1967 break;
1964 if (i == 2 * RADEON_MAX_SURFACES) { 1968 if (i == 2 * RADEON_MAX_SURFACES) {
1965 return -1; 1969 return -1;
@@ -1977,7 +1981,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
1977 s->lower = new_lower; 1981 s->lower = new_lower;
1978 s->upper = new_upper; 1982 s->upper = new_upper;
1979 s->flags = new->flags; 1983 s->flags = new->flags;
1980 s->filp = filp; 1984 s->file_priv = file_priv;
1981 dev_priv->surfaces[i].refcount++; 1985 dev_priv->surfaces[i].refcount++;
1982 dev_priv->surfaces[i].lower = s->lower; 1986 dev_priv->surfaces[i].lower = s->lower;
1983 radeon_apply_surface_regs(s->surface_index, dev_priv); 1987 radeon_apply_surface_regs(s->surface_index, dev_priv);
@@ -1993,7 +1997,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
1993 s->lower = new_lower; 1997 s->lower = new_lower;
1994 s->upper = new_upper; 1998 s->upper = new_upper;
1995 s->flags = new->flags; 1999 s->flags = new->flags;
1996 s->filp = filp; 2000 s->file_priv = file_priv;
1997 dev_priv->surfaces[i].refcount++; 2001 dev_priv->surfaces[i].refcount++;
1998 dev_priv->surfaces[i].upper = s->upper; 2002 dev_priv->surfaces[i].upper = s->upper;
1999 radeon_apply_surface_regs(s->surface_index, dev_priv); 2003 radeon_apply_surface_regs(s->surface_index, dev_priv);
@@ -2009,7 +2013,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
2009 s->lower = new_lower; 2013 s->lower = new_lower;
2010 s->upper = new_upper; 2014 s->upper = new_upper;
2011 s->flags = new->flags; 2015 s->flags = new->flags;
2012 s->filp = filp; 2016 s->file_priv = file_priv;
2013 dev_priv->surfaces[i].refcount = 1; 2017 dev_priv->surfaces[i].refcount = 1;
2014 dev_priv->surfaces[i].lower = s->lower; 2018 dev_priv->surfaces[i].lower = s->lower;
2015 dev_priv->surfaces[i].upper = s->upper; 2019 dev_priv->surfaces[i].upper = s->upper;
@@ -2023,7 +2027,8 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
2023 return -1; 2027 return -1;
2024} 2028}
2025 2029
2026static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv, 2030static int free_surface(struct drm_file *file_priv,
2031 drm_radeon_private_t * dev_priv,
2027 int lower) 2032 int lower)
2028{ 2033{
2029 struct radeon_virt_surface *s; 2034 struct radeon_virt_surface *s;
@@ -2031,8 +2036,9 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
2031 /* find the virtual surface */ 2036 /* find the virtual surface */
2032 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) { 2037 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
2033 s = &(dev_priv->virt_surfaces[i]); 2038 s = &(dev_priv->virt_surfaces[i]);
2034 if (s->filp) { 2039 if (s->file_priv) {
2035 if ((lower == s->lower) && (filp == s->filp)) { 2040 if ((lower == s->lower) && (file_priv == s->file_priv))
2041 {
2036 if (dev_priv->surfaces[s->surface_index]. 2042 if (dev_priv->surfaces[s->surface_index].
2037 lower == s->lower) 2043 lower == s->lower)
2038 dev_priv->surfaces[s->surface_index]. 2044 dev_priv->surfaces[s->surface_index].
@@ -2048,7 +2054,7 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
2048 refcount == 0) 2054 refcount == 0)
2049 dev_priv->surfaces[s->surface_index]. 2055 dev_priv->surfaces[s->surface_index].
2050 flags = 0; 2056 flags = 0;
2051 s->filp = NULL; 2057 s->file_priv = NULL;
2052 radeon_apply_surface_regs(s->surface_index, 2058 radeon_apply_surface_regs(s->surface_index,
2053 dev_priv); 2059 dev_priv);
2054 return 0; 2060 return 0;
@@ -2058,13 +2064,13 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
2058 return 1; 2064 return 1;
2059} 2065}
2060 2066
2061static void radeon_surfaces_release(DRMFILE filp, 2067static void radeon_surfaces_release(struct drm_file *file_priv,
2062 drm_radeon_private_t * dev_priv) 2068 drm_radeon_private_t * dev_priv)
2063{ 2069{
2064 int i; 2070 int i;
2065 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) { 2071 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
2066 if (dev_priv->virt_surfaces[i].filp == filp) 2072 if (dev_priv->virt_surfaces[i].file_priv == file_priv)
2067 free_surface(filp, dev_priv, 2073 free_surface(file_priv, dev_priv,
2068 dev_priv->virt_surfaces[i].lower); 2074 dev_priv->virt_surfaces[i].lower);
2069 } 2075 }
2070} 2076}
@@ -2072,61 +2078,48 @@ static void radeon_surfaces_release(DRMFILE filp,
2072/* ================================================================ 2078/* ================================================================
2073 * IOCTL functions 2079 * IOCTL functions
2074 */ 2080 */
2075static int radeon_surface_alloc(DRM_IOCTL_ARGS) 2081static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
2076{ 2082{
2077 DRM_DEVICE;
2078 drm_radeon_private_t *dev_priv = dev->dev_private; 2083 drm_radeon_private_t *dev_priv = dev->dev_private;
2079 drm_radeon_surface_alloc_t alloc; 2084 drm_radeon_surface_alloc_t *alloc = data;
2080 2085
2081 DRM_COPY_FROM_USER_IOCTL(alloc, 2086 if (alloc_surface(alloc, dev_priv, file_priv) == -1)
2082 (drm_radeon_surface_alloc_t __user *) data, 2087 return -EINVAL;
2083 sizeof(alloc));
2084
2085 if (alloc_surface(&alloc, dev_priv, filp) == -1)
2086 return DRM_ERR(EINVAL);
2087 else 2088 else
2088 return 0; 2089 return 0;
2089} 2090}
2090 2091
2091static int radeon_surface_free(DRM_IOCTL_ARGS) 2092static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
2092{ 2093{
2093 DRM_DEVICE;
2094 drm_radeon_private_t *dev_priv = dev->dev_private; 2094 drm_radeon_private_t *dev_priv = dev->dev_private;
2095 drm_radeon_surface_free_t memfree; 2095 drm_radeon_surface_free_t *memfree = data;
2096
2097 DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *) data,
2098 sizeof(memfree));
2099 2096
2100 if (free_surface(filp, dev_priv, memfree.address)) 2097 if (free_surface(file_priv, dev_priv, memfree->address))
2101 return DRM_ERR(EINVAL); 2098 return -EINVAL;
2102 else 2099 else
2103 return 0; 2100 return 0;
2104} 2101}
2105 2102
2106static int radeon_cp_clear(DRM_IOCTL_ARGS) 2103static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
2107{ 2104{
2108 DRM_DEVICE;
2109 drm_radeon_private_t *dev_priv = dev->dev_private; 2105 drm_radeon_private_t *dev_priv = dev->dev_private;
2110 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2106 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2111 drm_radeon_clear_t clear; 2107 drm_radeon_clear_t *clear = data;
2112 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; 2108 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
2113 DRM_DEBUG("\n"); 2109 DRM_DEBUG("\n");
2114 2110
2115 LOCK_TEST_WITH_RETURN(dev, filp); 2111 LOCK_TEST_WITH_RETURN(dev, file_priv);
2116
2117 DRM_COPY_FROM_USER_IOCTL(clear, (drm_radeon_clear_t __user *) data,
2118 sizeof(clear));
2119 2112
2120 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2113 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2121 2114
2122 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) 2115 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
2123 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; 2116 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
2124 2117
2125 if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes, 2118 if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
2126 sarea_priv->nbox * sizeof(depth_boxes[0]))) 2119 sarea_priv->nbox * sizeof(depth_boxes[0])))
2127 return DRM_ERR(EFAULT); 2120 return -EFAULT;
2128 2121
2129 radeon_cp_dispatch_clear(dev, &clear, depth_boxes); 2122 radeon_cp_dispatch_clear(dev, clear, depth_boxes);
2130 2123
2131 COMMIT_RING(); 2124 COMMIT_RING();
2132 return 0; 2125 return 0;
@@ -2162,13 +2155,12 @@ static int radeon_do_init_pageflip(struct drm_device * dev)
2162/* Swapping and flipping are different operations, need different ioctls. 2155/* Swapping and flipping are different operations, need different ioctls.
2163 * They can & should be intermixed to support multiple 3d windows. 2156 * They can & should be intermixed to support multiple 3d windows.
2164 */ 2157 */
2165static int radeon_cp_flip(DRM_IOCTL_ARGS) 2158static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
2166{ 2159{
2167 DRM_DEVICE;
2168 drm_radeon_private_t *dev_priv = dev->dev_private; 2160 drm_radeon_private_t *dev_priv = dev->dev_private;
2169 DRM_DEBUG("\n"); 2161 DRM_DEBUG("\n");
2170 2162
2171 LOCK_TEST_WITH_RETURN(dev, filp); 2163 LOCK_TEST_WITH_RETURN(dev, file_priv);
2172 2164
2173 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2165 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2174 2166
@@ -2181,14 +2173,13 @@ static int radeon_cp_flip(DRM_IOCTL_ARGS)
2181 return 0; 2173 return 0;
2182} 2174}
2183 2175
2184static int radeon_cp_swap(DRM_IOCTL_ARGS) 2176static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
2185{ 2177{
2186 DRM_DEVICE;
2187 drm_radeon_private_t *dev_priv = dev->dev_private; 2178 drm_radeon_private_t *dev_priv = dev->dev_private;
2188 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2179 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2189 DRM_DEBUG("\n"); 2180 DRM_DEBUG("\n");
2190 2181
2191 LOCK_TEST_WITH_RETURN(dev, filp); 2182 LOCK_TEST_WITH_RETURN(dev, file_priv);
2192 2183
2193 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2184 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2194 2185
@@ -2202,64 +2193,57 @@ static int radeon_cp_swap(DRM_IOCTL_ARGS)
2202 return 0; 2193 return 0;
2203} 2194}
2204 2195
2205static int radeon_cp_vertex(DRM_IOCTL_ARGS) 2196static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
2206{ 2197{
2207 DRM_DEVICE;
2208 drm_radeon_private_t *dev_priv = dev->dev_private; 2198 drm_radeon_private_t *dev_priv = dev->dev_private;
2209 struct drm_file *filp_priv;
2210 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2199 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2211 struct drm_device_dma *dma = dev->dma; 2200 struct drm_device_dma *dma = dev->dma;
2212 struct drm_buf *buf; 2201 struct drm_buf *buf;
2213 drm_radeon_vertex_t vertex; 2202 drm_radeon_vertex_t *vertex = data;
2214 drm_radeon_tcl_prim_t prim; 2203 drm_radeon_tcl_prim_t prim;
2215 2204
2216 LOCK_TEST_WITH_RETURN(dev, filp); 2205 LOCK_TEST_WITH_RETURN(dev, file_priv);
2217
2218 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
2219
2220 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
2221 sizeof(vertex));
2222 2206
2223 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", 2207 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
2224 DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard); 2208 DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
2225 2209
2226 if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { 2210 if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
2227 DRM_ERROR("buffer index %d (of %d max)\n", 2211 DRM_ERROR("buffer index %d (of %d max)\n",
2228 vertex.idx, dma->buf_count - 1); 2212 vertex->idx, dma->buf_count - 1);
2229 return DRM_ERR(EINVAL); 2213 return -EINVAL;
2230 } 2214 }
2231 if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { 2215 if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
2232 DRM_ERROR("buffer prim %d\n", vertex.prim); 2216 DRM_ERROR("buffer prim %d\n", vertex->prim);
2233 return DRM_ERR(EINVAL); 2217 return -EINVAL;
2234 } 2218 }
2235 2219
2236 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2220 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2237 VB_AGE_TEST_WITH_RETURN(dev_priv); 2221 VB_AGE_TEST_WITH_RETURN(dev_priv);
2238 2222
2239 buf = dma->buflist[vertex.idx]; 2223 buf = dma->buflist[vertex->idx];
2240 2224
2241 if (buf->filp != filp) { 2225 if (buf->file_priv != file_priv) {
2242 DRM_ERROR("process %d using buffer owned by %p\n", 2226 DRM_ERROR("process %d using buffer owned by %p\n",
2243 DRM_CURRENTPID, buf->filp); 2227 DRM_CURRENTPID, buf->file_priv);
2244 return DRM_ERR(EINVAL); 2228 return -EINVAL;
2245 } 2229 }
2246 if (buf->pending) { 2230 if (buf->pending) {
2247 DRM_ERROR("sending pending buffer %d\n", vertex.idx); 2231 DRM_ERROR("sending pending buffer %d\n", vertex->idx);
2248 return DRM_ERR(EINVAL); 2232 return -EINVAL;
2249 } 2233 }
2250 2234
2251 /* Build up a prim_t record: 2235 /* Build up a prim_t record:
2252 */ 2236 */
2253 if (vertex.count) { 2237 if (vertex->count) {
2254 buf->used = vertex.count; /* not used? */ 2238 buf->used = vertex->count; /* not used? */
2255 2239
2256 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { 2240 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
2257 if (radeon_emit_state(dev_priv, filp_priv, 2241 if (radeon_emit_state(dev_priv, file_priv,
2258 &sarea_priv->context_state, 2242 &sarea_priv->context_state,
2259 sarea_priv->tex_state, 2243 sarea_priv->tex_state,
2260 sarea_priv->dirty)) { 2244 sarea_priv->dirty)) {
2261 DRM_ERROR("radeon_emit_state failed\n"); 2245 DRM_ERROR("radeon_emit_state failed\n");
2262 return DRM_ERR(EINVAL); 2246 return -EINVAL;
2263 } 2247 }
2264 2248
2265 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | 2249 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
@@ -2269,15 +2253,15 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
2269 } 2253 }
2270 2254
2271 prim.start = 0; 2255 prim.start = 0;
2272 prim.finish = vertex.count; /* unused */ 2256 prim.finish = vertex->count; /* unused */
2273 prim.prim = vertex.prim; 2257 prim.prim = vertex->prim;
2274 prim.numverts = vertex.count; 2258 prim.numverts = vertex->count;
2275 prim.vc_format = dev_priv->sarea_priv->vc_format; 2259 prim.vc_format = dev_priv->sarea_priv->vc_format;
2276 2260
2277 radeon_cp_dispatch_vertex(dev, buf, &prim); 2261 radeon_cp_dispatch_vertex(dev, buf, &prim);
2278 } 2262 }
2279 2263
2280 if (vertex.discard) { 2264 if (vertex->discard) {
2281 radeon_cp_discard_buffer(dev, buf); 2265 radeon_cp_discard_buffer(dev, buf);
2282 } 2266 }
2283 2267
@@ -2285,74 +2269,68 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
2285 return 0; 2269 return 0;
2286} 2270}
2287 2271
2288static int radeon_cp_indices(DRM_IOCTL_ARGS) 2272static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
2289{ 2273{
2290 DRM_DEVICE;
2291 drm_radeon_private_t *dev_priv = dev->dev_private; 2274 drm_radeon_private_t *dev_priv = dev->dev_private;
2292 struct drm_file *filp_priv;
2293 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2275 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2294 struct drm_device_dma *dma = dev->dma; 2276 struct drm_device_dma *dma = dev->dma;
2295 struct drm_buf *buf; 2277 struct drm_buf *buf;
2296 drm_radeon_indices_t elts; 2278 drm_radeon_indices_t *elts = data;
2297 drm_radeon_tcl_prim_t prim; 2279 drm_radeon_tcl_prim_t prim;
2298 int count; 2280 int count;
2299 2281
2300 LOCK_TEST_WITH_RETURN(dev, filp); 2282 LOCK_TEST_WITH_RETURN(dev, file_priv);
2301
2302 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
2303
2304 DRM_COPY_FROM_USER_IOCTL(elts, (drm_radeon_indices_t __user *) data,
2305 sizeof(elts));
2306 2283
2307 DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", 2284 DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
2308 DRM_CURRENTPID, elts.idx, elts.start, elts.end, elts.discard); 2285 DRM_CURRENTPID, elts->idx, elts->start, elts->end,
2286 elts->discard);
2309 2287
2310 if (elts.idx < 0 || elts.idx >= dma->buf_count) { 2288 if (elts->idx < 0 || elts->idx >= dma->buf_count) {
2311 DRM_ERROR("buffer index %d (of %d max)\n", 2289 DRM_ERROR("buffer index %d (of %d max)\n",
2312 elts.idx, dma->buf_count - 1); 2290 elts->idx, dma->buf_count - 1);
2313 return DRM_ERR(EINVAL); 2291 return -EINVAL;
2314 } 2292 }
2315 if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { 2293 if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
2316 DRM_ERROR("buffer prim %d\n", elts.prim); 2294 DRM_ERROR("buffer prim %d\n", elts->prim);
2317 return DRM_ERR(EINVAL); 2295 return -EINVAL;
2318 } 2296 }
2319 2297
2320 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2298 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2321 VB_AGE_TEST_WITH_RETURN(dev_priv); 2299 VB_AGE_TEST_WITH_RETURN(dev_priv);
2322 2300
2323 buf = dma->buflist[elts.idx]; 2301 buf = dma->buflist[elts->idx];
2324 2302
2325 if (buf->filp != filp) { 2303 if (buf->file_priv != file_priv) {
2326 DRM_ERROR("process %d using buffer owned by %p\n", 2304 DRM_ERROR("process %d using buffer owned by %p\n",
2327 DRM_CURRENTPID, buf->filp); 2305 DRM_CURRENTPID, buf->file_priv);
2328 return DRM_ERR(EINVAL); 2306 return -EINVAL;
2329 } 2307 }
2330 if (buf->pending) { 2308 if (buf->pending) {
2331 DRM_ERROR("sending pending buffer %d\n", elts.idx); 2309 DRM_ERROR("sending pending buffer %d\n", elts->idx);
2332 return DRM_ERR(EINVAL); 2310 return -EINVAL;
2333 } 2311 }
2334 2312
2335 count = (elts.end - elts.start) / sizeof(u16); 2313 count = (elts->end - elts->start) / sizeof(u16);
2336 elts.start -= RADEON_INDEX_PRIM_OFFSET; 2314 elts->start -= RADEON_INDEX_PRIM_OFFSET;
2337 2315
2338 if (elts.start & 0x7) { 2316 if (elts->start & 0x7) {
2339 DRM_ERROR("misaligned buffer 0x%x\n", elts.start); 2317 DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
2340 return DRM_ERR(EINVAL); 2318 return -EINVAL;
2341 } 2319 }
2342 if (elts.start < buf->used) { 2320 if (elts->start < buf->used) {
2343 DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); 2321 DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
2344 return DRM_ERR(EINVAL); 2322 return -EINVAL;
2345 } 2323 }
2346 2324
2347 buf->used = elts.end; 2325 buf->used = elts->end;
2348 2326
2349 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { 2327 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
2350 if (radeon_emit_state(dev_priv, filp_priv, 2328 if (radeon_emit_state(dev_priv, file_priv,
2351 &sarea_priv->context_state, 2329 &sarea_priv->context_state,
2352 sarea_priv->tex_state, 2330 sarea_priv->tex_state,
2353 sarea_priv->dirty)) { 2331 sarea_priv->dirty)) {
2354 DRM_ERROR("radeon_emit_state failed\n"); 2332 DRM_ERROR("radeon_emit_state failed\n");
2355 return DRM_ERR(EINVAL); 2333 return -EINVAL;
2356 } 2334 }
2357 2335
2358 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | 2336 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
@@ -2363,15 +2341,15 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
2363 2341
2364 /* Build up a prim_t record: 2342 /* Build up a prim_t record:
2365 */ 2343 */
2366 prim.start = elts.start; 2344 prim.start = elts->start;
2367 prim.finish = elts.end; 2345 prim.finish = elts->end;
2368 prim.prim = elts.prim; 2346 prim.prim = elts->prim;
2369 prim.offset = 0; /* offset from start of dma buffers */ 2347 prim.offset = 0; /* offset from start of dma buffers */
2370 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ 2348 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
2371 prim.vc_format = dev_priv->sarea_priv->vc_format; 2349 prim.vc_format = dev_priv->sarea_priv->vc_format;
2372 2350
2373 radeon_cp_dispatch_indices(dev, buf, &prim); 2351 radeon_cp_dispatch_indices(dev, buf, &prim);
2374 if (elts.discard) { 2352 if (elts->discard) {
2375 radeon_cp_discard_buffer(dev, buf); 2353 radeon_cp_discard_buffer(dev, buf);
2376 } 2354 }
2377 2355
@@ -2379,52 +2357,43 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
2379 return 0; 2357 return 0;
2380} 2358}
2381 2359
2382static int radeon_cp_texture(DRM_IOCTL_ARGS) 2360static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
2383{ 2361{
2384 DRM_DEVICE;
2385 drm_radeon_private_t *dev_priv = dev->dev_private; 2362 drm_radeon_private_t *dev_priv = dev->dev_private;
2386 drm_radeon_texture_t tex; 2363 drm_radeon_texture_t *tex = data;
2387 drm_radeon_tex_image_t image; 2364 drm_radeon_tex_image_t image;
2388 int ret; 2365 int ret;
2389 2366
2390 LOCK_TEST_WITH_RETURN(dev, filp); 2367 LOCK_TEST_WITH_RETURN(dev, file_priv);
2391
2392 DRM_COPY_FROM_USER_IOCTL(tex, (drm_radeon_texture_t __user *) data,
2393 sizeof(tex));
2394 2368
2395 if (tex.image == NULL) { 2369 if (tex->image == NULL) {
2396 DRM_ERROR("null texture image!\n"); 2370 DRM_ERROR("null texture image!\n");
2397 return DRM_ERR(EINVAL); 2371 return -EINVAL;
2398 } 2372 }
2399 2373
2400 if (DRM_COPY_FROM_USER(&image, 2374 if (DRM_COPY_FROM_USER(&image,
2401 (drm_radeon_tex_image_t __user *) tex.image, 2375 (drm_radeon_tex_image_t __user *) tex->image,
2402 sizeof(image))) 2376 sizeof(image)))
2403 return DRM_ERR(EFAULT); 2377 return -EFAULT;
2404 2378
2405 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2379 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2406 VB_AGE_TEST_WITH_RETURN(dev_priv); 2380 VB_AGE_TEST_WITH_RETURN(dev_priv);
2407 2381
2408 ret = radeon_cp_dispatch_texture(filp, dev, &tex, &image); 2382 ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
2409 2383
2410 COMMIT_RING();
2411 return ret; 2384 return ret;
2412} 2385}
2413 2386
2414static int radeon_cp_stipple(DRM_IOCTL_ARGS) 2387static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
2415{ 2388{
2416 DRM_DEVICE;
2417 drm_radeon_private_t *dev_priv = dev->dev_private; 2389 drm_radeon_private_t *dev_priv = dev->dev_private;
2418 drm_radeon_stipple_t stipple; 2390 drm_radeon_stipple_t *stipple = data;
2419 u32 mask[32]; 2391 u32 mask[32];
2420 2392
2421 LOCK_TEST_WITH_RETURN(dev, filp); 2393 LOCK_TEST_WITH_RETURN(dev, file_priv);
2422
2423 DRM_COPY_FROM_USER_IOCTL(stipple, (drm_radeon_stipple_t __user *) data,
2424 sizeof(stipple));
2425 2394
2426 if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) 2395 if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
2427 return DRM_ERR(EFAULT); 2396 return -EFAULT;
2428 2397
2429 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2398 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2430 2399
@@ -2434,52 +2403,48 @@ static int radeon_cp_stipple(DRM_IOCTL_ARGS)
2434 return 0; 2403 return 0;
2435} 2404}
2436 2405
2437static int radeon_cp_indirect(DRM_IOCTL_ARGS) 2406static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
2438{ 2407{
2439 DRM_DEVICE;
2440 drm_radeon_private_t *dev_priv = dev->dev_private; 2408 drm_radeon_private_t *dev_priv = dev->dev_private;
2441 struct drm_device_dma *dma = dev->dma; 2409 struct drm_device_dma *dma = dev->dma;
2442 struct drm_buf *buf; 2410 struct drm_buf *buf;
2443 drm_radeon_indirect_t indirect; 2411 drm_radeon_indirect_t *indirect = data;
2444 RING_LOCALS; 2412 RING_LOCALS;
2445 2413
2446 LOCK_TEST_WITH_RETURN(dev, filp); 2414 LOCK_TEST_WITH_RETURN(dev, file_priv);
2447
2448 DRM_COPY_FROM_USER_IOCTL(indirect,
2449 (drm_radeon_indirect_t __user *) data,
2450 sizeof(indirect));
2451 2415
2452 DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n", 2416 DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
2453 indirect.idx, indirect.start, indirect.end, indirect.discard); 2417 indirect->idx, indirect->start, indirect->end,
2418 indirect->discard);
2454 2419
2455 if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { 2420 if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
2456 DRM_ERROR("buffer index %d (of %d max)\n", 2421 DRM_ERROR("buffer index %d (of %d max)\n",
2457 indirect.idx, dma->buf_count - 1); 2422 indirect->idx, dma->buf_count - 1);
2458 return DRM_ERR(EINVAL); 2423 return -EINVAL;
2459 } 2424 }
2460 2425
2461 buf = dma->buflist[indirect.idx]; 2426 buf = dma->buflist[indirect->idx];
2462 2427
2463 if (buf->filp != filp) { 2428 if (buf->file_priv != file_priv) {
2464 DRM_ERROR("process %d using buffer owned by %p\n", 2429 DRM_ERROR("process %d using buffer owned by %p\n",
2465 DRM_CURRENTPID, buf->filp); 2430 DRM_CURRENTPID, buf->file_priv);
2466 return DRM_ERR(EINVAL); 2431 return -EINVAL;
2467 } 2432 }
2468 if (buf->pending) { 2433 if (buf->pending) {
2469 DRM_ERROR("sending pending buffer %d\n", indirect.idx); 2434 DRM_ERROR("sending pending buffer %d\n", indirect->idx);
2470 return DRM_ERR(EINVAL); 2435 return -EINVAL;
2471 } 2436 }
2472 2437
2473 if (indirect.start < buf->used) { 2438 if (indirect->start < buf->used) {
2474 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", 2439 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
2475 indirect.start, buf->used); 2440 indirect->start, buf->used);
2476 return DRM_ERR(EINVAL); 2441 return -EINVAL;
2477 } 2442 }
2478 2443
2479 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2444 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2480 VB_AGE_TEST_WITH_RETURN(dev_priv); 2445 VB_AGE_TEST_WITH_RETURN(dev_priv);
2481 2446
2482 buf->used = indirect.end; 2447 buf->used = indirect->end;
2483 2448
2484 /* Wait for the 3D stream to idle before the indirect buffer 2449 /* Wait for the 3D stream to idle before the indirect buffer
2485 * containing 2D acceleration commands is processed. 2450 * containing 2D acceleration commands is processed.
@@ -2494,8 +2459,8 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS)
2494 * X server. This is insecure and is thus only available to 2459 * X server. This is insecure and is thus only available to
2495 * privileged clients. 2460 * privileged clients.
2496 */ 2461 */
2497 radeon_cp_dispatch_indirect(dev, buf, indirect.start, indirect.end); 2462 radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
2498 if (indirect.discard) { 2463 if (indirect->discard) {
2499 radeon_cp_discard_buffer(dev, buf); 2464 radeon_cp_discard_buffer(dev, buf);
2500 } 2465 }
2501 2466
@@ -2503,71 +2468,64 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS)
2503 return 0; 2468 return 0;
2504} 2469}
2505 2470
2506static int radeon_cp_vertex2(DRM_IOCTL_ARGS) 2471static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
2507{ 2472{
2508 DRM_DEVICE;
2509 drm_radeon_private_t *dev_priv = dev->dev_private; 2473 drm_radeon_private_t *dev_priv = dev->dev_private;
2510 struct drm_file *filp_priv;
2511 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2474 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2512 struct drm_device_dma *dma = dev->dma; 2475 struct drm_device_dma *dma = dev->dma;
2513 struct drm_buf *buf; 2476 struct drm_buf *buf;
2514 drm_radeon_vertex2_t vertex; 2477 drm_radeon_vertex2_t *vertex = data;
2515 int i; 2478 int i;
2516 unsigned char laststate; 2479 unsigned char laststate;
2517 2480
2518 LOCK_TEST_WITH_RETURN(dev, filp); 2481 LOCK_TEST_WITH_RETURN(dev, file_priv);
2519
2520 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
2521
2522 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex2_t __user *) data,
2523 sizeof(vertex));
2524 2482
2525 DRM_DEBUG("pid=%d index=%d discard=%d\n", 2483 DRM_DEBUG("pid=%d index=%d discard=%d\n",
2526 DRM_CURRENTPID, vertex.idx, vertex.discard); 2484 DRM_CURRENTPID, vertex->idx, vertex->discard);
2527 2485
2528 if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { 2486 if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
2529 DRM_ERROR("buffer index %d (of %d max)\n", 2487 DRM_ERROR("buffer index %d (of %d max)\n",
2530 vertex.idx, dma->buf_count - 1); 2488 vertex->idx, dma->buf_count - 1);
2531 return DRM_ERR(EINVAL); 2489 return -EINVAL;
2532 } 2490 }
2533 2491
2534 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2492 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2535 VB_AGE_TEST_WITH_RETURN(dev_priv); 2493 VB_AGE_TEST_WITH_RETURN(dev_priv);
2536 2494
2537 buf = dma->buflist[vertex.idx]; 2495 buf = dma->buflist[vertex->idx];
2538 2496
2539 if (buf->filp != filp) { 2497 if (buf->file_priv != file_priv) {
2540 DRM_ERROR("process %d using buffer owned by %p\n", 2498 DRM_ERROR("process %d using buffer owned by %p\n",
2541 DRM_CURRENTPID, buf->filp); 2499 DRM_CURRENTPID, buf->file_priv);
2542 return DRM_ERR(EINVAL); 2500 return -EINVAL;
2543 } 2501 }
2544 2502
2545 if (buf->pending) { 2503 if (buf->pending) {
2546 DRM_ERROR("sending pending buffer %d\n", vertex.idx); 2504 DRM_ERROR("sending pending buffer %d\n", vertex->idx);
2547 return DRM_ERR(EINVAL); 2505 return -EINVAL;
2548 } 2506 }
2549 2507
2550 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) 2508 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
2551 return DRM_ERR(EINVAL); 2509 return -EINVAL;
2552 2510
2553 for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) { 2511 for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
2554 drm_radeon_prim_t prim; 2512 drm_radeon_prim_t prim;
2555 drm_radeon_tcl_prim_t tclprim; 2513 drm_radeon_tcl_prim_t tclprim;
2556 2514
2557 if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof(prim))) 2515 if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
2558 return DRM_ERR(EFAULT); 2516 return -EFAULT;
2559 2517
2560 if (prim.stateidx != laststate) { 2518 if (prim.stateidx != laststate) {
2561 drm_radeon_state_t state; 2519 drm_radeon_state_t state;
2562 2520
2563 if (DRM_COPY_FROM_USER(&state, 2521 if (DRM_COPY_FROM_USER(&state,
2564 &vertex.state[prim.stateidx], 2522 &vertex->state[prim.stateidx],
2565 sizeof(state))) 2523 sizeof(state)))
2566 return DRM_ERR(EFAULT); 2524 return -EFAULT;
2567 2525
2568 if (radeon_emit_state2(dev_priv, filp_priv, &state)) { 2526 if (radeon_emit_state2(dev_priv, file_priv, &state)) {
2569 DRM_ERROR("radeon_emit_state2 failed\n"); 2527 DRM_ERROR("radeon_emit_state2 failed\n");
2570 return DRM_ERR(EINVAL); 2528 return -EINVAL;
2571 } 2529 }
2572 2530
2573 laststate = prim.stateidx; 2531 laststate = prim.stateidx;
@@ -2594,7 +2552,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
2594 sarea_priv->nbox = 0; 2552 sarea_priv->nbox = 0;
2595 } 2553 }
2596 2554
2597 if (vertex.discard) { 2555 if (vertex->discard) {
2598 radeon_cp_discard_buffer(dev, buf); 2556 radeon_cp_discard_buffer(dev, buf);
2599 } 2557 }
2600 2558
@@ -2603,7 +2561,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
2603} 2561}
2604 2562
2605static int radeon_emit_packets(drm_radeon_private_t * dev_priv, 2563static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2606 struct drm_file * filp_priv, 2564 struct drm_file *file_priv,
2607 drm_radeon_cmd_header_t header, 2565 drm_radeon_cmd_header_t header,
2608 drm_radeon_kcmd_buffer_t *cmdbuf) 2566 drm_radeon_kcmd_buffer_t *cmdbuf)
2609{ 2567{
@@ -2613,19 +2571,19 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2613 RING_LOCALS; 2571 RING_LOCALS;
2614 2572
2615 if (id >= RADEON_MAX_STATE_PACKETS) 2573 if (id >= RADEON_MAX_STATE_PACKETS)
2616 return DRM_ERR(EINVAL); 2574 return -EINVAL;
2617 2575
2618 sz = packet[id].len; 2576 sz = packet[id].len;
2619 reg = packet[id].start; 2577 reg = packet[id].start;
2620 2578
2621 if (sz * sizeof(int) > cmdbuf->bufsz) { 2579 if (sz * sizeof(int) > cmdbuf->bufsz) {
2622 DRM_ERROR("Packet size provided larger than data provided\n"); 2580 DRM_ERROR("Packet size provided larger than data provided\n");
2623 return DRM_ERR(EINVAL); 2581 return -EINVAL;
2624 } 2582 }
2625 2583
2626 if (radeon_check_and_fixup_packets(dev_priv, filp_priv, id, data)) { 2584 if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
2627 DRM_ERROR("Packet verification failed\n"); 2585 DRM_ERROR("Packet verification failed\n");
2628 return DRM_ERR(EINVAL); 2586 return -EINVAL;
2629 } 2587 }
2630 2588
2631 BEGIN_RING(sz + 1); 2589 BEGIN_RING(sz + 1);
@@ -2713,7 +2671,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2713 if (!sz) 2671 if (!sz)
2714 return 0; 2672 return 0;
2715 if (sz * 4 > cmdbuf->bufsz) 2673 if (sz * 4 > cmdbuf->bufsz)
2716 return DRM_ERR(EINVAL); 2674 return -EINVAL;
2717 2675
2718 BEGIN_RING(5 + sz); 2676 BEGIN_RING(5 + sz);
2719 OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); 2677 OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
@@ -2729,7 +2687,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2729} 2687}
2730 2688
2731static int radeon_emit_packet3(struct drm_device * dev, 2689static int radeon_emit_packet3(struct drm_device * dev,
2732 struct drm_file * filp_priv, 2690 struct drm_file *file_priv,
2733 drm_radeon_kcmd_buffer_t *cmdbuf) 2691 drm_radeon_kcmd_buffer_t *cmdbuf)
2734{ 2692{
2735 drm_radeon_private_t *dev_priv = dev->dev_private; 2693 drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -2739,7 +2697,7 @@ static int radeon_emit_packet3(struct drm_device * dev,
2739 2697
2740 DRM_DEBUG("\n"); 2698 DRM_DEBUG("\n");
2741 2699
2742 if ((ret = radeon_check_and_fixup_packet3(dev_priv, filp_priv, 2700 if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
2743 cmdbuf, &cmdsz))) { 2701 cmdbuf, &cmdsz))) {
2744 DRM_ERROR("Packet verification failed\n"); 2702 DRM_ERROR("Packet verification failed\n");
2745 return ret; 2703 return ret;
@@ -2755,7 +2713,7 @@ static int radeon_emit_packet3(struct drm_device * dev,
2755} 2713}
2756 2714
2757static int radeon_emit_packet3_cliprect(struct drm_device *dev, 2715static int radeon_emit_packet3_cliprect(struct drm_device *dev,
2758 struct drm_file *filp_priv, 2716 struct drm_file *file_priv,
2759 drm_radeon_kcmd_buffer_t *cmdbuf, 2717 drm_radeon_kcmd_buffer_t *cmdbuf,
2760 int orig_nbox) 2718 int orig_nbox)
2761{ 2719{
@@ -2769,7 +2727,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
2769 2727
2770 DRM_DEBUG("\n"); 2728 DRM_DEBUG("\n");
2771 2729
2772 if ((ret = radeon_check_and_fixup_packet3(dev_priv, filp_priv, 2730 if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
2773 cmdbuf, &cmdsz))) { 2731 cmdbuf, &cmdsz))) {
2774 DRM_ERROR("Packet verification failed\n"); 2732 DRM_ERROR("Packet verification failed\n");
2775 return ret; 2733 return ret;
@@ -2781,7 +2739,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
2781 do { 2739 do {
2782 if (i < cmdbuf->nbox) { 2740 if (i < cmdbuf->nbox) {
2783 if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box))) 2741 if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
2784 return DRM_ERR(EFAULT); 2742 return -EFAULT;
2785 /* FIXME The second and subsequent times round 2743 /* FIXME The second and subsequent times round
2786 * this loop, send a WAIT_UNTIL_3D_IDLE before 2744 * this loop, send a WAIT_UNTIL_3D_IDLE before
2787 * calling emit_clip_rect(). This fixes a 2745 * calling emit_clip_rect(). This fixes a
@@ -2839,62 +2797,54 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
2839 ADVANCE_RING(); 2797 ADVANCE_RING();
2840 break; 2798 break;
2841 default: 2799 default:
2842 return DRM_ERR(EINVAL); 2800 return -EINVAL;
2843 } 2801 }
2844 2802
2845 return 0; 2803 return 0;
2846} 2804}
2847 2805
2848static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) 2806static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
2849{ 2807{
2850 DRM_DEVICE;
2851 drm_radeon_private_t *dev_priv = dev->dev_private; 2808 drm_radeon_private_t *dev_priv = dev->dev_private;
2852 struct drm_file *filp_priv;
2853 struct drm_device_dma *dma = dev->dma; 2809 struct drm_device_dma *dma = dev->dma;
2854 struct drm_buf *buf = NULL; 2810 struct drm_buf *buf = NULL;
2855 int idx; 2811 int idx;
2856 drm_radeon_kcmd_buffer_t cmdbuf; 2812 drm_radeon_kcmd_buffer_t *cmdbuf = data;
2857 drm_radeon_cmd_header_t header; 2813 drm_radeon_cmd_header_t header;
2858 int orig_nbox, orig_bufsz; 2814 int orig_nbox, orig_bufsz;
2859 char *kbuf = NULL; 2815 char *kbuf = NULL;
2860 2816
2861 LOCK_TEST_WITH_RETURN(dev, filp); 2817 LOCK_TEST_WITH_RETURN(dev, file_priv);
2862
2863 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
2864
2865 DRM_COPY_FROM_USER_IOCTL(cmdbuf,
2866 (drm_radeon_cmd_buffer_t __user *) data,
2867 sizeof(cmdbuf));
2868 2818
2869 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2819 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2870 VB_AGE_TEST_WITH_RETURN(dev_priv); 2820 VB_AGE_TEST_WITH_RETURN(dev_priv);
2871 2821
2872 if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) { 2822 if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
2873 return DRM_ERR(EINVAL); 2823 return -EINVAL;
2874 } 2824 }
2875 2825
2876 /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid 2826 /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
2877 * races between checking values and using those values in other code, 2827 * races between checking values and using those values in other code,
2878 * and simply to avoid a lot of function calls to copy in data. 2828 * and simply to avoid a lot of function calls to copy in data.
2879 */ 2829 */
2880 orig_bufsz = cmdbuf.bufsz; 2830 orig_bufsz = cmdbuf->bufsz;
2881 if (orig_bufsz != 0) { 2831 if (orig_bufsz != 0) {
2882 kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); 2832 kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER);
2883 if (kbuf == NULL) 2833 if (kbuf == NULL)
2884 return DRM_ERR(ENOMEM); 2834 return -ENOMEM;
2885 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, 2835 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
2886 cmdbuf.bufsz)) { 2836 cmdbuf->bufsz)) {
2887 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2837 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
2888 return DRM_ERR(EFAULT); 2838 return -EFAULT;
2889 } 2839 }
2890 cmdbuf.buf = kbuf; 2840 cmdbuf->buf = kbuf;
2891 } 2841 }
2892 2842
2893 orig_nbox = cmdbuf.nbox; 2843 orig_nbox = cmdbuf->nbox;
2894 2844
2895 if (dev_priv->microcode_version == UCODE_R300) { 2845 if (dev_priv->microcode_version == UCODE_R300) {
2896 int temp; 2846 int temp;
2897 temp = r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf); 2847 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
2898 2848
2899 if (orig_bufsz != 0) 2849 if (orig_bufsz != 0)
2900 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2850 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
@@ -2903,17 +2853,17 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2903 } 2853 }
2904 2854
2905 /* microcode_version != r300 */ 2855 /* microcode_version != r300 */
2906 while (cmdbuf.bufsz >= sizeof(header)) { 2856 while (cmdbuf->bufsz >= sizeof(header)) {
2907 2857
2908 header.i = *(int *)cmdbuf.buf; 2858 header.i = *(int *)cmdbuf->buf;
2909 cmdbuf.buf += sizeof(header); 2859 cmdbuf->buf += sizeof(header);
2910 cmdbuf.bufsz -= sizeof(header); 2860 cmdbuf->bufsz -= sizeof(header);
2911 2861
2912 switch (header.header.cmd_type) { 2862 switch (header.header.cmd_type) {
2913 case RADEON_CMD_PACKET: 2863 case RADEON_CMD_PACKET:
2914 DRM_DEBUG("RADEON_CMD_PACKET\n"); 2864 DRM_DEBUG("RADEON_CMD_PACKET\n");
2915 if (radeon_emit_packets 2865 if (radeon_emit_packets
2916 (dev_priv, filp_priv, header, &cmdbuf)) { 2866 (dev_priv, file_priv, header, cmdbuf)) {
2917 DRM_ERROR("radeon_emit_packets failed\n"); 2867 DRM_ERROR("radeon_emit_packets failed\n");
2918 goto err; 2868 goto err;
2919 } 2869 }
@@ -2921,7 +2871,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2921 2871
2922 case RADEON_CMD_SCALARS: 2872 case RADEON_CMD_SCALARS:
2923 DRM_DEBUG("RADEON_CMD_SCALARS\n"); 2873 DRM_DEBUG("RADEON_CMD_SCALARS\n");
2924 if (radeon_emit_scalars(dev_priv, header, &cmdbuf)) { 2874 if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
2925 DRM_ERROR("radeon_emit_scalars failed\n"); 2875 DRM_ERROR("radeon_emit_scalars failed\n");
2926 goto err; 2876 goto err;
2927 } 2877 }
@@ -2929,7 +2879,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2929 2879
2930 case RADEON_CMD_VECTORS: 2880 case RADEON_CMD_VECTORS:
2931 DRM_DEBUG("RADEON_CMD_VECTORS\n"); 2881 DRM_DEBUG("RADEON_CMD_VECTORS\n");
2932 if (radeon_emit_vectors(dev_priv, header, &cmdbuf)) { 2882 if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
2933 DRM_ERROR("radeon_emit_vectors failed\n"); 2883 DRM_ERROR("radeon_emit_vectors failed\n");
2934 goto err; 2884 goto err;
2935 } 2885 }
@@ -2945,9 +2895,10 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2945 } 2895 }
2946 2896
2947 buf = dma->buflist[idx]; 2897 buf = dma->buflist[idx];
2948 if (buf->filp != filp || buf->pending) { 2898 if (buf->file_priv != file_priv || buf->pending) {
2949 DRM_ERROR("bad buffer %p %p %d\n", 2899 DRM_ERROR("bad buffer %p %p %d\n",
2950 buf->filp, filp, buf->pending); 2900 buf->file_priv, file_priv,
2901 buf->pending);
2951 goto err; 2902 goto err;
2952 } 2903 }
2953 2904
@@ -2956,7 +2907,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2956 2907
2957 case RADEON_CMD_PACKET3: 2908 case RADEON_CMD_PACKET3:
2958 DRM_DEBUG("RADEON_CMD_PACKET3\n"); 2909 DRM_DEBUG("RADEON_CMD_PACKET3\n");
2959 if (radeon_emit_packet3(dev, filp_priv, &cmdbuf)) { 2910 if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
2960 DRM_ERROR("radeon_emit_packet3 failed\n"); 2911 DRM_ERROR("radeon_emit_packet3 failed\n");
2961 goto err; 2912 goto err;
2962 } 2913 }
@@ -2965,7 +2916,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2965 case RADEON_CMD_PACKET3_CLIP: 2916 case RADEON_CMD_PACKET3_CLIP:
2966 DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n"); 2917 DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
2967 if (radeon_emit_packet3_cliprect 2918 if (radeon_emit_packet3_cliprect
2968 (dev, filp_priv, &cmdbuf, orig_nbox)) { 2919 (dev, file_priv, cmdbuf, orig_nbox)) {
2969 DRM_ERROR("radeon_emit_packet3_clip failed\n"); 2920 DRM_ERROR("radeon_emit_packet3_clip failed\n");
2970 goto err; 2921 goto err;
2971 } 2922 }
@@ -2973,7 +2924,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2973 2924
2974 case RADEON_CMD_SCALARS2: 2925 case RADEON_CMD_SCALARS2:
2975 DRM_DEBUG("RADEON_CMD_SCALARS2\n"); 2926 DRM_DEBUG("RADEON_CMD_SCALARS2\n");
2976 if (radeon_emit_scalars2(dev_priv, header, &cmdbuf)) { 2927 if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
2977 DRM_ERROR("radeon_emit_scalars2 failed\n"); 2928 DRM_ERROR("radeon_emit_scalars2 failed\n");
2978 goto err; 2929 goto err;
2979 } 2930 }
@@ -2988,7 +2939,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2988 break; 2939 break;
2989 case RADEON_CMD_VECLINEAR: 2940 case RADEON_CMD_VECLINEAR:
2990 DRM_DEBUG("RADEON_CMD_VECLINEAR\n"); 2941 DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
2991 if (radeon_emit_veclinear(dev_priv, header, &cmdbuf)) { 2942 if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
2992 DRM_ERROR("radeon_emit_veclinear failed\n"); 2943 DRM_ERROR("radeon_emit_veclinear failed\n");
2993 goto err; 2944 goto err;
2994 } 2945 }
@@ -2997,7 +2948,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2997 default: 2948 default:
2998 DRM_ERROR("bad cmd_type %d at %p\n", 2949 DRM_ERROR("bad cmd_type %d at %p\n",
2999 header.header.cmd_type, 2950 header.header.cmd_type,
3000 cmdbuf.buf - sizeof(header)); 2951 cmdbuf->buf - sizeof(header));
3001 goto err; 2952 goto err;
3002 } 2953 }
3003 } 2954 }
@@ -3012,22 +2963,18 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
3012 err: 2963 err:
3013 if (orig_bufsz != 0) 2964 if (orig_bufsz != 0)
3014 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2965 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
3015 return DRM_ERR(EINVAL); 2966 return -EINVAL;
3016} 2967}
3017 2968
3018static int radeon_cp_getparam(DRM_IOCTL_ARGS) 2969static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
3019{ 2970{
3020 DRM_DEVICE;
3021 drm_radeon_private_t *dev_priv = dev->dev_private; 2971 drm_radeon_private_t *dev_priv = dev->dev_private;
3022 drm_radeon_getparam_t param; 2972 drm_radeon_getparam_t *param = data;
3023 int value; 2973 int value;
3024 2974
3025 DRM_COPY_FROM_USER_IOCTL(param, (drm_radeon_getparam_t __user *) data,
3026 sizeof(param));
3027
3028 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 2975 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
3029 2976
3030 switch (param.param) { 2977 switch (param->param) {
3031 case RADEON_PARAM_GART_BUFFER_OFFSET: 2978 case RADEON_PARAM_GART_BUFFER_OFFSET:
3032 value = dev_priv->gart_buffers_offset; 2979 value = dev_priv->gart_buffers_offset;
3033 break; 2980 break;
@@ -3074,7 +3021,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
3074 break; 3021 break;
3075 case RADEON_PARAM_SCRATCH_OFFSET: 3022 case RADEON_PARAM_SCRATCH_OFFSET:
3076 if (!dev_priv->writeback_works) 3023 if (!dev_priv->writeback_works)
3077 return DRM_ERR(EINVAL); 3024 return -EINVAL;
3078 value = RADEON_SCRATCH_REG_OFFSET; 3025 value = RADEON_SCRATCH_REG_OFFSET;
3079 break; 3026 break;
3080 case RADEON_PARAM_CARD_TYPE: 3027 case RADEON_PARAM_CARD_TYPE:
@@ -3089,43 +3036,37 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
3089 value = radeon_vblank_crtc_get(dev); 3036 value = radeon_vblank_crtc_get(dev);
3090 break; 3037 break;
3091 default: 3038 default:
3092 DRM_DEBUG("Invalid parameter %d\n", param.param); 3039 DRM_DEBUG("Invalid parameter %d\n", param->param);
3093 return DRM_ERR(EINVAL); 3040 return -EINVAL;
3094 } 3041 }
3095 3042
3096 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 3043 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
3097 DRM_ERROR("copy_to_user\n"); 3044 DRM_ERROR("copy_to_user\n");
3098 return DRM_ERR(EFAULT); 3045 return -EFAULT;
3099 } 3046 }
3100 3047
3101 return 0; 3048 return 0;
3102} 3049}
3103 3050
3104static int radeon_cp_setparam(DRM_IOCTL_ARGS) 3051static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
3105{ 3052{
3106 DRM_DEVICE;
3107 drm_radeon_private_t *dev_priv = dev->dev_private; 3053 drm_radeon_private_t *dev_priv = dev->dev_private;
3108 struct drm_file *filp_priv; 3054 drm_radeon_setparam_t *sp = data;
3109 drm_radeon_setparam_t sp;
3110 struct drm_radeon_driver_file_fields *radeon_priv; 3055 struct drm_radeon_driver_file_fields *radeon_priv;
3111 3056
3112 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); 3057 switch (sp->param) {
3113
3114 DRM_COPY_FROM_USER_IOCTL(sp, (drm_radeon_setparam_t __user *) data,
3115 sizeof(sp));
3116
3117 switch (sp.param) {
3118 case RADEON_SETPARAM_FB_LOCATION: 3058 case RADEON_SETPARAM_FB_LOCATION:
3119 radeon_priv = filp_priv->driver_priv; 3059 radeon_priv = file_priv->driver_priv;
3120 radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value; 3060 radeon_priv->radeon_fb_delta = dev_priv->fb_location -
3061 sp->value;
3121 break; 3062 break;
3122 case RADEON_SETPARAM_SWITCH_TILING: 3063 case RADEON_SETPARAM_SWITCH_TILING:
3123 if (sp.value == 0) { 3064 if (sp->value == 0) {
3124 DRM_DEBUG("color tiling disabled\n"); 3065 DRM_DEBUG("color tiling disabled\n");
3125 dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; 3066 dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3126 dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; 3067 dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3127 dev_priv->sarea_priv->tiling_enabled = 0; 3068 dev_priv->sarea_priv->tiling_enabled = 0;
3128 } else if (sp.value == 1) { 3069 } else if (sp->value == 1) {
3129 DRM_DEBUG("color tiling enabled\n"); 3070 DRM_DEBUG("color tiling enabled\n");
3130 dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; 3071 dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
3131 dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; 3072 dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
@@ -3133,23 +3074,23 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
3133 } 3074 }
3134 break; 3075 break;
3135 case RADEON_SETPARAM_PCIGART_LOCATION: 3076 case RADEON_SETPARAM_PCIGART_LOCATION:
3136 dev_priv->pcigart_offset = sp.value; 3077 dev_priv->pcigart_offset = sp->value;
3137 dev_priv->pcigart_offset_set = 1; 3078 dev_priv->pcigart_offset_set = 1;
3138 break; 3079 break;
3139 case RADEON_SETPARAM_NEW_MEMMAP: 3080 case RADEON_SETPARAM_NEW_MEMMAP:
3140 dev_priv->new_memmap = sp.value; 3081 dev_priv->new_memmap = sp->value;
3141 break; 3082 break;
3142 case RADEON_SETPARAM_PCIGART_TABLE_SIZE: 3083 case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
3143 dev_priv->gart_info.table_size = sp.value; 3084 dev_priv->gart_info.table_size = sp->value;
3144 if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE) 3085 if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
3145 dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; 3086 dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
3146 break; 3087 break;
3147 case RADEON_SETPARAM_VBLANK_CRTC: 3088 case RADEON_SETPARAM_VBLANK_CRTC:
3148 return radeon_vblank_crtc_set(dev, sp.value); 3089 return radeon_vblank_crtc_set(dev, sp->value);
3149 break; 3090 break;
3150 default: 3091 default:
3151 DRM_DEBUG("Invalid parameter %d\n", sp.param); 3092 DRM_DEBUG("Invalid parameter %d\n", sp->param);
3152 return DRM_ERR(EINVAL); 3093 return -EINVAL;
3153 } 3094 }
3154 3095
3155 return 0; 3096 return 0;
@@ -3162,14 +3103,14 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
3162 * 3103 *
3163 * DRM infrastructure takes care of reclaiming dma buffers. 3104 * DRM infrastructure takes care of reclaiming dma buffers.
3164 */ 3105 */
3165void radeon_driver_preclose(struct drm_device *dev, DRMFILE filp) 3106void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
3166{ 3107{
3167 if (dev->dev_private) { 3108 if (dev->dev_private) {
3168 drm_radeon_private_t *dev_priv = dev->dev_private; 3109 drm_radeon_private_t *dev_priv = dev->dev_private;
3169 dev_priv->page_flipping = 0; 3110 dev_priv->page_flipping = 0;
3170 radeon_mem_release(filp, dev_priv->gart_heap); 3111 radeon_mem_release(file_priv, dev_priv->gart_heap);
3171 radeon_mem_release(filp, dev_priv->fb_heap); 3112 radeon_mem_release(file_priv, dev_priv->fb_heap);
3172 radeon_surfaces_release(filp, dev_priv); 3113 radeon_surfaces_release(file_priv, dev_priv);
3173 } 3114 }
3174} 3115}
3175 3116
@@ -3186,7 +3127,7 @@ void radeon_driver_lastclose(struct drm_device *dev)
3186 radeon_do_release(dev); 3127 radeon_do_release(dev);
3187} 3128}
3188 3129
3189int radeon_driver_open(struct drm_device *dev, struct drm_file *filp_priv) 3130int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
3190{ 3131{
3191 drm_radeon_private_t *dev_priv = dev->dev_private; 3132 drm_radeon_private_t *dev_priv = dev->dev_private;
3192 struct drm_radeon_driver_file_fields *radeon_priv; 3133 struct drm_radeon_driver_file_fields *radeon_priv;
@@ -3199,7 +3140,7 @@ int radeon_driver_open(struct drm_device *dev, struct drm_file *filp_priv)
3199 if (!radeon_priv) 3140 if (!radeon_priv)
3200 return -ENOMEM; 3141 return -ENOMEM;
3201 3142
3202 filp_priv->driver_priv = radeon_priv; 3143 file_priv->driver_priv = radeon_priv;
3203 3144
3204 if (dev_priv) 3145 if (dev_priv)
3205 radeon_priv->radeon_fb_delta = dev_priv->fb_location; 3146 radeon_priv->radeon_fb_delta = dev_priv->fb_location;
@@ -3208,42 +3149,42 @@ int radeon_driver_open(struct drm_device *dev, struct drm_file *filp_priv)
3208 return 0; 3149 return 0;
3209} 3150}
3210 3151
3211void radeon_driver_postclose(struct drm_device *dev, struct drm_file *filp_priv) 3152void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
3212{ 3153{
3213 struct drm_radeon_driver_file_fields *radeon_priv = 3154 struct drm_radeon_driver_file_fields *radeon_priv =
3214 filp_priv->driver_priv; 3155 file_priv->driver_priv;
3215 3156
3216 drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES); 3157 drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
3217} 3158}
3218 3159
3219drm_ioctl_desc_t radeon_ioctls[] = { 3160struct drm_ioctl_desc radeon_ioctls[] = {
3220 [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3161 DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3221 [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3162 DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3222 [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3163 DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3223 [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3164 DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3224 [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, DRM_AUTH}, 3165 DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
3225 [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, DRM_AUTH}, 3166 DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
3226 [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, DRM_AUTH}, 3167 DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
3227 [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, DRM_AUTH}, 3168 DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
3228 [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, DRM_AUTH}, 3169 DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
3229 [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, DRM_AUTH}, 3170 DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
3230 [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, DRM_AUTH}, 3171 DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
3231 [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, DRM_AUTH}, 3172 DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
3232 [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, DRM_AUTH}, 3173 DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
3233 [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, DRM_AUTH}, 3174 DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
3234 [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3175 DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3235 [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, DRM_AUTH}, 3176 DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
3236 [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, DRM_AUTH}, 3177 DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
3237 [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, DRM_AUTH}, 3178 DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
3238 [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, DRM_AUTH}, 3179 DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
3239 [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, DRM_AUTH}, 3180 DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
3240 [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, DRM_AUTH}, 3181 DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
3241 [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3182 DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3242 [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, DRM_AUTH}, 3183 DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
3243 [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, DRM_AUTH}, 3184 DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
3244 [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, DRM_AUTH}, 3185 DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
3245 [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, DRM_AUTH}, 3186 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
3246 [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, DRM_AUTH} 3187 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH)
3247}; 3188};
3248 3189
3249int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); 3190int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
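For reference, every ioctl handler touched above follows the same conversion: the DRM_IOCTL_ARGS / DRM_DEVICE / DRM_COPY_FROM_USER_IOCTL boilerplate disappears because the DRM core now hands the handler the device, an argument block it has already copied into kernel space, and the per-file struct drm_file; buffer ownership is checked against file_priv instead of filp; and DRM_ERR(E...) becomes a plain negative errno. A minimal before/after sketch of that pattern, not part of the patch itself (radeon_cp_example, drm_radeon_example_t and DRM_RADEON_EXAMPLE are made-up names for illustration; the macros and fields are the ones used throughout the hunks above):

/* before: old-style handler */
static int radeon_cp_example(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;				/* derives dev from filp */
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_example_t ex;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(ex, (drm_radeon_example_t __user *) data,
				 sizeof(ex));

	if (ex.idx < 0 || ex.idx >= dev->dma->buf_count)
		return DRM_ERR(EINVAL);		/* positive errno, wrapped */

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	return 0;
}

/* after: the core copies the argument block in/out around the call */
static int radeon_cp_example(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_example_t *ex = data;	/* already in kernel space */

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (ex->idx < 0 || ex->idx >= dev->dma->buf_count)
		return -EINVAL;			/* plain negative errno */

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	return 0;
}

/* and the ioctl table entry changes from designated-index form */
/*   [DRM_IOCTL_NR(DRM_RADEON_EXAMPLE)] = {radeon_cp_example, DRM_AUTH}, */
/* to the DRM_IOCTL_DEF() form:                                          */
/*   DRM_IOCTL_DEF(DRM_RADEON_EXAMPLE, radeon_cp_example, DRM_AUTH),     */

The savage_bci.c hunks that follow pick up only the error-code half of this conversion (DRM_ERR(E...) to -E...).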
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
index 18c7235f6b73..59484d56b333 100644
--- a/drivers/char/drm/savage_bci.c
+++ b/drivers/char/drm/savage_bci.c
@@ -60,7 +60,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
60 DRM_ERROR("failed!\n"); 60 DRM_ERROR("failed!\n");
61 DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); 61 DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
62#endif 62#endif
63 return DRM_ERR(EBUSY); 63 return -EBUSY;
64} 64}
65 65
66static int 66static int
@@ -81,7 +81,7 @@ savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
81 DRM_ERROR("failed!\n"); 81 DRM_ERROR("failed!\n");
82 DRM_INFO(" status=0x%08x\n", status); 82 DRM_INFO(" status=0x%08x\n", status);
83#endif 83#endif
84 return DRM_ERR(EBUSY); 84 return -EBUSY;
85} 85}
86 86
87static int 87static int
@@ -102,7 +102,7 @@ savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
102 DRM_ERROR("failed!\n"); 102 DRM_ERROR("failed!\n");
103 DRM_INFO(" status=0x%08x\n", status); 103 DRM_INFO(" status=0x%08x\n", status);
104#endif 104#endif
105 return DRM_ERR(EBUSY); 105 return -EBUSY;
106} 106}
107 107
108/* 108/*
@@ -136,7 +136,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
136 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); 136 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
137#endif 137#endif
138 138
139 return DRM_ERR(EBUSY); 139 return -EBUSY;
140} 140}
141 141
142static int 142static int
@@ -158,7 +158,7 @@ savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
158 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); 158 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
159#endif 159#endif
160 160
161 return DRM_ERR(EBUSY); 161 return -EBUSY;
162} 162}
163 163
164uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, 164uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
@@ -301,7 +301,7 @@ static int savage_dma_init(drm_savage_private_t * dev_priv)
301 dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * 301 dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
302 dev_priv->nr_dma_pages, DRM_MEM_DRIVER); 302 dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
303 if (dev_priv->dma_pages == NULL) 303 if (dev_priv->dma_pages == NULL)
304 return DRM_ERR(ENOMEM); 304 return -ENOMEM;
305 305
306 for (i = 0; i < dev_priv->nr_dma_pages; ++i) { 306 for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
307 SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); 307 SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
@@ -541,7 +541,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
541 541
542 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); 542 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
543 if (dev_priv == NULL) 543 if (dev_priv == NULL)
544 return DRM_ERR(ENOMEM); 544 return -ENOMEM;
545 545
546 memset(dev_priv, 0, sizeof(drm_savage_private_t)); 546 memset(dev_priv, 0, sizeof(drm_savage_private_t));
547 dev->dev_private = (void *)dev_priv; 547 dev->dev_private = (void *)dev_priv;
@@ -682,16 +682,16 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
682 682
683 if (init->fb_bpp != 16 && init->fb_bpp != 32) { 683 if (init->fb_bpp != 16 && init->fb_bpp != 32) {
684 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); 684 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
685 return DRM_ERR(EINVAL); 685 return -EINVAL;
686 } 686 }
687 if (init->depth_bpp != 16 && init->depth_bpp != 32) { 687 if (init->depth_bpp != 16 && init->depth_bpp != 32) {
688 DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); 688 DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
689 return DRM_ERR(EINVAL); 689 return -EINVAL;
690 } 690 }
691 if (init->dma_type != SAVAGE_DMA_AGP && 691 if (init->dma_type != SAVAGE_DMA_AGP &&
692 init->dma_type != SAVAGE_DMA_PCI) { 692 init->dma_type != SAVAGE_DMA_PCI) {
693 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); 693 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
694 return DRM_ERR(EINVAL); 694 return -EINVAL;
695 } 695 }
696 696
697 dev_priv->cob_size = init->cob_size; 697 dev_priv->cob_size = init->cob_size;
@@ -715,14 +715,14 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
715 if (!dev_priv->sarea) { 715 if (!dev_priv->sarea) {
716 DRM_ERROR("could not find sarea!\n"); 716 DRM_ERROR("could not find sarea!\n");
717 savage_do_cleanup_bci(dev); 717 savage_do_cleanup_bci(dev);
718 return DRM_ERR(EINVAL); 718 return -EINVAL;
719 } 719 }
720 if (init->status_offset != 0) { 720 if (init->status_offset != 0) {
721 dev_priv->status = drm_core_findmap(dev, init->status_offset); 721 dev_priv->status = drm_core_findmap(dev, init->status_offset);
722 if (!dev_priv->status) { 722 if (!dev_priv->status) {
723 DRM_ERROR("could not find shadow status region!\n"); 723 DRM_ERROR("could not find shadow status region!\n");
724 savage_do_cleanup_bci(dev); 724 savage_do_cleanup_bci(dev);
725 return DRM_ERR(EINVAL); 725 return -EINVAL;
726 } 726 }
727 } else { 727 } else {
728 dev_priv->status = NULL; 728 dev_priv->status = NULL;
@@ -734,13 +734,13 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
734 if (!dev->agp_buffer_map) { 734 if (!dev->agp_buffer_map) {
735 DRM_ERROR("could not find DMA buffer region!\n"); 735 DRM_ERROR("could not find DMA buffer region!\n");
736 savage_do_cleanup_bci(dev); 736 savage_do_cleanup_bci(dev);
737 return DRM_ERR(EINVAL); 737 return -EINVAL;
738 } 738 }
739 drm_core_ioremap(dev->agp_buffer_map, dev); 739 drm_core_ioremap(dev->agp_buffer_map, dev);
740 if (!dev->agp_buffer_map) { 740 if (!dev->agp_buffer_map) {
741 DRM_ERROR("failed to ioremap DMA buffer region!\n"); 741 DRM_ERROR("failed to ioremap DMA buffer region!\n");
742 savage_do_cleanup_bci(dev); 742 savage_do_cleanup_bci(dev);
743 return DRM_ERR(ENOMEM); 743 return -ENOMEM;
744 } 744 }
745 } 745 }
746 if (init->agp_textures_offset) { 746 if (init->agp_textures_offset) {
@@ -749,7 +749,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
749 if (!dev_priv->agp_textures) { 749 if (!dev_priv->agp_textures) {
750 DRM_ERROR("could not find agp texture region!\n"); 750 DRM_ERROR("could not find agp texture region!\n");
751 savage_do_cleanup_bci(dev); 751 savage_do_cleanup_bci(dev);
752 return DRM_ERR(EINVAL); 752 return -EINVAL;
753 } 753 }
754 } else { 754 } else {
755 dev_priv->agp_textures = NULL; 755 dev_priv->agp_textures = NULL;
@@ -760,39 +760,39 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
760 DRM_ERROR("command DMA not supported on " 760 DRM_ERROR("command DMA not supported on "
761 "Savage3D/MX/IX.\n"); 761 "Savage3D/MX/IX.\n");
762 savage_do_cleanup_bci(dev); 762 savage_do_cleanup_bci(dev);
763 return DRM_ERR(EINVAL); 763 return -EINVAL;
764 } 764 }
765 if (dev->dma && dev->dma->buflist) { 765 if (dev->dma && dev->dma->buflist) {
766 DRM_ERROR("command and vertex DMA not supported " 766 DRM_ERROR("command and vertex DMA not supported "
767 "at the same time.\n"); 767 "at the same time.\n");
768 savage_do_cleanup_bci(dev); 768 savage_do_cleanup_bci(dev);
769 return DRM_ERR(EINVAL); 769 return -EINVAL;
770 } 770 }
771 dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); 771 dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
772 if (!dev_priv->cmd_dma) { 772 if (!dev_priv->cmd_dma) {
773 DRM_ERROR("could not find command DMA region!\n"); 773 DRM_ERROR("could not find command DMA region!\n");
774 savage_do_cleanup_bci(dev); 774 savage_do_cleanup_bci(dev);
775 return DRM_ERR(EINVAL); 775 return -EINVAL;
776 } 776 }
777 if (dev_priv->dma_type == SAVAGE_DMA_AGP) { 777 if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
778 if (dev_priv->cmd_dma->type != _DRM_AGP) { 778 if (dev_priv->cmd_dma->type != _DRM_AGP) {
779 DRM_ERROR("AGP command DMA region is not a " 779 DRM_ERROR("AGP command DMA region is not a "
780 "_DRM_AGP map!\n"); 780 "_DRM_AGP map!\n");
781 savage_do_cleanup_bci(dev); 781 savage_do_cleanup_bci(dev);
782 return DRM_ERR(EINVAL); 782 return -EINVAL;
783 } 783 }
784 drm_core_ioremap(dev_priv->cmd_dma, dev); 784 drm_core_ioremap(dev_priv->cmd_dma, dev);
785 if (!dev_priv->cmd_dma->handle) { 785 if (!dev_priv->cmd_dma->handle) {
786 DRM_ERROR("failed to ioremap command " 786 DRM_ERROR("failed to ioremap command "
787 "DMA region!\n"); 787 "DMA region!\n");
788 savage_do_cleanup_bci(dev); 788 savage_do_cleanup_bci(dev);
789 return DRM_ERR(ENOMEM); 789 return -ENOMEM;
790 } 790 }
791 } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { 791 } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
792 DRM_ERROR("PCI command DMA region is not a " 792 DRM_ERROR("PCI command DMA region is not a "
793 "_DRM_CONSISTENT map!\n"); 793 "_DRM_CONSISTENT map!\n");
794 savage_do_cleanup_bci(dev); 794 savage_do_cleanup_bci(dev);
795 return DRM_ERR(EINVAL); 795 return -EINVAL;
796 } 796 }
797 } else { 797 } else {
798 dev_priv->cmd_dma = NULL; 798 dev_priv->cmd_dma = NULL;
@@ -809,7 +809,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
809 if (!dev_priv->fake_dma.handle) { 809 if (!dev_priv->fake_dma.handle) {
810 DRM_ERROR("could not allocate faked DMA buffer!\n"); 810 DRM_ERROR("could not allocate faked DMA buffer!\n");
811 savage_do_cleanup_bci(dev); 811 savage_do_cleanup_bci(dev);
812 return DRM_ERR(ENOMEM); 812 return -ENOMEM;
813 } 813 }
814 dev_priv->cmd_dma = &dev_priv->fake_dma; 814 dev_priv->cmd_dma = &dev_priv->fake_dma;
815 dev_priv->dma_flush = savage_fake_dma_flush; 815 dev_priv->dma_flush = savage_fake_dma_flush;
@@ -886,13 +886,13 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
886 if (savage_freelist_init(dev) < 0) { 886 if (savage_freelist_init(dev) < 0) {
887 DRM_ERROR("could not initialize freelist\n"); 887 DRM_ERROR("could not initialize freelist\n");
888 savage_do_cleanup_bci(dev); 888 savage_do_cleanup_bci(dev);
889 return DRM_ERR(ENOMEM); 889 return -ENOMEM;
890 } 890 }
891 891
892 if (savage_dma_init(dev_priv) < 0) { 892 if (savage_dma_init(dev_priv) < 0) {
893 DRM_ERROR("could not initialize command DMA\n"); 893 DRM_ERROR("could not initialize command DMA\n");
894 savage_do_cleanup_bci(dev); 894 savage_do_cleanup_bci(dev);
895 return DRM_ERR(ENOMEM); 895 return -ENOMEM;
896 } 896 }
897 897
898 return 0; 898 return 0;
@@ -928,51 +928,41 @@ static int savage_do_cleanup_bci(struct drm_device * dev)
928 return 0; 928 return 0;
929} 929}
930 930
931static int savage_bci_init(DRM_IOCTL_ARGS) 931static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
932{ 932{
933 DRM_DEVICE; 933 drm_savage_init_t *init = data;
934 drm_savage_init_t init;
935 934
936 LOCK_TEST_WITH_RETURN(dev, filp); 935 LOCK_TEST_WITH_RETURN(dev, file_priv);
937 936
938 DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *) data, 937 switch (init->func) {
939 sizeof(init));
940
941 switch (init.func) {
942 case SAVAGE_INIT_BCI: 938 case SAVAGE_INIT_BCI:
943 return savage_do_init_bci(dev, &init); 939 return savage_do_init_bci(dev, init);
944 case SAVAGE_CLEANUP_BCI: 940 case SAVAGE_CLEANUP_BCI:
945 return savage_do_cleanup_bci(dev); 941 return savage_do_cleanup_bci(dev);
946 } 942 }
947 943
948 return DRM_ERR(EINVAL); 944 return -EINVAL;
949} 945}
950 946
951static int savage_bci_event_emit(DRM_IOCTL_ARGS) 947static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
952{ 948{
953 DRM_DEVICE;
954 drm_savage_private_t *dev_priv = dev->dev_private; 949 drm_savage_private_t *dev_priv = dev->dev_private;
955 drm_savage_event_emit_t event; 950 drm_savage_event_emit_t *event = data;
956 951
957 DRM_DEBUG("\n"); 952 DRM_DEBUG("\n");
958 953
959 LOCK_TEST_WITH_RETURN(dev, filp); 954 LOCK_TEST_WITH_RETURN(dev, file_priv);
960 955
961 DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *) data, 956 event->count = savage_bci_emit_event(dev_priv, event->flags);
962 sizeof(event)); 957 event->count |= dev_priv->event_wrap << 16;
963 958
964 event.count = savage_bci_emit_event(dev_priv, event.flags);
965 event.count |= dev_priv->event_wrap << 16;
966 DRM_COPY_TO_USER_IOCTL((drm_savage_event_emit_t __user *) data,
967 event, sizeof(event));
968 return 0; 959 return 0;
969} 960}
970 961
971static int savage_bci_event_wait(DRM_IOCTL_ARGS) 962static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
972{ 963{
973 DRM_DEVICE;
974 drm_savage_private_t *dev_priv = dev->dev_private; 964 drm_savage_private_t *dev_priv = dev->dev_private;
975 drm_savage_event_wait_t event; 965 drm_savage_event_wait_t *event = data;
976 unsigned int event_e, hw_e; 966 unsigned int event_e, hw_e;
977 unsigned int event_w, hw_w; 967 unsigned int event_w, hw_w;
978 968
@@ -990,8 +980,8 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
990 if (hw_e > dev_priv->event_counter) 980 if (hw_e > dev_priv->event_counter)
991 hw_w--; /* hardware hasn't passed the last wrap yet */ 981 hw_w--; /* hardware hasn't passed the last wrap yet */
992 982
993 event_e = event.count & 0xffff; 983 event_e = event->count & 0xffff;
994 event_w = event.count >> 16; 984 event_w = event->count >> 16;
995 985
996 /* Don't need to wait if 986 /* Don't need to wait if
997 * - event counter wrapped since the event was emitted or 987 * - event counter wrapped since the event was emitted or
@@ -1007,7 +997,9 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
1007 * DMA buffer management 997 * DMA buffer management
1008 */ 998 */
1009 999
1010static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct drm_dma *d) 1000static int savage_bci_get_buffers(struct drm_device *dev,
1001 struct drm_file *file_priv,
1002 struct drm_dma *d)
1011{ 1003{
1012 struct drm_buf *buf; 1004 struct drm_buf *buf;
1013 int i; 1005 int i;
@@ -1015,61 +1007,56 @@ static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct d
1015 for (i = d->granted_count; i < d->request_count; i++) { 1007 for (i = d->granted_count; i < d->request_count; i++) {
1016 buf = savage_freelist_get(dev); 1008 buf = savage_freelist_get(dev);
1017 if (!buf) 1009 if (!buf)
1018 return DRM_ERR(EAGAIN); 1010 return -EAGAIN;
1019 1011
1020 buf->filp = filp; 1012 buf->file_priv = file_priv;
1021 1013
1022 if (DRM_COPY_TO_USER(&d->request_indices[i], 1014 if (DRM_COPY_TO_USER(&d->request_indices[i],
1023 &buf->idx, sizeof(buf->idx))) 1015 &buf->idx, sizeof(buf->idx)))
1024 return DRM_ERR(EFAULT); 1016 return -EFAULT;
1025 if (DRM_COPY_TO_USER(&d->request_sizes[i], 1017 if (DRM_COPY_TO_USER(&d->request_sizes[i],
1026 &buf->total, sizeof(buf->total))) 1018 &buf->total, sizeof(buf->total)))
1027 return DRM_ERR(EFAULT); 1019 return -EFAULT;
1028 1020
1029 d->granted_count++; 1021 d->granted_count++;
1030 } 1022 }
1031 return 0; 1023 return 0;
1032} 1024}
1033 1025
1034int savage_bci_buffers(DRM_IOCTL_ARGS) 1026int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
1035{ 1027{
1036 DRM_DEVICE;
1037 struct drm_device_dma *dma = dev->dma; 1028 struct drm_device_dma *dma = dev->dma;
1038 struct drm_dma d; 1029 struct drm_dma *d = data;
1039 int ret = 0; 1030 int ret = 0;
1040 1031
1041 LOCK_TEST_WITH_RETURN(dev, filp); 1032 LOCK_TEST_WITH_RETURN(dev, file_priv);
1042
1043 DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma __user *) data, sizeof(d));
1044 1033
1045 /* Please don't send us buffers. 1034 /* Please don't send us buffers.
1046 */ 1035 */
1047 if (d.send_count != 0) { 1036 if (d->send_count != 0) {
1048 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 1037 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1049 DRM_CURRENTPID, d.send_count); 1038 DRM_CURRENTPID, d->send_count);
1050 return DRM_ERR(EINVAL); 1039 return -EINVAL;
1051 } 1040 }
1052 1041
1053 /* We'll send you buffers. 1042 /* We'll send you buffers.
1054 */ 1043 */
1055 if (d.request_count < 0 || d.request_count > dma->buf_count) { 1044 if (d->request_count < 0 || d->request_count > dma->buf_count) {
1056 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 1045 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1057 DRM_CURRENTPID, d.request_count, dma->buf_count); 1046 DRM_CURRENTPID, d->request_count, dma->buf_count);
1058 return DRM_ERR(EINVAL); 1047 return -EINVAL;
1059 } 1048 }
1060 1049
1061 d.granted_count = 0; 1050 d->granted_count = 0;
1062 1051
1063 if (d.request_count) { 1052 if (d->request_count) {
1064 ret = savage_bci_get_buffers(filp, dev, &d); 1053 ret = savage_bci_get_buffers(dev, file_priv, d);
1065 } 1054 }
1066 1055
1067 DRM_COPY_TO_USER_IOCTL((struct drm_dma __user *) data, d, sizeof(d));
1068
1069 return ret; 1056 return ret;
1070} 1057}
1071 1058
1072void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp) 1059void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1073{ 1060{
1074 struct drm_device_dma *dma = dev->dma; 1061 struct drm_device_dma *dma = dev->dma;
1075 drm_savage_private_t *dev_priv = dev->dev_private; 1062 drm_savage_private_t *dev_priv = dev->dev_private;
@@ -1088,7 +1075,7 @@ void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp)
1088 struct drm_buf *buf = dma->buflist[i]; 1075 struct drm_buf *buf = dma->buflist[i];
1089 drm_savage_buf_priv_t *buf_priv = buf->dev_private; 1076 drm_savage_buf_priv_t *buf_priv = buf->dev_private;
1090 1077
1091 if (buf->filp == filp && buf_priv && 1078 if (buf->file_priv == file_priv && buf_priv &&
1092 buf_priv->next == NULL && buf_priv->prev == NULL) { 1079 buf_priv->next == NULL && buf_priv->prev == NULL) {
1093 uint16_t event; 1080 uint16_t event;
1094 DRM_DEBUG("reclaimed from client\n"); 1081 DRM_DEBUG("reclaimed from client\n");
@@ -1098,14 +1085,14 @@ void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp)
1098 } 1085 }
1099 } 1086 }
1100 1087
1101 drm_core_reclaim_buffers(dev, filp); 1088 drm_core_reclaim_buffers(dev, file_priv);
1102} 1089}
1103 1090
1104drm_ioctl_desc_t savage_ioctls[] = { 1091struct drm_ioctl_desc savage_ioctls[] = {
1105 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1092 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1106 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH}, 1093 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
1107 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH}, 1094 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
1108 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH}, 1095 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
1109}; 1096};
1110 1097
1111int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); 1098int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
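The savage_bci.c hunks above follow the interface rework running through this series: ioctl handlers drop the opaque DRM_IOCTL_ARGS/DRM_DEVICE plumbing and the per-handler DRM_COPY_FROM_USER_IOCTL()/DRM_COPY_TO_USER_IOCTL() calls, because the DRM core now hands them the device, a kernel copy of the argument, and the opening drm_file; error paths return plain negative errno values instead of going through DRM_ERR(). A minimal before/after sketch of the handler shape, assuming this tree's drmP.h; the foo_* names and the argument struct are illustrative stand-ins, not symbols from the patch:

/* Handler-signature conversion sketch.  Assumes this tree's "drmP.h";
 * everything named foo_* is a hypothetical stand-in.
 */
#include "drmP.h"

struct foo_arg {                        /* hypothetical ioctl argument */
	unsigned int value;
};

#if 0	/* old form: uses macros removed along with DRM_IOCTL_ARGS */
static int foo_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;                     /* derives 'dev' from the file */
	struct foo_arg arg;

	LOCK_TEST_WITH_RETURN(dev, filp);

	/* handler copies its own argument from user space */
	DRM_COPY_FROM_USER_IOCTL(arg, (struct foo_arg __user *)data,
				 sizeof(arg));
	if (arg.value == 0)
		return DRM_ERR(EINVAL); /* errno wrapper */
	return 0;
}
#endif

/* new form: the core has already copied the argument into 'data' */
static int foo_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct foo_arg *arg = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (arg->value == 0)
		return -EINVAL;
	return 0;
}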
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
index 5fd54de4280e..df2aac6636f7 100644
--- a/drivers/char/drm/savage_drv.h
+++ b/drivers/char/drm/savage_drv.h
@@ -104,7 +104,7 @@ enum savage_family {
104 S3_LAST 104 S3_LAST
105}; 105};
106 106
107extern drm_ioctl_desc_t savage_ioctls[]; 107extern struct drm_ioctl_desc savage_ioctls[];
108extern int savage_max_ioctl; 108extern int savage_max_ioctl;
109 109
110#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) 110#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
@@ -197,8 +197,8 @@ typedef struct drm_savage_private {
197} drm_savage_private_t; 197} drm_savage_private_t;
198 198
199/* ioctls */ 199/* ioctls */
200extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS); 200extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv);
201extern int savage_bci_buffers(DRM_IOCTL_ARGS); 201extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
202 202
203/* BCI functions */ 203/* BCI functions */
204extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, 204extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
@@ -212,7 +212,8 @@ extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
212extern int savage_driver_firstopen(struct drm_device *dev); 212extern int savage_driver_firstopen(struct drm_device *dev);
213extern void savage_driver_lastclose(struct drm_device *dev); 213extern void savage_driver_lastclose(struct drm_device *dev);
214extern int savage_driver_unload(struct drm_device *dev); 214extern int savage_driver_unload(struct drm_device *dev);
215extern void savage_reclaim_buffers(struct drm_device * dev, DRMFILE filp); 215extern void savage_reclaim_buffers(struct drm_device *dev,
216 struct drm_file *file_priv);
216 217
217/* state functions */ 218/* state functions */
218extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 219extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
index 77497841478a..bf8e0e10fe21 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/char/drm/savage_state.c
@@ -83,7 +83,7 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
83{ 83{
84 if ((addr & 6) != 2) { /* reserved bits */ 84 if ((addr & 6) != 2) { /* reserved bits */
85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); 85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
86 return DRM_ERR(EINVAL); 86 return -EINVAL;
87 } 87 }
88 if (!(addr & 1)) { /* local */ 88 if (!(addr & 1)) { /* local */
89 addr &= ~7; 89 addr &= ~7;
@@ -92,13 +92,13 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
92 DRM_ERROR 92 DRM_ERROR
93 ("bad texAddr%d %08x (local addr out of range)\n", 93 ("bad texAddr%d %08x (local addr out of range)\n",
94 unit, addr); 94 unit, addr);
95 return DRM_ERR(EINVAL); 95 return -EINVAL;
96 } 96 }
97 } else { /* AGP */ 97 } else { /* AGP */
98 if (!dev_priv->agp_textures) { 98 if (!dev_priv->agp_textures) {
99 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", 99 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
100 unit, addr); 100 unit, addr);
101 return DRM_ERR(EINVAL); 101 return -EINVAL;
102 } 102 }
103 addr &= ~7; 103 addr &= ~7;
104 if (addr < dev_priv->agp_textures->offset || 104 if (addr < dev_priv->agp_textures->offset ||
@@ -107,7 +107,7 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
107 DRM_ERROR 107 DRM_ERROR
108 ("bad texAddr%d %08x (AGP addr out of range)\n", 108 ("bad texAddr%d %08x (AGP addr out of range)\n",
109 unit, addr); 109 unit, addr);
110 return DRM_ERR(EINVAL); 110 return -EINVAL;
111 } 111 }
112 } 112 }
113 return 0; 113 return 0;
@@ -133,7 +133,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
133 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { 133 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
134 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", 134 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
135 start, start + count - 1); 135 start, start + count - 1);
136 return DRM_ERR(EINVAL); 136 return -EINVAL;
137 } 137 }
138 138
139 SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart, 139 SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
@@ -165,7 +165,7 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
165 start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) { 165 start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
166 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", 166 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
167 start, start + count - 1); 167 start, start + count - 1);
168 return DRM_ERR(EINVAL); 168 return -EINVAL;
169 } 169 }
170 170
171 SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0, 171 SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
@@ -289,7 +289,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
289 289
290 if (!dmabuf) { 290 if (!dmabuf) {
291 DRM_ERROR("called without dma buffers!\n"); 291 DRM_ERROR("called without dma buffers!\n");
292 return DRM_ERR(EINVAL); 292 return -EINVAL;
293 } 293 }
294 294
295 if (!n) 295 if (!n)
@@ -303,7 +303,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
303 if (n % 3 != 0) { 303 if (n % 3 != 0) {
304 DRM_ERROR("wrong number of vertices %u in TRILIST\n", 304 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
305 n); 305 n);
306 return DRM_ERR(EINVAL); 306 return -EINVAL;
307 } 307 }
308 break; 308 break;
309 case SAVAGE_PRIM_TRISTRIP: 309 case SAVAGE_PRIM_TRISTRIP:
@@ -312,18 +312,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
312 DRM_ERROR 312 DRM_ERROR
313 ("wrong number of vertices %u in TRIFAN/STRIP\n", 313 ("wrong number of vertices %u in TRIFAN/STRIP\n",
314 n); 314 n);
315 return DRM_ERR(EINVAL); 315 return -EINVAL;
316 } 316 }
317 break; 317 break;
318 default: 318 default:
319 DRM_ERROR("invalid primitive type %u\n", prim); 319 DRM_ERROR("invalid primitive type %u\n", prim);
320 return DRM_ERR(EINVAL); 320 return -EINVAL;
321 } 321 }
322 322
323 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 323 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
324 if (skip != 0) { 324 if (skip != 0) {
325 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 325 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
326 return DRM_ERR(EINVAL); 326 return -EINVAL;
327 } 327 }
328 } else { 328 } else {
329 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - 329 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
@@ -331,18 +331,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
331 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); 331 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
332 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { 332 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
333 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 333 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
334 return DRM_ERR(EINVAL); 334 return -EINVAL;
335 } 335 }
336 if (reorder) { 336 if (reorder) {
337 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); 337 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
338 return DRM_ERR(EINVAL); 338 return -EINVAL;
339 } 339 }
340 } 340 }
341 341
342 if (start + n > dmabuf->total / 32) { 342 if (start + n > dmabuf->total / 32) {
343 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", 343 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
344 start, start + n - 1, dmabuf->total / 32); 344 start, start + n - 1, dmabuf->total / 32);
345 return DRM_ERR(EINVAL); 345 return -EINVAL;
346 } 346 }
347 347
348 /* Vertex DMA doesn't work with command DMA at the same time, 348 /* Vertex DMA doesn't work with command DMA at the same time,
@@ -440,7 +440,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
440 if (n % 3 != 0) { 440 if (n % 3 != 0) {
441 DRM_ERROR("wrong number of vertices %u in TRILIST\n", 441 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
442 n); 442 n);
443 return DRM_ERR(EINVAL); 443 return -EINVAL;
444 } 444 }
445 break; 445 break;
446 case SAVAGE_PRIM_TRISTRIP: 446 case SAVAGE_PRIM_TRISTRIP:
@@ -449,24 +449,24 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
449 DRM_ERROR 449 DRM_ERROR
450 ("wrong number of vertices %u in TRIFAN/STRIP\n", 450 ("wrong number of vertices %u in TRIFAN/STRIP\n",
451 n); 451 n);
452 return DRM_ERR(EINVAL); 452 return -EINVAL;
453 } 453 }
454 break; 454 break;
455 default: 455 default:
456 DRM_ERROR("invalid primitive type %u\n", prim); 456 DRM_ERROR("invalid primitive type %u\n", prim);
457 return DRM_ERR(EINVAL); 457 return -EINVAL;
458 } 458 }
459 459
460 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 460 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
461 if (skip > SAVAGE_SKIP_ALL_S3D) { 461 if (skip > SAVAGE_SKIP_ALL_S3D) {
462 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 462 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
463 return DRM_ERR(EINVAL); 463 return -EINVAL;
464 } 464 }
465 vtx_size = 8; /* full vertex */ 465 vtx_size = 8; /* full vertex */
466 } else { 466 } else {
467 if (skip > SAVAGE_SKIP_ALL_S4) { 467 if (skip > SAVAGE_SKIP_ALL_S4) {
468 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 468 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
469 return DRM_ERR(EINVAL); 469 return -EINVAL;
470 } 470 }
471 vtx_size = 10; /* full vertex */ 471 vtx_size = 10; /* full vertex */
472 } 472 }
@@ -478,13 +478,13 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
478 if (vtx_size > vb_stride) { 478 if (vtx_size > vb_stride) {
479 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", 479 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
480 vtx_size, vb_stride); 480 vtx_size, vb_stride);
481 return DRM_ERR(EINVAL); 481 return -EINVAL;
482 } 482 }
483 483
484 if (start + n > vb_size / (vb_stride * 4)) { 484 if (start + n > vb_size / (vb_stride * 4)) {
485 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", 485 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
486 start, start + n - 1, vb_size / (vb_stride * 4)); 486 start, start + n - 1, vb_size / (vb_stride * 4));
487 return DRM_ERR(EINVAL); 487 return -EINVAL;
488 } 488 }
489 489
490 prim <<= 25; 490 prim <<= 25;
@@ -547,7 +547,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
547 547
548 if (!dmabuf) { 548 if (!dmabuf) {
549 DRM_ERROR("called without dma buffers!\n"); 549 DRM_ERROR("called without dma buffers!\n");
550 return DRM_ERR(EINVAL); 550 return -EINVAL;
551 } 551 }
552 552
553 if (!n) 553 if (!n)
@@ -560,7 +560,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
560 case SAVAGE_PRIM_TRILIST: 560 case SAVAGE_PRIM_TRILIST:
561 if (n % 3 != 0) { 561 if (n % 3 != 0) {
562 DRM_ERROR("wrong number of indices %u in TRILIST\n", n); 562 DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
563 return DRM_ERR(EINVAL); 563 return -EINVAL;
564 } 564 }
565 break; 565 break;
566 case SAVAGE_PRIM_TRISTRIP: 566 case SAVAGE_PRIM_TRISTRIP:
@@ -568,18 +568,18 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
568 if (n < 3) { 568 if (n < 3) {
569 DRM_ERROR 569 DRM_ERROR
570 ("wrong number of indices %u in TRIFAN/STRIP\n", n); 570 ("wrong number of indices %u in TRIFAN/STRIP\n", n);
571 return DRM_ERR(EINVAL); 571 return -EINVAL;
572 } 572 }
573 break; 573 break;
574 default: 574 default:
575 DRM_ERROR("invalid primitive type %u\n", prim); 575 DRM_ERROR("invalid primitive type %u\n", prim);
576 return DRM_ERR(EINVAL); 576 return -EINVAL;
577 } 577 }
578 578
579 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 579 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
580 if (skip != 0) { 580 if (skip != 0) {
581 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 581 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
582 return DRM_ERR(EINVAL); 582 return -EINVAL;
583 } 583 }
584 } else { 584 } else {
585 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - 585 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
@@ -587,11 +587,11 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
587 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); 587 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
588 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { 588 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
589 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 589 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
590 return DRM_ERR(EINVAL); 590 return -EINVAL;
591 } 591 }
592 if (reorder) { 592 if (reorder) {
593 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); 593 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
594 return DRM_ERR(EINVAL); 594 return -EINVAL;
595 } 595 }
596 } 596 }
597 597
@@ -628,7 +628,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
628 if (idx[i] > dmabuf->total / 32) { 628 if (idx[i] > dmabuf->total / 32) {
629 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", 629 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
630 i, idx[i], dmabuf->total / 32); 630 i, idx[i], dmabuf->total / 32);
631 return DRM_ERR(EINVAL); 631 return -EINVAL;
632 } 632 }
633 } 633 }
634 634
@@ -698,7 +698,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
698 case SAVAGE_PRIM_TRILIST: 698 case SAVAGE_PRIM_TRILIST:
699 if (n % 3 != 0) { 699 if (n % 3 != 0) {
700 DRM_ERROR("wrong number of indices %u in TRILIST\n", n); 700 DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
701 return DRM_ERR(EINVAL); 701 return -EINVAL;
702 } 702 }
703 break; 703 break;
704 case SAVAGE_PRIM_TRISTRIP: 704 case SAVAGE_PRIM_TRISTRIP:
@@ -706,24 +706,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
706 if (n < 3) { 706 if (n < 3) {
707 DRM_ERROR 707 DRM_ERROR
708 ("wrong number of indices %u in TRIFAN/STRIP\n", n); 708 ("wrong number of indices %u in TRIFAN/STRIP\n", n);
709 return DRM_ERR(EINVAL); 709 return -EINVAL;
710 } 710 }
711 break; 711 break;
712 default: 712 default:
713 DRM_ERROR("invalid primitive type %u\n", prim); 713 DRM_ERROR("invalid primitive type %u\n", prim);
714 return DRM_ERR(EINVAL); 714 return -EINVAL;
715 } 715 }
716 716
717 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 717 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
718 if (skip > SAVAGE_SKIP_ALL_S3D) { 718 if (skip > SAVAGE_SKIP_ALL_S3D) {
719 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 719 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
720 return DRM_ERR(EINVAL); 720 return -EINVAL;
721 } 721 }
722 vtx_size = 8; /* full vertex */ 722 vtx_size = 8; /* full vertex */
723 } else { 723 } else {
724 if (skip > SAVAGE_SKIP_ALL_S4) { 724 if (skip > SAVAGE_SKIP_ALL_S4) {
725 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 725 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
726 return DRM_ERR(EINVAL); 726 return -EINVAL;
727 } 727 }
728 vtx_size = 10; /* full vertex */ 728 vtx_size = 10; /* full vertex */
729 } 729 }
@@ -735,7 +735,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
735 if (vtx_size > vb_stride) { 735 if (vtx_size > vb_stride) {
736 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", 736 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
737 vtx_size, vb_stride); 737 vtx_size, vb_stride);
738 return DRM_ERR(EINVAL); 738 return -EINVAL;
739 } 739 }
740 740
741 prim <<= 25; 741 prim <<= 25;
@@ -748,7 +748,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
748 if (idx[i] > vb_size / (vb_stride * 4)) { 748 if (idx[i] > vb_size / (vb_stride * 4)) {
749 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", 749 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
750 i, idx[i], vb_size / (vb_stride * 4)); 750 i, idx[i], vb_size / (vb_stride * 4));
751 return DRM_ERR(EINVAL); 751 return -EINVAL;
752 } 752 }
753 } 753 }
754 754
@@ -942,7 +942,7 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
942 DRM_ERROR("IMPLEMENTATION ERROR: " 942 DRM_ERROR("IMPLEMENTATION ERROR: "
943 "non-drawing-command %d\n", 943 "non-drawing-command %d\n",
944 cmd_header.cmd.cmd); 944 cmd_header.cmd.cmd);
945 return DRM_ERR(EINVAL); 945 return -EINVAL;
946 } 946 }
947 947
948 if (ret != 0) 948 if (ret != 0)
@@ -953,13 +953,12 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
953 return 0; 953 return 0;
954} 954}
955 955
956int savage_bci_cmdbuf(DRM_IOCTL_ARGS) 956int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
957{ 957{
958 DRM_DEVICE;
959 drm_savage_private_t *dev_priv = dev->dev_private; 958 drm_savage_private_t *dev_priv = dev->dev_private;
960 struct drm_device_dma *dma = dev->dma; 959 struct drm_device_dma *dma = dev->dma;
961 struct drm_buf *dmabuf; 960 struct drm_buf *dmabuf;
962 drm_savage_cmdbuf_t cmdbuf; 961 drm_savage_cmdbuf_t *cmdbuf = data;
963 drm_savage_cmd_header_t *kcmd_addr = NULL; 962 drm_savage_cmd_header_t *kcmd_addr = NULL;
964 drm_savage_cmd_header_t *first_draw_cmd; 963 drm_savage_cmd_header_t *first_draw_cmd;
965 unsigned int *kvb_addr = NULL; 964 unsigned int *kvb_addr = NULL;
@@ -969,19 +968,16 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
969 968
970 DRM_DEBUG("\n"); 969 DRM_DEBUG("\n");
971 970
972 LOCK_TEST_WITH_RETURN(dev, filp); 971 LOCK_TEST_WITH_RETURN(dev, file_priv);
973
974 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *) data,
975 sizeof(cmdbuf));
976 972
977 if (dma && dma->buflist) { 973 if (dma && dma->buflist) {
978 if (cmdbuf.dma_idx > dma->buf_count) { 974 if (cmdbuf->dma_idx > dma->buf_count) {
979 DRM_ERROR 975 DRM_ERROR
980 ("vertex buffer index %u out of range (0-%u)\n", 976 ("vertex buffer index %u out of range (0-%u)\n",
981 cmdbuf.dma_idx, dma->buf_count - 1); 977 cmdbuf->dma_idx, dma->buf_count - 1);
982 return DRM_ERR(EINVAL); 978 return -EINVAL;
983 } 979 }
984 dmabuf = dma->buflist[cmdbuf.dma_idx]; 980 dmabuf = dma->buflist[cmdbuf->dma_idx];
985 } else { 981 } else {
986 dmabuf = NULL; 982 dmabuf = NULL;
987 } 983 }
@@ -991,47 +987,47 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
991 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct 987 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
992 * for locking on FreeBSD. 988 * for locking on FreeBSD.
993 */ 989 */
994 if (cmdbuf.size) { 990 if (cmdbuf->size) {
995 kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); 991 kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
996 if (kcmd_addr == NULL) 992 if (kcmd_addr == NULL)
997 return DRM_ERR(ENOMEM); 993 return -ENOMEM;
998 994
999 if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, 995 if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
1000 cmdbuf.size * 8)) 996 cmdbuf->size * 8))
1001 { 997 {
1002 drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); 998 drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
1003 return DRM_ERR(EFAULT); 999 return -EFAULT;
1004 } 1000 }
1005 cmdbuf.cmd_addr = kcmd_addr; 1001 cmdbuf->cmd_addr = kcmd_addr;
1006 } 1002 }
1007 if (cmdbuf.vb_size) { 1003 if (cmdbuf->vb_size) {
1008 kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER); 1004 kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
1009 if (kvb_addr == NULL) { 1005 if (kvb_addr == NULL) {
1010 ret = DRM_ERR(ENOMEM); 1006 ret = -ENOMEM;
1011 goto done; 1007 goto done;
1012 } 1008 }
1013 1009
1014 if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr, 1010 if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
1015 cmdbuf.vb_size)) { 1011 cmdbuf->vb_size)) {
1016 ret = DRM_ERR(EFAULT); 1012 ret = -EFAULT;
1017 goto done; 1013 goto done;
1018 } 1014 }
1019 cmdbuf.vb_addr = kvb_addr; 1015 cmdbuf->vb_addr = kvb_addr;
1020 } 1016 }
1021 if (cmdbuf.nbox) { 1017 if (cmdbuf->nbox) {
1022 kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect), 1018 kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
1023 DRM_MEM_DRIVER); 1019 DRM_MEM_DRIVER);
1024 if (kbox_addr == NULL) { 1020 if (kbox_addr == NULL) {
1025 ret = DRM_ERR(ENOMEM); 1021 ret = -ENOMEM;
1026 goto done; 1022 goto done;
1027 } 1023 }
1028 1024
1029 if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, 1025 if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
1030 cmdbuf.nbox * sizeof(struct drm_clip_rect))) { 1026 cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
1031 ret = DRM_ERR(EFAULT); 1027 ret = -EFAULT;
1032 goto done; 1028 goto done;
1033 } 1029 }
1034 cmdbuf.box_addr = kbox_addr; 1030 cmdbuf->box_addr = kbox_addr;
1035 } 1031 }
1036 1032
1037 /* Make sure writes to DMA buffers are finished before sending 1033 /* Make sure writes to DMA buffers are finished before sending
@@ -1044,10 +1040,10 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1044 1040
1045 i = 0; 1041 i = 0;
1046 first_draw_cmd = NULL; 1042 first_draw_cmd = NULL;
1047 while (i < cmdbuf.size) { 1043 while (i < cmdbuf->size) {
1048 drm_savage_cmd_header_t cmd_header; 1044 drm_savage_cmd_header_t cmd_header;
1049 cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr; 1045 cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
1050 cmdbuf.cmd_addr++; 1046 cmdbuf->cmd_addr++;
1051 i++; 1047 i++;
1052 1048
1053 /* Group drawing commands with same state to minimize 1049 /* Group drawing commands with same state to minimize
@@ -1057,28 +1053,28 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1057 case SAVAGE_CMD_DMA_IDX: 1053 case SAVAGE_CMD_DMA_IDX:
1058 case SAVAGE_CMD_VB_IDX: 1054 case SAVAGE_CMD_VB_IDX:
1059 j = (cmd_header.idx.count + 3) / 4; 1055 j = (cmd_header.idx.count + 3) / 4;
1060 if (i + j > cmdbuf.size) { 1056 if (i + j > cmdbuf->size) {
1061 DRM_ERROR("indexed drawing command extends " 1057 DRM_ERROR("indexed drawing command extends "
1062 "beyond end of command buffer\n"); 1058 "beyond end of command buffer\n");
1063 DMA_FLUSH(); 1059 DMA_FLUSH();
1064 return DRM_ERR(EINVAL); 1060 return -EINVAL;
1065 } 1061 }
1066 /* fall through */ 1062 /* fall through */
1067 case SAVAGE_CMD_DMA_PRIM: 1063 case SAVAGE_CMD_DMA_PRIM:
1068 case SAVAGE_CMD_VB_PRIM: 1064 case SAVAGE_CMD_VB_PRIM:
1069 if (!first_draw_cmd) 1065 if (!first_draw_cmd)
1070 first_draw_cmd = cmdbuf.cmd_addr - 1; 1066 first_draw_cmd = cmdbuf->cmd_addr - 1;
1071 cmdbuf.cmd_addr += j; 1067 cmdbuf->cmd_addr += j;
1072 i += j; 1068 i += j;
1073 break; 1069 break;
1074 default: 1070 default:
1075 if (first_draw_cmd) { 1071 if (first_draw_cmd) {
1076 ret = savage_dispatch_draw( 1072 ret = savage_dispatch_draw(
1077 dev_priv, first_draw_cmd, 1073 dev_priv, first_draw_cmd,
1078 cmdbuf.cmd_addr - 1, 1074 cmdbuf->cmd_addr - 1,
1079 dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size, 1075 dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size,
1080 cmdbuf.vb_stride, 1076 cmdbuf->vb_stride,
1081 cmdbuf.nbox, cmdbuf.box_addr); 1077 cmdbuf->nbox, cmdbuf->box_addr);
1082 if (ret != 0) 1078 if (ret != 0)
1083 return ret; 1079 return ret;
1084 first_draw_cmd = NULL; 1080 first_draw_cmd = NULL;
@@ -1090,40 +1086,42 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1090 switch (cmd_header.cmd.cmd) { 1086 switch (cmd_header.cmd.cmd) {
1091 case SAVAGE_CMD_STATE: 1087 case SAVAGE_CMD_STATE:
1092 j = (cmd_header.state.count + 1) / 2; 1088 j = (cmd_header.state.count + 1) / 2;
1093 if (i + j > cmdbuf.size) { 1089 if (i + j > cmdbuf->size) {
1094 DRM_ERROR("command SAVAGE_CMD_STATE extends " 1090 DRM_ERROR("command SAVAGE_CMD_STATE extends "
1095 "beyond end of command buffer\n"); 1091 "beyond end of command buffer\n");
1096 DMA_FLUSH(); 1092 DMA_FLUSH();
1097 ret = DRM_ERR(EINVAL); 1093 ret = -EINVAL;
1098 goto done; 1094 goto done;
1099 } 1095 }
1100 ret = savage_dispatch_state(dev_priv, &cmd_header, 1096 ret = savage_dispatch_state(dev_priv, &cmd_header,
1101 (const uint32_t *)cmdbuf.cmd_addr); 1097 (const uint32_t *)cmdbuf->cmd_addr);
1102 cmdbuf.cmd_addr += j; 1098 cmdbuf->cmd_addr += j;
1103 i += j; 1099 i += j;
1104 break; 1100 break;
1105 case SAVAGE_CMD_CLEAR: 1101 case SAVAGE_CMD_CLEAR:
1106 if (i + 1 > cmdbuf.size) { 1102 if (i + 1 > cmdbuf->size) {
1107 DRM_ERROR("command SAVAGE_CMD_CLEAR extends " 1103 DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
1108 "beyond end of command buffer\n"); 1104 "beyond end of command buffer\n");
1109 DMA_FLUSH(); 1105 DMA_FLUSH();
1110 ret = DRM_ERR(EINVAL); 1106 ret = -EINVAL;
1111 goto done; 1107 goto done;
1112 } 1108 }
1113 ret = savage_dispatch_clear(dev_priv, &cmd_header, 1109 ret = savage_dispatch_clear(dev_priv, &cmd_header,
1114 cmdbuf.cmd_addr, 1110 cmdbuf->cmd_addr,
1115 cmdbuf.nbox, cmdbuf.box_addr); 1111 cmdbuf->nbox,
1116 cmdbuf.cmd_addr++; 1112 cmdbuf->box_addr);
1113 cmdbuf->cmd_addr++;
1117 i++; 1114 i++;
1118 break; 1115 break;
1119 case SAVAGE_CMD_SWAP: 1116 case SAVAGE_CMD_SWAP:
1120 ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox, 1117 ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
1121 cmdbuf.box_addr); 1118 cmdbuf->box_addr);
1122 break; 1119 break;
1123 default: 1120 default:
1124 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); 1121 DRM_ERROR("invalid command 0x%x\n",
1122 cmd_header.cmd.cmd);
1125 DMA_FLUSH(); 1123 DMA_FLUSH();
1126 ret = DRM_ERR(EINVAL); 1124 ret = -EINVAL;
1127 goto done; 1125 goto done;
1128 } 1126 }
1129 1127
@@ -1135,9 +1133,9 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1135 1133
1136 if (first_draw_cmd) { 1134 if (first_draw_cmd) {
1137 ret = savage_dispatch_draw ( 1135 ret = savage_dispatch_draw (
1138 dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf, 1136 dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
1139 cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride, 1137 cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
1140 cmdbuf.nbox, cmdbuf.box_addr); 1138 cmdbuf->nbox, cmdbuf->box_addr);
1141 if (ret != 0) { 1139 if (ret != 0) {
1142 DMA_FLUSH(); 1140 DMA_FLUSH();
1143 goto done; 1141 goto done;
@@ -1146,7 +1144,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1146 1144
1147 DMA_FLUSH(); 1145 DMA_FLUSH();
1148 1146
1149 if (dmabuf && cmdbuf.discard) { 1147 if (dmabuf && cmdbuf->discard) {
1150 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private; 1148 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
1151 uint16_t event; 1149 uint16_t event;
1152 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); 1150 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
@@ -1156,9 +1154,9 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1156 1154
1157done: 1155done:
1158 /* If we didn't need to allocate them, these'll be NULL */ 1156 /* If we didn't need to allocate them, these'll be NULL */
1159 drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); 1157 drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
1160 drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER); 1158 drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER);
1161 drm_free(kbox_addr, cmdbuf.nbox * sizeof(struct drm_clip_rect), 1159 drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect),
1162 DRM_MEM_DRIVER); 1160 DRM_MEM_DRIVER);
1163 1161
1164 return ret; 1162 return ret;
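Where the argument struct still carries user-space pointers to variable-size payloads, as in savage_bci_cmdbuf() above, the handler keeps copying those payloads into kernel bounce buffers before parsing them; only the fixed-size argument copy moved into the core. A sketch of that bounce-buffer step under the same assumptions, using drm_alloc()/drm_free()/DRM_COPY_FROM_USER exactly as the hunks above do; the helper name, the item limit, and the field types are illustrative:

/* Bounce-buffer sketch for a variable-size user payload; foo_* names
 * and FOO_MAX_ITEMS are hypothetical.  Assumes this tree's "drmP.h".
 */
#include "drmP.h"

#define FOO_MAX_ITEMS	4096		/* illustrative sanity bound */

static int foo_copy_payload(unsigned int n_items,
			    const void __user *user_ptr,
			    uint32_t **out)
{
	size_t bytes = n_items * sizeof(uint32_t);
	uint32_t *kbuf;

	*out = NULL;
	if (n_items == 0)
		return 0;
	if (n_items > FOO_MAX_ITEMS)
		return -EINVAL;

	kbuf = drm_alloc(bytes, DRM_MEM_DRIVER);
	if (kbuf == NULL)
		return -ENOMEM;

	/* DRM_COPY_FROM_USER returns nonzero if the copy faulted */
	if (DRM_COPY_FROM_USER(kbuf, user_ptr, bytes)) {
		drm_free(kbuf, bytes, DRM_MEM_DRIVER);
		return -EFAULT;
	}

	*out = kbuf;			/* caller drm_free()s on its done: path */
	return 0;
}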
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c
index 1912f5857051..7dacc64e9b56 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/char/drm/sis_drv.c
@@ -42,7 +42,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
42 42
43 dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); 43 dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
44 if (dev_priv == NULL) 44 if (dev_priv == NULL)
45 return DRM_ERR(ENOMEM); 45 return -ENOMEM;
46 46
47 dev->dev_private = (void *)dev_priv; 47 dev->dev_private = (void *)dev_priv;
48 dev_priv->chipset = chipset; 48 dev_priv->chipset = chipset;
diff --git a/drivers/char/drm/sis_drv.h b/drivers/char/drm/sis_drv.h
index 5630df874353..ef940bad63f7 100644
--- a/drivers/char/drm/sis_drv.h
+++ b/drivers/char/drm/sis_drv.h
@@ -63,10 +63,11 @@ typedef struct drm_sis_private {
63} drm_sis_private_t; 63} drm_sis_private_t;
64 64
65extern int sis_idle(struct drm_device *dev); 65extern int sis_idle(struct drm_device *dev);
66extern void sis_reclaim_buffers_locked(struct drm_device *dev, struct file *filp); 66extern void sis_reclaim_buffers_locked(struct drm_device *dev,
67 struct drm_file *file_priv);
67extern void sis_lastclose(struct drm_device *dev); 68extern void sis_lastclose(struct drm_device *dev);
68 69
69extern drm_ioctl_desc_t sis_ioctls[]; 70extern struct drm_ioctl_desc sis_ioctls[];
70extern int sis_max_ioctl; 71extern int sis_max_ioctl;
71 72
72#endif 73#endif
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index 441bbdbf1510..8c66838ff515 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -82,15 +82,12 @@ static unsigned long sis_sman_mm_offset(void *private, void *ref)
82 82
83#endif /* CONFIG_FB_SIS */ 83#endif /* CONFIG_FB_SIS */
84 84
85static int sis_fb_init(DRM_IOCTL_ARGS) 85static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
86{ 86{
87 DRM_DEVICE;
88 drm_sis_private_t *dev_priv = dev->dev_private; 87 drm_sis_private_t *dev_priv = dev->dev_private;
89 drm_sis_fb_t fb; 88 drm_sis_fb_t *fb = data;
90 int ret; 89 int ret;
91 90
92 DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb));
93
94 mutex_lock(&dev->struct_mutex); 91 mutex_lock(&dev->struct_mutex);
95#if defined(CONFIG_FB_SIS) 92#if defined(CONFIG_FB_SIS)
96 { 93 {
@@ -105,7 +102,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
105 } 102 }
106#else 103#else
107 ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0, 104 ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
108 fb.size >> SIS_MM_ALIGN_SHIFT); 105 fb->size >> SIS_MM_ALIGN_SHIFT);
109#endif 106#endif
110 107
111 if (ret) { 108 if (ret) {
@@ -115,98 +112,87 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
115 } 112 }
116 113
117 dev_priv->vram_initialized = 1; 114 dev_priv->vram_initialized = 1;
118 dev_priv->vram_offset = fb.offset; 115 dev_priv->vram_offset = fb->offset;
119 116
120 mutex_unlock(&dev->struct_mutex); 117 mutex_unlock(&dev->struct_mutex);
121 DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); 118 DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
122 119
123 return 0; 120 return 0;
124} 121}
125 122
126static int sis_drm_alloc(struct drm_device *dev, struct drm_file * priv, 123static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
127 unsigned long data, int pool) 124 void *data, int pool)
128{ 125{
129 drm_sis_private_t *dev_priv = dev->dev_private; 126 drm_sis_private_t *dev_priv = dev->dev_private;
130 drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data; 127 drm_sis_mem_t *mem = data;
131 drm_sis_mem_t mem;
132 int retval = 0; 128 int retval = 0;
133 struct drm_memblock_item *item; 129 struct drm_memblock_item *item;
134 130
135 DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem));
136
137 mutex_lock(&dev->struct_mutex); 131 mutex_lock(&dev->struct_mutex);
138 132
139 if (0 == ((pool == 0) ? dev_priv->vram_initialized : 133 if (0 == ((pool == 0) ? dev_priv->vram_initialized :
140 dev_priv->agp_initialized)) { 134 dev_priv->agp_initialized)) {
141 DRM_ERROR 135 DRM_ERROR
142 ("Attempt to allocate from uninitialized memory manager.\n"); 136 ("Attempt to allocate from uninitialized memory manager.\n");
143 return DRM_ERR(EINVAL); 137 return -EINVAL;
144 } 138 }
145 139
146 mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; 140 mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
147 item = drm_sman_alloc(&dev_priv->sman, pool, mem.size, 0, 141 item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
148 (unsigned long)priv); 142 (unsigned long)file_priv);
149 143
150 mutex_unlock(&dev->struct_mutex); 144 mutex_unlock(&dev->struct_mutex);
151 if (item) { 145 if (item) {
152 mem.offset = ((pool == 0) ? 146 mem->offset = ((pool == 0) ?
153 dev_priv->vram_offset : dev_priv->agp_offset) + 147 dev_priv->vram_offset : dev_priv->agp_offset) +
154 (item->mm-> 148 (item->mm->
155 offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT); 149 offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
156 mem.free = item->user_hash.key; 150 mem->free = item->user_hash.key;
157 mem.size = mem.size << SIS_MM_ALIGN_SHIFT; 151 mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
158 } else { 152 } else {
159 mem.offset = 0; 153 mem->offset = 0;
160 mem.size = 0; 154 mem->size = 0;
161 mem.free = 0; 155 mem->free = 0;
162 retval = DRM_ERR(ENOMEM); 156 retval = -ENOMEM;
163 } 157 }
164 158
165 DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem)); 159 DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
166 160 mem->offset);
167 DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem.size,
168 mem.offset);
169 161
170 return retval; 162 return retval;
171} 163}
172 164
173static int sis_drm_free(DRM_IOCTL_ARGS) 165static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
174{ 166{
175 DRM_DEVICE;
176 drm_sis_private_t *dev_priv = dev->dev_private; 167 drm_sis_private_t *dev_priv = dev->dev_private;
177 drm_sis_mem_t mem; 168 drm_sis_mem_t *mem = data;
178 int ret; 169 int ret;
179 170
180 DRM_COPY_FROM_USER_IOCTL(mem, (drm_sis_mem_t __user *) data,
181 sizeof(mem));
182
183 mutex_lock(&dev->struct_mutex); 171 mutex_lock(&dev->struct_mutex);
184 ret = drm_sman_free_key(&dev_priv->sman, mem.free); 172 ret = drm_sman_free_key(&dev_priv->sman, mem->free);
185 mutex_unlock(&dev->struct_mutex); 173 mutex_unlock(&dev->struct_mutex);
186 DRM_DEBUG("free = 0x%lx\n", mem.free); 174 DRM_DEBUG("free = 0x%lx\n", mem->free);
187 175
188 return ret; 176 return ret;
189} 177}
190 178
191static int sis_fb_alloc(DRM_IOCTL_ARGS) 179static int sis_fb_alloc(struct drm_device *dev, void *data,
180 struct drm_file *file_priv)
192{ 181{
193 DRM_DEVICE; 182 return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE);
194 return sis_drm_alloc(dev, priv, data, VIDEO_TYPE);
195} 183}
196 184
197static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) 185static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
186 struct drm_file *file_priv)
198{ 187{
199 DRM_DEVICE;
200 drm_sis_private_t *dev_priv = dev->dev_private; 188 drm_sis_private_t *dev_priv = dev->dev_private;
201 drm_sis_agp_t agp; 189 drm_sis_agp_t *agp = data;
202 int ret; 190 int ret;
203 dev_priv = dev->dev_private; 191 dev_priv = dev->dev_private;
204 192
205 DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data,
206 sizeof(agp));
207 mutex_lock(&dev->struct_mutex); 193 mutex_lock(&dev->struct_mutex);
208 ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0, 194 ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
209 agp.size >> SIS_MM_ALIGN_SHIFT); 195 agp->size >> SIS_MM_ALIGN_SHIFT);
210 196
211 if (ret) { 197 if (ret) {
212 DRM_ERROR("AGP memory manager initialisation error\n"); 198 DRM_ERROR("AGP memory manager initialisation error\n");
@@ -215,18 +201,18 @@ static int sis_ioctl_agp_init(DRM_IOCTL_ARGS)
215 } 201 }
216 202
217 dev_priv->agp_initialized = 1; 203 dev_priv->agp_initialized = 1;
218 dev_priv->agp_offset = agp.offset; 204 dev_priv->agp_offset = agp->offset;
219 mutex_unlock(&dev->struct_mutex); 205 mutex_unlock(&dev->struct_mutex);
220 206
221 DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); 207 DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
222 return 0; 208 return 0;
223} 209}
224 210
225static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) 211static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data,
212 struct drm_file *file_priv)
226{ 213{
227 DRM_DEVICE;
228 214
229 return sis_drm_alloc(dev, priv, data, AGP_TYPE); 215 return sis_drm_alloc(dev, file_priv, data, AGP_TYPE);
230} 216}
231 217
232static drm_local_map_t *sis_reg_init(struct drm_device *dev) 218static drm_local_map_t *sis_reg_init(struct drm_device *dev)
@@ -314,13 +300,13 @@ void sis_lastclose(struct drm_device *dev)
314 mutex_unlock(&dev->struct_mutex); 300 mutex_unlock(&dev->struct_mutex);
315} 301}
316 302
317void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) 303void sis_reclaim_buffers_locked(struct drm_device * dev,
304 struct drm_file *file_priv)
318{ 305{
319 drm_sis_private_t *dev_priv = dev->dev_private; 306 drm_sis_private_t *dev_priv = dev->dev_private;
320 struct drm_file *priv = filp->private_data;
321 307
322 mutex_lock(&dev->struct_mutex); 308 mutex_lock(&dev->struct_mutex);
323 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { 309 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
324 mutex_unlock(&dev->struct_mutex); 310 mutex_unlock(&dev->struct_mutex);
325 return; 311 return;
326 } 312 }
@@ -329,20 +315,18 @@ void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
329 dev->driver->dma_quiescent(dev); 315 dev->driver->dma_quiescent(dev);
330 } 316 }
331 317
332 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv); 318 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
333 mutex_unlock(&dev->struct_mutex); 319 mutex_unlock(&dev->struct_mutex);
334 return; 320 return;
335} 321}
336 322
337drm_ioctl_desc_t sis_ioctls[] = { 323struct drm_ioctl_desc sis_ioctls[] = {
338 [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, 324 DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
339 [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_drm_free, DRM_AUTH}, 325 DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
340 [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = 326 DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
341 {sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY}, 327 DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
342 [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH}, 328 DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
343 [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_drm_free, DRM_AUTH}, 329 DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
344 [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] =
345 {sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY}
346}; 330};
347 331
348int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); 332int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
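The ioctl tables at the bottom of savage_bci.c and sis_mm.c show the companion change: the drm_ioctl_desc_t typedef becomes struct drm_ioctl_desc and the hand-written designated initializers are replaced by the DRM_IOCTL_DEF() helper. A sketch of the same table in both forms; the DRM_FOO_* command indices and foo_* handlers are hypothetical, while the flags are the ones used in the hunks above:

/* Ioctl-table conversion sketch; assumes this tree's "drmP.h". */
#include "drmP.h"

#define DRM_FOO_INIT	0x00		/* hypothetical command indices */
#define DRM_FOO_ALLOC	0x01

static int foo_init(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	return 0;			/* stub handler for the illustration */
}

static int foo_alloc(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	return 0;			/* stub handler for the illustration */
}

#if 0	/* old form */
drm_ioctl_desc_t foo_ioctls[] = {
	[DRM_IOCTL_NR(DRM_FOO_INIT)] = {foo_init,
					DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_FOO_ALLOC)] = {foo_alloc, DRM_AUTH},
};
#endif

/* new form, as introduced by this series */
struct drm_ioctl_desc foo_ioctls[] = {
	DRM_IOCTL_DEF(DRM_FOO_INIT, foo_init,
		      DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_FOO_ALLOC, foo_alloc, DRM_AUTH),
};

int foo_max_ioctl = DRM_ARRAY_SIZE(foo_ioctls);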
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index 7ff2b623c2d4..75d6b748c2c0 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -175,24 +175,24 @@ static int via_initialize(struct drm_device * dev,
175{ 175{
176 if (!dev_priv || !dev_priv->mmio) { 176 if (!dev_priv || !dev_priv->mmio) {
177 DRM_ERROR("via_dma_init called before via_map_init\n"); 177 DRM_ERROR("via_dma_init called before via_map_init\n");
178 return DRM_ERR(EFAULT); 178 return -EFAULT;
179 } 179 }
180 180
181 if (dev_priv->ring.virtual_start != NULL) { 181 if (dev_priv->ring.virtual_start != NULL) {
182 DRM_ERROR("%s called again without calling cleanup\n", 182 DRM_ERROR("%s called again without calling cleanup\n",
183 __FUNCTION__); 183 __FUNCTION__);
184 return DRM_ERR(EFAULT); 184 return -EFAULT;
185 } 185 }
186 186
187 if (!dev->agp || !dev->agp->base) { 187 if (!dev->agp || !dev->agp->base) {
188 DRM_ERROR("%s called with no agp memory available\n", 188 DRM_ERROR("%s called with no agp memory available\n",
189 __FUNCTION__); 189 __FUNCTION__);
190 return DRM_ERR(EFAULT); 190 return -EFAULT;
191 } 191 }
192 192
193 if (dev_priv->chipset == VIA_DX9_0) { 193 if (dev_priv->chipset == VIA_DX9_0) {
194 DRM_ERROR("AGP DMA is not supported on this chip\n"); 194 DRM_ERROR("AGP DMA is not supported on this chip\n");
195 return DRM_ERR(EINVAL); 195 return -EINVAL;
196 } 196 }
197 197
198 dev_priv->ring.map.offset = dev->agp->base + init->offset; 198 dev_priv->ring.map.offset = dev->agp->base + init->offset;
@@ -207,7 +207,7 @@ static int via_initialize(struct drm_device * dev,
207 via_dma_cleanup(dev); 207 via_dma_cleanup(dev);
208 DRM_ERROR("can not ioremap virtual address for" 208 DRM_ERROR("can not ioremap virtual address for"
209 " ring buffer\n"); 209 " ring buffer\n");
210 return DRM_ERR(ENOMEM); 210 return -ENOMEM;
211 } 211 }
212 212
213 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 213 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -227,35 +227,31 @@ static int via_initialize(struct drm_device * dev,
227 return 0; 227 return 0;
228} 228}
229 229
230static int via_dma_init(DRM_IOCTL_ARGS) 230static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
231{ 231{
232 DRM_DEVICE;
233 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 232 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
234 drm_via_dma_init_t init; 233 drm_via_dma_init_t *init = data;
235 int retcode = 0; 234 int retcode = 0;
236 235
237 DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t __user *) data, 236 switch (init->func) {
238 sizeof(init));
239
240 switch (init.func) {
241 case VIA_INIT_DMA: 237 case VIA_INIT_DMA:
242 if (!DRM_SUSER(DRM_CURPROC)) 238 if (!DRM_SUSER(DRM_CURPROC))
243 retcode = DRM_ERR(EPERM); 239 retcode = -EPERM;
244 else 240 else
245 retcode = via_initialize(dev, dev_priv, &init); 241 retcode = via_initialize(dev, dev_priv, init);
246 break; 242 break;
247 case VIA_CLEANUP_DMA: 243 case VIA_CLEANUP_DMA:
248 if (!DRM_SUSER(DRM_CURPROC)) 244 if (!DRM_SUSER(DRM_CURPROC))
249 retcode = DRM_ERR(EPERM); 245 retcode = -EPERM;
250 else 246 else
251 retcode = via_dma_cleanup(dev); 247 retcode = via_dma_cleanup(dev);
252 break; 248 break;
253 case VIA_DMA_INITIALIZED: 249 case VIA_DMA_INITIALIZED:
254 retcode = (dev_priv->ring.virtual_start != NULL) ? 250 retcode = (dev_priv->ring.virtual_start != NULL) ?
255 0 : DRM_ERR(EFAULT); 251 0 : -EFAULT;
256 break; 252 break;
257 default: 253 default:
258 retcode = DRM_ERR(EINVAL); 254 retcode = -EINVAL;
259 break; 255 break;
260 } 256 }
261 257
@@ -273,15 +269,15 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
273 if (dev_priv->ring.virtual_start == NULL) { 269 if (dev_priv->ring.virtual_start == NULL) {
274 DRM_ERROR("%s called without initializing AGP ring buffer.\n", 270 DRM_ERROR("%s called without initializing AGP ring buffer.\n",
275 __FUNCTION__); 271 __FUNCTION__);
276 return DRM_ERR(EFAULT); 272 return -EFAULT;
277 } 273 }
278 274
279 if (cmd->size > VIA_PCI_BUF_SIZE) { 275 if (cmd->size > VIA_PCI_BUF_SIZE) {
280 return DRM_ERR(ENOMEM); 276 return -ENOMEM;
281 } 277 }
282 278
283 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) 279 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
284 return DRM_ERR(EFAULT); 280 return -EFAULT;
285 281
286 /* 282 /*
287 * Running this function on AGP memory is dead slow. Therefore 283 * Running this function on AGP memory is dead slow. Therefore
@@ -297,7 +293,7 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
297 293
298 vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size); 294 vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
299 if (vb == NULL) { 295 if (vb == NULL) {
300 return DRM_ERR(EAGAIN); 296 return -EAGAIN;
301 } 297 }
302 298
303 memcpy(vb, dev_priv->pci_buf, cmd->size); 299 memcpy(vb, dev_priv->pci_buf, cmd->size);
@@ -321,34 +317,30 @@ int via_driver_dma_quiescent(struct drm_device * dev)
321 drm_via_private_t *dev_priv = dev->dev_private; 317 drm_via_private_t *dev_priv = dev->dev_private;
322 318
323 if (!via_wait_idle(dev_priv)) { 319 if (!via_wait_idle(dev_priv)) {
324 return DRM_ERR(EBUSY); 320 return -EBUSY;
325 } 321 }
326 return 0; 322 return 0;
327} 323}
328 324
329static int via_flush_ioctl(DRM_IOCTL_ARGS) 325static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
330{ 326{
331 DRM_DEVICE;
332 327
333 LOCK_TEST_WITH_RETURN(dev, filp); 328 LOCK_TEST_WITH_RETURN(dev, file_priv);
334 329
335 return via_driver_dma_quiescent(dev); 330 return via_driver_dma_quiescent(dev);
336} 331}
337 332
338static int via_cmdbuffer(DRM_IOCTL_ARGS) 333static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
339{ 334{
340 DRM_DEVICE; 335 drm_via_cmdbuffer_t *cmdbuf = data;
341 drm_via_cmdbuffer_t cmdbuf;
342 int ret; 336 int ret;
343 337
344 LOCK_TEST_WITH_RETURN(dev, filp); 338 LOCK_TEST_WITH_RETURN(dev, file_priv);
345
346 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
347 sizeof(cmdbuf));
348 339
349 DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size); 340 DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
341 cmdbuf->size);
350 342
351 ret = via_dispatch_cmdbuffer(dev, &cmdbuf); 343 ret = via_dispatch_cmdbuffer(dev, cmdbuf);
352 if (ret) { 344 if (ret) {
353 return ret; 345 return ret;
354 } 346 }
@@ -363,10 +355,10 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
363 int ret; 355 int ret;
364 356
365 if (cmd->size > VIA_PCI_BUF_SIZE) { 357 if (cmd->size > VIA_PCI_BUF_SIZE) {
366 return DRM_ERR(ENOMEM); 358 return -ENOMEM;
367 } 359 }
368 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) 360 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
369 return DRM_ERR(EFAULT); 361 return -EFAULT;
370 362
371 if ((ret = 363 if ((ret =
372 via_verify_command_stream((uint32_t *) dev_priv->pci_buf, 364 via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
@@ -380,21 +372,17 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
380 return ret; 372 return ret;
381} 373}
382 374
383static int via_pci_cmdbuffer(DRM_IOCTL_ARGS) 375static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
384{ 376{
385 DRM_DEVICE; 377 drm_via_cmdbuffer_t *cmdbuf = data;
386 drm_via_cmdbuffer_t cmdbuf;
387 int ret; 378 int ret;
388 379
389 LOCK_TEST_WITH_RETURN(dev, filp); 380 LOCK_TEST_WITH_RETURN(dev, file_priv);
390 381
391 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data, 382 DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
392 sizeof(cmdbuf)); 383 cmdbuf->size);
393 384
394 DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf, 385 ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
395 cmdbuf.size);
396
397 ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf);
398 if (ret) { 386 if (ret) {
399 return ret; 387 return ret;
400 } 388 }
@@ -653,80 +641,74 @@ static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
653 * User interface to the space and lag functions. 641 * User interface to the space and lag functions.
654 */ 642 */
655 643
656static int via_cmdbuf_size(DRM_IOCTL_ARGS) 644static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
657{ 645{
658 DRM_DEVICE; 646 drm_via_cmdbuf_size_t *d_siz = data;
659 drm_via_cmdbuf_size_t d_siz;
660 int ret = 0; 647 int ret = 0;
661 uint32_t tmp_size, count; 648 uint32_t tmp_size, count;
662 drm_via_private_t *dev_priv; 649 drm_via_private_t *dev_priv;
663 650
664 DRM_DEBUG("via cmdbuf_size\n"); 651 DRM_DEBUG("via cmdbuf_size\n");
665 LOCK_TEST_WITH_RETURN(dev, filp); 652 LOCK_TEST_WITH_RETURN(dev, file_priv);
666 653
667 dev_priv = (drm_via_private_t *) dev->dev_private; 654 dev_priv = (drm_via_private_t *) dev->dev_private;
668 655
669 if (dev_priv->ring.virtual_start == NULL) { 656 if (dev_priv->ring.virtual_start == NULL) {
670 DRM_ERROR("%s called without initializing AGP ring buffer.\n", 657 DRM_ERROR("%s called without initializing AGP ring buffer.\n",
671 __FUNCTION__); 658 __FUNCTION__);
672 return DRM_ERR(EFAULT); 659 return -EFAULT;
673 } 660 }
674 661
675 DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data,
676 sizeof(d_siz));
677
678 count = 1000000; 662 count = 1000000;
679 tmp_size = d_siz.size; 663 tmp_size = d_siz->size;
680 switch (d_siz.func) { 664 switch (d_siz->func) {
681 case VIA_CMDBUF_SPACE: 665 case VIA_CMDBUF_SPACE:
682 while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz.size) 666 while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
683 && count--) { 667 && count--) {
684 if (!d_siz.wait) { 668 if (!d_siz->wait) {
685 break; 669 break;
686 } 670 }
687 } 671 }
688 if (!count) { 672 if (!count) {
689 DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n"); 673 DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
690 ret = DRM_ERR(EAGAIN); 674 ret = -EAGAIN;
691 } 675 }
692 break; 676 break;
693 case VIA_CMDBUF_LAG: 677 case VIA_CMDBUF_LAG:
694 while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz.size) 678 while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
695 && count--) { 679 && count--) {
696 if (!d_siz.wait) { 680 if (!d_siz->wait) {
697 break; 681 break;
698 } 682 }
699 } 683 }
700 if (!count) { 684 if (!count) {
701 DRM_ERROR("VIA_CMDBUF_LAG timed out.\n"); 685 DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
702 ret = DRM_ERR(EAGAIN); 686 ret = -EAGAIN;
703 } 687 }
704 break; 688 break;
705 default: 689 default:
706 ret = DRM_ERR(EFAULT); 690 ret = -EFAULT;
707 } 691 }
708 d_siz.size = tmp_size; 692 d_siz->size = tmp_size;
709 693
710 DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t __user *) data, d_siz,
711 sizeof(d_siz));
712 return ret; 694 return ret;
713} 695}
714 696
715drm_ioctl_desc_t via_ioctls[] = { 697struct drm_ioctl_desc via_ioctls[] = {
716 [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH}, 698 DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
717 [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH}, 699 DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
718 [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER}, 700 DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
719 [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER}, 701 DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
720 [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER}, 702 DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
721 [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH}, 703 DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
722 [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH}, 704 DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
723 [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH}, 705 DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
724 [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH}, 706 DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
725 [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH}, 707 DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
726 [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH}, 708 DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
727 [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH}, 709 DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
728 [DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH}, 710 DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
729 [DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH} 711 DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
730}; 712};
731 713
732int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls); 714int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
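The via_dma.c hunks above convert every VIA ioctl handler from the old DRM_IOCTL_ARGS calling convention, in which each handler copied its argument block in and out by hand, to the newer signature in which the DRM core hands the handler a kernel-space copy of the argument in `data` plus a `struct drm_file *` identifying the caller. The fragment below is only an illustration of that shape written against stand-in types; struct example_init and example_dma_init are invented for the demo and are not part of the DRM interface.

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the kernel structures; the real DRM types are richer. */
struct drm_device { int unused; };
struct drm_file   { int unused; };

/* Plays the role of drm_via_dma_init_t; fields are invented for the demo. */
struct example_init {
        unsigned int func;
        unsigned long size;
};

/*
 * New-style handler: the dispatcher has already copied the user argument
 * into kernel memory, so the handler simply casts `data` and works on it.
 * Errors are returned as plain negative errno values.
 */
static int example_dma_init(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct example_init *init = data;

        (void)dev;
        (void)file_priv;
        if (init->func == 0)
                return -EINVAL;
        printf("init: func=%u size=%lu\n", init->func, init->size);
        return 0;
}

int main(void)
{
        struct drm_device dev = { 0 };
        struct drm_file file = { 0 };
        struct example_init arg = { .func = 1, .size = 4096 };

        return example_dma_init(&dev, &arg, &file) ? 1 : 0;
}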
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 3dd1ed3d1bf5..c6fd16f3cb43 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -237,7 +237,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
237 first_pfn + 1; 237 first_pfn + 1;
238 238
239 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) 239 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
240 return DRM_ERR(ENOMEM); 240 return -ENOMEM;
241 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); 241 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
242 down_read(&current->mm->mmap_sem); 242 down_read(&current->mm->mmap_sem);
243 ret = get_user_pages(current, current->mm, 243 ret = get_user_pages(current, current->mm,
@@ -251,7 +251,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
251 if (ret < 0) 251 if (ret < 0)
252 return ret; 252 return ret;
253 vsg->state = dr_via_pages_locked; 253 vsg->state = dr_via_pages_locked;
254 return DRM_ERR(EINVAL); 254 return -EINVAL;
255 } 255 }
256 vsg->state = dr_via_pages_locked; 256 vsg->state = dr_via_pages_locked;
257 DRM_DEBUG("DMA pages locked\n"); 257 DRM_DEBUG("DMA pages locked\n");
@@ -274,13 +274,13 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
274 vsg->descriptors_per_page; 274 vsg->descriptors_per_page;
275 275
276 if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL))) 276 if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
277 return DRM_ERR(ENOMEM); 277 return -ENOMEM;
278 278
279 vsg->state = dr_via_desc_pages_alloc; 279 vsg->state = dr_via_desc_pages_alloc;
280 for (i=0; i<vsg->num_desc_pages; ++i) { 280 for (i=0; i<vsg->num_desc_pages; ++i) {
281 if (NULL == (vsg->desc_pages[i] = 281 if (NULL == (vsg->desc_pages[i] =
282 (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) 282 (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
283 return DRM_ERR(ENOMEM); 283 return -ENOMEM;
284 } 284 }
285 DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, 285 DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
286 vsg->num_desc); 286 vsg->num_desc);
@@ -593,7 +593,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
593 593
594 if (xfer->num_lines <= 0 || xfer->line_length <= 0) { 594 if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
595 DRM_ERROR("Zero size bitblt.\n"); 595 DRM_ERROR("Zero size bitblt.\n");
596 return DRM_ERR(EINVAL); 596 return -EINVAL;
597 } 597 }
598 598
599 /* 599 /*
@@ -606,7 +606,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
606 if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) { 606 if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
607 DRM_ERROR("Too large system memory stride. Stride: %d, " 607 DRM_ERROR("Too large system memory stride. Stride: %d, "
608 "Length: %d\n", xfer->mem_stride, xfer->line_length); 608 "Length: %d\n", xfer->mem_stride, xfer->line_length);
609 return DRM_ERR(EINVAL); 609 return -EINVAL;
610 } 610 }
611 611
612 if ((xfer->mem_stride == xfer->line_length) && 612 if ((xfer->mem_stride == xfer->line_length) &&
@@ -624,7 +624,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
624 624
625 if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) { 625 if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
626 DRM_ERROR("Too large PCI DMA bitblt.\n"); 626 DRM_ERROR("Too large PCI DMA bitblt.\n");
627 return DRM_ERR(EINVAL); 627 return -EINVAL;
628 } 628 }
629 629
630 /* 630 /*
@@ -635,7 +635,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
635 if (xfer->mem_stride < xfer->line_length || 635 if (xfer->mem_stride < xfer->line_length ||
636 abs(xfer->fb_stride) < xfer->line_length) { 636 abs(xfer->fb_stride) < xfer->line_length) {
637 DRM_ERROR("Invalid frame-buffer / memory stride.\n"); 637 DRM_ERROR("Invalid frame-buffer / memory stride.\n");
638 return DRM_ERR(EINVAL); 638 return -EINVAL;
639 } 639 }
640 640
641 /* 641 /*
@@ -648,7 +648,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
648 if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || 648 if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
649 ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) { 649 ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
650 DRM_ERROR("Invalid DRM bitblt alignment.\n"); 650 DRM_ERROR("Invalid DRM bitblt alignment.\n");
651 return DRM_ERR(EINVAL); 651 return -EINVAL;
652 } 652 }
653#else 653#else
654 if ((((unsigned long)xfer->mem_addr & 15) || 654 if ((((unsigned long)xfer->mem_addr & 15) ||
@@ -656,7 +656,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
656 ((xfer->num_lines > 1) && 656 ((xfer->num_lines > 1) &&
657 ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { 657 ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
658 DRM_ERROR("Invalid DRM bitblt alignment.\n"); 658 DRM_ERROR("Invalid DRM bitblt alignment.\n");
659 return DRM_ERR(EINVAL); 659 return -EINVAL;
660 } 660 }
661#endif 661#endif
662 662
@@ -696,7 +696,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
696 696
697 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); 697 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
698 if (ret) { 698 if (ret) {
699 return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret; 699 return (-EINTR == ret) ? -EAGAIN : ret;
700 } 700 }
701 701
702 spin_lock_irqsave(&blitq->blit_lock, irqsave); 702 spin_lock_irqsave(&blitq->blit_lock, irqsave);
@@ -740,7 +740,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
740 740
741 if (dev_priv == NULL) { 741 if (dev_priv == NULL) {
742 DRM_ERROR("Called without initialization.\n"); 742 DRM_ERROR("Called without initialization.\n");
743 return DRM_ERR(EINVAL); 743 return -EINVAL;
744 } 744 }
745 745
746 engine = (xfer->to_fb) ? 0 : 1; 746 engine = (xfer->to_fb) ? 0 : 1;
@@ -750,7 +750,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
750 } 750 }
751 if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) { 751 if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
752 via_dmablit_release_slot(blitq); 752 via_dmablit_release_slot(blitq);
753 return DRM_ERR(ENOMEM); 753 return -ENOMEM;
754 } 754 }
755 if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) { 755 if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
756 via_dmablit_release_slot(blitq); 756 via_dmablit_release_slot(blitq);
@@ -781,21 +781,18 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
781 */ 781 */
782 782
783int 783int
784via_dma_blit_sync( DRM_IOCTL_ARGS ) 784via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv )
785{ 785{
786 drm_via_blitsync_t sync; 786 drm_via_blitsync_t *sync = data;
787 int err; 787 int err;
788 DRM_DEVICE;
789 788
790 DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); 789 if (sync->engine >= VIA_NUM_BLIT_ENGINES)
791 790 return -EINVAL;
792 if (sync.engine >= VIA_NUM_BLIT_ENGINES)
793 return DRM_ERR(EINVAL);
794 791
795 err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); 792 err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
796 793
797 if (DRM_ERR(EINTR) == err) 794 if (-EINTR == err)
798 err = DRM_ERR(EAGAIN); 795 err = -EAGAIN;
799 796
800 return err; 797 return err;
801} 798}
@@ -808,17 +805,12 @@ via_dma_blit_sync( DRM_IOCTL_ARGS )
808 */ 805 */
809 806
810int 807int
811via_dma_blit( DRM_IOCTL_ARGS ) 808via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
812{ 809{
813 drm_via_dmablit_t xfer; 810 drm_via_dmablit_t *xfer = data;
814 int err; 811 int err;
815 DRM_DEVICE;
816
817 DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));
818
819 err = via_dmablit(dev, &xfer);
820 812
821 DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer)); 813 err = via_dmablit(dev, xfer);
822 814
823 return err; 815 return err;
824} 816}
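Alongside the signature change, every DRM_ERR(E...) return in these files becomes a plain negative errno. On Linux the wrapper amounted to a sign flip, so the replacement is mechanical; the snippet below only demonstrates that equivalence, and the DRM_ERR definition shown is an assumption reproduced for the demo rather than a quote from the DRM headers.

#include <assert.h>
#include <errno.h>

/* Assumed historical definition of the Linux DRM_ERR() wrapper. */
#define DRM_ERR(d) (-(d))

int main(void)
{
        /* The DRM_ERR(E...) -> -E... replacement relies on this identity. */
        assert(DRM_ERR(EINVAL) == -EINVAL);
        assert(DRM_ERR(EAGAIN) == -EAGAIN);
        return 0;
}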
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index 576711564a11..2daae81874cd 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -110,18 +110,18 @@ enum via_family {
110#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) 110#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
111#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val) 111#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
112 112
113extern drm_ioctl_desc_t via_ioctls[]; 113extern struct drm_ioctl_desc via_ioctls[];
114extern int via_max_ioctl; 114extern int via_max_ioctl;
115 115
116extern int via_fb_init(DRM_IOCTL_ARGS); 116extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
117extern int via_mem_alloc(DRM_IOCTL_ARGS); 117extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
118extern int via_mem_free(DRM_IOCTL_ARGS); 118extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
119extern int via_agp_init(DRM_IOCTL_ARGS); 119extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
120extern int via_map_init(DRM_IOCTL_ARGS); 120extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
121extern int via_decoder_futex(DRM_IOCTL_ARGS); 121extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
122extern int via_wait_irq(DRM_IOCTL_ARGS); 122extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
123extern int via_dma_blit_sync( DRM_IOCTL_ARGS ); 123extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv );
124extern int via_dma_blit( DRM_IOCTL_ARGS ); 124extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv );
125 125
126extern int via_driver_load(struct drm_device *dev, unsigned long chipset); 126extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
127extern int via_driver_unload(struct drm_device *dev); 127extern int via_driver_unload(struct drm_device *dev);
@@ -144,7 +144,7 @@ extern void via_init_futex(drm_via_private_t * dev_priv);
144extern void via_cleanup_futex(drm_via_private_t * dev_priv); 144extern void via_cleanup_futex(drm_via_private_t * dev_priv);
145extern void via_release_futex(drm_via_private_t * dev_priv, int context); 145extern void via_release_futex(drm_via_private_t * dev_priv, int context);
146 146
147extern void via_reclaim_buffers_locked(struct drm_device *dev, struct file *filp); 147extern void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv);
148extern void via_lastclose(struct drm_device *dev); 148extern void via_lastclose(struct drm_device *dev);
149 149
150extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq); 150extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index 8dc99b5fbab6..9c1d52bc92d7 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -205,13 +205,13 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
205 205
206 if (!dev_priv) { 206 if (!dev_priv) {
207 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 207 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
208 return DRM_ERR(EINVAL); 208 return -EINVAL;
209 } 209 }
210 210
211 if (irq >= drm_via_irq_num) { 211 if (irq >= drm_via_irq_num) {
212 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 212 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
213 irq); 213 irq);
214 return DRM_ERR(EINVAL); 214 return -EINVAL;
215 } 215 }
216 216
217 real_irq = dev_priv->irq_map[irq]; 217 real_irq = dev_priv->irq_map[irq];
@@ -219,7 +219,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
219 if (real_irq < 0) { 219 if (real_irq < 0) {
220 DRM_ERROR("%s Video IRQ %d not available on this hardware.\n", 220 DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
221 __FUNCTION__, irq); 221 __FUNCTION__, irq);
222 return DRM_ERR(EINVAL); 222 return -EINVAL;
223 } 223 }
224 224
225 masks = dev_priv->irq_masks; 225 masks = dev_priv->irq_masks;
@@ -331,11 +331,9 @@ void via_driver_irq_uninstall(struct drm_device * dev)
331 } 331 }
332} 332}
333 333
334int via_wait_irq(DRM_IOCTL_ARGS) 334int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
335{ 335{
336 DRM_DEVICE; 336 drm_via_irqwait_t *irqwait = data;
337 drm_via_irqwait_t __user *argp = (void __user *)data;
338 drm_via_irqwait_t irqwait;
339 struct timeval now; 337 struct timeval now;
340 int ret = 0; 338 int ret = 0;
341 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 339 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -343,42 +341,39 @@ int via_wait_irq(DRM_IOCTL_ARGS)
343 int force_sequence; 341 int force_sequence;
344 342
345 if (!dev->irq) 343 if (!dev->irq)
346 return DRM_ERR(EINVAL); 344 return -EINVAL;
347 345
348 DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait)); 346 if (irqwait->request.irq >= dev_priv->num_irqs) {
349 if (irqwait.request.irq >= dev_priv->num_irqs) {
350 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 347 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
351 irqwait.request.irq); 348 irqwait->request.irq);
352 return DRM_ERR(EINVAL); 349 return -EINVAL;
353 } 350 }
354 351
355 cur_irq += irqwait.request.irq; 352 cur_irq += irqwait->request.irq;
356 353
357 switch (irqwait.request.type & ~VIA_IRQ_FLAGS_MASK) { 354 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
358 case VIA_IRQ_RELATIVE: 355 case VIA_IRQ_RELATIVE:
359 irqwait.request.sequence += atomic_read(&cur_irq->irq_received); 356 irqwait->request.sequence += atomic_read(&cur_irq->irq_received);
360 irqwait.request.type &= ~_DRM_VBLANK_RELATIVE; 357 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
361 case VIA_IRQ_ABSOLUTE: 358 case VIA_IRQ_ABSOLUTE:
362 break; 359 break;
363 default: 360 default:
364 return DRM_ERR(EINVAL); 361 return -EINVAL;
365 } 362 }
366 363
367 if (irqwait.request.type & VIA_IRQ_SIGNAL) { 364 if (irqwait->request.type & VIA_IRQ_SIGNAL) {
368 DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n", 365 DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n",
369 __FUNCTION__); 366 __FUNCTION__);
370 return DRM_ERR(EINVAL); 367 return -EINVAL;
371 } 368 }
372 369
373 force_sequence = (irqwait.request.type & VIA_IRQ_FORCE_SEQUENCE); 370 force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
374 371
375 ret = via_driver_irq_wait(dev, irqwait.request.irq, force_sequence, 372 ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
376 &irqwait.request.sequence); 373 &irqwait->request.sequence);
377 do_gettimeofday(&now); 374 do_gettimeofday(&now);
378 irqwait.reply.tval_sec = now.tv_sec; 375 irqwait->reply.tval_sec = now.tv_sec;
379 irqwait.reply.tval_usec = now.tv_usec; 376 irqwait->reply.tval_usec = now.tv_usec;
380
381 DRM_COPY_TO_USER_IOCTL(argp, irqwait, sizeof(irqwait));
382 377
383 return ret; 378 return ret;
384} 379}
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index 7fb9d2a2cce2..10091507a0dc 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -75,19 +75,15 @@ int via_do_cleanup_map(struct drm_device * dev)
75 return 0; 75 return 0;
76} 76}
77 77
78int via_map_init(DRM_IOCTL_ARGS) 78int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
79{ 79{
80 DRM_DEVICE; 80 drm_via_init_t *init = data;
81 drm_via_init_t init;
82 81
83 DRM_DEBUG("%s\n", __FUNCTION__); 82 DRM_DEBUG("%s\n", __FUNCTION__);
84 83
85 DRM_COPY_FROM_USER_IOCTL(init, (drm_via_init_t __user *) data, 84 switch (init->func) {
86 sizeof(init));
87
88 switch (init.func) {
89 case VIA_INIT_MAP: 85 case VIA_INIT_MAP:
90 return via_do_init_map(dev, &init); 86 return via_do_init_map(dev, init);
91 case VIA_CLEANUP_MAP: 87 case VIA_CLEANUP_MAP:
92 return via_do_cleanup_map(dev); 88 return via_do_cleanup_map(dev);
93 } 89 }
@@ -102,7 +98,7 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
102 98
103 dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 99 dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
104 if (dev_priv == NULL) 100 if (dev_priv == NULL)
105 return DRM_ERR(ENOMEM); 101 return -ENOMEM;
106 102
107 dev->dev_private = (void *)dev_priv; 103 dev->dev_private = (void *)dev_priv;
108 104
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index 85d56acd9d82..9afc1684348d 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -33,18 +33,15 @@
33#define VIA_MM_ALIGN_SHIFT 4 33#define VIA_MM_ALIGN_SHIFT 4
34#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1) 34#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
35 35
36int via_agp_init(DRM_IOCTL_ARGS) 36int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
37{ 37{
38 DRM_DEVICE; 38 drm_via_agp_t *agp = data;
39 drm_via_agp_t agp;
40 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 39 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
41 int ret; 40 int ret;
42 41
43 DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data,
44 sizeof(agp));
45 mutex_lock(&dev->struct_mutex); 42 mutex_lock(&dev->struct_mutex);
46 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0, 43 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
47 agp.size >> VIA_MM_ALIGN_SHIFT); 44 agp->size >> VIA_MM_ALIGN_SHIFT);
48 45
49 if (ret) { 46 if (ret) {
50 DRM_ERROR("AGP memory manager initialisation error\n"); 47 DRM_ERROR("AGP memory manager initialisation error\n");
@@ -53,25 +50,22 @@ int via_agp_init(DRM_IOCTL_ARGS)
53 } 50 }
54 51
55 dev_priv->agp_initialized = 1; 52 dev_priv->agp_initialized = 1;
56 dev_priv->agp_offset = agp.offset; 53 dev_priv->agp_offset = agp->offset;
57 mutex_unlock(&dev->struct_mutex); 54 mutex_unlock(&dev->struct_mutex);
58 55
59 DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); 56 DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
60 return 0; 57 return 0;
61} 58}
62 59
63int via_fb_init(DRM_IOCTL_ARGS) 60int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
64{ 61{
65 DRM_DEVICE; 62 drm_via_fb_t *fb = data;
66 drm_via_fb_t fb;
67 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 63 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
68 int ret; 64 int ret;
69 65
70 DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, sizeof(fb));
71
72 mutex_lock(&dev->struct_mutex); 66 mutex_lock(&dev->struct_mutex);
73 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0, 67 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
74 fb.size >> VIA_MM_ALIGN_SHIFT); 68 fb->size >> VIA_MM_ALIGN_SHIFT);
75 69
76 if (ret) { 70 if (ret) {
77 DRM_ERROR("VRAM memory manager initialisation error\n"); 71 DRM_ERROR("VRAM memory manager initialisation error\n");
@@ -80,10 +74,10 @@ int via_fb_init(DRM_IOCTL_ARGS)
80 } 74 }
81 75
82 dev_priv->vram_initialized = 1; 76 dev_priv->vram_initialized = 1;
83 dev_priv->vram_offset = fb.offset; 77 dev_priv->vram_offset = fb->offset;
84 78
85 mutex_unlock(&dev->struct_mutex); 79 mutex_unlock(&dev->struct_mutex);
86 DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); 80 DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
87 81
88 return 0; 82 return 0;
89 83
@@ -121,80 +115,71 @@ void via_lastclose(struct drm_device *dev)
121 mutex_unlock(&dev->struct_mutex); 115 mutex_unlock(&dev->struct_mutex);
122} 116}
123 117
124int via_mem_alloc(DRM_IOCTL_ARGS) 118int via_mem_alloc(struct drm_device *dev, void *data,
119 struct drm_file *file_priv)
125{ 120{
126 DRM_DEVICE; 121 drm_via_mem_t *mem = data;
127
128 drm_via_mem_t mem;
129 int retval = 0; 122 int retval = 0;
130 struct drm_memblock_item *item; 123 struct drm_memblock_item *item;
131 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 124 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
132 unsigned long tmpSize; 125 unsigned long tmpSize;
133 126
134 DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, 127 if (mem->type > VIA_MEM_AGP) {
135 sizeof(mem));
136
137 if (mem.type > VIA_MEM_AGP) {
138 DRM_ERROR("Unknown memory type allocation\n"); 128 DRM_ERROR("Unknown memory type allocation\n");
139 return DRM_ERR(EINVAL); 129 return -EINVAL;
140 } 130 }
141 mutex_lock(&dev->struct_mutex); 131 mutex_lock(&dev->struct_mutex);
142 if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : 132 if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
143 dev_priv->agp_initialized)) { 133 dev_priv->agp_initialized)) {
144 DRM_ERROR 134 DRM_ERROR
145 ("Attempt to allocate from uninitialized memory manager.\n"); 135 ("Attempt to allocate from uninitialized memory manager.\n");
146 mutex_unlock(&dev->struct_mutex); 136 mutex_unlock(&dev->struct_mutex);
147 return DRM_ERR(EINVAL); 137 return -EINVAL;
148 } 138 }
149 139
150 tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; 140 tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
151 item = drm_sman_alloc(&dev_priv->sman, mem.type, tmpSize, 0, 141 item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
152 (unsigned long)priv); 142 (unsigned long)file_priv);
153 mutex_unlock(&dev->struct_mutex); 143 mutex_unlock(&dev->struct_mutex);
154 if (item) { 144 if (item) {
155 mem.offset = ((mem.type == VIA_MEM_VIDEO) ? 145 mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
156 dev_priv->vram_offset : dev_priv->agp_offset) + 146 dev_priv->vram_offset : dev_priv->agp_offset) +
157 (item->mm-> 147 (item->mm->
158 offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT); 148 offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
159 mem.index = item->user_hash.key; 149 mem->index = item->user_hash.key;
160 } else { 150 } else {
161 mem.offset = 0; 151 mem->offset = 0;
162 mem.size = 0; 152 mem->size = 0;
163 mem.index = 0; 153 mem->index = 0;
164 DRM_DEBUG("Video memory allocation failed\n"); 154 DRM_DEBUG("Video memory allocation failed\n");
165 retval = DRM_ERR(ENOMEM); 155 retval = -ENOMEM;
166 } 156 }
167 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem));
168 157
169 return retval; 158 return retval;
170} 159}
171 160
172int via_mem_free(DRM_IOCTL_ARGS) 161int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
173{ 162{
174 DRM_DEVICE;
175 drm_via_private_t *dev_priv = dev->dev_private; 163 drm_via_private_t *dev_priv = dev->dev_private;
176 drm_via_mem_t mem; 164 drm_via_mem_t *mem = data;
177 int ret; 165 int ret;
178 166
179 DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data,
180 sizeof(mem));
181
182 mutex_lock(&dev->struct_mutex); 167 mutex_lock(&dev->struct_mutex);
183 ret = drm_sman_free_key(&dev_priv->sman, mem.index); 168 ret = drm_sman_free_key(&dev_priv->sman, mem->index);
184 mutex_unlock(&dev->struct_mutex); 169 mutex_unlock(&dev->struct_mutex);
185 DRM_DEBUG("free = 0x%lx\n", mem.index); 170 DRM_DEBUG("free = 0x%lx\n", mem->index);
186 171
187 return ret; 172 return ret;
188} 173}
189 174
190 175
191void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) 176void via_reclaim_buffers_locked(struct drm_device * dev,
177 struct drm_file *file_priv)
192{ 178{
193 drm_via_private_t *dev_priv = dev->dev_private; 179 drm_via_private_t *dev_priv = dev->dev_private;
194 struct drm_file *priv = filp->private_data;
195 180
196 mutex_lock(&dev->struct_mutex); 181 mutex_lock(&dev->struct_mutex);
197 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { 182 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
198 mutex_unlock(&dev->struct_mutex); 183 mutex_unlock(&dev->struct_mutex);
199 return; 184 return;
200 } 185 }
@@ -203,7 +188,7 @@ void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
203 dev->driver->dma_quiescent(dev); 188 dev->driver->dma_quiescent(dev);
204 } 189 }
205 190
206 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv); 191 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
207 mutex_unlock(&dev->struct_mutex); 192 mutex_unlock(&dev->struct_mutex);
208 return; 193 return;
209} 194}
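Note that via_mem_alloc above also drops its DRM_COPY_TO_USER_IOCTL call: results such as offset and index are written straight into the argument structure, and the updated DRM dispatcher copies that (possibly modified) argument back to user space itself for ioctls declared with a write-back direction. The toy dispatcher below sketches that copy-in / handler / copy-out flow under stand-in types; it is not the kernel's drm_ioctl(), and example_mem_alloc and struct example_mem are invented for the demo.

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct drm_device { int unused; };
struct drm_file   { int unused; };

typedef int (*ioctl_handler)(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);

/* Plays the role of drm_via_mem_t for the demo. */
struct example_mem {
        unsigned long index;
        unsigned long offset;
        unsigned long size;
        int type;
};

static int example_mem_alloc(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct example_mem *mem = data;

        (void)dev;
        (void)file_priv;
        if (mem->size == 0)
                return -EINVAL;
        mem->offset = 0x1000;    /* results go straight into the copy */
        mem->index = 42;
        return 0;
}

/* Toy dispatcher: copy in, call the handler, copy the result back out. */
static int dispatch(ioctl_handler handler, struct drm_device *dev,
                    struct drm_file *file, void *user_arg, size_t size)
{
        char kdata[64];
        int ret;

        if (size > sizeof(kdata))
                return -EINVAL;
        memcpy(kdata, user_arg, size);   /* stands in for copy_from_user() */
        ret = handler(dev, kdata, file);
        memcpy(user_arg, kdata, size);   /* stands in for copy_to_user() */
        return ret;
}

int main(void)
{
        struct drm_device dev = { 0 };
        struct drm_file file = { 0 };
        struct example_mem arg = { .size = 8192, .type = 0 };

        if (dispatch(example_mem_alloc, &dev, &file, &arg, sizeof(arg)))
                return 1;
        printf("offset=0x%lx index=%lu\n", arg.offset, arg.index);
        return 0;
}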
diff --git a/drivers/char/drm/via_verifier.c b/drivers/char/drm/via_verifier.c
index 832d48356e91..46a579198747 100644
--- a/drivers/char/drm/via_verifier.c
+++ b/drivers/char/drm/via_verifier.c
@@ -1026,12 +1026,12 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
1026 case state_error: 1026 case state_error:
1027 default: 1027 default:
1028 *hc_state = saved_state; 1028 *hc_state = saved_state;
1029 return DRM_ERR(EINVAL); 1029 return -EINVAL;
1030 } 1030 }
1031 } 1031 }
1032 if (state == state_error) { 1032 if (state == state_error) {
1033 *hc_state = saved_state; 1033 *hc_state = saved_state;
1034 return DRM_ERR(EINVAL); 1034 return -EINVAL;
1035 } 1035 }
1036 return 0; 1036 return 0;
1037} 1037}
@@ -1082,11 +1082,11 @@ via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
1082 break; 1082 break;
1083 case state_error: 1083 case state_error:
1084 default: 1084 default:
1085 return DRM_ERR(EINVAL); 1085 return -EINVAL;
1086 } 1086 }
1087 } 1087 }
1088 if (state == state_error) { 1088 if (state == state_error) {
1089 return DRM_ERR(EINVAL); 1089 return -EINVAL;
1090 } 1090 }
1091 return 0; 1091 return 0;
1092} 1092}
diff --git a/drivers/char/drm/via_video.c b/drivers/char/drm/via_video.c
index 300ac61b09ed..c15e75b54cb1 100644
--- a/drivers/char/drm/via_video.c
+++ b/drivers/char/drm/via_video.c
@@ -65,10 +65,9 @@ void via_release_futex(drm_via_private_t * dev_priv, int context)
65 } 65 }
66} 66}
67 67
68int via_decoder_futex(DRM_IOCTL_ARGS) 68int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
69{ 69{
70 DRM_DEVICE; 70 drm_via_futex_t *fx = data;
71 drm_via_futex_t fx;
72 volatile int *lock; 71 volatile int *lock;
73 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 72 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
74 drm_via_sarea_t *sAPriv = dev_priv->sarea_priv; 73 drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
@@ -76,21 +75,18 @@ int via_decoder_futex(DRM_IOCTL_ARGS)
76 75
77 DRM_DEBUG("%s\n", __FUNCTION__); 76 DRM_DEBUG("%s\n", __FUNCTION__);
78 77
79 DRM_COPY_FROM_USER_IOCTL(fx, (drm_via_futex_t __user *) data, 78 if (fx->lock > VIA_NR_XVMC_LOCKS)
80 sizeof(fx));
81
82 if (fx.lock > VIA_NR_XVMC_LOCKS)
83 return -EFAULT; 79 return -EFAULT;
84 80
85 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock); 81 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
86 82
87 switch (fx.func) { 83 switch (fx->func) {
88 case VIA_FUTEX_WAIT: 84 case VIA_FUTEX_WAIT:
89 DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx.lock], 85 DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
90 (fx.ms / 10) * (DRM_HZ / 100), *lock != fx.val); 86 (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
91 return ret; 87 return ret;
92 case VIA_FUTEX_WAKE: 88 case VIA_FUTEX_WAKE:
93 DRM_WAKEUP(&(dev_priv->decoder_queue[fx.lock])); 89 DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
94 return 0; 90 return 0;
95 } 91 }
96 return 0; 92 return 0;
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index acbfe1c49b4d..a69c65283260 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -136,7 +136,7 @@ static int sizeof_bootstrap = 375;
136 136
137 137
138static struct dsp56k_device { 138static struct dsp56k_device {
139 long in_use; 139 unsigned long in_use;
140 long maxio, timeout; 140 long maxio, timeout;
141 int tx_wsize, rx_wsize; 141 int tx_wsize, rx_wsize;
142} dsp56k; 142} dsp56k;
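The dsp56k.c change widens in_use from long to unsigned long, presumably because the field is used with the kernel bit operations, which take a pointer to unsigned long. The fragment below only demonstrates that type requirement with a stand-in implementation of test_and_set_bit written for the demo, not the real <asm/bitops.h> code.

/* Stand-in with the same argument types as the kernel bitop. */
static int test_and_set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1UL << nr;
        unsigned long old = *addr;

        *addr = old | mask;
        return (old & mask) != 0;
}

struct dsp56k_device_example {
        unsigned long in_use;    /* must match the bitop argument type */
};

int main(void)
{
        struct dsp56k_device_example dev = { 0 };

        /* With `long in_use` this call would need a cast or would warn. */
        return test_and_set_bit(0, &dev.in_use);
}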
diff --git a/drivers/char/ec3104_keyb.c b/drivers/char/ec3104_keyb.c
deleted file mode 100644
index 020011495d91..000000000000
--- a/drivers/char/ec3104_keyb.c
+++ /dev/null
@@ -1,457 +0,0 @@
1/*
2 * linux/drivers/char/ec3104_keyb.c
3 *
4 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
5 *
6 * based on linux/drivers/char/pc_keyb.c, which had the following comments:
7 *
8 * Separation of the PC low-level part by Geert Uytterhoeven, May 1997
9 * See keyboard.c for the whole history.
10 *
11 * Major cleanup by Martin Mares, May 1997
12 *
13 * Combined the keyboard and PS/2 mouse handling into one file,
14 * because they share the same hardware.
15 * Johan Myreen <jem@iki.fi> 1998-10-08.
16 *
17 * Code fixes to handle mouse ACKs properly.
18 * C. Scott Ananian <cananian@alumni.princeton.edu> 1999-01-29.
19 */
20/* EC3104 note:
21 * This code was written without any documentation about the EC3104 chip. While
22 * I hope I got most of the basic functionality right, the register names I use
23 * are most likely completely different from those in the chip documentation.
24 *
25 * If you have any further information about the EC3104, please tell me
26 * (prumpf@tux.org).
27 */
28
29
30#include <linux/spinlock.h>
31#include <linux/sched.h>
32#include <linux/interrupt.h>
33#include <linux/tty.h>
34#include <linux/mm.h>
35#include <linux/signal.h>
36#include <linux/init.h>
37#include <linux/kbd_ll.h>
38#include <linux/delay.h>
39#include <linux/random.h>
40#include <linux/poll.h>
41#include <linux/miscdevice.h>
42#include <linux/slab.h>
43#include <linux/kbd_kern.h>
44#include <linux/bitops.h>
45
46#include <asm/keyboard.h>
47#include <asm/uaccess.h>
48#include <asm/irq.h>
49#include <asm/system.h>
50#include <asm/ec3104.h>
51
52#include <asm/io.h>
53
54/* Some configuration switches are present in the include file... */
55
56#include <linux/pc_keyb.h>
57
58#define MSR_CTS 0x10
59#define MCR_RTS 0x02
60#define LSR_DR 0x01
61#define LSR_BOTH_EMPTY 0x60
62
63static struct e5_struct {
64 u8 packet[8];
65 int pos;
66 int length;
67
68 u8 cached_mcr;
69 u8 last_msr;
70} ec3104_keyb;
71
72/* Simple translation table for the SysRq keys */
73
74
75#ifdef CONFIG_MAGIC_SYSRQ
76unsigned char ec3104_kbd_sysrq_xlate[128] =
77 "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */
78 "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */
79 "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */
80 "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */
81 "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */
82 "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
83 "\r\000/"; /* 0x60 - 0x6f */
84#endif
85
86static void kbd_write_command_w(int data);
87static void kbd_write_output_w(int data);
88#ifdef CONFIG_PSMOUSE
89static void aux_write_ack(int val);
90static void __aux_write_ack(int val);
91#endif
92
93static DEFINE_SPINLOCK(kbd_controller_lock);
94static unsigned char handle_kbd_event(void);
95
96/* used only by send_data - set by keyboard_interrupt */
97static volatile unsigned char reply_expected;
98static volatile unsigned char acknowledge;
99static volatile unsigned char resend;
100
101
102int ec3104_kbd_setkeycode(unsigned int scancode, unsigned int keycode)
103{
104 return 0;
105}
106
107int ec3104_kbd_getkeycode(unsigned int scancode)
108{
109 return 0;
110}
111
112
113/* yes, it probably would be faster to use an array. I don't care. */
114
115static inline unsigned char ec3104_scan2key(unsigned char scancode)
116{
117 switch (scancode) {
118 case 1: /* '`' */
119 return 41;
120
121 case 2 ... 27:
122 return scancode;
123
124 case 28: /* '\\' */
125 return 43;
126
127 case 29 ... 39:
128 return scancode + 1;
129
130 case 40: /* '\r' */
131 return 28;
132
133 case 41 ... 50:
134 return scancode + 3;
135
136 case 51: /* ' ' */
137 return 57;
138
139 case 52: /* escape */
140 return 1;
141
142 case 54: /* insert/delete (labelled delete) */
143 /* this should arguably be 110, but I'd like to have ctrl-alt-del
144 * working with a standard keymap */
145 return 111;
146
147 case 55: /* left */
148 return 105;
149 case 56: /* home */
150 return 102;
151 case 57: /* end */
152 return 107;
153 case 58: /* up */
154 return 103;
155 case 59: /* down */
156 return 108;
157 case 60: /* pgup */
158 return 104;
159 case 61: /* pgdown */
160 return 109;
161 case 62: /* right */
162 return 106;
163
164 case 79 ... 88: /* f1 - f10 */
165 return scancode - 20;
166
167 case 89 ... 90: /* f11 - f12 */
168 return scancode - 2;
169
170 case 91: /* left shift */
171 return 42;
172
173 case 92: /* right shift */
174 return 54;
175
176 case 93: /* left alt */
177 return 56;
178 case 94: /* right alt */
179 return 100;
180 case 95: /* left ctrl */
181 return 29;
182 case 96: /* right ctrl */
183 return 97;
184
185 case 97: /* caps lock */
186 return 58;
187 case 102: /* left windows */
188 return 125;
189 case 103: /* right windows */
190 return 126;
191
192 case 106: /* Fn */
193 /* this is wrong. */
194 return 84;
195
196 default:
197 return 0;
198 }
199}
200
201int ec3104_kbd_translate(unsigned char scancode, unsigned char *keycode,
202 char raw_mode)
203{
204 scancode &= 0x7f;
205
206 *keycode = ec3104_scan2key(scancode);
207
208 return 1;
209}
210
211char ec3104_kbd_unexpected_up(unsigned char keycode)
212{
213 return 0200;
214}
215
216static inline void handle_keyboard_event(unsigned char scancode)
217{
218#ifdef CONFIG_VT
219 handle_scancode(scancode, !(scancode & 0x80));
220#endif
221 tasklet_schedule(&keyboard_tasklet);
222}
223
224void ec3104_kbd_leds(unsigned char leds)
225{
226}
227
228static u8 e5_checksum(u8 *packet, int count)
229{
230 int i;
231 u8 sum = 0;
232
233 for (i=0; i<count; i++)
234 sum ^= packet[i];
235
236 if (sum & 0x80)
237 sum ^= 0xc0;
238
239 return sum;
240}
241
242static void e5_wait_for_cts(struct e5_struct *k)
243{
244 u8 msr;
245
246 do {
247 msr = ctrl_inb(EC3104_SER4_MSR);
248 } while (!(msr & MSR_CTS));
249}
250
251
252static void e5_send_byte(u8 byte, struct e5_struct *k)
253{
254 u8 status;
255
256 do {
257 status = ctrl_inb(EC3104_SER4_LSR);
258 } while ((status & LSR_BOTH_EMPTY) != LSR_BOTH_EMPTY);
259
260 printk("<%02x>", byte);
261
262 ctrl_outb(byte, EC3104_SER4_DATA);
263
264 do {
265 status = ctrl_inb(EC3104_SER4_LSR);
266 } while ((status & LSR_BOTH_EMPTY) != LSR_BOTH_EMPTY);
267
268}
269
270static int e5_send_packet(u8 *packet, int count, struct e5_struct *k)
271{
272 int i;
273
274 disable_irq(EC3104_IRQ_SER4);
275
276 if (k->cached_mcr & MCR_RTS) {
277 printk("e5_send_packet: too slow\n");
278 enable_irq(EC3104_IRQ_SER4);
279 return -EAGAIN;
280 }
281
282 k->cached_mcr |= MCR_RTS;
283 ctrl_outb(k->cached_mcr, EC3104_SER4_MCR);
284
285 e5_wait_for_cts(k);
286
287 printk("p: ");
288
289 for(i=0; i<count; i++)
290 e5_send_byte(packet[i], k);
291
292 e5_send_byte(e5_checksum(packet, count), k);
293
294 printk("\n");
295
296 udelay(1500);
297
298 k->cached_mcr &= ~MCR_RTS;
299 ctrl_outb(k->cached_mcr, EC3104_SER4_MCR);
300
301 set_current_state(TASK_UNINTERRUPTIBLE);
302
303
304
305 enable_irq(EC3104_IRQ_SER4);
306
307
308
309 return 0;
310}
311
312/*
313 * E5 packets we know about:
314 * E5->host 0x80 0x05 <checksum> - resend packet
315 * host->E5 0x83 0x43 <contrast> - set LCD contrast
316 * host->E5 0x85 0x41 0x02 <brightness> 0x02 - set LCD backlight
317 * E5->host 0x87 <ps2 packet> 0x00 <checksum> - external PS2
318 * E5->host 0x88 <scancode> <checksum> - key press
319 */
320
321static void e5_receive(struct e5_struct *k)
322{
323 k->packet[k->pos++] = ctrl_inb(EC3104_SER4_DATA);
324
325 if (k->pos == 1) {
326 switch(k->packet[0]) {
327 case 0x80:
328 k->length = 3;
329 break;
330
331 case 0x87: /* PS2 ext */
332 k->length = 6;
333 break;
334
335 case 0x88: /* keyboard */
336 k->length = 3;
337 break;
338
339 default:
340 k->length = 1;
341 printk(KERN_WARNING "unknown E5 packet %02x\n",
342 k->packet[0]);
343 }
344 }
345
346 if (k->pos == k->length) {
347 int i;
348
349 if (e5_checksum(k->packet, k->length) != 0)
350 printk(KERN_WARNING "E5: wrong checksum\n");
351
352#if 0
353 printk("E5 packet [");
354 for(i=0; i<k->length; i++) {
355 printk("%02x ", k->packet[i]);
356 }
357
358 printk("(%02x)]\n", e5_checksum(k->packet, k->length-1));
359#endif
360
361 switch(k->packet[0]) {
362 case 0x80:
363 case 0x88:
364 handle_keyboard_event(k->packet[1]);
365 break;
366 }
367
368 k->pos = k->length = 0;
369 }
370}
371
372static void ec3104_keyb_interrupt(int irq, void *data)
373{
374 struct e5_struct *k = &ec3104_keyb;
375 u8 msr, lsr;
376
377 msr = ctrl_inb(EC3104_SER4_MSR);
378
379 if ((msr & MSR_CTS) && !(k->last_msr & MSR_CTS)) {
380 if (k->cached_mcr & MCR_RTS)
381 printk("confused: RTS already high\n");
382 /* CTS went high. Send RTS. */
383 k->cached_mcr |= MCR_RTS;
384
385 ctrl_outb(k->cached_mcr, EC3104_SER4_MCR);
386 } else if ((!(msr & MSR_CTS)) && (k->last_msr & MSR_CTS)) {
387 /* CTS went low. */
388 if (!(k->cached_mcr & MCR_RTS))
389 printk("confused: RTS already low\n");
390
391 k->cached_mcr &= ~MCR_RTS;
392
393 ctrl_outb(k->cached_mcr, EC3104_SER4_MCR);
394 }
395
396 k->last_msr = msr;
397
398 lsr = ctrl_inb(EC3104_SER4_LSR);
399
400 if (lsr & LSR_DR)
401 e5_receive(k);
402}
403
404static void ec3104_keyb_clear_state(void)
405{
406 struct e5_struct *k = &ec3104_keyb;
407 u8 msr, lsr;
408
409 /* we want CTS to be low */
410 k->last_msr = 0;
411
412 for (;;) {
413 msleep(100);
414
415 msr = ctrl_inb(EC3104_SER4_MSR);
416
417 lsr = ctrl_inb(EC3104_SER4_LSR);
418
419 if (lsr & LSR_DR) {
420 e5_receive(k);
421 continue;
422 }
423
424 if ((msr & MSR_CTS) && !(k->last_msr & MSR_CTS)) {
425 if (k->cached_mcr & MCR_RTS)
426 printk("confused: RTS already high\n");
427 /* CTS went high. Send RTS. */
428 k->cached_mcr |= MCR_RTS;
429
430 ctrl_outb(k->cached_mcr, EC3104_SER4_MCR);
431 } else if ((!(msr & MSR_CTS)) && (k->last_msr & MSR_CTS)) {
432 /* CTS went low. */
433 if (!(k->cached_mcr & MCR_RTS))
434 printk("confused: RTS already low\n");
435
436 k->cached_mcr &= ~MCR_RTS;
437
438 ctrl_outb(k->cached_mcr, EC3104_SER4_MCR);
439 } else
440 break;
441
442 k->last_msr = msr;
443
444 continue;
445 }
446}
447
448void __init ec3104_kbd_init_hw(void)
449{
450 ec3104_keyb.last_msr = ctrl_inb(EC3104_SER4_MSR);
451 ec3104_keyb.cached_mcr = ctrl_inb(EC3104_SER4_MCR);
452
453 ec3104_keyb_clear_state();
454
455 /* Ok, finally allocate the IRQ, and off we go.. */
456 request_irq(EC3104_IRQ_SER4, ec3104_keyb_interrupt, 0, "keyboard", NULL);
457}
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index c6c56fb8ba50..ffcecde9e2a5 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -1,34 +1,30 @@
1/* 1/*
2
3
4 Copyright (C) 1996 Digi International. 2 Copyright (C) 1996 Digi International.
5 3
6 For technical support please email digiLinux@dgii.com or 4 For technical support please email digiLinux@dgii.com or
7 call Digi tech support at (612) 912-3456 5 call Digi tech support at (612) 912-3456
8 6
9 ** This driver is no longer supported by Digi ** 7 ** This driver is no longer supported by Digi **
10 8
11 Much of this design and code came from epca.c which was 9 Much of this design and code came from epca.c which was
12 copyright (C) 1994, 1995 Troy De Jongh, and subsquently 10 copyright (C) 1994, 1995 Troy De Jongh, and subsquently
13 modified by David Nugent, Christoph Lameter, Mike McLagan. 11 modified by David Nugent, Christoph Lameter, Mike McLagan.
14
15 This program is free software; you can redistribute it and/or modify
16 it under the terms of the GNU General Public License as published by
17 the Free Software Foundation; either version 2 of the License, or
18 (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 GNU General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28
29--------------------------------------------------------------------------- */
30/* See README.epca for change history --DAT*/
31 12
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2 of the License, or
16 (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26*/
27/* See README.epca for change history --DAT*/
32 28
33#include <linux/module.h> 29#include <linux/module.h>
34#include <linux/kernel.h> 30#include <linux/kernel.h>
@@ -54,13 +50,10 @@
54#include "epca.h" 50#include "epca.h"
55#include "epcaconfig.h" 51#include "epcaconfig.h"
56 52
57/* ---------------------- Begin defines ------------------------ */
58
59#define VERSION "1.3.0.1-LK2.6" 53#define VERSION "1.3.0.1-LK2.6"
60 54
61/* This major needs to be submitted to Linux to join the majors list */ 55/* This major needs to be submitted to Linux to join the majors list */
62 56#define DIGIINFOMAJOR 35 /* For Digi specific ioctl */
63#define DIGIINFOMAJOR 35 /* For Digi specific ioctl */
64 57
65 58
66#define MAXCARDS 7 59#define MAXCARDS 7
@@ -68,60 +61,48 @@
68 61
69#define PFX "epca: " 62#define PFX "epca: "
70 63
71/* ----------------- Begin global definitions ------------------- */
72
73static int nbdevs, num_cards, liloconfig; 64static int nbdevs, num_cards, liloconfig;
74static int digi_poller_inhibited = 1 ; 65static int digi_poller_inhibited = 1 ;
75 66
76static int setup_error_code; 67static int setup_error_code;
77static int invalid_lilo_config; 68static int invalid_lilo_config;
78 69
79/* The ISA boards do window flipping into the same spaces so its only sane 70/*
80 with a single lock. It's still pretty efficient */ 71 * The ISA boards do window flipping into the same spaces so its only sane with
81 72 * a single lock. It's still pretty efficient.
73 */
82static DEFINE_SPINLOCK(epca_lock); 74static DEFINE_SPINLOCK(epca_lock);
83 75
84/* ----------------------------------------------------------------------- 76/* MAXBOARDS is typically 12, but ISA and EISA cards are restricted to 7 below. */
85 MAXBOARDS is typically 12, but ISA and EISA cards are restricted to
86 7 below.
87--------------------------------------------------------------------------*/
88static struct board_info boards[MAXBOARDS]; 77static struct board_info boards[MAXBOARDS];
89 78
90
91/* ------------- Begin structures used for driver registeration ---------- */
92
93static struct tty_driver *pc_driver; 79static struct tty_driver *pc_driver;
94static struct tty_driver *pc_info; 80static struct tty_driver *pc_info;
95 81
96/* ------------------ Begin Digi specific structures -------------------- */ 82/* ------------------ Begin Digi specific structures -------------------- */
97 83
98/* ------------------------------------------------------------------------ 84/*
99 digi_channels represents an array of structures that keep track of 85 * digi_channels represents an array of structures that keep track of each
100 each channel of the Digi product. Information such as transmit and 86 * channel of the Digi product. Information such as transmit and receive
101 receive pointers, termio data, and signal definitions (DTR, CTS, etc ...) 87 * pointers, termio data, and signal definitions (DTR, CTS, etc ...) are stored
102 are stored here. This structure is NOT used to overlay the cards 88 * here. This structure is NOT used to overlay the cards physical channel
103 physical channel structure. 89 * structure.
104-------------------------------------------------------------------------- */ 90 */
105
106static struct channel digi_channels[MAX_ALLOC]; 91static struct channel digi_channels[MAX_ALLOC];
107 92
108/* ------------------------------------------------------------------------ 93/*
109 card_ptr is an array used to hold the address of the 94 * card_ptr is an array used to hold the address of the first channel structure
110 first channel structure of each card. This array will hold 95 * of each card. This array will hold the addresses of various channels located
111 the addresses of various channels located in digi_channels. 96 * in digi_channels.
112-------------------------------------------------------------------------- */ 97 */
113static struct channel *card_ptr[MAXCARDS]; 98static struct channel *card_ptr[MAXCARDS];
114 99
115static struct timer_list epca_timer; 100static struct timer_list epca_timer;
116 101
117/* ---------------------- Begin function prototypes --------------------- */ 102/*
118 103 * Begin generic memory functions. These functions will be alias (point at)
119/* ---------------------------------------------------------------------- 104 * more specific functions dependent on the board being configured.
120 Begin generic memory functions. These functions will be alias 105 */
121 (point at) more specific functions dependent on the board being
122 configured.
123----------------------------------------------------------------------- */
124
125static void memwinon(struct board_info *b, unsigned int win); 106static void memwinon(struct board_info *b, unsigned int win);
126static void memwinoff(struct board_info *b, unsigned int win); 107static void memwinoff(struct board_info *b, unsigned int win);
127static void globalwinon(struct channel *ch); 108static void globalwinon(struct channel *ch);
@@ -170,8 +151,6 @@ static void dummy_memoff(struct channel *ch);
170static void dummy_assertgwinon(struct channel *ch); 151static void dummy_assertgwinon(struct channel *ch);
171static void dummy_assertmemoff(struct channel *ch); 152static void dummy_assertmemoff(struct channel *ch);
172 153
173/* ------------------- Begin declare functions ----------------------- */
174
175static struct channel *verifyChannel(struct tty_struct *); 154static struct channel *verifyChannel(struct tty_struct *);
176static void pc_sched_event(struct channel *, int); 155static void pc_sched_event(struct channel *, int);
177static void epca_error(int, char *); 156static void epca_error(int, char *);
@@ -213,62 +192,55 @@ static int pc_write(struct tty_struct *, const unsigned char *, int);
213static int pc_init(void); 192static int pc_init(void);
214static int init_PCI(void); 193static int init_PCI(void);
215 194
216 195/*
217/* ------------------------------------------------------------------ 196 * Table of functions for each board to handle memory. Mantaining parallelism
218 Table of functions for each board to handle memory. Mantaining 197 * is a *very* good idea here. The idea is for the runtime code to blindly call
219 parallelism is a *very* good idea here. The idea is for the 198 * these functions, not knowing/caring about the underlying hardware. This
220 runtime code to blindly call these functions, not knowing/caring 199 * stuff should contain no conditionals; if more functionality is needed a
221 about the underlying hardware. This stuff should contain no 200 * different entry should be established. These calls are the interface calls
222 conditionals; if more functionality is needed a different entry 201 * and are the only functions that should be accessed. Anyone caught making
223 should be established. These calls are the interface calls and 202 * direct calls deserves what they get.
224 are the only functions that should be accessed. Anyone caught 203 */
225 making direct calls deserves what they get.
226-------------------------------------------------------------------- */
227
228static void memwinon(struct board_info *b, unsigned int win) 204static void memwinon(struct board_info *b, unsigned int win)
229{ 205{
230 (b->memwinon)(b, win); 206 b->memwinon(b, win);
231} 207}
232 208
233static void memwinoff(struct board_info *b, unsigned int win) 209static void memwinoff(struct board_info *b, unsigned int win)
234{ 210{
235 (b->memwinoff)(b, win); 211 b->memwinoff(b, win);
236} 212}
237 213
238static void globalwinon(struct channel *ch) 214static void globalwinon(struct channel *ch)
239{ 215{
240 (ch->board->globalwinon)(ch); 216 ch->board->globalwinon(ch);
241} 217}
242 218
243static void rxwinon(struct channel *ch) 219static void rxwinon(struct channel *ch)
244{ 220{
245 (ch->board->rxwinon)(ch); 221 ch->board->rxwinon(ch);
246} 222}
247 223
248static void txwinon(struct channel *ch) 224static void txwinon(struct channel *ch)
249{ 225{
250 (ch->board->txwinon)(ch); 226 ch->board->txwinon(ch);
251} 227}
252 228
253static void memoff(struct channel *ch) 229static void memoff(struct channel *ch)
254{ 230{
255 (ch->board->memoff)(ch); 231 ch->board->memoff(ch);
256} 232}
257static void assertgwinon(struct channel *ch) 233static void assertgwinon(struct channel *ch)
258{ 234{
259 (ch->board->assertgwinon)(ch); 235 ch->board->assertgwinon(ch);
260} 236}
261 237
262static void assertmemoff(struct channel *ch) 238static void assertmemoff(struct channel *ch)
263{ 239{
264 (ch->board->assertmemoff)(ch); 240 ch->board->assertmemoff(ch);
265} 241}
266 242
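The wrappers above implement the scheme the comment describes: each board carries a table of memory-window handlers, runtime code jumps through that table without knowing the hardware, and do-nothing handlers fill the slots on cards that need no windowing. A minimal stand-alone sketch of the same pattern in plain C (the win_ops struct and all function names below are invented for illustration, not taken from the driver):

#include <stdio.h>

/* One function pointer per memory-window action. */
struct win_ops {
        void (*memwinon)(int win);
        void (*memwinoff)(int win);
};

static void real_memwinon(int win)  { printf("select window %d\n", win); }
static void real_memwinoff(int win) { printf("release window %d\n", win); }

/* Cards that need no windowing get do-nothing handlers, not conditionals. */
static void dummy_memwinon(int win)  { (void)win; }
static void dummy_memwinoff(int win) { (void)win; }

static const struct win_ops isa_ops = { real_memwinon, real_memwinoff };
static const struct win_ops pci_ops = { dummy_memwinon, dummy_memwinoff };

/* Runtime code always goes through the table and never tests the card type. */
static void touch_window(const struct win_ops *ops, int win)
{
        ops->memwinon(win);
        /* ... access the mapped window here ... */
        ops->memwinoff(win);
}

int main(void)
{
        touch_window(&isa_ops, 2);      /* prints the select/release messages */
        touch_window(&pci_ops, 2);      /* silently does nothing */
        return 0;
}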
267/* --------------------------------------------------------- 243/* PCXEM windowing is the same as that used in the PCXR and CX series cards. */
268 PCXEM windowing is the same as that used in the PCXR
269 and CX series cards.
270------------------------------------------------------------ */
271
272static void pcxem_memwinon(struct board_info *b, unsigned int win) 244static void pcxem_memwinon(struct board_info *b, unsigned int win)
273{ 245{
274 outb_p(FEPWIN|win, b->port + 1); 246 outb_p(FEPWIN|win, b->port + 1);
@@ -300,32 +272,30 @@ static void pcxem_memoff(struct channel *ch)
300} 272}
301 273
302/* ----------------- Begin pcxe memory window stuff ------------------ */ 274/* ----------------- Begin pcxe memory window stuff ------------------ */
303
304static void pcxe_memwinon(struct board_info *b, unsigned int win) 275static void pcxe_memwinon(struct board_info *b, unsigned int win)
305{ 276{
306 outb_p(FEPWIN | win, b->port + 1); 277 outb_p(FEPWIN | win, b->port + 1);
307} 278}
308 279
309static void pcxe_memwinoff(struct board_info *b, unsigned int win) 280static void pcxe_memwinoff(struct board_info *b, unsigned int win)
310{ 281{
311 outb_p(inb(b->port) & ~FEPMEM, 282 outb_p(inb(b->port) & ~FEPMEM, b->port + 1);
312 b->port + 1);
313 outb_p(0, b->port + 1); 283 outb_p(0, b->port + 1);
314} 284}
315 285
316static void pcxe_globalwinon(struct channel *ch) 286static void pcxe_globalwinon(struct channel *ch)
317{ 287{
318 outb_p( FEPWIN, (int)ch->board->port + 1); 288 outb_p(FEPWIN, (int)ch->board->port + 1);
319} 289}
320 290
321static void pcxe_rxwinon(struct channel *ch) 291static void pcxe_rxwinon(struct channel *ch)
322{ 292{
323 outb_p(ch->rxwin, (int)ch->board->port + 1); 293 outb_p(ch->rxwin, (int)ch->board->port + 1);
324} 294}
325 295
326static void pcxe_txwinon(struct channel *ch) 296static void pcxe_txwinon(struct channel *ch)
327{ 297{
328 outb_p(ch->txwin, (int)ch->board->port + 1); 298 outb_p(ch->txwin, (int)ch->board->port + 1);
329} 299}
330 300
331static void pcxe_memoff(struct channel *ch) 301static void pcxe_memoff(struct channel *ch)
@@ -335,10 +305,9 @@ static void pcxe_memoff(struct channel *ch)
335} 305}
336 306
337/* ------------- Begin pc64xe and pcxi memory window stuff -------------- */ 307/* ------------- Begin pc64xe and pcxi memory window stuff -------------- */
338
339static void pcxi_memwinon(struct board_info *b, unsigned int win) 308static void pcxi_memwinon(struct board_info *b, unsigned int win)
340{ 309{
341 outb_p(inb(b->port) | FEPMEM, b->port); 310 outb_p(inb(b->port) | FEPMEM, b->port);
342} 311}
343 312
344static void pcxi_memwinoff(struct board_info *b, unsigned int win) 313static void pcxi_memwinoff(struct board_info *b, unsigned int win)
@@ -353,12 +322,12 @@ static void pcxi_globalwinon(struct channel *ch)
353 322
354static void pcxi_rxwinon(struct channel *ch) 323static void pcxi_rxwinon(struct channel *ch)
355{ 324{
356 outb_p(FEPMEM, ch->board->port); 325 outb_p(FEPMEM, ch->board->port);
357} 326}
358 327
359static void pcxi_txwinon(struct channel *ch) 328static void pcxi_txwinon(struct channel *ch)
360{ 329{
361 outb_p(FEPMEM, ch->board->port); 330 outb_p(FEPMEM, ch->board->port);
362} 331}
363 332
364static void pcxi_memoff(struct channel *ch) 333static void pcxi_memoff(struct channel *ch)
@@ -376,16 +345,13 @@ static void pcxi_assertmemoff(struct channel *ch)
376 epcaassert(!(inb(ch->board->port) & FEPMEM), "Memory on"); 345 epcaassert(!(inb(ch->board->port) & FEPMEM), "Memory on");
377} 346}
378 347
379 348/*
380/* ---------------------------------------------------------------------- 349 * Not all of the cards need specific memory windowing routines. Some cards
381 Not all of the cards need specific memory windowing routines. Some 350 * (Such as PCI) need no windowing routines at all. We provide these do
382 cards (Such as PCI) needs no windowing routines at all. We provide 351 * nothing routines so that the same code base can be used. The driver will
383 these do nothing routines so that the same code base can be used. 352 * ALWAYS call a windowing routine if it thinks it needs to; regardless of the
384 The driver will ALWAYS call a windowing routine if it thinks it needs 353 * card. However, dependent on the card the routine may or may not do anything.
385 to; regardless of the card. However, dependent on the card the routine 354 */
386 may or may not do anything.
387---------------------------------------------------------------------------*/
388
389static void dummy_memwinon(struct board_info *b, unsigned int win) 355static void dummy_memwinon(struct board_info *b, unsigned int win)
390{ 356{
391} 357}
@@ -418,15 +384,14 @@ static void dummy_assertmemoff(struct channel *ch)
418{ 384{
419} 385}
420 386
421/* ----------------- Begin verifyChannel function ----------------------- */
422static struct channel *verifyChannel(struct tty_struct *tty) 387static struct channel *verifyChannel(struct tty_struct *tty)
423{ /* Begin verifyChannel */ 388{
424 /* -------------------------------------------------------------------- 389 /*
425 This routine basically provides a sanity check. It insures that 390 * This routine basically provides a sanity check. It insures that the
426 the channel returned is within the proper range of addresses as 391 * channel returned is within the proper range of addresses as well as
427 well as properly initialized. If some bogus info gets passed in 392 * properly initialized. If some bogus info gets passed in
428 through tty->driver_data this should catch it. 393 * through tty->driver_data this should catch it.
429 --------------------------------------------------------------------- */ 394 */
430 if (tty) { 395 if (tty) {
431 struct channel *ch = (struct channel *)tty->driver_data; 396 struct channel *ch = (struct channel *)tty->driver_data;
432 if ((ch >= &digi_channels[0]) && (ch < &digi_channels[nbdevs])) { 397 if ((ch >= &digi_channels[0]) && (ch < &digi_channels[nbdevs])) {
@@ -435,62 +400,55 @@ static struct channel *verifyChannel(struct tty_struct *tty)
435 } 400 }
436 } 401 }
437 return NULL; 402 return NULL;
438 403}
439} /* End verifyChannel */
440
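verifyChannel() trusts tty->driver_data only after checking that the pointer lands inside the driver's own channel array, which is what catches bogus data. The same bounds check in isolation (stand-alone C; digi_channels, nbdevs and the unit field are stand-ins for the driver's globals):

#include <stdio.h>

#define NBDEVS 4

struct channel { int unit; };

static struct channel digi_channels[NBDEVS];
static int nbdevs = NBDEVS;

/* Accept the pointer only if it points into our own channel array. */
static struct channel *verify(void *driver_data)
{
        struct channel *ch = driver_data;

        if (ch >= &digi_channels[0] && ch < &digi_channels[nbdevs])
                return ch;
        return NULL;            /* bogus pointer: refuse it */
}

int main(void)
{
        printf("%s\n", verify(&digi_channels[1]) ? "ok" : "rejected");
        printf("%s\n", verify(&digi_channels[nbdevs]) ? "ok" : "rejected");
        return 0;
}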
441/* ------------------ Begin pc_sched_event ------------------------- */
442 404
443static void pc_sched_event(struct channel *ch, int event) 405static void pc_sched_event(struct channel *ch, int event)
444{ 406{
445 /* ---------------------------------------------------------------------- 407 /*
446 We call this to schedule interrupt processing on some event. The 408 * We call this to schedule interrupt processing on some event. The
447 kernel sees our request and calls the related routine in OUR driver. 409 * kernel sees our request and calls the related routine in OUR driver.
448 -------------------------------------------------------------------------*/ 410 */
449 ch->event |= 1 << event; 411 ch->event |= 1 << event;
450 schedule_work(&ch->tqueue); 412 schedule_work(&ch->tqueue);
451} /* End pc_sched_event */ 413}
452
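pc_sched_event() just records an event bit in ch->event and queues the channel's work item for later processing. The bitmask half of that, shown on its own (stand-alone C; the real driver defers the handling with schedule_work(&ch->tqueue), and the event names and do_softint() here are invented):

#include <stdio.h>

enum { EVT_HANGUP = 0, EVT_WRITE_WAKEUP = 1 };

static unsigned int pending;            /* stands in for ch->event */

static void sched_event(int event)
{
        pending |= 1U << event;         /* record it; the driver also queues work */
}

static void do_softint(void)            /* what the queued work runs later */
{
        unsigned int events = pending;

        pending = 0;
        if (events & (1U << EVT_HANGUP))
                printf("handle hangup\n");
        if (events & (1U << EVT_WRITE_WAKEUP))
                printf("wake up writers\n");
}

int main(void)
{
        sched_event(EVT_WRITE_WAKEUP);
        do_softint();
        return 0;
}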
453/* ------------------ Begin epca_error ------------------------- */
454 414
455static void epca_error(int line, char *msg) 415static void epca_error(int line, char *msg)
456{ 416{
457 printk(KERN_ERR "epca_error (Digi): line = %d %s\n",line,msg); 417 printk(KERN_ERR "epca_error (Digi): line = %d %s\n",line,msg);
458} 418}
459 419
460/* ------------------ Begin pc_close ------------------------- */ 420static void pc_close(struct tty_struct *tty, struct file *filp)
461static void pc_close(struct tty_struct * tty, struct file * filp)
462{ 421{
463 struct channel *ch; 422 struct channel *ch;
464 unsigned long flags; 423 unsigned long flags;
465 /* --------------------------------------------------------- 424 /*
466 verifyChannel returns the channel from the tty struct 425 * verifyChannel returns the channel from the tty struct if it is
467 if it is valid. This serves as a sanity check. 426 * valid. This serves as a sanity check.
468 ------------------------------------------------------------- */ 427 */
469 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if ch != NULL */ 428 if ((ch = verifyChannel(tty)) != NULL) {
470 spin_lock_irqsave(&epca_lock, flags); 429 spin_lock_irqsave(&epca_lock, flags);
471 if (tty_hung_up_p(filp)) { 430 if (tty_hung_up_p(filp)) {
472 spin_unlock_irqrestore(&epca_lock, flags); 431 spin_unlock_irqrestore(&epca_lock, flags);
473 return; 432 return;
474 } 433 }
475 /* Check to see if the channel is open more than once */
476 if (ch->count-- > 1) { 434 if (ch->count-- > 1) {
477 /* Begin channel is open more than once */ 435 /* Begin channel is open more than once */
478 /* ------------------------------------------------------------- 436 /*
479 Return without doing anything. Someone might still be using 437 * Return without doing anything. Someone might still
480 the channel. 438 * be using the channel.
481 ---------------------------------------------------------------- */ 439 */
482 spin_unlock_irqrestore(&epca_lock, flags); 440 spin_unlock_irqrestore(&epca_lock, flags);
483 return; 441 return;
484 } /* End channel is open more than once */ 442 }
485 443
486 /* Port open only once go ahead with shutdown & reset */ 444 /* Port open only once go ahead with shutdown & reset */
487 BUG_ON(ch->count < 0); 445 BUG_ON(ch->count < 0);
488 446
489 /* --------------------------------------------------------------- 447 /*
490 Let the rest of the driver know the channel is being closed. 448 * Let the rest of the driver know the channel is being closed.
491 This becomes important if an open is attempted before close 449 * This becomes important if an open is attempted before close
492 is finished. 450 * is finished.
493 ------------------------------------------------------------------ */ 451 */
494 ch->asyncflags |= ASYNC_CLOSING; 452 ch->asyncflags |= ASYNC_CLOSING;
495 tty->closing = 1; 453 tty->closing = 1;
496 454
@@ -498,7 +456,7 @@ static void pc_close(struct tty_struct * tty, struct file * filp)
498 456
499 if (ch->asyncflags & ASYNC_INITIALIZED) { 457 if (ch->asyncflags & ASYNC_INITIALIZED) {
500 /* Setup an event to indicate when the transmit buffer empties */ 458 /* Setup an event to indicate when the transmit buffer empties */
501 setup_empty_event(tty, ch); 459 setup_empty_event(tty, ch);
502 tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */ 460 tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */
503 } 461 }
504 if (tty->driver->flush_buffer) 462 if (tty->driver->flush_buffer)
@@ -513,27 +471,24 @@ static void pc_close(struct tty_struct * tty, struct file * filp)
513 ch->tty = NULL; 471 ch->tty = NULL;
514 spin_unlock_irqrestore(&epca_lock, flags); 472 spin_unlock_irqrestore(&epca_lock, flags);
515 473
516 if (ch->blocked_open) { /* Begin if blocked_open */ 474 if (ch->blocked_open) {
517 if (ch->close_delay) 475 if (ch->close_delay)
518 msleep_interruptible(jiffies_to_msecs(ch->close_delay)); 476 msleep_interruptible(jiffies_to_msecs(ch->close_delay));
519 wake_up_interruptible(&ch->open_wait); 477 wake_up_interruptible(&ch->open_wait);
520 } /* End if blocked_open */ 478 }
521 ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED | 479 ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED |
522 ASYNC_CLOSING); 480 ASYNC_CLOSING);
523 wake_up_interruptible(&ch->close_wait); 481 wake_up_interruptible(&ch->close_wait);
524 } /* End if ch != NULL */ 482 }
525} /* End pc_close */ 483}
526
527/* ------------------ Begin shutdown ------------------------- */
528 484
529static void shutdown(struct channel *ch) 485static void shutdown(struct channel *ch)
530{ /* Begin shutdown */ 486{
531
532 unsigned long flags; 487 unsigned long flags;
533 struct tty_struct *tty; 488 struct tty_struct *tty;
534 struct board_chan __iomem *bc; 489 struct board_chan __iomem *bc;
535 490
536 if (!(ch->asyncflags & ASYNC_INITIALIZED)) 491 if (!(ch->asyncflags & ASYNC_INITIALIZED))
537 return; 492 return;
538 493
539 spin_lock_irqsave(&epca_lock, flags); 494 spin_lock_irqsave(&epca_lock, flags);
@@ -541,50 +496,40 @@ static void shutdown(struct channel *ch)
541 globalwinon(ch); 496 globalwinon(ch);
542 bc = ch->brdchan; 497 bc = ch->brdchan;
543 498
544 /* ------------------------------------------------------------------ 499 /*
545 In order for an event to be generated on the receipt of data the 500 * In order for an event to be generated on the receipt of data the
546 idata flag must be set. Since we are shutting down, this is not 501 * idata flag must be set. Since we are shutting down, this is not
547 necessary clear this flag. 502 * necessary; clear this flag.
548 --------------------------------------------------------------------- */ 503 */
549
550 if (bc) 504 if (bc)
551 writeb(0, &bc->idata); 505 writeb(0, &bc->idata);
552 tty = ch->tty; 506 tty = ch->tty;
553 507
554 /* ---------------------------------------------------------------- 508 /* If we're a modem control device and HUPCL is on, drop RTS & DTR. */
555 If we're a modem control device and HUPCL is on, drop RTS & DTR.
556 ------------------------------------------------------------------ */
557
558 if (tty->termios->c_cflag & HUPCL) { 509 if (tty->termios->c_cflag & HUPCL) {
559 ch->omodem &= ~(ch->m_rts | ch->m_dtr); 510 ch->omodem &= ~(ch->m_rts | ch->m_dtr);
560 fepcmd(ch, SETMODEM, 0, ch->m_dtr | ch->m_rts, 10, 1); 511 fepcmd(ch, SETMODEM, 0, ch->m_dtr | ch->m_rts, 10, 1);
561 } 512 }
562 memoff(ch); 513 memoff(ch);
563 514
564 /* ------------------------------------------------------------------ 515 /*
565 The channel has officialy been closed. The next time it is opened 516 * The channel has officially been closed. The next time it is opened it
566 it will have to reinitialized. Set a flag to indicate this. 517 * will have to be reinitialized. Set a flag to indicate this.
567 ---------------------------------------------------------------------- */ 518 */
568
569 /* Prevent future Digi programmed interrupts from coming active */ 519 /* Prevent future Digi programmed interrupts from coming active */
570
571 ch->asyncflags &= ~ASYNC_INITIALIZED; 520 ch->asyncflags &= ~ASYNC_INITIALIZED;
572 spin_unlock_irqrestore(&epca_lock, flags); 521 spin_unlock_irqrestore(&epca_lock, flags);
573 522}
574} /* End shutdown */
575
576/* ------------------ Begin pc_hangup ------------------------- */
577 523
578static void pc_hangup(struct tty_struct *tty) 524static void pc_hangup(struct tty_struct *tty)
579{ /* Begin pc_hangup */ 525{
580 struct channel *ch; 526 struct channel *ch;
581
582 /* ---------------------------------------------------------
583 verifyChannel returns the channel from the tty struct
584 if it is valid. This serves as a sanity check.
585 ------------------------------------------------------------- */
586 527
587 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if ch != NULL */ 528 /*
529 * verifyChannel returns the channel from the tty struct if it is
530 * valid. This serves as a sanity check.
531 */
532 if ((ch = verifyChannel(tty)) != NULL) {
588 unsigned long flags; 533 unsigned long flags;
589 534
590 if (tty->driver->flush_buffer) 535 if (tty->driver->flush_buffer)
@@ -599,15 +544,12 @@ static void pc_hangup(struct tty_struct *tty)
599 ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED); 544 ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED);
600 spin_unlock_irqrestore(&epca_lock, flags); 545 spin_unlock_irqrestore(&epca_lock, flags);
601 wake_up_interruptible(&ch->open_wait); 546 wake_up_interruptible(&ch->open_wait);
602 } /* End if ch != NULL */ 547 }
603 548}
604} /* End pc_hangup */
605
606/* ------------------ Begin pc_write ------------------------- */
607 549
608static int pc_write(struct tty_struct * tty, 550static int pc_write(struct tty_struct *tty,
609 const unsigned char *buf, int bytesAvailable) 551 const unsigned char *buf, int bytesAvailable)
610{ /* Begin pc_write */ 552{
611 unsigned int head, tail; 553 unsigned int head, tail;
612 int dataLen; 554 int dataLen;
613 int size; 555 int size;
@@ -617,25 +559,23 @@ static int pc_write(struct tty_struct * tty,
617 int remain; 559 int remain;
618 struct board_chan __iomem *bc; 560 struct board_chan __iomem *bc;
619 561
620 /* ---------------------------------------------------------------- 562 /*
621 pc_write is primarily called directly by the kernel routine 563 * pc_write is primarily called directly by the kernel routine
622 tty_write (Though it can also be called by put_char) found in 564 * tty_write (Though it can also be called by put_char) found in
623 tty_io.c. pc_write is passed a line discipline buffer where 565 * tty_io.c. pc_write is passed a line discipline buffer where the data
624 the data to be written out is stored. The line discipline 566 * to be written out is stored. The line discipline implementation
625 implementation itself is done at the kernel level and is not 567 * itself is done at the kernel level and is not brought into the
626 brought into the driver. 568 * driver.
627 ------------------------------------------------------------------- */ 569 */
628
629 /* ---------------------------------------------------------
630 verifyChannel returns the channel from the tty struct
631 if it is valid. This serves as a sanity check.
632 ------------------------------------------------------------- */
633 570
571 /*
572 * verifyChannel returns the channel from the tty struct if it is
573 * valid. This serves as a sanity check.
574 */
634 if ((ch = verifyChannel(tty)) == NULL) 575 if ((ch = verifyChannel(tty)) == NULL)
635 return 0; 576 return 0;
636 577
637 /* Make a pointer to the channel data structure found on the board. */ 578 /* Make a pointer to the channel data structure found on the board. */
638
639 bc = ch->brdchan; 579 bc = ch->brdchan;
640 size = ch->txbufsize; 580 size = ch->txbufsize;
641 amountCopied = 0; 581 amountCopied = 0;
@@ -650,37 +590,36 @@ static int pc_write(struct tty_struct * tty,
650 tail = readw(&bc->tout); 590 tail = readw(&bc->tout);
651 tail &= (size - 1); 591 tail &= (size - 1);
652 592
653 /* If head >= tail, head has not wrapped around. */ 593 if (head >= tail) {
654 if (head >= tail) { /* Begin head has not wrapped */ 594 /* head has not wrapped */
655 /* --------------------------------------------------------------- 595 /*
656 remain (much like dataLen above) represents the total amount of 596 * remain (much like dataLen above) represents the total amount
657 space available on the card for data. Here dataLen represents 597 * of space available on the card for data. Here dataLen
658 the space existing between the head pointer and the end of 598 * represents the space existing between the head pointer and
659 buffer. This is important because a memcpy cannot be told to 599 * the end of buffer. This is important because a memcpy cannot
660 automatically wrap around when it hits the buffer end. 600 * be told to automatically wrap around when it hits the buffer
661 ------------------------------------------------------------------ */ 601 * end.
602 */
662 dataLen = size - head; 603 dataLen = size - head;
663 remain = size - (head - tail) - 1; 604 remain = size - (head - tail) - 1;
664 } else { /* Begin head has wrapped around */ 605 } else {
665 606 /* head has wrapped around */
666 remain = tail - head - 1; 607 remain = tail - head - 1;
667 dataLen = remain; 608 dataLen = remain;
668 609 }
669 } /* End head has wrapped around */ 610 /*
670 /* ------------------------------------------------------------------- 611 * Check the space on the card. If we have more data than space; reduce
671 Check the space on the card. If we have more data than 612 * the amount of data to fit the space.
672 space; reduce the amount of data to fit the space. 613 */
673 ---------------------------------------------------------------------- */
674 bytesAvailable = min(remain, bytesAvailable); 614 bytesAvailable = min(remain, bytesAvailable);
675 txwinon(ch); 615 txwinon(ch);
676 while (bytesAvailable > 0) 616 while (bytesAvailable > 0) {
677 { /* Begin while there is data to copy onto card */ 617 /* there is data to copy onto card */
678
679 /* -----------------------------------------------------------------
680 If head is not wrapped, the below will make sure the first
681 data copy fills to the end of card buffer.
682 ------------------------------------------------------------------- */
683 618
619 /*
620 * If head is not wrapped, the below will make sure the first
621 * data copy fills to the end of card buffer.
622 */
684 dataLen = min(bytesAvailable, dataLen); 623 dataLen = min(bytesAvailable, dataLen);
685 memcpy_toio(ch->txptr + head, buf, dataLen); 624 memcpy_toio(ch->txptr + head, buf, dataLen);
686 buf += dataLen; 625 buf += dataLen;
@@ -692,7 +631,7 @@ static int pc_write(struct tty_struct * tty,
692 head = 0; 631 head = 0;
693 dataLen = tail; 632 dataLen = tail;
694 } 633 }
695 } /* End while there is data to copy onto card */ 634 }
696 ch->statusflags |= TXBUSY; 635 ch->statusflags |= TXBUSY;
697 globalwinon(ch); 636 globalwinon(ch);
698 writew(head, &bc->tin); 637 writew(head, &bc->tin);
@@ -703,22 +642,16 @@ static int pc_write(struct tty_struct * tty,
703 } 642 }
704 memoff(ch); 643 memoff(ch);
705 spin_unlock_irqrestore(&epca_lock, flags); 644 spin_unlock_irqrestore(&epca_lock, flags);
706 return(amountCopied); 645 return amountCopied;
707 646}
708} /* End pc_write */
709
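The hunk above is the core of pc_write(): tin/tout act as head and tail of a power-of-two ring, remain is the free space (size - (head - tail) - 1 while head has not wrapped, tail - head - 1 after it has), and the copy is split into two passes because memcpy() cannot wrap past the end of the buffer. The same arithmetic reduced to a stand-alone sketch in plain C (ring_write(), txbuf and TXBUFSIZE are invented for the example; the driver copies onto the card with memcpy_toio() instead):

#include <stdio.h>
#include <string.h>

#define TXBUFSIZE 16                    /* power of two, as on the card */

static unsigned char txbuf[TXBUFSIZE];

/* Copy as much of buf as fits between head and tail; return bytes copied. */
static int ring_write(unsigned int *headp, unsigned int tail,
                      const unsigned char *buf, int count)
{
        unsigned int head = *headp & (TXBUFSIZE - 1);
        int remain, dataLen, copied = 0;

        tail &= TXBUFSIZE - 1;
        if (head >= tail) {             /* head has not wrapped */
                dataLen = TXBUFSIZE - head;
                remain = TXBUFSIZE - (head - tail) - 1;
        } else {                        /* head has wrapped */
                remain = tail - head - 1;
                dataLen = remain;
        }
        if (count > remain)
                count = remain;

        while (count > 0) {
                if (dataLen > count)    /* clamp this pass to what is left */
                        dataLen = count;
                memcpy(txbuf + head, buf, dataLen);
                buf += dataLen;
                count -= dataLen;
                copied += dataLen;
                head += dataLen;
                if (head >= TXBUFSIZE) {        /* second pass starts at 0 */
                        head = 0;
                        dataLen = tail;
                }
        }
        *headp = head;
        return copied;
}

int main(void)
{
        unsigned int head = 14, tail = 6;       /* 7 bytes free: 14,15,0..4 */
        unsigned char msg[] = "0123456789";

        printf("copied %d bytes, head now %u\n",
               ring_write(&head, tail, msg, 10), head);
        return 0;
}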
710/* ------------------ Begin pc_put_char ------------------------- */
711 647
712static void pc_put_char(struct tty_struct *tty, unsigned char c) 648static void pc_put_char(struct tty_struct *tty, unsigned char c)
713{ /* Begin pc_put_char */ 649{
714 pc_write(tty, &c, 1); 650 pc_write(tty, &c, 1);
715} /* End pc_put_char */ 651}
716
717/* ------------------ Begin pc_write_room ------------------------- */
718 652
719static int pc_write_room(struct tty_struct *tty) 653static int pc_write_room(struct tty_struct *tty)
720{ /* Begin pc_write_room */ 654{
721
722 int remain; 655 int remain;
723 struct channel *ch; 656 struct channel *ch;
724 unsigned long flags; 657 unsigned long flags;
@@ -727,11 +660,10 @@ static int pc_write_room(struct tty_struct *tty)
727 660
728 remain = 0; 661 remain = 0;
729 662
730 /* --------------------------------------------------------- 663 /*
731 verifyChannel returns the channel from the tty struct 664 * verifyChannel returns the channel from the tty struct if it is
732 if it is valid. This serves as a sanity check. 665 * valid. This serves as a sanity check.
733 ------------------------------------------------------------- */ 666 */
734
735 if ((ch = verifyChannel(tty)) != NULL) { 667 if ((ch = verifyChannel(tty)) != NULL) {
736 spin_lock_irqsave(&epca_lock, flags); 668 spin_lock_irqsave(&epca_lock, flags);
737 globalwinon(ch); 669 globalwinon(ch);
@@ -757,14 +689,10 @@ static int pc_write_room(struct tty_struct *tty)
757 } 689 }
758 /* Return how much room is left on card */ 690 /* Return how much room is left on card */
759 return remain; 691 return remain;
760 692}
761} /* End pc_write_room */
762
763/* ------------------ Begin pc_chars_in_buffer ---------------------- */
764 693
765static int pc_chars_in_buffer(struct tty_struct *tty) 694static int pc_chars_in_buffer(struct tty_struct *tty)
766{ /* Begin pc_chars_in_buffer */ 695{
767
768 int chars; 696 int chars;
769 unsigned int ctail, head, tail; 697 unsigned int ctail, head, tail;
770 int remain; 698 int remain;
@@ -772,13 +700,12 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
772 struct channel *ch; 700 struct channel *ch;
773 struct board_chan __iomem *bc; 701 struct board_chan __iomem *bc;
774 702
775 /* --------------------------------------------------------- 703 /*
776 verifyChannel returns the channel from the tty struct 704 * verifyChannel returns the channel from the tty struct if it is
777 if it is valid. This serves as a sanity check. 705 * valid. This serves as a sanity check.
778 ------------------------------------------------------------- */ 706 */
779
780 if ((ch = verifyChannel(tty)) == NULL) 707 if ((ch = verifyChannel(tty)) == NULL)
781 return(0); 708 return 0;
782 709
783 spin_lock_irqsave(&epca_lock, flags); 710 spin_lock_irqsave(&epca_lock, flags);
784 globalwinon(ch); 711 globalwinon(ch);
@@ -793,45 +720,40 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
793 else { /* Begin if some space on the card has been used */ 720 else { /* Begin if some space on the card has been used */
794 head = readw(&bc->tin) & (ch->txbufsize - 1); 721 head = readw(&bc->tin) & (ch->txbufsize - 1);
795 tail &= (ch->txbufsize - 1); 722 tail &= (ch->txbufsize - 1);
796 /* -------------------------------------------------------------- 723 /*
797 The logic here is basically opposite of the above pc_write_room 724 * The logic here is basically opposite of the above
798 here we are finding the amount of bytes in the buffer filled. 725 * pc_write_room here we are finding the amount of bytes in the
799 Not the amount of bytes empty. 726 * buffer filled. Not the amount of bytes empty.
800 ------------------------------------------------------------------- */ 727 */
801 if ((remain = tail - head - 1) < 0 ) 728 if ((remain = tail - head - 1) < 0 )
802 remain += ch->txbufsize; 729 remain += ch->txbufsize;
803 chars = (int)(ch->txbufsize - remain); 730 chars = (int)(ch->txbufsize - remain);
804 /* ------------------------------------------------------------- 731 /*
805 Make it possible to wakeup anything waiting for output 732 * Make it possible to wakeup anything waiting for output in
806 in tty_ioctl.c, etc. 733 * tty_ioctl.c, etc.
807 734 *
808 If not already set. Setup an event to indicate when the 735 * If not already set, set up an event to indicate when the
809 transmit buffer empties 736 * transmit buffer empties.
810 ----------------------------------------------------------------- */ 737 */
811 if (!(ch->statusflags & EMPTYWAIT)) 738 if (!(ch->statusflags & EMPTYWAIT))
812 setup_empty_event(tty,ch); 739 setup_empty_event(tty,ch);
813
814 } /* End if some space on the card has been used */ 740 } /* End if some space on the card has been used */
815 memoff(ch); 741 memoff(ch);
816 spin_unlock_irqrestore(&epca_lock, flags); 742 spin_unlock_irqrestore(&epca_lock, flags);
817 /* Return number of characters residing on card. */ 743 /* Return number of characters residing on card. */
818 return(chars); 744 return chars;
819 745}
820} /* End pc_chars_in_buffer */
821
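pc_chars_in_buffer() runs the free-space formula in reverse: remain = tail - head - 1 (wrapped up by the buffer size when negative) is the free space, and txbufsize - remain is returned, i.e. the queued bytes plus the one slot the ring always keeps empty; the completely-empty case is detected separately before this arithmetic, as in the hunk above. A stand-alone restatement (plain C, invented names):

#include <stdio.h>

#define TXBUFSIZE 16                    /* power of two, as on the card */

/* Occupancy as computed once the "completely empty" case is ruled out. */
static int chars_in_buffer(unsigned int head, unsigned int tail)
{
        int remain = (int)tail - (int)head - 1;         /* free space ... */

        if (remain < 0)
                remain += TXBUFSIZE;                    /* ... wrapped */
        return TXBUFSIZE - remain;      /* queued bytes + the reserved slot */
}

int main(void)
{
        printf("%d\n", chars_in_buffer(8, 6));  /* 2 bytes queued -> prints 3 */
        printf("%d\n", chars_in_buffer(5, 6));  /* ring full      -> prints 16 */
        return 0;
}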
822/* ------------------ Begin pc_flush_buffer ---------------------- */
823 746
824static void pc_flush_buffer(struct tty_struct *tty) 747static void pc_flush_buffer(struct tty_struct *tty)
825{ /* Begin pc_flush_buffer */ 748{
826
827 unsigned int tail; 749 unsigned int tail;
828 unsigned long flags; 750 unsigned long flags;
829 struct channel *ch; 751 struct channel *ch;
830 struct board_chan __iomem *bc; 752 struct board_chan __iomem *bc;
831 /* --------------------------------------------------------- 753 /*
832 verifyChannel returns the channel from the tty struct 754 * verifyChannel returns the channel from the tty struct if it is
833 if it is valid. This serves as a sanity check. 755 * valid. This serves as a sanity check.
834 ------------------------------------------------------------- */ 756 */
835 if ((ch = verifyChannel(tty)) == NULL) 757 if ((ch = verifyChannel(tty)) == NULL)
836 return; 758 return;
837 759
@@ -844,51 +766,47 @@ static void pc_flush_buffer(struct tty_struct *tty)
844 memoff(ch); 766 memoff(ch);
845 spin_unlock_irqrestore(&epca_lock, flags); 767 spin_unlock_irqrestore(&epca_lock, flags);
846 tty_wakeup(tty); 768 tty_wakeup(tty);
847} /* End pc_flush_buffer */ 769}
848
849/* ------------------ Begin pc_flush_chars ---------------------- */
850 770
851static void pc_flush_chars(struct tty_struct *tty) 771static void pc_flush_chars(struct tty_struct *tty)
852{ /* Begin pc_flush_chars */ 772{
853 struct channel * ch; 773 struct channel *ch;
854 /* --------------------------------------------------------- 774 /*
855 verifyChannel returns the channel from the tty struct 775 * verifyChannel returns the channel from the tty struct if it is
856 if it is valid. This serves as a sanity check. 776 * valid. This serves as a sanity check.
857 ------------------------------------------------------------- */ 777 */
858 if ((ch = verifyChannel(tty)) != NULL) { 778 if ((ch = verifyChannel(tty)) != NULL) {
859 unsigned long flags; 779 unsigned long flags;
860 spin_lock_irqsave(&epca_lock, flags); 780 spin_lock_irqsave(&epca_lock, flags);
861 /* ---------------------------------------------------------------- 781 /*
862 If not already set and the transmitter is busy setup an event 782 * If not already set and the transmitter is busy setup an
863 to indicate when the transmit empties. 783 * event to indicate when the transmit empties.
864 ------------------------------------------------------------------- */ 784 */
865 if ((ch->statusflags & TXBUSY) && !(ch->statusflags & EMPTYWAIT)) 785 if ((ch->statusflags & TXBUSY) && !(ch->statusflags & EMPTYWAIT))
866 setup_empty_event(tty,ch); 786 setup_empty_event(tty,ch);
867 spin_unlock_irqrestore(&epca_lock, flags); 787 spin_unlock_irqrestore(&epca_lock, flags);
868 } 788 }
869} /* End pc_flush_chars */ 789}
870
871/* ------------------ Begin block_til_ready ---------------------- */
872 790
873static int block_til_ready(struct tty_struct *tty, 791static int block_til_ready(struct tty_struct *tty,
874 struct file *filp, struct channel *ch) 792 struct file *filp, struct channel *ch)
875{ /* Begin block_til_ready */ 793{
876 DECLARE_WAITQUEUE(wait,current); 794 DECLARE_WAITQUEUE(wait,current);
877 int retval, do_clocal = 0; 795 int retval, do_clocal = 0;
878 unsigned long flags; 796 unsigned long flags;
879 797
880 if (tty_hung_up_p(filp)) { 798 if (tty_hung_up_p(filp)) {
881 if (ch->asyncflags & ASYNC_HUP_NOTIFY) 799 if (ch->asyncflags & ASYNC_HUP_NOTIFY)
882 retval = -EAGAIN; 800 retval = -EAGAIN;
883 else 801 else
884 retval = -ERESTARTSYS; 802 retval = -ERESTARTSYS;
885 return(retval); 803 return retval;
886 } 804 }
887 805
888 /* ----------------------------------------------------------------- 806 /*
889 If the device is in the middle of being closed, then block 807 * If the device is in the middle of being closed, then block until
890 until it's done, and then try again. 808 * it's done, and then try again.
891 -------------------------------------------------------------------- */ 809 */
892 if (ch->asyncflags & ASYNC_CLOSING) { 810 if (ch->asyncflags & ASYNC_CLOSING) {
893 interruptible_sleep_on(&ch->close_wait); 811 interruptible_sleep_on(&ch->close_wait);
894 812
@@ -899,17 +817,17 @@ static int block_til_ready(struct tty_struct *tty,
899 } 817 }
900 818
901 if (filp->f_flags & O_NONBLOCK) { 819 if (filp->f_flags & O_NONBLOCK) {
902 /* ----------------------------------------------------------------- 820 /*
903 If non-blocking mode is set, then make the check up front 821 * If non-blocking mode is set, then make the check up front
904 and then exit. 822 * and then exit.
905 -------------------------------------------------------------------- */ 823 */
906 ch->asyncflags |= ASYNC_NORMAL_ACTIVE; 824 ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
907 return 0; 825 return 0;
908 } 826 }
909 if (tty->termios->c_cflag & CLOCAL) 827 if (tty->termios->c_cflag & CLOCAL)
910 do_clocal = 1; 828 do_clocal = 1;
911 /* Block waiting for the carrier detect and the line to become free */ 829 /* Block waiting for the carrier detect and the line to become free */
912 830
913 retval = 0; 831 retval = 0;
914 add_wait_queue(&ch->open_wait, &wait); 832 add_wait_queue(&ch->open_wait, &wait);
915 833
@@ -918,19 +836,18 @@ static int block_til_ready(struct tty_struct *tty,
918 if (!tty_hung_up_p(filp)) 836 if (!tty_hung_up_p(filp))
919 ch->count--; 837 ch->count--;
920 ch->blocked_open++; 838 ch->blocked_open++;
921 while(1) 839 while (1) {
922 { /* Begin forever while */
923 set_current_state(TASK_INTERRUPTIBLE); 840 set_current_state(TASK_INTERRUPTIBLE);
924 if (tty_hung_up_p(filp) || 841 if (tty_hung_up_p(filp) ||
925 !(ch->asyncflags & ASYNC_INITIALIZED)) 842 !(ch->asyncflags & ASYNC_INITIALIZED))
926 { 843 {
927 if (ch->asyncflags & ASYNC_HUP_NOTIFY) 844 if (ch->asyncflags & ASYNC_HUP_NOTIFY)
928 retval = -EAGAIN; 845 retval = -EAGAIN;
929 else 846 else
930 retval = -ERESTARTSYS; 847 retval = -ERESTARTSYS;
931 break; 848 break;
932 } 849 }
933 if (!(ch->asyncflags & ASYNC_CLOSING) && 850 if (!(ch->asyncflags & ASYNC_CLOSING) &&
934 (do_clocal || (ch->imodem & ch->dcd))) 851 (do_clocal || (ch->imodem & ch->dcd)))
935 break; 852 break;
936 if (signal_pending(current)) { 853 if (signal_pending(current)) {
@@ -938,16 +855,15 @@ static int block_til_ready(struct tty_struct *tty,
938 break; 855 break;
939 } 856 }
940 spin_unlock_irqrestore(&epca_lock, flags); 857 spin_unlock_irqrestore(&epca_lock, flags);
941 /* --------------------------------------------------------------- 858 /*
942 Allow someone else to be scheduled. We will occasionally go 859 * Allow someone else to be scheduled. We will occasionally go
943 through this loop until one of the above conditions change. 860 * through this loop until one of the above conditions change.
944 The below schedule call will allow other processes to enter and 861 * The below schedule call will allow other processes to enter
945 prevent this loop from hogging the cpu. 862 * and prevent this loop from hogging the cpu.
946 ------------------------------------------------------------------ */ 863 */
947 schedule(); 864 schedule();
948 spin_lock_irqsave(&epca_lock, flags); 865 spin_lock_irqsave(&epca_lock, flags);
949 866 }
950 } /* End forever while */
951 867
952 __set_current_state(TASK_RUNNING); 868 __set_current_state(TASK_RUNNING);
953 remove_wait_queue(&ch->open_wait, &wait); 869 remove_wait_queue(&ch->open_wait, &wait);
@@ -962,13 +878,10 @@ static int block_til_ready(struct tty_struct *tty,
962 878
963 ch->asyncflags |= ASYNC_NORMAL_ACTIVE; 879 ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
964 return 0; 880 return 0;
965} /* End block_til_ready */ 881}
966
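block_til_ready() is the classic blocking-open loop: put the task on open_wait, re-test the condition (no close in progress, and carrier present or CLOCAL set) after every wakeup, and give up on hangup or a pending signal. A userspace analogue of just that wait-and-retest shape, using a pthread condition variable in place of the kernel wait queue (all names invented; the hangup, signal and count/blocked_open bookkeeping from the driver is omitted):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t open_wait = PTHREAD_COND_INITIALIZER;
static int carrier, closing, do_clocal;

static void block_til_ready(void)
{
        pthread_mutex_lock(&lock);
        while (closing || !(do_clocal || carrier))
                pthread_cond_wait(&open_wait, &lock);   /* like schedule() */
        pthread_mutex_unlock(&lock);
}

static void *modem_thread(void *arg)
{
        (void)arg;
        sleep(1);
        pthread_mutex_lock(&lock);
        carrier = 1;                            /* DCD came up */
        pthread_cond_broadcast(&open_wait);     /* like wake_up_interruptible() */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)                                  /* build with -pthread */
{
        pthread_t t;

        pthread_create(&t, NULL, modem_thread, NULL);
        block_til_ready();
        printf("open may proceed\n");
        pthread_join(t, NULL);
        return 0;
}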
967/* ------------------ Begin pc_open ---------------------- */
968 882
969static int pc_open(struct tty_struct *tty, struct file * filp) 883static int pc_open(struct tty_struct *tty, struct file * filp)
970{ /* Begin pc_open */ 884{
971
972 struct channel *ch; 885 struct channel *ch;
973 unsigned long flags; 886 unsigned long flags;
974 int line, retval, boardnum; 887 int line, retval, boardnum;
@@ -984,12 +897,11 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
984 897
985 /* Check status of board configured in system. */ 898 /* Check status of board configured in system. */
986 899
987 /* ----------------------------------------------------------------- 900 /*
988 I check to see if the epca_setup routine detected an user error. 901 * I check to see if the epca_setup routine detected a user error. It
989 It might be better to put this in pc_init, but for the moment it 902 * might be better to put this in pc_init, but for the moment it goes
990 goes here. 903 * here.
991 ---------------------------------------------------------------------- */ 904 */
992
993 if (invalid_lilo_config) { 905 if (invalid_lilo_config) {
994 if (setup_error_code & INVALID_BOARD_TYPE) 906 if (setup_error_code & INVALID_BOARD_TYPE)
995 printk(KERN_ERR "epca: pc_open: Invalid board type specified in kernel options.\n"); 907 printk(KERN_ERR "epca: pc_open: Invalid board type specified in kernel options.\n");
@@ -1010,49 +922,48 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1010 tty->driver_data = NULL; /* Mark this device as 'down' */ 922 tty->driver_data = NULL; /* Mark this device as 'down' */
1011 return(-ENODEV); 923 return(-ENODEV);
1012 } 924 }
1013 925
1014 if ((bc = ch->brdchan) == 0) { 926 if ((bc = ch->brdchan) == 0) {
1015 tty->driver_data = NULL; 927 tty->driver_data = NULL;
1016 return -ENODEV; 928 return -ENODEV;
1017 } 929 }
1018 930
1019 spin_lock_irqsave(&epca_lock, flags); 931 spin_lock_irqsave(&epca_lock, flags);
1020 /* ------------------------------------------------------------------ 932 /*
1021 Every time a channel is opened, increment a counter. This is 933 * Every time a channel is opened, increment a counter. This is
1022 necessary because we do not wish to flush and shutdown the channel 934 * necessary because we do not wish to flush and shutdown the channel
1023 until the last app holding the channel open, closes it. 935 * until the last app holding the channel open closes it.
1024 --------------------------------------------------------------------- */ 936 */
1025 ch->count++; 937 ch->count++;
1026 /* ---------------------------------------------------------------- 938 /*
1027 Set a kernel structures pointer to our local channel 939 * Set a kernel structure's pointer to our local channel structure. This
1028 structure. This way we can get to it when passed only 940 * way we can get to it when passed only a tty struct.
1029 a tty struct. 941 */
1030 ------------------------------------------------------------------ */
1031 tty->driver_data = ch; 942 tty->driver_data = ch;
1032 /* ---------------------------------------------------------------- 943 /*
1033 If this is the first time the channel has been opened, initialize 944 * If this is the first time the channel has been opened, initialize
1034 the tty->termios struct otherwise let pc_close handle it. 945 * the tty->termios struct otherwise let pc_close handle it.
1035 -------------------------------------------------------------------- */ 946 */
1036 globalwinon(ch); 947 globalwinon(ch);
1037 ch->statusflags = 0; 948 ch->statusflags = 0;
1038 949
1039 /* Save boards current modem status */ 950 /* Save boards current modem status */
1040 ch->imodem = readb(&bc->mstat); 951 ch->imodem = readb(&bc->mstat);
1041 952
1042 /* ---------------------------------------------------------------- 953 /*
1043 Set receive head and tail ptrs to each other. This indicates 954 * Set receive head and tail ptrs to each other. This indicates no data
1044 no data available to read. 955 * available to read.
1045 ----------------------------------------------------------------- */ 956 */
1046 head = readw(&bc->rin); 957 head = readw(&bc->rin);
1047 writew(head, &bc->rout); 958 writew(head, &bc->rout);
1048 959
1049 /* Set the channels associated tty structure */ 960 /* Set the channels associated tty structure */
1050 ch->tty = tty; 961 ch->tty = tty;
1051 962
1052 /* ----------------------------------------------------------------- 963 /*
1053 The below routine generally sets up parity, baud, flow control 964 * The below routine generally sets up parity, baud, flow control
1054 issues, etc.... It effect both control flags and input flags. 965 * issues, etc.... It affects both control flags and input flags.
1055 -------------------------------------------------------------------- */ 966 */
1056 epcaparam(tty,ch); 967 epcaparam(tty,ch);
1057 ch->asyncflags |= ASYNC_INITIALIZED; 968 ch->asyncflags |= ASYNC_INITIALIZED;
1058 memoff(ch); 969 memoff(ch);
@@ -1061,10 +972,10 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1061 retval = block_til_ready(tty, filp, ch); 972 retval = block_til_ready(tty, filp, ch);
1062 if (retval) 973 if (retval)
1063 return retval; 974 return retval;
1064 /* ------------------------------------------------------------- 975 /*
1065 Set this again in case a hangup set it to zero while this 976 * Set this again in case a hangup set it to zero while this open() was
1066 open() was waiting for the line... 977 * waiting for the line...
1067 --------------------------------------------------------------- */ 978 */
1068 spin_lock_irqsave(&epca_lock, flags); 979 spin_lock_irqsave(&epca_lock, flags);
1069 ch->tty = tty; 980 ch->tty = tty;
1070 globalwinon(ch); 981 globalwinon(ch);
@@ -1073,13 +984,12 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1073 memoff(ch); 984 memoff(ch);
1074 spin_unlock_irqrestore(&epca_lock, flags); 985 spin_unlock_irqrestore(&epca_lock, flags);
1075 return 0; 986 return 0;
1076} /* End pc_open */ 987}
1077 988
1078static int __init epca_module_init(void) 989static int __init epca_module_init(void)
1079{ /* Begin init_module */ 990{
1080 return pc_init(); 991 return pc_init();
1081} 992}
1082
1083module_init(epca_module_init); 993module_init(epca_module_init);
1084 994
1085static struct pci_driver epca_driver; 995static struct pci_driver epca_driver;
@@ -1092,8 +1002,7 @@ static void __exit epca_module_exit(void)
1092 1002
1093 del_timer_sync(&epca_timer); 1003 del_timer_sync(&epca_timer);
1094 1004
1095 if ((tty_unregister_driver(pc_driver)) || 1005 if (tty_unregister_driver(pc_driver) || tty_unregister_driver(pc_info))
1096 (tty_unregister_driver(pc_info)))
1097 { 1006 {
1098 printk(KERN_WARNING "epca: cleanup_module failed to un-register tty driver\n"); 1007 printk(KERN_WARNING "epca: cleanup_module failed to un-register tty driver\n");
1099 return; 1008 return;
@@ -1101,23 +1010,20 @@ static void __exit epca_module_exit(void)
1101 put_tty_driver(pc_driver); 1010 put_tty_driver(pc_driver);
1102 put_tty_driver(pc_info); 1011 put_tty_driver(pc_info);
1103 1012
1104 for (crd = 0; crd < num_cards; crd++) { /* Begin for each card */ 1013 for (crd = 0; crd < num_cards; crd++) {
1105 bd = &boards[crd]; 1014 bd = &boards[crd];
1106 if (!bd) 1015 if (!bd) { /* sanity check */
1107 { /* Begin sanity check */
1108 printk(KERN_ERR "<Error> - Digi : cleanup_module failed\n"); 1016 printk(KERN_ERR "<Error> - Digi : cleanup_module failed\n");
1109 return; 1017 return;
1110 } /* End sanity check */ 1018 }
1111 ch = card_ptr[crd]; 1019 ch = card_ptr[crd];
1112 for (count = 0; count < bd->numports; count++, ch++) 1020 for (count = 0; count < bd->numports; count++, ch++) {
1113 { /* Begin for each port */
1114 if (ch && ch->tty) 1021 if (ch && ch->tty)
1115 tty_hangup(ch->tty); 1022 tty_hangup(ch->tty);
1116 } /* End for each port */ 1023 }
1117 } /* End for each card */ 1024 }
1118 pci_unregister_driver (&epca_driver); 1025 pci_unregister_driver(&epca_driver);
1119} 1026}
1120
1121module_exit(epca_module_exit); 1027module_exit(epca_module_exit);
1122 1028
1123static const struct tty_operations pc_ops = { 1029static const struct tty_operations pc_ops = {
@@ -1148,10 +1054,8 @@ static struct tty_operations info_ops = {
1148 .ioctl = info_ioctl, 1054 .ioctl = info_ioctl,
1149}; 1055};
1150 1056
1151/* ------------------ Begin pc_init ---------------------- */
1152
1153static int __init pc_init(void) 1057static int __init pc_init(void)
1154{ /* Begin pc_init */ 1058{
1155 int crd; 1059 int crd;
1156 struct board_info *bd; 1060 struct board_info *bd;
1157 unsigned char board_id = 0; 1061 unsigned char board_id = 0;
@@ -1169,63 +1073,57 @@ static int __init pc_init(void)
1169 if (!pc_info) 1073 if (!pc_info)
1170 goto out2; 1074 goto out2;
1171 1075
1172 /* ----------------------------------------------------------------------- 1076 /*
1173 If epca_setup has not been ran by LILO set num_cards to defaults; copy 1077 * If epca_setup has not been run by LILO, set num_cards to defaults;
1174 board structure defined by digiConfig into drivers board structure. 1078 * copy board structure defined by digiConfig into drivers board
1175 Note : If LILO has ran epca_setup then epca_setup will handle defining 1079 * structure. Note : If LILO has run epca_setup then epca_setup will
1176 num_cards as well as copying the data into the board structure. 1080 * handle defining num_cards as well as copying the data into the board
1177 -------------------------------------------------------------------------- */ 1081 * structure.
1178 if (!liloconfig) { /* Begin driver has been configured via. epcaconfig */ 1082 */
1179 1083 if (!liloconfig) {
1084 /* driver has been configured via. epcaconfig */
1180 nbdevs = NBDEVS; 1085 nbdevs = NBDEVS;
1181 num_cards = NUMCARDS; 1086 num_cards = NUMCARDS;
1182 memcpy((void *)&boards, (void *)&static_boards, 1087 memcpy(&boards, &static_boards,
1183 (sizeof(struct board_info) * NUMCARDS)); 1088 sizeof(struct board_info) * NUMCARDS);
1184 } /* End driver has been configured via. epcaconfig */ 1089 }
1185
1186 /* -----------------------------------------------------------------
1187 Note : If lilo was used to configure the driver and the
1188 ignore epcaconfig option was choosen (digiepca=2) then
1189 nbdevs and num_cards will equal 0 at this point. This is
1190 okay; PCI cards will still be picked up if detected.
1191 --------------------------------------------------------------------- */
1192
1193 /* -----------------------------------------------------------
1194 Set up interrupt, we will worry about memory allocation in
1195 post_fep_init.
1196 --------------------------------------------------------------- */
1197 1090
1091 /*
1092 * Note : If lilo was used to configure the driver and the ignore
1093 * epcaconfig option was chosen (digiepca=2) then nbdevs and num_cards
1094 * will equal 0 at this point. This is okay; PCI cards will still be
1095 * picked up if detected.
1096 */
1198 1097
1098 /*
1099 * Set up interrupt, we will worry about memory allocation in
1100 * post_fep_init.
1101 */
1199 printk(KERN_INFO "DIGI epca driver version %s loaded.\n",VERSION); 1102 printk(KERN_INFO "DIGI epca driver version %s loaded.\n",VERSION);
1200 1103
1201 /* ------------------------------------------------------------------ 1104 /*
1202 NOTE : This code assumes that the number of ports found in 1105 * NOTE : This code assumes that the number of ports found in the
1203 the boards array is correct. This could be wrong if 1106 * boards array is correct. This could be wrong if the card in question
1204 the card in question is PCI (And therefore has no ports 1107 * is PCI (And therefore has no ports entry in the boards structure.)
1205 entry in the boards structure.) The rest of the 1108 * The rest of the information will be valid for PCI because the
1206 information will be valid for PCI because the beginning 1109 * beginning of pc_init scans for PCI and determines i/o and base
1207 of pc_init scans for PCI and determines i/o and base 1110 * memory addresses. I am not sure if it is possible to read the number
1208 memory addresses. I am not sure if it is possible to 1111 * of ports supported by the card prior to it being booted (Since that
1209 read the number of ports supported by the card prior to 1112 * is the state it is in when pc_init is run). Because it is not
1210 it being booted (Since that is the state it is in when 1113 * possible to query the number of supported ports until after the card
1211 pc_init is run). Because it is not possible to query the 1114 * has booted; we are required to calculate the card_ptrs as the card
1212 number of supported ports until after the card has booted; 1115 * is initialized (Inside post_fep_init). The negative thing about this
1213 we are required to calculate the card_ptrs as the card is 1116 * approach is that digiDload's call to GET_INFO will have a bad port
1214 is initialized (Inside post_fep_init). The negative thing 1117 * value. (Since this is called prior to post_fep_init.)
1215 about this approach is that digiDload's call to GET_INFO 1118 */
1216 will have a bad port value. (Since this is called prior
1217 to post_fep_init.)
1218
1219 --------------------------------------------------------------------- */
1220
1221 pci_boards_found = 0; 1119 pci_boards_found = 0;
1222 if(num_cards < MAXBOARDS) 1120 if (num_cards < MAXBOARDS)
1223 pci_boards_found += init_PCI(); 1121 pci_boards_found += init_PCI();
1224 num_cards += pci_boards_found; 1122 num_cards += pci_boards_found;
1225 1123
1226 pc_driver->owner = THIS_MODULE; 1124 pc_driver->owner = THIS_MODULE;
1227 pc_driver->name = "ttyD"; 1125 pc_driver->name = "ttyD";
1228 pc_driver->major = DIGI_MAJOR; 1126 pc_driver->major = DIGI_MAJOR;
1229 pc_driver->minor_start = 0; 1127 pc_driver->minor_start = 0;
1230 pc_driver->type = TTY_DRIVER_TYPE_SERIAL; 1128 pc_driver->type = TTY_DRIVER_TYPE_SERIAL;
1231 pc_driver->subtype = SERIAL_TYPE_NORMAL; 1129 pc_driver->subtype = SERIAL_TYPE_NORMAL;
@@ -1256,120 +1154,108 @@ static int __init pc_init(void)
1256 tty_set_operations(pc_info, &info_ops); 1154 tty_set_operations(pc_info, &info_ops);
1257 1155
1258 1156
1259 for (crd = 0; crd < num_cards; crd++) 1157 for (crd = 0; crd < num_cards; crd++) {
1260 { /* Begin for each card */ 1158 /*
1261 1159 * This is where the appropriate memory handlers for the
1262 /* ------------------------------------------------------------------ 1160 * hardware is set. Everything at runtime blindly jumps through
1263 This is where the appropriate memory handlers for the hardware is 1161 * these vectors.
1264 set. Everything at runtime blindly jumps through these vectors. 1162 */
1265 ---------------------------------------------------------------------- */
1266 1163
1267 /* defined in epcaconfig.h */ 1164 /* defined in epcaconfig.h */
1268 bd = &boards[crd]; 1165 bd = &boards[crd];
1269 1166
1270 switch (bd->type) 1167 switch (bd->type) {
1271 { /* Begin switch on bd->type {board type} */ 1168 case PCXEM:
1272 case PCXEM: 1169 case EISAXEM:
1273 case EISAXEM: 1170 bd->memwinon = pcxem_memwinon;
1274 bd->memwinon = pcxem_memwinon ; 1171 bd->memwinoff = pcxem_memwinoff;
1275 bd->memwinoff = pcxem_memwinoff ; 1172 bd->globalwinon = pcxem_globalwinon;
1276 bd->globalwinon = pcxem_globalwinon ; 1173 bd->txwinon = pcxem_txwinon;
1277 bd->txwinon = pcxem_txwinon ; 1174 bd->rxwinon = pcxem_rxwinon;
1278 bd->rxwinon = pcxem_rxwinon ; 1175 bd->memoff = pcxem_memoff;
1279 bd->memoff = pcxem_memoff ; 1176 bd->assertgwinon = dummy_assertgwinon;
1280 bd->assertgwinon = dummy_assertgwinon; 1177 bd->assertmemoff = dummy_assertmemoff;
1281 bd->assertmemoff = dummy_assertmemoff;
1282 break;
1283
1284 case PCIXEM:
1285 case PCIXRJ:
1286 case PCIXR:
1287 bd->memwinon = dummy_memwinon;
1288 bd->memwinoff = dummy_memwinoff;
1289 bd->globalwinon = dummy_globalwinon;
1290 bd->txwinon = dummy_txwinon;
1291 bd->rxwinon = dummy_rxwinon;
1292 bd->memoff = dummy_memoff;
1293 bd->assertgwinon = dummy_assertgwinon;
1294 bd->assertmemoff = dummy_assertmemoff;
1295 break;
1296
1297 case PCXE:
1298 case PCXEVE:
1299
1300 bd->memwinon = pcxe_memwinon;
1301 bd->memwinoff = pcxe_memwinoff;
1302 bd->globalwinon = pcxe_globalwinon;
1303 bd->txwinon = pcxe_txwinon;
1304 bd->rxwinon = pcxe_rxwinon;
1305 bd->memoff = pcxe_memoff;
1306 bd->assertgwinon = dummy_assertgwinon;
1307 bd->assertmemoff = dummy_assertmemoff;
1308 break;
1309
1310 case PCXI:
1311 case PC64XE:
1312
1313 bd->memwinon = pcxi_memwinon;
1314 bd->memwinoff = pcxi_memwinoff;
1315 bd->globalwinon = pcxi_globalwinon;
1316 bd->txwinon = pcxi_txwinon;
1317 bd->rxwinon = pcxi_rxwinon;
1318 bd->memoff = pcxi_memoff;
1319 bd->assertgwinon = pcxi_assertgwinon;
1320 bd->assertmemoff = pcxi_assertmemoff;
1321 break;
1322
1323 default:
1324 break;
1325
1326 } /* End switch on bd->type */
1327
1328 /* ---------------------------------------------------------------
1329 Some cards need a memory segment to be defined for use in
1330 transmit and receive windowing operations. These boards
1331 are listed in the below switch. In the case of the XI the
1332 amount of memory on the board is variable so the memory_seg
1333 is also variable. This code determines what they segment
1334 should be.
1335 ----------------------------------------------------------------- */
1336
1337 switch (bd->type)
1338 { /* Begin switch on bd->type {board type} */
1339
1340 case PCXE:
1341 case PCXEVE:
1342 case PC64XE:
1343 bd->memory_seg = 0xf000;
1344 break; 1178 break;
1345 1179
1346 case PCXI: 1180 case PCIXEM:
1347 board_id = inb((int)bd->port); 1181 case PCIXRJ:
1348 if ((board_id & 0x1) == 0x1) 1182 case PCIXR:
1349 { /* Begin it's an XI card */ 1183 bd->memwinon = dummy_memwinon;
1350 1184 bd->memwinoff = dummy_memwinoff;
1351 /* Is it a 64K board */ 1185 bd->globalwinon = dummy_globalwinon;
1352 if ((board_id & 0x30) == 0) 1186 bd->txwinon = dummy_txwinon;
1353 bd->memory_seg = 0xf000; 1187 bd->rxwinon = dummy_rxwinon;
1354 1188 bd->memoff = dummy_memoff;
1355 /* Is it a 128K board */ 1189 bd->assertgwinon = dummy_assertgwinon;
1356 if ((board_id & 0x30) == 0x10) 1190 bd->assertmemoff = dummy_assertmemoff;
1357 bd->memory_seg = 0xe000; 1191 break;
1358 1192
1359 /* Is is a 256K board */ 1193 case PCXE:
1360 if ((board_id & 0x30) == 0x20) 1194 case PCXEVE:
1361 bd->memory_seg = 0xc000; 1195 bd->memwinon = pcxe_memwinon;
1196 bd->memwinoff = pcxe_memwinoff;
1197 bd->globalwinon = pcxe_globalwinon;
1198 bd->txwinon = pcxe_txwinon;
1199 bd->rxwinon = pcxe_rxwinon;
1200 bd->memoff = pcxe_memoff;
1201 bd->assertgwinon = dummy_assertgwinon;
1202 bd->assertmemoff = dummy_assertmemoff;
1203 break;
1362 1204
1363 /* Is it a 512K board */ 1205 case PCXI:
1364 if ((board_id & 0x30) == 0x30) 1206 case PC64XE:
1365 bd->memory_seg = 0x8000; 1207 bd->memwinon = pcxi_memwinon;
1208 bd->memwinoff = pcxi_memwinoff;
1209 bd->globalwinon = pcxi_globalwinon;
1210 bd->txwinon = pcxi_txwinon;
1211 bd->rxwinon = pcxi_rxwinon;
1212 bd->memoff = pcxi_memoff;
1213 bd->assertgwinon = pcxi_assertgwinon;
1214 bd->assertmemoff = pcxi_assertmemoff;
1215 break;
1366 1216
1367 } else printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n",(int)bd->port); 1217 default:
1368 break; 1218 break;
1219 }
1369 1220
1370 } /* End switch on bd->type */ 1221 /*
1222 * Some cards need a memory segment to be defined for use in
1223 * transmit and receive windowing operations. These boards are
1224 * listed in the below switch. In the case of the XI the amount
1225 * of memory on the board is variable so the memory_seg is also
1226 * variable. This code determines what the segment should be.
1227 */
1228 switch (bd->type) {
1229 case PCXE:
1230 case PCXEVE:
1231 case PC64XE:
1232 bd->memory_seg = 0xf000;
1233 break;
1371 1234
1372 } /* End for each card */ 1235 case PCXI:
1236 board_id = inb((int)bd->port);
1237 if ((board_id & 0x1) == 0x1) {
1238 /* it's an XI card */
1239 /* Is it a 64K board */
1240 if ((board_id & 0x30) == 0)
1241 bd->memory_seg = 0xf000;
1242
1243 /* Is it a 128K board */
1244 if ((board_id & 0x30) == 0x10)
1245 bd->memory_seg = 0xe000;
1246
1247 /* Is it a 256K board */
1248 if ((board_id & 0x30) == 0x20)
1249 bd->memory_seg = 0xc000;
1250
1251 /* Is it a 512K board */
1252 if ((board_id & 0x30) == 0x30)
1253 bd->memory_seg = 0x8000;
1254 } else
1255 printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n",(int)bd->port);
1256 break;
1257 }
1258 }
1373 1259
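In the PCXI case above, bit 0 of the byte read from the board's I/O port says whether the card is an XI at all, and bits 4-5 select the memory size, which maps one-to-one onto the segments 0xf000 (64K), 0xe000 (128K), 0xc000 (256K) and 0x8000 (512K). The same decode restated as a table lookup (stand-alone C; xi_memory_seg() and the test value are invented for the example, and the inb() read is replaced by a constant):

#include <stdio.h>

/* Segment for each value of (board_id >> 4) & 3: 64K, 128K, 256K, 512K. */
static const unsigned int xi_segment[4] = { 0xf000, 0xe000, 0xc000, 0x8000 };

static int xi_memory_seg(unsigned char board_id, unsigned int *seg)
{
        if (!(board_id & 0x1))
                return -1;                      /* not an XI card */
        *seg = xi_segment[(board_id >> 4) & 0x3];
        return 0;
}

int main(void)
{
        unsigned char board_id = 0x21;          /* bit 0 set, size bits = 2 */
        unsigned int seg;

        if (xi_memory_seg(board_id, &seg) == 0)
                printf("memory_seg = 0x%x\n", seg);     /* 0xc000: 256K */
        else
                printf("not an XI board\n");
        return 0;
}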
1374 err = tty_register_driver(pc_driver); 1260 err = tty_register_driver(pc_driver);
1375 if (err) { 1261 if (err) {
@@ -1383,10 +1269,7 @@ static int __init pc_init(void)
1383 goto out4; 1269 goto out4;
1384 } 1270 }
1385 1271
1386 /* ------------------------------------------------------------------- 1272 /* Start up the poller to check for events on all enabled boards */
1387 Start up the poller to check for events on all enabled boards
1388 ---------------------------------------------------------------------- */
1389
1390 init_timer(&epca_timer); 1273 init_timer(&epca_timer);
1391 epca_timer.function = epcapoll; 1274 epca_timer.function = epcapoll;
1392 mod_timer(&epca_timer, jiffies + HZ/25); 1275 mod_timer(&epca_timer, jiffies + HZ/25);
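The two lines above arm epca_timer so that epcapoll() fires HZ/25 jiffies later, i.e. after about 40 ms, and the poller typically keeps itself running by re-arming the timer from inside epcapoll() (not shown in this hunk). A userspace analogue of that self-rearming 40 ms tick (plain C; the five-iteration bound and the function name are invented, the real poller runs until module exit):

#include <stdio.h>
#include <time.h>

static void epcapoll_analogue(void)
{
        /* the real epcapoll() scans every board and channel for events */
        printf("poll tick\n");
}

int main(void)
{
        struct timespec interval = { 0, 40 * 1000 * 1000 };     /* 40 ms */
        int i;

        for (i = 0; i < 5; i++) {
                epcapoll_analogue();
                nanosleep(&interval, NULL);
        }
        return 0;
}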
@@ -1400,51 +1283,47 @@ out2:
1400 put_tty_driver(pc_driver); 1283 put_tty_driver(pc_driver);
1401out1: 1284out1:
1402 return err; 1285 return err;
1403 1286}
1404} /* End pc_init */
1405
1406/* ------------------ Begin post_fep_init ---------------------- */
1407 1287
1408static void post_fep_init(unsigned int crd) 1288static void post_fep_init(unsigned int crd)
1409{ /* Begin post_fep_init */ 1289{
1410
1411 int i; 1290 int i;
1412 void __iomem *memaddr; 1291 void __iomem *memaddr;
1413 struct global_data __iomem *gd; 1292 struct global_data __iomem *gd;
1414 struct board_info *bd; 1293 struct board_info *bd;
1415 struct board_chan __iomem *bc; 1294 struct board_chan __iomem *bc;
1416 struct channel *ch; 1295 struct channel *ch;
1417 int shrinkmem = 0, lowwater ; 1296 int shrinkmem = 0, lowwater;
1418
1419 /* -------------------------------------------------------------
1420 This call is made by the user via. the ioctl call DIGI_INIT.
1421 It is responsible for setting up all the card specific stuff.
1422 ---------------------------------------------------------------- */
1423 bd = &boards[crd];
1424 1297
-1425		/* -----------------------------------------------------------------
-1426		   If this is a PCI board, get the port info.  Remember PCI cards
-1427		   do not have entries into the epcaconfig.h file, so we can't get
-1428		   the number of ports from it.  Unfortunetly, this means that anyone
-1429		   doing a DIGI_GETINFO before the board has booted will get an invalid
-1430		   number of ports returned (It should return 0).  Calls to DIGI_GETINFO
-1431		   after DIGI_INIT has been called will return the proper values.
-1432		------------------------------------------------------------------- */
+1298		/*
+1299		 * This call is made by the user via. the ioctl call DIGI_INIT. It is
+1300		 * responsible for setting up all the card specific stuff.
+1301		 */
+1302		bd = &boards[crd];
1433 1303
1304 /*
1305 * If this is a PCI board, get the port info. Remember PCI cards do not
1306 * have entries into the epcaconfig.h file, so we can't get the number
1307 * of ports from it. Unfortunetly, this means that anyone doing a
1308 * DIGI_GETINFO before the board has booted will get an invalid number
1309 * of ports returned (It should return 0). Calls to DIGI_GETINFO after
1310 * DIGI_INIT has been called will return the proper values.
1311 */
1434 1312		if (bd->type >= PCIXEM) { /* Begin get PCI number of ports */
-1435			/* --------------------------------------------------------------------
-1436			   Below we use XEMPORTS as a memory offset regardless of which PCI
-1437			   card it is.  This is because all of the supported PCI cards have
-1438			   the same memory offset for the channel data.  This will have to be
-1439			   changed if we ever develop a PCI/XE card.  NOTE : The FEP manual
-1440			   states that the port offset is 0xC22 as opposed to 0xC02.  This is
-1441			   only true for PC/XE, and PC/XI cards; not for the XEM, or CX series.
-1442			   On the PCI cards the number of ports is determined by reading a
-1443			   ID PROM located in the box attached to the card.  The card can then
-1444			   determine the index the id to determine the number of ports available.
-1445			   (FYI - The id should be located at 0x1ac (And may use up to 4 bytes
-1446			   if the box in question is a XEM or CX)).
-1447			------------------------------------------------------------------------ */
+1313			/*
+1314			 * Below we use XEMPORTS as a memory offset regardless of which
+1315			 * PCI card it is. This is because all of the supported PCI
+1316			 * cards have the same memory offset for the channel data. This
+1317			 * will have to be changed if we ever develop a PCI/XE card.
+1318			 * NOTE : The FEP manual states that the port offset is 0xC22
+1319			 * as opposed to 0xC02. This is only true for PC/XE, and PC/XI
+1320			 * cards; not for the XEM, or CX series. On the PCI cards the
+1321			 * number of ports is determined by reading a ID PROM located
+1322			 * in the box attached to the card. The card can then determine
+1323			 * the index the id to determine the number of ports available.
+1324			 * (FYI - The id should be located at 0x1ac (And may use up to
+1325			 * 4 bytes if the box in question is a XEM or CX)).
+1326			 */
1448 1327		/* PCI cards are already remapped at this point ISA are not */
1449 1328		bd->numports = readw(bd->re_map_membase + XEMPORTS);
1450 1329		epcaassert(bd->numports <= 64,"PCI returned a invalid number of ports");
@@ -1465,95 +1344,87 @@ static void post_fep_init(unsigned int crd)
1465 1344
1466 memaddr = bd->re_map_membase; 1345 memaddr = bd->re_map_membase;
1467 1346
1468 /* ----------------------------------------------------------------- 1347 /*
1469 The below assignment will set bc to point at the BEGINING of 1348 * The below assignment will set bc to point at the BEGINING of the
1470 the cards channel structures. For 1 card there will be between 1349 * cards channel structures. For 1 card there will be between 8 and 64
1471 8 and 64 of these structures. 1350 * of these structures.
1472 -------------------------------------------------------------------- */ 1351 */
1473
1474 bc = memaddr + CHANSTRUCT; 1352 bc = memaddr + CHANSTRUCT;
1475 1353
1476 /* ------------------------------------------------------------------- 1354 /*
1477 The below assignment will set gd to point at the BEGINING of 1355 * The below assignment will set gd to point at the BEGINING of global
1478 global memory address 0xc00. The first data in that global 1356 * memory address 0xc00. The first data in that global memory actually
1479 memory actually starts at address 0xc1a. The command in 1357 * starts at address 0xc1a. The command in pointer begins at 0xd10.
1480 pointer begins at 0xd10. 1358 */
1481 ---------------------------------------------------------------------- */
1482
1483 gd = memaddr + GLOBAL; 1359 gd = memaddr + GLOBAL;
1484 1360
1485 /* -------------------------------------------------------------------- 1361 /*
1486 XEPORTS (address 0xc22) points at the number of channels the 1362 * XEPORTS (address 0xc22) points at the number of channels the card
1487 card supports. (For 64XE, XI, XEM, and XR use 0xc02) 1363 * supports. (For 64XE, XI, XEM, and XR use 0xc02)
1488 ----------------------------------------------------------------------- */ 1364 */
1489
1490 if ((bd->type == PCXEVE || bd->type == PCXE) && (readw(memaddr + XEPORTS) < 3)) 1365 if ((bd->type == PCXEVE || bd->type == PCXE) && (readw(memaddr + XEPORTS) < 3))
1491 shrinkmem = 1; 1366 shrinkmem = 1;
1492 if (bd->type < PCIXEM) 1367 if (bd->type < PCIXEM)
1493 if (!request_region((int)bd->port, 4, board_desc[bd->type])) 1368 if (!request_region((int)bd->port, 4, board_desc[bd->type]))
1494 return; 1369 return;
1495 memwinon(bd, 0); 1370 memwinon(bd, 0);
1496 1371
1497 /* -------------------------------------------------------------------- 1372 /*
1498 Remember ch is the main drivers channels structure, while bc is 1373 * Remember ch is the main drivers channels structure, while bc is the
1499 the cards channel structure. 1374 * cards channel structure.
1500 ------------------------------------------------------------------------ */ 1375 */
1501 1376 for (i = 0; i < bd->numports; i++, ch++, bc++) {
1502 /* For every port on the card do ..... */
1503
1504 for (i = 0; i < bd->numports; i++, ch++, bc++) { /* Begin for each port */
1505 unsigned long flags; 1377 unsigned long flags;
1506 u16 tseg, rseg; 1378 u16 tseg, rseg;
1507 1379
1508 ch->brdchan = bc; 1380 ch->brdchan = bc;
1509 ch->mailbox = gd; 1381 ch->mailbox = gd;
1510 INIT_WORK(&ch->tqueue, do_softint); 1382 INIT_WORK(&ch->tqueue, do_softint);
1511 ch->board = &boards[crd]; 1383 ch->board = &boards[crd];
1512 1384
1513 spin_lock_irqsave(&epca_lock, flags); 1385 spin_lock_irqsave(&epca_lock, flags);
1514 switch (bd->type) { 1386 switch (bd->type) {
1515 /* ---------------------------------------------------------------- 1387 /*
1516 Since some of the boards use different bitmaps for their 1388 * Since some of the boards use different bitmaps for
1517 control signals we cannot hard code these values and retain 1389 * their control signals we cannot hard code these
1518 portability. We virtualize this data here. 1390 * values and retain portability. We virtualize this
1519 ------------------------------------------------------------------- */ 1391 * data here.
1520 case EISAXEM: 1392 */
1521 case PCXEM: 1393 case EISAXEM:
1522 case PCIXEM: 1394 case PCXEM:
1523 case PCIXRJ: 1395 case PCIXEM:
1524 case PCIXR: 1396 case PCIXRJ:
1525 ch->m_rts = 0x02 ; 1397 case PCIXR:
1526 ch->m_dcd = 0x80 ; 1398 ch->m_rts = 0x02;
1527 ch->m_dsr = 0x20 ; 1399 ch->m_dcd = 0x80;
1528 ch->m_cts = 0x10 ; 1400 ch->m_dsr = 0x20;
1529 ch->m_ri = 0x40 ; 1401 ch->m_cts = 0x10;
1530 ch->m_dtr = 0x01 ; 1402 ch->m_ri = 0x40;
1531 break; 1403 ch->m_dtr = 0x01;
1532 1404 break;
1533 case PCXE: 1405
1534 case PCXEVE: 1406 case PCXE:
1535 case PCXI: 1407 case PCXEVE:
1536 case PC64XE: 1408 case PCXI:
1537 ch->m_rts = 0x02 ; 1409 case PC64XE:
1538 ch->m_dcd = 0x08 ; 1410 ch->m_rts = 0x02;
1539 ch->m_dsr = 0x10 ; 1411 ch->m_dcd = 0x08;
1540 ch->m_cts = 0x20 ; 1412 ch->m_dsr = 0x10;
1541 ch->m_ri = 0x40 ; 1413 ch->m_cts = 0x20;
1542 ch->m_dtr = 0x80 ; 1414 ch->m_ri = 0x40;
1543 break; 1415 ch->m_dtr = 0x80;
1544 1416 break;
1545 } /* End switch bd->type */ 1417 }
1546 1418
1547 if (boards[crd].altpin) { 1419 if (boards[crd].altpin) {
1548 ch->dsr = ch->m_dcd; 1420 ch->dsr = ch->m_dcd;
1549 ch->dcd = ch->m_dsr; 1421 ch->dcd = ch->m_dsr;
1550 ch->digiext.digi_flags |= DIGI_ALTPIN; 1422 ch->digiext.digi_flags |= DIGI_ALTPIN;
1551 } 1423 } else {
1552 else {
1553 ch->dcd = ch->m_dcd; 1424 ch->dcd = ch->m_dcd;
1554 ch->dsr = ch->m_dsr; 1425 ch->dsr = ch->m_dsr;
1555 } 1426 }
1556 1427
1557 ch->boardnum = crd; 1428 ch->boardnum = crd;
1558 ch->channelnum = i; 1429 ch->channelnum = i;
1559 ch->magic = EPCA_MAGIC; 1430 ch->magic = EPCA_MAGIC;
@@ -1568,71 +1439,67 @@ static void post_fep_init(unsigned int crd)
1568 rseg = readw(&bc->rseg); 1439 rseg = readw(&bc->rseg);
1569 1440
1570 switch (bd->type) { 1441 switch (bd->type) {
1442 case PCIXEM:
1443 case PCIXRJ:
1444 case PCIXR:
1445 /* Cover all the 2MEG cards */
1446 ch->txptr = memaddr + ((tseg << 4) & 0x1fffff);
1447 ch->rxptr = memaddr + ((rseg << 4) & 0x1fffff);
1448 ch->txwin = FEPWIN | (tseg >> 11);
1449 ch->rxwin = FEPWIN | (rseg >> 11);
1450 break;
1571 1451
1572 case PCIXEM: 1452 case PCXEM:
1573 case PCIXRJ: 1453 case EISAXEM:
1574 case PCIXR: 1454 /* Cover all the 32K windowed cards */
1575 /* Cover all the 2MEG cards */ 1455 /* Mask equal to window size - 1 */
1576 ch->txptr = memaddr + ((tseg << 4) & 0x1fffff); 1456 ch->txptr = memaddr + ((tseg << 4) & 0x7fff);
1577 ch->rxptr = memaddr + ((rseg << 4) & 0x1fffff); 1457 ch->rxptr = memaddr + ((rseg << 4) & 0x7fff);
1578 ch->txwin = FEPWIN | (tseg >> 11); 1458 ch->txwin = FEPWIN | (tseg >> 11);
1579 ch->rxwin = FEPWIN | (rseg >> 11); 1459 ch->rxwin = FEPWIN | (rseg >> 11);
1580 break; 1460 break;
1581 1461
1582 case PCXEM: 1462 case PCXEVE:
1583 case EISAXEM: 1463 case PCXE:
1584 /* Cover all the 32K windowed cards */ 1464 ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4) & 0x1fff);
1585 /* Mask equal to window size - 1 */ 1465 ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9);
1586 ch->txptr = memaddr + ((tseg << 4) & 0x7fff); 1466 ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4) & 0x1fff);
1587 ch->rxptr = memaddr + ((rseg << 4) & 0x7fff); 1467 ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >>9 );
1588 ch->txwin = FEPWIN | (tseg >> 11); 1468 break;
1589 ch->rxwin = FEPWIN | (rseg >> 11); 1469
1590 break; 1470 case PCXI:
1591 1471 case PC64XE:
1592 case PCXEVE: 1472 ch->txptr = memaddr + ((tseg - bd->memory_seg) << 4);
1593 case PCXE: 1473 ch->rxptr = memaddr + ((rseg - bd->memory_seg) << 4);
1594 ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4) & 0x1fff); 1474 ch->txwin = ch->rxwin = 0;
1595 ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9); 1475 break;
1596 ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4) & 0x1fff); 1476 }
1597 ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >>9 );
1598 break;
1599
1600 case PCXI:
1601 case PC64XE:
1602 ch->txptr = memaddr + ((tseg - bd->memory_seg) << 4);
1603 ch->rxptr = memaddr + ((rseg - bd->memory_seg) << 4);
1604 ch->txwin = ch->rxwin = 0;
1605 break;
1606
1607 } /* End switch bd->type */
1608 1477
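The switch that ends here converts the 16-bit paragraph addresses the FEP reports in bc->tseg and bc->rseg into a pointer offset plus a window-select value, with a different mask per card family. Below is a small stand-alone sketch of the 32K-window (PC/XEM, EISA XEM) case only; map_seg_32k(), struct winmap and the FEPWIN value used here are assumptions made for the demo, not the driver's definitions.

    /*
     * Standalone sketch (not driver code): turn a FEP "segment" value (a
     * 16-byte paragraph address) into a byte offset plus a window number,
     * mirroring the 32K-window case of the switch above.
     */
    #include <stdio.h>

    #define FEPWIN  0x80            /* placeholder window-enable bit, not epca.h */

    struct winmap {
            unsigned int offset;    /* offset of the buffer inside the 32K window */
            unsigned int window;    /* value used to select that window           */
    };

    static struct winmap map_seg_32k(unsigned short seg)
    {
            struct winmap m;

            m.offset = (seg << 4) & 0x7fff;  /* paragraphs -> bytes, modulo 32K  */
            m.window = FEPWIN | (seg >> 11); /* each window is 0x800 paragraphs  */
            return m;
    }

    int main(void)
    {
            unsigned short tseg = 0x1a40;    /* hypothetical value read from bc->tseg */
            struct winmap m = map_seg_32k(tseg);

            printf("offset = 0x%x, window = 0x%x\n", m.offset, m.window);
            return 0;
    }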
1609 ch->txbufhead = 0; 1478 ch->txbufhead = 0;
1610 ch->txbufsize = readw(&bc->tmax) + 1; 1479 ch->txbufsize = readw(&bc->tmax) + 1;
1611 1480
1612 ch->rxbufhead = 0; 1481 ch->rxbufhead = 0;
1613 ch->rxbufsize = readw(&bc->rmax) + 1; 1482 ch->rxbufsize = readw(&bc->rmax) + 1;
1614 1483
1615 lowwater = ch->txbufsize >= 2000 ? 1024 : (ch->txbufsize / 2); 1484 lowwater = ch->txbufsize >= 2000 ? 1024 : (ch->txbufsize / 2);
1616 1485
1617 /* Set transmitter low water mark */ 1486 /* Set transmitter low water mark */
1618 fepcmd(ch, STXLWATER, lowwater, 0, 10, 0); 1487 fepcmd(ch, STXLWATER, lowwater, 0, 10, 0);
1619 1488
1620 /* Set receiver low water mark */ 1489 /* Set receiver low water mark */
1621
1622 fepcmd(ch, SRXLWATER, (ch->rxbufsize / 4), 0, 10, 0); 1490 fepcmd(ch, SRXLWATER, (ch->rxbufsize / 4), 0, 10, 0);
1623 1491
1624 /* Set receiver high water mark */ 1492 /* Set receiver high water mark */
1625
1626 fepcmd(ch, SRXHWATER, (3 * ch->rxbufsize / 4), 0, 10, 0); 1493 fepcmd(ch, SRXHWATER, (3 * ch->rxbufsize / 4), 0, 10, 0);
1627 1494
1628 writew(100, &bc->edelay); 1495 writew(100, &bc->edelay);
1629 writeb(1, &bc->idata); 1496 writeb(1, &bc->idata);
1630 1497
1631 ch->startc = readb(&bc->startc); 1498 ch->startc = readb(&bc->startc);
1632 ch->stopc = readb(&bc->stopc); 1499 ch->stopc = readb(&bc->stopc);
1633 ch->startca = readb(&bc->startca); 1500 ch->startca = readb(&bc->startca);
1634 ch->stopca = readb(&bc->stopca); 1501 ch->stopca = readb(&bc->stopca);
1635 1502
1636 ch->fepcflag = 0; 1503 ch->fepcflag = 0;
1637 ch->fepiflag = 0; 1504 ch->fepiflag = 0;
1638 ch->fepoflag = 0; 1505 ch->fepoflag = 0;
@@ -1640,7 +1507,7 @@ static void post_fep_init(unsigned int crd)
1640 ch->fepstopc = 0; 1507 ch->fepstopc = 0;
1641 ch->fepstartca = 0; 1508 ch->fepstartca = 0;
1642 ch->fepstopca = 0; 1509 ch->fepstopca = 0;
1643 1510
1644 ch->close_delay = 50; 1511 ch->close_delay = 50;
1645 ch->count = 0; 1512 ch->count = 0;
1646 ch->blocked_open = 0; 1513 ch->blocked_open = 0;
@@ -1648,80 +1515,66 @@ static void post_fep_init(unsigned int crd)
1648 init_waitqueue_head(&ch->close_wait); 1515 init_waitqueue_head(&ch->close_wait);
1649 1516
1650 spin_unlock_irqrestore(&epca_lock, flags); 1517 spin_unlock_irqrestore(&epca_lock, flags);
1651 } /* End for each port */ 1518 }
1652 1519
1653 printk(KERN_INFO 1520 printk(KERN_INFO
1654 "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n", 1521 "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n",
1655 VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase, bd->numports); 1522 VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase, bd->numports);
1656 memwinoff(bd, 0); 1523 memwinoff(bd, 0);
1657 1524}
1658} /* End post_fep_init */
1659
1660/* --------------------- Begin epcapoll ------------------------ */
1661 1525
1662static void epcapoll(unsigned long ignored) 1526static void epcapoll(unsigned long ignored)
1663{ /* Begin epcapoll */ 1527{
1664
1665 unsigned long flags; 1528 unsigned long flags;
1666 int crd; 1529 int crd;
1667 volatile unsigned int head, tail; 1530 volatile unsigned int head, tail;
1668 struct channel *ch; 1531 struct channel *ch;
1669 struct board_info *bd; 1532 struct board_info *bd;
1670 1533
1671 /* ------------------------------------------------------------------- 1534 /*
1672 This routine is called upon every timer interrupt. Even though 1535 * This routine is called upon every timer interrupt. Even though the
1673 the Digi series cards are capable of generating interrupts this 1536 * Digi series cards are capable of generating interrupts this method
1674 method of non-looping polling is more efficient. This routine 1537 * of non-looping polling is more efficient. This routine checks for
1675 checks for card generated events (Such as receive data, are transmit 1538 * card generated events (Such as receive data, are transmit buffer
1676 buffer empty) and acts on those events. 1539 * empty) and acts on those events.
1677 ----------------------------------------------------------------------- */ 1540 */
1678 1541 for (crd = 0; crd < num_cards; crd++) {
1679 for (crd = 0; crd < num_cards; crd++)
1680 { /* Begin for each card */
1681
1682 bd = &boards[crd]; 1542 bd = &boards[crd];
1683 ch = card_ptr[crd]; 1543 ch = card_ptr[crd];
1684 1544
1685 if ((bd->status == DISABLED) || digi_poller_inhibited) 1545 if ((bd->status == DISABLED) || digi_poller_inhibited)
1686 continue; /* Begin loop next interation */ 1546 continue;
1687
1688 /* -----------------------------------------------------------
1689 assertmemoff is not needed here; indeed it is an empty subroutine.
1690 It is being kept because future boards may need this as well as
1691 some legacy boards.
1692 ---------------------------------------------------------------- */
1693 1547
1548 /*
1549 * assertmemoff is not needed here; indeed it is an empty
1550 * subroutine. It is being kept because future boards may need
1551 * this as well as some legacy boards.
1552 */
1694 spin_lock_irqsave(&epca_lock, flags); 1553 spin_lock_irqsave(&epca_lock, flags);
1695 1554
1696 assertmemoff(ch); 1555 assertmemoff(ch);
1697 1556
1698 globalwinon(ch); 1557 globalwinon(ch);
1699 1558
1700 /* --------------------------------------------------------------- 1559 /*
1701 In this case head and tail actually refer to the event queue not 1560 * In this case head and tail actually refer to the event queue
1702 the transmit or receive queue. 1561 * not the transmit or receive queue.
1703 ------------------------------------------------------------------- */ 1562 */
1704
1705 head = readw(&ch->mailbox->ein); 1563 head = readw(&ch->mailbox->ein);
1706 tail = readw(&ch->mailbox->eout); 1564 tail = readw(&ch->mailbox->eout);
1707
1708 /* If head isn't equal to tail we have an event */
1709 1565
1566 /* If head isn't equal to tail we have an event */
1710 if (head != tail) 1567 if (head != tail)
1711 doevent(crd); 1568 doevent(crd);
1712 memoff(ch); 1569 memoff(ch);
1713 1570
1714 spin_unlock_irqrestore(&epca_lock, flags); 1571 spin_unlock_irqrestore(&epca_lock, flags);
1715
1716 } /* End for each card */ 1572 } /* End for each card */
1717 mod_timer(&epca_timer, jiffies + (HZ / 25)); 1573 mod_timer(&epca_timer, jiffies + (HZ / 25));
1718} /* End epcapoll */ 1574}
1719
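epcapoll() re-arms itself every HZ/25 jiffies (40 ms) and, per enabled board, only calls doevent() when the event-queue head (ein) has moved past the tail (eout). A tiny stand-alone sketch of that test follows, with invented values; board_has_events() is not a driver function.

    /*
     * Standalone sketch (not driver code): the per-board decision epcapoll
     * makes.  "ein"/"eout" stand in for the mailbox words read with readw().
     */
    #include <stdio.h>
    #include <stdbool.h>

    static bool board_has_events(unsigned short ein, unsigned short eout)
    {
            /* The FEP advances ein as it posts events; the host advances eout. */
            return ein != eout;
    }

    int main(void)
    {
            printf("%d\n", board_has_events(0x10, 0x0c));   /* 1: events pending */
            printf("%d\n", board_has_events(0x10, 0x10));   /* 0: queue drained  */
            return 0;
    }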
1720/* --------------------- Begin doevent ------------------------ */
1721 1575
1722static void doevent(int crd) 1576static void doevent(int crd)
1723{ /* Begin doevent */ 1577{
1724
1725 void __iomem *eventbuf; 1578 void __iomem *eventbuf;
1726 struct channel *ch, *chan0; 1579 struct channel *ch, *chan0;
1727 static struct tty_struct *tty; 1580 static struct tty_struct *tty;
@@ -1731,28 +1584,28 @@ static void doevent(int crd)
1731 int event, channel; 1584 int event, channel;
1732 int mstat, lstat; 1585 int mstat, lstat;
1733 1586
1734 /* ------------------------------------------------------------------- 1587 /*
1735 This subroutine is called by epcapoll when an event is detected 1588 * This subroutine is called by epcapoll when an event is detected
1736 in the event queue. This routine responds to those events. 1589 * in the event queue. This routine responds to those events.
1737 --------------------------------------------------------------------- */ 1590 */
1738 bd = &boards[crd]; 1591 bd = &boards[crd];
1739 1592
1740 chan0 = card_ptr[crd]; 1593 chan0 = card_ptr[crd];
1741 epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range"); 1594 epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range");
1742 assertgwinon(chan0); 1595 assertgwinon(chan0);
1743 while ((tail = readw(&chan0->mailbox->eout)) != (head = readw(&chan0->mailbox->ein))) 1596 while ((tail = readw(&chan0->mailbox->eout)) != (head = readw(&chan0->mailbox->ein))) { /* Begin while something in event queue */
1744 { /* Begin while something in event queue */
1745 assertgwinon(chan0); 1597 assertgwinon(chan0);
1746 eventbuf = bd->re_map_membase + tail + ISTART; 1598 eventbuf = bd->re_map_membase + tail + ISTART;
1747 /* Get the channel the event occurred on */ 1599 /* Get the channel the event occurred on */
1748 channel = readb(eventbuf); 1600 channel = readb(eventbuf);
1749 /* Get the actual event code that occurred */ 1601 /* Get the actual event code that occurred */
1750 event = readb(eventbuf + 1); 1602 event = readb(eventbuf + 1);
1751 /* ---------------------------------------------------------------- 1603 /*
1752 The two assignments below get the current modem status (mstat) 1604 * The two assignments below get the current modem status
1753 and the previous modem status (lstat). These are useful becuase 1605 * (mstat) and the previous modem status (lstat). These are
1754 an event could signal a change in modem signals itself. 1606 * useful becuase an event could signal a change in modem
1755 ------------------------------------------------------------------- */ 1607 * signals itself.
1608 */
1756 mstat = readb(eventbuf + 2); 1609 mstat = readb(eventbuf + 2);
1757 lstat = readb(eventbuf + 3); 1610 lstat = readb(eventbuf + 3);
1758 1611
@@ -1772,37 +1625,36 @@ static void doevent(int crd)
1772 assertgwinon(ch); 1625 assertgwinon(ch);
1773 } /* End DATA_IND */ 1626 } /* End DATA_IND */
1774 /* else *//* Fix for DCD transition missed bug */ 1627 /* else *//* Fix for DCD transition missed bug */
1775 if (event & MODEMCHG_IND) { /* Begin MODEMCHG_IND */ 1628 if (event & MODEMCHG_IND) {
1776 /* A modem signal change has been indicated */ 1629 /* A modem signal change has been indicated */
1777 ch->imodem = mstat; 1630 ch->imodem = mstat;
1778 if (ch->asyncflags & ASYNC_CHECK_CD) { 1631 if (ch->asyncflags & ASYNC_CHECK_CD) {
1779 if (mstat & ch->dcd) /* We are now receiving dcd */ 1632 if (mstat & ch->dcd) /* We are now receiving dcd */
1780 wake_up_interruptible(&ch->open_wait); 1633 wake_up_interruptible(&ch->open_wait);
1781 else 1634 else
1782 pc_sched_event(ch, EPCA_EVENT_HANGUP); /* No dcd; hangup */ 1635 pc_sched_event(ch, EPCA_EVENT_HANGUP); /* No dcd; hangup */
1783 } 1636 }
1784 } /* End MODEMCHG_IND */ 1637 }
1785 tty = ch->tty; 1638 tty = ch->tty;
1786 if (tty) { /* Begin if valid tty */ 1639 if (tty) {
1787 if (event & BREAK_IND) { /* Begin if BREAK_IND */ 1640 if (event & BREAK_IND) {
1788 /* A break has been indicated */ 1641 /* A break has been indicated */
1789 tty_insert_flip_char(tty, 0, TTY_BREAK); 1642 tty_insert_flip_char(tty, 0, TTY_BREAK);
1790 tty_schedule_flip(tty); 1643 tty_schedule_flip(tty);
1791 } else if (event & LOWTX_IND) { /* Begin LOWTX_IND */ 1644 } else if (event & LOWTX_IND) {
1792 if (ch->statusflags & LOWWAIT) 1645 if (ch->statusflags & LOWWAIT) {
1793 { /* Begin if LOWWAIT */
1794 ch->statusflags &= ~LOWWAIT; 1646 ch->statusflags &= ~LOWWAIT;
1795 tty_wakeup(tty); 1647 tty_wakeup(tty);
1796 } /* End if LOWWAIT */ 1648 }
1797 } else if (event & EMPTYTX_IND) { /* Begin EMPTYTX_IND */ 1649 } else if (event & EMPTYTX_IND) {
1798 /* This event is generated by setup_empty_event */ 1650 /* This event is generated by setup_empty_event */
1799 ch->statusflags &= ~TXBUSY; 1651 ch->statusflags &= ~TXBUSY;
1800 if (ch->statusflags & EMPTYWAIT) { /* Begin if EMPTYWAIT */ 1652 if (ch->statusflags & EMPTYWAIT) {
1801 ch->statusflags &= ~EMPTYWAIT; 1653 ch->statusflags &= ~EMPTYWAIT;
1802 tty_wakeup(tty); 1654 tty_wakeup(tty);
1803 } /* End if EMPTYWAIT */ 1655 }
1804 } /* End EMPTYTX_IND */ 1656 }
1805 } /* End if valid tty */ 1657 }
1806 next: 1658 next:
1807 globalwinon(ch); 1659 globalwinon(ch);
1808 BUG_ON(!bc); 1660 BUG_ON(!bc);
@@ -1810,13 +1662,11 @@ static void doevent(int crd)
1810 writew((tail + 4) & (IMAX - ISTART - 4), &chan0->mailbox->eout); 1662 writew((tail + 4) & (IMAX - ISTART - 4), &chan0->mailbox->eout);
1811 globalwinon(chan0); 1663 globalwinon(chan0);
1812 } /* End while something in event queue */ 1664 } /* End while something in event queue */
1813} /* End doevent */ 1665}
1814
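doevent() walks 4-byte event records (channel, event code, current and previous modem status) and then advances the consumer index with the masked writew() shown above. The sketch below mirrors that record layout and tail arithmetic in plain C; struct fep_event, the helper names and the ISTART/IMAX placeholder values are all invented for the demo (the real constants live in epca.h).

    /*
     * Standalone sketch (not driver code): the 4-byte event records doevent
     * walks.  ISTART/IMAX are placeholders chosen so that IMAX - ISTART is a
     * power of two, as the wrap arithmetic requires.
     */
    #include <stdio.h>

    #define ISTART  0x0800                  /* placeholder */
    #define IMAX    0x0c00                  /* placeholder */

    struct fep_event {
            unsigned char channel;          /* port the event refers to   */
            unsigned char event;            /* event bits (DATA_IND, ...) */
            unsigned char mstat;            /* current modem lines        */
            unsigned char lstat;            /* previous modem lines       */
    };

    static struct fep_event parse_event(const unsigned char *buf)
    {
            struct fep_event ev = { buf[0], buf[1], buf[2], buf[3] };
            return ev;
    }

    /* Advance the consumer index the way the final writew() in the loop does. */
    static unsigned int next_tail(unsigned int tail)
    {
            return (tail + 4) & (IMAX - ISTART - 4);
    }

    int main(void)
    {
            unsigned char raw[4] = { 2, 0x20, 0x08, 0x00 }; /* invented record */
            struct fep_event ev = parse_event(raw);

            printf("chan %d event 0x%02x, tail 0x%x -> 0x%x\n",
                   ev.channel, (unsigned)ev.event, 0x3fcu, next_tail(0x3fc));
            return 0;
    }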
1815/* --------------------- Begin fepcmd ------------------------ */
1816 1666
1817 1667	static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
1818 1668				int byte2, int ncmds, int bytecmd)
-1819	{ /* Begin fepcmd */
+1669	{
1820 1670		unchar __iomem *memaddr;
1821 1671		unsigned int head, cmdTail, cmdStart, cmdMax;
1822 1672		long count;
@@ -1831,11 +1681,11 @@ static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
1831 1681		head = readw(&ch->mailbox->cin);
1832 1682		/* cmdStart is a base address */
1833 1683		cmdStart = readw(&ch->mailbox->cstart);
-1834		/* ------------------------------------------------------------------
-1835		   We do the addition below because we do not want a max pointer
-1836		   relative to cmdStart.  We want a max pointer that points at the
-1837		   physical end of the command queue.
-1838		-------------------------------------------------------------------- */
+1684		/*
+1685		 * We do the addition below because we do not want a max pointer
+1686		 * relative to cmdStart. We want a max pointer that points at the
+1687		 * physical end of the command queue.
+1688		 */
1839 1689		cmdMax = (cmdStart + 4 + readw(&ch->mailbox->cmax));
1840 1690		memaddr = ch->board->re_map_membase;
1841 1691
@@ -1860,7 +1710,7 @@ static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
1860 1710		writew(head, &ch->mailbox->cin);
1861 1711		count = FEPTIMEOUT;
1862 1712
-1863		for (;;) { /* Begin forever loop */
+1713		for (;;) {
1864 1714			count--;
1865 1715			if (count == 0) {
1866 1716				printk(KERN_ERR "<Error> - Fep not responding in fepcmd()\n");
@@ -1869,26 +1719,23 @@ static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
1869 1719			head = readw(&ch->mailbox->cin);
1870 1720			cmdTail = readw(&ch->mailbox->cout);
1871 1721			n = (head - cmdTail) & (cmdMax - cmdStart - 4);
-1872			/* ----------------------------------------------------------
-1873			   Basically this will break when the FEP acknowledges the
-1874			   command by incrementing cmdTail (Making it equal to head).
-1875			------------------------------------------------------------- */
+1722			/*
+1723			 * Basically this will break when the FEP acknowledges the
+1724			 * command by incrementing cmdTail (Making it equal to head).
+1725			 */
1876 1726			if (n <= ncmds * (sizeof(short) * 4))
-1877				break;		/* Well nearly forever :-) */
-1878		} /* End forever loop */
-1879	} /* End fepcmd */
+1727				break;
+1728		}
+1729	}
1880
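fepcmd() queues a command and then spins until the FEP has consumed it, judging progress by the masked distance between the cin and cout mailbox words. Below is a stand-alone sketch of that completion test; fep_caught_up() and the ring values in main() are made up for illustration.

    /*
     * Standalone sketch (not driver code): the "has the FEP caught up?" test
     * at the bottom of fepcmd().  cmd_head/cmd_tail mirror cin/cout;
     * ring_mask mirrors (cmdMax - cmdStart - 4).
     */
    #include <stdio.h>
    #include <stdbool.h>

    static bool fep_caught_up(unsigned int cmd_head, unsigned int cmd_tail,
                              unsigned int ring_mask, int ncmds)
    {
            /* Bytes of command queue the FEP has not consumed yet. */
            unsigned int outstanding = (cmd_head - cmd_tail) & ring_mask;

            /* Each command is four 16-bit words; allow ncmds of slack. */
            return outstanding <= (unsigned int)ncmds * (sizeof(short) * 4);
    }

    int main(void)
    {
            /* ring mask mirrors (cmdMax - cmdStart - 4); 0x3fc is an invented example */
            printf("%d\n", fep_caught_up(0x40, 0x40, 0x3fc, 0));  /* 1: drained     */
            printf("%d\n", fep_caught_up(0x60, 0x40, 0x3fc, 1));  /* 0: 32 bytes > 8 */
            return 0;
    }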
1881/* ---------------------------------------------------------------------
1882 Digi products use fields in their channels structures that are very
1883 similar to the c_cflag and c_iflag fields typically found in UNIX
1884 termios structures. The below three routines allow mappings
1885 between these hardware "flags" and their respective Linux flags.
1886------------------------------------------------------------------------- */
1887
1888/* --------------------- Begin termios2digi_h -------------------- */
1889 1730
1731/*
1732 * Digi products use fields in their channels structures that are very similar
1733 * to the c_cflag and c_iflag fields typically found in UNIX termios
1734 * structures. The below three routines allow mappings between these hardware
1735 * "flags" and their respective Linux flags.
1736 */
1890static unsigned termios2digi_h(struct channel *ch, unsigned cflag) 1737static unsigned termios2digi_h(struct channel *ch, unsigned cflag)
1891{ /* Begin termios2digi_h */ 1738{
1892 unsigned res = 0; 1739 unsigned res = 0;
1893 1740
1894 if (cflag & CRTSCTS) { 1741 if (cflag & CRTSCTS) {
@@ -1918,86 +1765,73 @@ static unsigned termios2digi_h(struct channel *ch, unsigned cflag)
1918 ch->digiext.digi_flags |= CTSPACE; 1765 ch->digiext.digi_flags |= CTSPACE;
1919 1766
1920 return res; 1767 return res;
1768}
1921 1769
1922} /* End termios2digi_h */
1923
1924/* --------------------- Begin termios2digi_i -------------------- */
1925static unsigned termios2digi_i(struct channel *ch, unsigned iflag) 1770static unsigned termios2digi_i(struct channel *ch, unsigned iflag)
1926{ /* Begin termios2digi_i */ 1771{
1927 1772 unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
1928 unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
1929 INPCK | ISTRIP|IXON|IXANY|IXOFF); 1773 INPCK | ISTRIP|IXON|IXANY|IXOFF);
1930 if (ch->digiext.digi_flags & DIGI_AIXON) 1774 if (ch->digiext.digi_flags & DIGI_AIXON)
1931 res |= IAIXON; 1775 res |= IAIXON;
1932 return res; 1776 return res;
1933 1777}
1934} /* End termios2digi_i */
1935
1936/* --------------------- Begin termios2digi_c -------------------- */
1937 1778
1938static unsigned termios2digi_c(struct channel *ch, unsigned cflag) 1779static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
1939{ /* Begin termios2digi_c */ 1780{
1940
1941 unsigned res = 0; 1781 unsigned res = 0;
1942 if (cflag & CBAUDEX) { /* Begin detected CBAUDEX */ 1782 if (cflag & CBAUDEX) {
1943 ch->digiext.digi_flags |= DIGI_FAST; 1783 ch->digiext.digi_flags |= DIGI_FAST;
1944 /* ------------------------------------------------------------- 1784 /*
1945 HUPCL bit is used by FEP to indicate fast baud 1785 * HUPCL bit is used by FEP to indicate fast baud table is to
1946 table is to be used. 1786 * be used.
1947 ----------------------------------------------------------------- */ 1787 */
1948 res |= FEP_HUPCL; 1788 res |= FEP_HUPCL;
1949 } /* End detected CBAUDEX */ 1789 } else
1950 else ch->digiext.digi_flags &= ~DIGI_FAST; 1790 ch->digiext.digi_flags &= ~DIGI_FAST;
1951 /* ------------------------------------------------------------------- 1791 /*
1952 CBAUD has bit position 0x1000 set these days to indicate Linux 1792 * CBAUD has bit position 0x1000 set these days to indicate Linux
1953 baud rate remap. Digi hardware can't handle the bit assignment. 1793 * baud rate remap. Digi hardware can't handle the bit assignment.
1954 (We use a different bit assignment for high speed.). Clear this 1794 * (We use a different bit assignment for high speed.). Clear this
1955 bit out. 1795 * bit out.
1956 ---------------------------------------------------------------------- */ 1796 */
1957 res |= cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE); 1797 res |= cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE);
1958 /* ------------------------------------------------------------- 1798 /*
1959 This gets a little confusing. The Digi cards have their own 1799 * This gets a little confusing. The Digi cards have their own
1960 representation of c_cflags controling baud rate. For the most 1800 * representation of c_cflags controling baud rate. For the most part
1961 part this is identical to the Linux implementation. However; 1801 * this is identical to the Linux implementation. However; Digi
1962 Digi supports one rate (76800) that Linux doesn't. This means 1802 * supports one rate (76800) that Linux doesn't. This means that the
1963 that the c_cflag entry that would normally mean 76800 for Digi 1803 * c_cflag entry that would normally mean 76800 for Digi actually means
1964 actually means 115200 under Linux. Without the below mapping, 1804 * 115200 under Linux. Without the below mapping, a stty 115200 would
1965 a stty 115200 would only drive the board at 76800. Since 1805 * only drive the board at 76800. Since the rate 230400 is also found
1966 the rate 230400 is also found after 76800, the same problem afflicts 1806 * after 76800, the same problem afflicts us when we choose a rate of
1967 us when we choose a rate of 230400. Without the below modificiation 1807 * 230400. Without the below modificiation stty 230400 would actually
1968 stty 230400 would actually give us 115200. 1808 * give us 115200.
1969 1809 *
1970 There are two additional differences. The Linux value for CLOCAL 1810 * There are two additional differences. The Linux value for CLOCAL
1971 (0x800; 0004000) has no meaning to the Digi hardware. Also in 1811 * (0x800; 0004000) has no meaning to the Digi hardware. Also in later
1972 later releases of Linux; the CBAUD define has CBAUDEX (0x1000; 1812 * releases of Linux; the CBAUD define has CBAUDEX (0x1000; 0010000)
1973 0010000) ored into it (CBAUD = 0x100f as opposed to 0xf). CBAUDEX 1813 * ored into it (CBAUD = 0x100f as opposed to 0xf). CBAUDEX should be
1974 should be checked for a screened out prior to termios2digi_c 1814 * checked for a screened out prior to termios2digi_c returning. Since
1975 returning. Since CLOCAL isn't used by the board this can be 1815 * CLOCAL isn't used by the board this can be ignored as long as the
1976 ignored as long as the returned value is used only by Digi hardware. 1816 * returned value is used only by Digi hardware.
1977 ----------------------------------------------------------------- */ 1817 */
1978 1818		if (cflag & CBAUDEX) {
-1979			/* -------------------------------------------------------------
-1980			   The below code is trying to guarantee that only baud rates
-1981			   115200 and 230400 are remapped.  We use exclusive or because
-1982			   the various baud rates share common bit positions and therefore
-1983			   can't be tested for easily.
-1984			----------------------------------------------------------------- */
-1985
-1986
-1987			if ((!((cflag & 0x7) ^ (B115200 & ~CBAUDEX))) ||
+1819			/*
+1820			 * The below code is trying to guarantee that only baud rates
+1821			 * 115200 and 230400 are remapped. We use exclusive or because
+1822			 * the various baud rates share common bit positions and
+1823			 * therefore can't be tested for easily.
+1824			 */
+1825			if ((!((cflag & 0x7) ^ (B115200 & ~CBAUDEX))) ||
1988 1826			    (!((cflag & 0x7) ^ (B230400 & ~CBAUDEX))))
1989 1827				res += 1;
1990 1828		}
1991 1829		return res;
-1992
-1993	} /* End termios2digi_c */
+1830	}
1994
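The remap test in termios2digi_c() bumps the Digi baud-table index by one slot when the requested CBAUDEX rate is B115200 or B230400, for the 76800-offset reason spelled out in the comment inside that function. The sketch below reproduces the test in user space; the LNX_* constants are hard-coded copies of the usual Linux termbits values so the demo does not rely on glibc exposing CBAUDEX.

    /*
     * Standalone sketch (not driver code): the rate-remap test inside
     * termios2digi_c, with the relevant termbits values copied locally.
     */
    #include <stdio.h>
    #include <stdbool.h>

    #define LNX_CBAUDEX     0010000
    #define LNX_B115200     0010002
    #define LNX_B230400     0010003

    /* True when a CBAUDEX rate must be bumped by one Digi baud-table slot. */
    static bool needs_digi_remap(unsigned int cflag)
    {
            if (!(cflag & LNX_CBAUDEX))
                    return false;
            return !((cflag & 0x7) ^ (LNX_B115200 & ~LNX_CBAUDEX)) ||
                   !((cflag & 0x7) ^ (LNX_B230400 & ~LNX_CBAUDEX));
    }

    int main(void)
    {
            printf("B115200: %d\n", needs_digi_remap(LNX_B115200)); /* 1 */
            printf("B57600:  %d\n", needs_digi_remap(0010001));     /* 0 */
            return 0;
    }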
1995/* --------------------- Begin epcaparam ----------------------- */
1996 1831
1997/* Caller must hold the locks */ 1832/* Caller must hold the locks */
1998static void epcaparam(struct tty_struct *tty, struct channel *ch) 1833static void epcaparam(struct tty_struct *tty, struct channel *ch)
1999{ /* Begin epcaparam */ 1834{
2000
2001 unsigned int cmdHead; 1835 unsigned int cmdHead;
2002 struct ktermios *ts; 1836 struct ktermios *ts;
2003 struct board_chan __iomem *bc; 1837 struct board_chan __iomem *bc;
@@ -2013,28 +1847,29 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
2013 writew(cmdHead, &bc->rout); 1847 writew(cmdHead, &bc->rout);
2014 cmdHead = readw(&bc->tin); 1848 cmdHead = readw(&bc->tin);
2015 /* Changing baud in mid-stream transmission can be wonderful */ 1849 /* Changing baud in mid-stream transmission can be wonderful */
2016 /* --------------------------------------------------------------- 1850 /*
2017 Flush current transmit buffer by setting cmdTail pointer (tout) 1851 * Flush current transmit buffer by setting cmdTail pointer
2018 to cmdHead pointer (tin). Hopefully the transmit buffer is empty. 1852 * (tout) to cmdHead pointer (tin). Hopefully the transmit
2019 ----------------------------------------------------------------- */ 1853 * buffer is empty.
1854 */
2020 fepcmd(ch, STOUT, (unsigned) cmdHead, 0, 0, 0); 1855 fepcmd(ch, STOUT, (unsigned) cmdHead, 0, 0, 0);
2021 mval = 0; 1856 mval = 0;
2022 } else { /* Begin CBAUD not detected */ 1857 } else { /* Begin CBAUD not detected */
2023 /* ------------------------------------------------------------------- 1858 /*
2024 c_cflags have changed but that change had nothing to do with BAUD. 1859 * c_cflags have changed but that change had nothing to do with
2025 Propagate the change to the card. 1860 * BAUD. Propagate the change to the card.
2026 ---------------------------------------------------------------------- */ 1861 */
2027 cflag = termios2digi_c(ch, ts->c_cflag); 1862 cflag = termios2digi_c(ch, ts->c_cflag);
2028 if (cflag != ch->fepcflag) { 1863 if (cflag != ch->fepcflag) {
2029 ch->fepcflag = cflag; 1864 ch->fepcflag = cflag;
2030 /* Set baud rate, char size, stop bits, parity */ 1865 /* Set baud rate, char size, stop bits, parity */
2031 fepcmd(ch, SETCTRLFLAGS, (unsigned) cflag, 0, 0, 0); 1866 fepcmd(ch, SETCTRLFLAGS, (unsigned) cflag, 0, 0, 0);
2032 } 1867 }
2033 /* ---------------------------------------------------------------- 1868 /*
2034 If the user has not forced CLOCAL and if the device is not a 1869 * If the user has not forced CLOCAL and if the device is not a
2035 CALLOUT device (Which is always CLOCAL) we set flags such that 1870 * CALLOUT device (Which is always CLOCAL) we set flags such
2036 the driver will wait on carrier detect. 1871 * that the driver will wait on carrier detect.
2037 ------------------------------------------------------------------- */ 1872 */
2038 if (ts->c_cflag & CLOCAL) 1873 if (ts->c_cflag & CLOCAL)
2039 ch->asyncflags &= ~ASYNC_CHECK_CD; 1874 ch->asyncflags &= ~ASYNC_CHECK_CD;
2040 else 1875 else
@@ -2045,19 +1880,19 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
2045 /* Check input mode flags */ 1880 /* Check input mode flags */
2046 if (iflag != ch->fepiflag) { 1881 if (iflag != ch->fepiflag) {
2047 ch->fepiflag = iflag; 1882 ch->fepiflag = iflag;
2048 /* --------------------------------------------------------------- 1883 /*
2049 Command sets channels iflag structure on the board. Such things 1884 * Command sets channels iflag structure on the board. Such
2050 as input soft flow control, handling of parity errors, and 1885 * things as input soft flow control, handling of parity
2051 break handling are all set here. 1886 * errors, and break handling are all set here.
2052 ------------------------------------------------------------------- */ 1887 */
2053 /* break handling, parity handling, input stripping, flow control chars */ 1888 /* break handling, parity handling, input stripping, flow control chars */
2054 fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0); 1889 fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0);
2055 } 1890 }
2056 /* --------------------------------------------------------------- 1891 /*
2057 Set the board mint value for this channel. This will cause hardware 1892 * Set the board mint value for this channel. This will cause hardware
2058 events to be generated each time the DCD signal (Described in mint) 1893 * events to be generated each time the DCD signal (Described in mint)
2059 changes. 1894 * changes.
2060 ------------------------------------------------------------------- */ 1895 */
2061 writeb(ch->dcd, &bc->mint); 1896 writeb(ch->dcd, &bc->mint);
2062 if ((ts->c_cflag & CLOCAL) || (ch->digiext.digi_flags & DIGI_FORCEDCD)) 1897 if ((ts->c_cflag & CLOCAL) || (ch->digiext.digi_flags & DIGI_FORCEDCD))
2063 if (ch->digiext.digi_flags & DIGI_FORCEDCD) 1898 if (ch->digiext.digi_flags & DIGI_FORCEDCD)
@@ -2066,23 +1901,23 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
2066 hflow = termios2digi_h(ch, ts->c_cflag); 1901 hflow = termios2digi_h(ch, ts->c_cflag);
2067 if (hflow != ch->hflow) { 1902 if (hflow != ch->hflow) {
2068 ch->hflow = hflow; 1903 ch->hflow = hflow;
2069 /* -------------------------------------------------------------- 1904 /*
2070 Hard flow control has been selected but the board is not 1905 * Hard flow control has been selected but the board is not
2071 using it. Activate hard flow control now. 1906 * using it. Activate hard flow control now.
2072 ----------------------------------------------------------------- */ 1907 */
2073 fepcmd(ch, SETHFLOW, hflow, 0xff, 0, 1); 1908 fepcmd(ch, SETHFLOW, hflow, 0xff, 0, 1);
2074 } 1909 }
2075 mval ^= ch->modemfake & (mval ^ ch->modem); 1910 mval ^= ch->modemfake & (mval ^ ch->modem);
2076 1911
2077 if (ch->omodem ^ mval) { 1912 if (ch->omodem ^ mval) {
2078 ch->omodem = mval; 1913 ch->omodem = mval;
2079 /* -------------------------------------------------------------- 1914 /*
2080 The below command sets the DTR and RTS mstat structure. If 1915 * The below command sets the DTR and RTS mstat structure. If
2081 hard flow control is NOT active these changes will drive the 1916 * hard flow control is NOT active these changes will drive the
2082 output of the actual DTR and RTS lines. If hard flow control 1917 * output of the actual DTR and RTS lines. If hard flow control
2083 is active, the changes will be saved in the mstat structure and 1918 * is active, the changes will be saved in the mstat structure
2084 only asserted when hard flow control is turned off. 1919 * and only asserted when hard flow control is turned off.
2085 ----------------------------------------------------------------- */ 1920 */
2086 1921
2087 /* First reset DTR & RTS; then set them */ 1922 /* First reset DTR & RTS; then set them */
2088 fepcmd(ch, SETMODEM, 0, ((ch->m_dtr)|(ch->m_rts)), 0, 1); 1923 fepcmd(ch, SETMODEM, 0, ((ch->m_dtr)|(ch->m_rts)), 0, 1);
@@ -2091,28 +1926,26 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
2091 if (ch->startc != ch->fepstartc || ch->stopc != ch->fepstopc) { 1926 if (ch->startc != ch->fepstartc || ch->stopc != ch->fepstopc) {
2092 ch->fepstartc = ch->startc; 1927 ch->fepstartc = ch->startc;
2093 ch->fepstopc = ch->stopc; 1928 ch->fepstopc = ch->stopc;
2094 /* ------------------------------------------------------------ 1929 /*
2095 The XON / XOFF characters have changed; propagate these 1930 * The XON / XOFF characters have changed; propagate these
2096 changes to the card. 1931 * changes to the card.
2097 --------------------------------------------------------------- */ 1932 */
2098 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1); 1933 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1);
2099 } 1934 }
2100 if (ch->startca != ch->fepstartca || ch->stopca != ch->fepstopca) { 1935 if (ch->startca != ch->fepstartca || ch->stopca != ch->fepstopca) {
2101 ch->fepstartca = ch->startca; 1936 ch->fepstartca = ch->startca;
2102 ch->fepstopca = ch->stopca; 1937 ch->fepstopca = ch->stopca;
2103 /* --------------------------------------------------------------- 1938 /*
2104 Similar to the above, this time the auxilarly XON / XOFF 1939 * Similar to the above, this time the auxilarly XON / XOFF
2105 characters have changed; propagate these changes to the card. 1940 * characters have changed; propagate these changes to the card.
2106 ------------------------------------------------------------------ */ 1941 */
2107 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1); 1942 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1);
2108 } 1943 }
2109} /* End epcaparam */ 1944}
2110 1945
2111/* --------------------- Begin receive_data ----------------------- */
2112/* Caller holds lock */ 1946/* Caller holds lock */
2113static void receive_data(struct channel *ch) 1947static void receive_data(struct channel *ch)
2114{ /* Begin receive_data */ 1948{
2115
2116 unchar *rptr; 1949 unchar *rptr;
2117 struct ktermios *ts = NULL; 1950 struct ktermios *ts = NULL;
2118 struct tty_struct *tty; 1951 struct tty_struct *tty;
@@ -2121,11 +1954,10 @@ static void receive_data(struct channel *ch)
2121 unsigned int tail, head; 1954 unsigned int tail, head;
2122 unsigned int wrapmask; 1955 unsigned int wrapmask;
2123 1956
2124 /* --------------------------------------------------------------- 1957 /*
2125 This routine is called by doint when a receive data event 1958 * This routine is called by doint when a receive data event has taken
2126 has taken place. 1959 * place.
2127 ------------------------------------------------------------------- */ 1960 */
2128
2129 globalwinon(ch); 1961 globalwinon(ch);
2130 if (ch->statusflags & RXSTOPPED) 1962 if (ch->statusflags & RXSTOPPED)
2131 return; 1963 return;
@@ -2136,10 +1968,10 @@ static void receive_data(struct channel *ch)
2136 BUG_ON(!bc); 1968 BUG_ON(!bc);
2137 wrapmask = ch->rxbufsize - 1; 1969 wrapmask = ch->rxbufsize - 1;
2138 1970
2139 /* --------------------------------------------------------------------- 1971 /*
2140 Get the head and tail pointers to the receiver queue. Wrap the 1972 * Get the head and tail pointers to the receiver queue. Wrap the head
2141 head pointer if it has reached the end of the buffer. 1973 * pointer if it has reached the end of the buffer.
2142 ------------------------------------------------------------------------ */ 1974 */
2143 head = readw(&bc->rin); 1975 head = readw(&bc->rin);
2144 head &= wrapmask; 1976 head &= wrapmask;
2145 tail = readw(&bc->rout) & wrapmask; 1977 tail = readw(&bc->rout) & wrapmask;
@@ -2148,10 +1980,7 @@ static void receive_data(struct channel *ch)
2148 if (bytesAvailable == 0) 1980 if (bytesAvailable == 0)
2149 return; 1981 return;
2150 1982
2151 /* ------------------------------------------------------------------ 1983 /* If CREAD bit is off or device not open, set TX tail to head */
2152 If CREAD bit is off or device not open, set TX tail to head
2153 --------------------------------------------------------------------- */
2154
2155 if (!tty || !ts || !(ts->c_cflag & CREAD)) { 1984 if (!tty || !ts || !(ts->c_cflag & CREAD)) {
2156 writew(head, &bc->rout); 1985 writew(head, &bc->rout);
2157 return; 1986 return;
@@ -2168,22 +1997,20 @@ static void receive_data(struct channel *ch)
2168 rxwinon(ch); 1997 rxwinon(ch);
2169 while (bytesAvailable > 0) { /* Begin while there is data on the card */ 1998 while (bytesAvailable > 0) { /* Begin while there is data on the card */
2170 wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail; 1999 wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail;
2171 /* --------------------------------------------------------------- 2000 /*
2172 Even if head has wrapped around only report the amount of 2001 * Even if head has wrapped around only report the amount of
2173 data to be equal to the size - tail. Remember memcpy can't 2002 * data to be equal to the size - tail. Remember memcpy can't
2174 automaticly wrap around the receive buffer. 2003 * automaticly wrap around the receive buffer.
2175 ----------------------------------------------------------------- */ 2004 */
2176 dataToRead = (wrapgap < bytesAvailable) ? wrapgap : bytesAvailable; 2005 dataToRead = (wrapgap < bytesAvailable) ? wrapgap : bytesAvailable;
2177 /* -------------------------------------------------------------- 2006 /* Make sure we don't overflow the buffer */
2178 Make sure we don't overflow the buffer
2179 ----------------------------------------------------------------- */
2180 dataToRead = tty_prepare_flip_string(tty, &rptr, dataToRead); 2007 dataToRead = tty_prepare_flip_string(tty, &rptr, dataToRead);
2181 if (dataToRead == 0) 2008 if (dataToRead == 0)
2182 break; 2009 break;
2183 /* --------------------------------------------------------------- 2010 /*
2184 Move data read from our card into the line disciplines buffer 2011 * Move data read from our card into the line disciplines
2185 for translation if necessary. 2012 * buffer for translation if necessary.
2186 ------------------------------------------------------------------ */ 2013 */
2187 memcpy_fromio(rptr, ch->rxptr + tail, dataToRead); 2014 memcpy_fromio(rptr, ch->rxptr + tail, dataToRead);
2188 tail = (tail + dataToRead) & wrapmask; 2015 tail = (tail + dataToRead) & wrapmask;
2189 bytesAvailable -= dataToRead; 2016 bytesAvailable -= dataToRead;
@@ -2191,28 +2018,26 @@ static void receive_data(struct channel *ch)
2191 globalwinon(ch); 2018 globalwinon(ch);
2192 writew(tail, &bc->rout); 2019 writew(tail, &bc->rout);
2193 /* Must be called with global data */ 2020 /* Must be called with global data */
2194 tty_schedule_flip(ch->tty); 2021 tty_schedule_flip(ch->tty);
2195 return; 2022}
2196} /* End receive_data */
2197 2023
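receive_data() drains the card's receive ring in at most two chunks, because memcpy_fromio() cannot wrap past the end of the buffer; wrapgap is the contiguous run available at the current tail. The following is a stand-alone sketch of that arithmetic, assuming (as the driver does) a power-of-two buffer size; contiguous_chunk() and the sample indices are invented.

    /*
     * Standalone sketch (not driver code): the wrap handling in
     * receive_data().  rin/rout mirror the card's receive head/tail.
     */
    #include <stdio.h>

    /* Largest chunk that can be copied without wrapping past the buffer end. */
    static unsigned int contiguous_chunk(unsigned int head, unsigned int tail,
                                         unsigned int bufsize)
    {
            unsigned int mask = bufsize - 1;
            unsigned int avail = (head - tail) & mask;      /* total bytes queued */
            unsigned int wrapgap = (head >= tail) ? head - tail : bufsize - tail;

            return (wrapgap < avail) ? wrapgap : avail;
    }

    int main(void)
    {
            /* head wrapped past the end: 0x30 bytes sit at the tail, 0x10 at 0 */
            printf("chunk = 0x%x\n", contiguous_chunk(0x010, 0x3d0, 0x400));
            return 0;
    }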
2198static int info_ioctl(struct tty_struct *tty, struct file * file, 2024static int info_ioctl(struct tty_struct *tty, struct file *file,
2199 unsigned int cmd, unsigned long arg) 2025 unsigned int cmd, unsigned long arg)
2200{ 2026{
2201 switch (cmd) 2027 switch (cmd) {
2202 { /* Begin switch cmd */ 2028 case DIGI_GETINFO:
2203 case DIGI_GETINFO: 2029 {
2204 { /* Begin case DIGI_GETINFO */ 2030 struct digi_info di;
2205 struct digi_info di ;
2206 int brd; 2031 int brd;
2207 2032
2208 if(get_user(brd, (unsigned int __user *)arg)) 2033 if (get_user(brd, (unsigned int __user *)arg))
2209 return -EFAULT; 2034 return -EFAULT;
2210 if (brd < 0 || brd >= num_cards || num_cards == 0) 2035 if (brd < 0 || brd >= num_cards || num_cards == 0)
2211 return -ENODEV; 2036 return -ENODEV;
2212 2037
2213 memset(&di, 0, sizeof(di)); 2038 memset(&di, 0, sizeof(di));
2214 2039
2215 di.board = brd ; 2040 di.board = brd;
2216 di.status = boards[brd].status; 2041 di.status = boards[brd].status;
2217 di.type = boards[brd].type ; 2042 di.type = boards[brd].type ;
2218 di.numports = boards[brd].numports ; 2043 di.numports = boards[brd].numports ;
@@ -2220,45 +2045,44 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
2220 di.port = (unsigned char *)boards[brd].port ; 2045 di.port = (unsigned char *)boards[brd].port ;
2221 di.membase = (unsigned char *)boards[brd].membase ; 2046 di.membase = (unsigned char *)boards[brd].membase ;
2222 2047
2223 if (copy_to_user((void __user *)arg, &di, sizeof (di))) 2048 if (copy_to_user((void __user *)arg, &di, sizeof(di)))
2224 return -EFAULT; 2049 return -EFAULT;
2225 break; 2050 break;
2226 2051
2227 } /* End case DIGI_GETINFO */ 2052 }
2228
2229 case DIGI_POLLER:
2230 { /* Begin case DIGI_POLLER */
2231 2053
2232 int brd = arg & 0xff000000 >> 16 ; 2054 case DIGI_POLLER:
2233 unsigned char state = arg & 0xff ; 2055 {
2056 int brd = arg & 0xff000000 >> 16;
2057 unsigned char state = arg & 0xff;
2234 2058
2235 if (brd < 0 || brd >= num_cards) { 2059 if (brd < 0 || brd >= num_cards) {
2236 printk(KERN_ERR "epca: DIGI POLLER : brd not valid!\n"); 2060 printk(KERN_ERR "epca: DIGI POLLER : brd not valid!\n");
2237 return (-ENODEV); 2061 return -ENODEV;
2238 } 2062 }
2239 digi_poller_inhibited = state ; 2063 digi_poller_inhibited = state;
2240 break ; 2064 break;
2241 } /* End case DIGI_POLLER */ 2065 }
2242 2066
2243 case DIGI_INIT: 2067 case DIGI_INIT:
2244 { /* Begin case DIGI_INIT */ 2068 {
2245 /* ------------------------------------------------------------ 2069 /*
2246 This call is made by the apps to complete the initilization 2070 * This call is made by the apps to complete the
2247 of the board(s). This routine is responsible for setting 2071 * initilization of the board(s). This routine is
2248 the card to its initial state and setting the drivers control 2072 * responsible for setting the card to its initial
2249 fields to the sutianle settings for the card in question. 2073 * state and setting the drivers control fields to the
2250 ---------------------------------------------------------------- */ 2074 * sutianle settings for the card in question.
2251 int crd ; 2075 */
2252 for (crd = 0; crd < num_cards; crd++) 2076 int crd;
2253 post_fep_init (crd); 2077 for (crd = 0; crd < num_cards; crd++)
2254 break ; 2078 post_fep_init(crd);
2255 } /* End case DIGI_INIT */ 2079 break;
2256 default: 2080 }
2257 return -ENOTTY; 2081 default:
2258 } /* End switch cmd */ 2082 return -ENOTTY;
2259 return (0) ; 2083 }
2084 return 0;
2260} 2085}
2261/* --------------------- Begin pc_ioctl ----------------------- */
2262 2086
2263static int pc_tiocmget(struct tty_struct *tty, struct file *file) 2087static int pc_tiocmget(struct tty_struct *tty, struct file *file)
2264{ 2088{
@@ -2304,9 +2128,9 @@ static int pc_tiocmset(struct tty_struct *tty, struct file *file,
2304 2128
2305 spin_lock_irqsave(&epca_lock, flags); 2129 spin_lock_irqsave(&epca_lock, flags);
2306 /* 2130 /*
2307 * I think this modemfake stuff is broken. It doesn't 2131 * I think this modemfake stuff is broken. It doesn't correctly reflect
2308 * correctly reflect the behaviour desired by the TIOCM* 2132 * the behaviour desired by the TIOCM* ioctls. Therefore this is
2309 * ioctls. Therefore this is probably broken. 2133 * probably broken.
2310 */ 2134 */
2311 if (set & TIOCM_RTS) { 2135 if (set & TIOCM_RTS) {
2312 ch->modemfake |= ch->m_rts; 2136 ch->modemfake |= ch->m_rts;
@@ -2325,10 +2149,10 @@ static int pc_tiocmset(struct tty_struct *tty, struct file *file,
2325 ch->modem &= ~ch->m_dtr; 2149 ch->modem &= ~ch->m_dtr;
2326 } 2150 }
2327 globalwinon(ch); 2151 globalwinon(ch);
2328 /* -------------------------------------------------------------- 2152 /*
2329 The below routine generally sets up parity, baud, flow control 2153 * The below routine generally sets up parity, baud, flow control
2330 issues, etc.... It effect both control flags and input flags. 2154 * issues, etc.... It effect both control flags and input flags.
2331 ------------------------------------------------------------------ */ 2155 */
2332 epcaparam(tty,ch); 2156 epcaparam(tty,ch);
2333 memoff(ch); 2157 memoff(ch);
2334 spin_unlock_irqrestore(&epca_lock, flags); 2158 spin_unlock_irqrestore(&epca_lock, flags);
@@ -2337,8 +2161,7 @@ static int pc_tiocmset(struct tty_struct *tty, struct file *file,
2337 2161
2338static int pc_ioctl(struct tty_struct *tty, struct file * file, 2162static int pc_ioctl(struct tty_struct *tty, struct file * file,
2339 unsigned int cmd, unsigned long arg) 2163 unsigned int cmd, unsigned long arg)
2340{ /* Begin pc_ioctl */ 2164{
2341
2342 digiflow_t dflow; 2165 digiflow_t dflow;
2343 int retval; 2166 int retval;
2344 unsigned long flags; 2167 unsigned long flags;
@@ -2347,49 +2170,47 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2347 struct board_chan __iomem *bc; 2170 struct board_chan __iomem *bc;
2348 struct channel *ch = (struct channel *) tty->driver_data; 2171 struct channel *ch = (struct channel *) tty->driver_data;
2349 void __user *argp = (void __user *)arg; 2172 void __user *argp = (void __user *)arg;
2350 2173
2351 if (ch) 2174 if (ch)
2352 bc = ch->brdchan; 2175 bc = ch->brdchan;
2353 else 2176 else
2354 return -EINVAL; 2177 return -EINVAL;
2355 2178
2356 /* ------------------------------------------------------------------- 2179 /*
2357 For POSIX compliance we need to add more ioctls. See tty_ioctl.c 2180 * For POSIX compliance we need to add more ioctls. See tty_ioctl.c in
2358 in /usr/src/linux/drivers/char for a good example. In particular 2181 * /usr/src/linux/drivers/char for a good example. In particular think
2359 think about adding TCSETAF, TCSETAW, TCSETA, TCSETSF, TCSETSW, TCSETS. 2182 * about adding TCSETAF, TCSETAW, TCSETA, TCSETSF, TCSETSW, TCSETS.
2360 ---------------------------------------------------------------------- */ 2183 */
2361 2184 switch (cmd) {
2362 switch (cmd) 2185 case TCSBRK: /* SVID version: non-zero arg --> no break */
2363 { /* Begin switch cmd */ 2186 retval = tty_check_change(tty);
2364 case TCSBRK: /* SVID version: non-zero arg --> no break */ 2187 if (retval)
2365 retval = tty_check_change(tty); 2188 return retval;
2366 if (retval) 2189 /* Setup an event to indicate when the transmit buffer empties */
2367 return retval; 2190 spin_lock_irqsave(&epca_lock, flags);
2368 /* Setup an event to indicate when the transmit buffer empties */ 2191 setup_empty_event(tty,ch);
2369 spin_lock_irqsave(&epca_lock, flags); 2192 spin_unlock_irqrestore(&epca_lock, flags);
2370 setup_empty_event(tty,ch); 2193 tty_wait_until_sent(tty, 0);
2371 spin_unlock_irqrestore(&epca_lock, flags); 2194 if (!arg)
2372 tty_wait_until_sent(tty, 0); 2195 digi_send_break(ch, HZ / 4); /* 1/4 second */
2373 if (!arg) 2196 return 0;
2374 digi_send_break(ch, HZ/4); /* 1/4 second */ 2197 case TCSBRKP: /* support for POSIX tcsendbreak() */
2375 return 0; 2198 retval = tty_check_change(tty);
2376 case TCSBRKP: /* support for POSIX tcsendbreak() */ 2199 if (retval)
2377 retval = tty_check_change(tty); 2200 return retval;
2378 if (retval)
2379 return retval;
2380 2201
2381 /* Setup an event to indicate when the transmit buffer empties */ 2202 /* Setup an event to indicate when the transmit buffer empties */
2382 spin_lock_irqsave(&epca_lock, flags); 2203 spin_lock_irqsave(&epca_lock, flags);
2383 setup_empty_event(tty,ch); 2204 setup_empty_event(tty,ch);
2384 spin_unlock_irqrestore(&epca_lock, flags); 2205 spin_unlock_irqrestore(&epca_lock, flags);
2385 tty_wait_until_sent(tty, 0); 2206 tty_wait_until_sent(tty, 0);
2386 digi_send_break(ch, arg ? arg*(HZ/10) : HZ/4); 2207 digi_send_break(ch, arg ? arg*(HZ/10) : HZ/4);
2387 return 0; 2208 return 0;
2388 case TIOCGSOFTCAR: 2209 case TIOCGSOFTCAR:
2389 if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg)) 2210 if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg))
2390 return -EFAULT; 2211 return -EFAULT;
2391 return 0; 2212 return 0;
2392 case TIOCSSOFTCAR: 2213 case TIOCSSOFTCAR:
2393 { 2214 {
2394 unsigned int value; 2215 unsigned int value;
2395 2216
@@ -2400,144 +2221,141 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2400 (value ? CLOCAL : 0)); 2221 (value ? CLOCAL : 0));
2401 return 0; 2222 return 0;
2402 } 2223 }
2403 case TIOCMODG: 2224 case TIOCMODG:
2404 mflag = pc_tiocmget(tty, file); 2225 mflag = pc_tiocmget(tty, file);
2405 if (put_user(mflag, (unsigned long __user *)argp)) 2226 if (put_user(mflag, (unsigned long __user *)argp))
2406 return -EFAULT; 2227 return -EFAULT;
2407 break; 2228 break;
2408 case TIOCMODS: 2229 case TIOCMODS:
2409 if (get_user(mstat, (unsigned __user *)argp)) 2230 if (get_user(mstat, (unsigned __user *)argp))
2410 return -EFAULT; 2231 return -EFAULT;
2411 return pc_tiocmset(tty, file, mstat, ~mstat); 2232 return pc_tiocmset(tty, file, mstat, ~mstat);
2412 case TIOCSDTR: 2233 case TIOCSDTR:
2413 spin_lock_irqsave(&epca_lock, flags); 2234 spin_lock_irqsave(&epca_lock, flags);
2414 ch->omodem |= ch->m_dtr; 2235 ch->omodem |= ch->m_dtr;
2415 globalwinon(ch); 2236 globalwinon(ch);
2416 fepcmd(ch, SETMODEM, ch->m_dtr, 0, 10, 1); 2237 fepcmd(ch, SETMODEM, ch->m_dtr, 0, 10, 1);
2417 memoff(ch); 2238 memoff(ch);
2418 spin_unlock_irqrestore(&epca_lock, flags); 2239 spin_unlock_irqrestore(&epca_lock, flags);
2419 break; 2240 break;
2420 2241
2421 case TIOCCDTR: 2242 case TIOCCDTR:
2243 spin_lock_irqsave(&epca_lock, flags);
2244 ch->omodem &= ~ch->m_dtr;
2245 globalwinon(ch);
2246 fepcmd(ch, SETMODEM, 0, ch->m_dtr, 10, 1);
2247 memoff(ch);
2248 spin_unlock_irqrestore(&epca_lock, flags);
2249 break;
2250 case DIGI_GETA:
2251 if (copy_to_user(argp, &ch->digiext, sizeof(digi_t)))
2252 return -EFAULT;
2253 break;
2254 case DIGI_SETAW:
2255 case DIGI_SETAF:
2256 if (cmd == DIGI_SETAW) {
2257 /* Setup an event to indicate when the transmit buffer empties */
2422 spin_lock_irqsave(&epca_lock, flags); 2258 spin_lock_irqsave(&epca_lock, flags);
2423 ch->omodem &= ~ch->m_dtr; 2259 setup_empty_event(tty,ch);
2424 globalwinon(ch);
2425 fepcmd(ch, SETMODEM, 0, ch->m_dtr, 10, 1);
2426 memoff(ch);
2427 spin_unlock_irqrestore(&epca_lock, flags); 2260 spin_unlock_irqrestore(&epca_lock, flags);
2428 break; 2261 tty_wait_until_sent(tty, 0);
2429 case DIGI_GETA: 2262 } else {
2430 if (copy_to_user(argp, &ch->digiext, sizeof(digi_t))) 2263 /* ldisc lock already held in ioctl */
2431 return -EFAULT; 2264 if (tty->ldisc.flush_buffer)
2432 break; 2265 tty->ldisc.flush_buffer(tty);
2433 case DIGI_SETAW: 2266 }
2434 case DIGI_SETAF: 2267 /* Fall Thru */
2435 if (cmd == DIGI_SETAW) { 2268 case DIGI_SETA:
2436 /* Setup an event to indicate when the transmit buffer empties */ 2269 if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
2437 spin_lock_irqsave(&epca_lock, flags); 2270 return -EFAULT;
2438 setup_empty_event(tty,ch); 2271
2439 spin_unlock_irqrestore(&epca_lock, flags); 2272 if (ch->digiext.digi_flags & DIGI_ALTPIN) {
2440 tty_wait_until_sent(tty, 0); 2273 ch->dcd = ch->m_dsr;
2441 } else { 2274 ch->dsr = ch->m_dcd;
2442 /* ldisc lock already held in ioctl */ 2275 } else {
2443 if (tty->ldisc.flush_buffer) 2276 ch->dcd = ch->m_dcd;
2444 tty->ldisc.flush_buffer(tty); 2277 ch->dsr = ch->m_dsr;
2445 }
2446 /* Fall Thru */
2447 case DIGI_SETA:
2448 if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
2449 return -EFAULT;
2450
2451 if (ch->digiext.digi_flags & DIGI_ALTPIN) {
2452 ch->dcd = ch->m_dsr;
2453 ch->dsr = ch->m_dcd;
2454 } else {
2455 ch->dcd = ch->m_dcd;
2456 ch->dsr = ch->m_dsr;
2457 } 2278 }
2458
2459 spin_lock_irqsave(&epca_lock, flags);
2460 globalwinon(ch);
2461 2279
2462 /* ----------------------------------------------------------------- 2280 spin_lock_irqsave(&epca_lock, flags);
2463 The below routine generally sets up parity, baud, flow control 2281 globalwinon(ch);
2464 issues, etc.... It effect both control flags and input flags.
2465 ------------------------------------------------------------------- */
2466 2282
2467 epcaparam(tty,ch); 2283 /*
2468 memoff(ch); 2284 * The below routine generally sets up parity, baud, flow
2469 spin_unlock_irqrestore(&epca_lock, flags); 2285 * control issues, etc.... It effect both control flags and
2470 break; 2286 * input flags.
2287 */
2288 epcaparam(tty,ch);
2289 memoff(ch);
2290 spin_unlock_irqrestore(&epca_lock, flags);
2291 break;
2292
2293 case DIGI_GETFLOW:
2294 case DIGI_GETAFLOW:
2295 spin_lock_irqsave(&epca_lock, flags);
2296 globalwinon(ch);
2297 if (cmd == DIGI_GETFLOW) {
2298 dflow.startc = readb(&bc->startc);
2299 dflow.stopc = readb(&bc->stopc);
2300 } else {
2301 dflow.startc = readb(&bc->startca);
2302 dflow.stopc = readb(&bc->stopca);
2303 }
2304 memoff(ch);
2305 spin_unlock_irqrestore(&epca_lock, flags);
2306
2307 if (copy_to_user(argp, &dflow, sizeof(dflow)))
2308 return -EFAULT;
2309 break;
2310
2311 case DIGI_SETAFLOW:
2312 case DIGI_SETFLOW:
2313 if (cmd == DIGI_SETFLOW) {
2314 startc = ch->startc;
2315 stopc = ch->stopc;
2316 } else {
2317 startc = ch->startca;
2318 stopc = ch->stopca;
2319 }
2320
2321 if (copy_from_user(&dflow, argp, sizeof(dflow)))
2322 return -EFAULT;
2471 2323
2472 case DIGI_GETFLOW: 2324 if (dflow.startc != startc || dflow.stopc != stopc) { /* Begin if setflow toggled */
2473 case DIGI_GETAFLOW:
2474 spin_lock_irqsave(&epca_lock, flags); 2325 spin_lock_irqsave(&epca_lock, flags);
2475 globalwinon(ch); 2326 globalwinon(ch);
2476 if (cmd == DIGI_GETFLOW) {
2477 dflow.startc = readb(&bc->startc);
2478 dflow.stopc = readb(&bc->stopc);
2479 } else {
2480 dflow.startc = readb(&bc->startca);
2481 dflow.stopc = readb(&bc->stopca);
2482 }
2483 memoff(ch);
2484 spin_unlock_irqrestore(&epca_lock, flags);
2485 2327
2486 if (copy_to_user(argp, &dflow, sizeof(dflow)))
2487 return -EFAULT;
2488 break;
2489
2490 case DIGI_SETAFLOW:
2491 case DIGI_SETFLOW:
2492 if (cmd == DIGI_SETFLOW) { 2328 if (cmd == DIGI_SETFLOW) {
2493 startc = ch->startc; 2329 ch->fepstartc = ch->startc = dflow.startc;
2494 stopc = ch->stopc; 2330 ch->fepstopc = ch->stopc = dflow.stopc;
2331 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1);
2495 } else { 2332 } else {
2496 startc = ch->startca; 2333 ch->fepstartca = ch->startca = dflow.startc;
2497 stopc = ch->stopca; 2334 ch->fepstopca = ch->stopca = dflow.stopc;
2335 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1);
2498 } 2336 }
2499 2337
2500 if (copy_from_user(&dflow, argp, sizeof(dflow))) 2338 if (ch->statusflags & TXSTOPPED)
2501 return -EFAULT; 2339 pc_start(tty);
2502 2340
2503 if (dflow.startc != startc || dflow.stopc != stopc) { /* Begin if setflow toggled */ 2341 memoff(ch);
2504 spin_lock_irqsave(&epca_lock, flags); 2342 spin_unlock_irqrestore(&epca_lock, flags);
2505 globalwinon(ch); 2343 } /* End if setflow toggled */
2506 2344 break;
2507 if (cmd == DIGI_SETFLOW) { 2345 default:
2508 ch->fepstartc = ch->startc = dflow.startc; 2346 return -ENOIOCTLCMD;
2509 ch->fepstopc = ch->stopc = dflow.stopc; 2347 }
2510 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1);
2511 } else {
2512 ch->fepstartca = ch->startca = dflow.startc;
2513 ch->fepstopca = ch->stopca = dflow.stopc;
2514 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1);
2515 }
2516
2517 if (ch->statusflags & TXSTOPPED)
2518 pc_start(tty);
2519
2520 memoff(ch);
2521 spin_unlock_irqrestore(&epca_lock, flags);
2522 } /* End if setflow toggled */
2523 break;
2524 default:
2525 return -ENOIOCTLCMD;
2526 } /* End switch cmd */
2527 return 0; 2348 return 0;
2528} /* End pc_ioctl */ 2349}
2529
2530/* --------------------- Begin pc_set_termios ----------------------- */
2531 2350
2532static void pc_set_termios(struct tty_struct *tty, struct ktermios *old_termios) 2351static void pc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
2533{ /* Begin pc_set_termios */ 2352{
2534
2535 struct channel *ch; 2353 struct channel *ch;
2536 unsigned long flags; 2354 unsigned long flags;
2537 /* --------------------------------------------------------- 2355 /*
2538 verifyChannel returns the channel from the tty struct 2356 * verifyChannel returns the channel from the tty struct if it is
2539 if it is valid. This serves as a sanity check. 2357 * valid. This serves as a sanity check.
2540 ------------------------------------------------------------- */ 2358 */
2541 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */ 2359 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */
2542 spin_lock_irqsave(&epca_lock, flags); 2360 spin_lock_irqsave(&epca_lock, flags);
2543 globalwinon(ch); 2361 globalwinon(ch);
@@ -2554,47 +2372,40 @@ static void pc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
2554 wake_up_interruptible(&ch->open_wait); 2372 wake_up_interruptible(&ch->open_wait);
2555 2373
2556 } /* End if channel valid */ 2374 } /* End if channel valid */
2557 2375}
2558} /* End pc_set_termios */
2559
2560/* --------------------- Begin do_softint ----------------------- */
2561 2376
2562static void do_softint(struct work_struct *work) 2377static void do_softint(struct work_struct *work)
2563{ /* Begin do_softint */ 2378{
2564 struct channel *ch = container_of(work, struct channel, tqueue); 2379 struct channel *ch = container_of(work, struct channel, tqueue);
2565 /* Called in response to a modem change event */ 2380 /* Called in response to a modem change event */
2566 if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */ 2381 if (ch && ch->magic == EPCA_MAGIC) {
2567 struct tty_struct *tty = ch->tty; 2382 struct tty_struct *tty = ch->tty;
2568 2383
2569 if (tty && tty->driver_data) { 2384 if (tty && tty->driver_data) {
2570 if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) { /* Begin if clear_bit */ 2385 if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) {
2571 tty_hangup(tty); /* FIXME: module removal race here - AKPM */ 2386 tty_hangup(tty); /* FIXME: module removal race here - AKPM */
2572 wake_up_interruptible(&ch->open_wait); 2387 wake_up_interruptible(&ch->open_wait);
2573 ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE; 2388 ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
2574 } /* End if clear_bit */ 2389 }
2575 } 2390 }
2576 } /* End EPCA_MAGIC */ 2391 }
2577} /* End do_softint */ 2392}
2578
2579/* ------------------------------------------------------------
2580 pc_stop and pc_start provide software flow control to the
2581 routine and the pc_ioctl routine.
2582---------------------------------------------------------------- */
2583
2584/* --------------------- Begin pc_stop ----------------------- */
2585 2393
2394/*
2395 * pc_stop and pc_start provide software flow control to the routine and the
2396 * pc_ioctl routine.
2397 */
2586static void pc_stop(struct tty_struct *tty) 2398static void pc_stop(struct tty_struct *tty)
2587{ /* Begin pc_stop */ 2399{
2588
2589 struct channel *ch; 2400 struct channel *ch;
2590 unsigned long flags; 2401 unsigned long flags;
2591 /* --------------------------------------------------------- 2402 /*
2592 verifyChannel returns the channel from the tty struct 2403 * verifyChannel returns the channel from the tty struct if it is
2593 if it is valid. This serves as a sanity check. 2404 * valid. This serves as a sanity check.
2594 ------------------------------------------------------------- */ 2405 */
2595 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if valid channel */ 2406 if ((ch = verifyChannel(tty)) != NULL) {
2596 spin_lock_irqsave(&epca_lock, flags); 2407 spin_lock_irqsave(&epca_lock, flags);
2597 if ((ch->statusflags & TXSTOPPED) == 0) { /* Begin if transmit stop requested */ 2408 if ((ch->statusflags & TXSTOPPED) == 0) { /* Begin if transmit stop requested */
2598 globalwinon(ch); 2409 globalwinon(ch);
2599 /* STOP transmitting now !! */ 2410 /* STOP transmitting now !! */
2600 fepcmd(ch, PAUSETX, 0, 0, 0, 0); 2411 fepcmd(ch, PAUSETX, 0, 0, 0, 0);
@@ -2602,19 +2413,17 @@ static void pc_stop(struct tty_struct *tty)
2602 memoff(ch); 2413 memoff(ch);
2603 } /* End if transmit stop requested */ 2414 } /* End if transmit stop requested */
2604 spin_unlock_irqrestore(&epca_lock, flags); 2415 spin_unlock_irqrestore(&epca_lock, flags);
2605 } /* End if valid channel */ 2416 }
2606} /* End pc_stop */ 2417}
2607
2608/* --------------------- Begin pc_start ----------------------- */
2609 2418
2610static void pc_start(struct tty_struct *tty) 2419static void pc_start(struct tty_struct *tty)
2611{ /* Begin pc_start */ 2420{
2612 struct channel *ch; 2421 struct channel *ch;
2613 /* --------------------------------------------------------- 2422 /*
2614 verifyChannel returns the channel from the tty struct 2423 * verifyChannel returns the channel from the tty struct if it is
2615 if it is valid. This serves as a sanity check. 2424 * valid. This serves as a sanity check.
2616 ------------------------------------------------------------- */ 2425 */
2617 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */ 2426 if ((ch = verifyChannel(tty)) != NULL) {
2618 unsigned long flags; 2427 unsigned long flags;
2619 spin_lock_irqsave(&epca_lock, flags); 2428 spin_lock_irqsave(&epca_lock, flags);
2620 /* Just in case output was resumed because of a change in Digi-flow */ 2429 /* Just in case output was resumed because of a change in Digi-flow */
@@ -2630,28 +2439,25 @@ static void pc_start(struct tty_struct *tty)
2630 memoff(ch); 2439 memoff(ch);
2631 } /* End transmit resume requested */ 2440 } /* End transmit resume requested */
2632 spin_unlock_irqrestore(&epca_lock, flags); 2441 spin_unlock_irqrestore(&epca_lock, flags);
2633 } /* End if channel valid */ 2442 }
2634} /* End pc_start */ 2443}
2635 2444
2636/* ------------------------------------------------------------------ 2445/*
2637 The below routines pc_throttle and pc_unthrottle are used 2446 * The below routines pc_throttle and pc_unthrottle are used to slow (And
2638 to slow (And resume) the receipt of data into the kernels 2447 * resume) the receipt of data into the kernels receive buffers. The exact
2639 receive buffers. The exact occurrence of this depends on the 2448 * occurrence of this depends on the size of the kernels receive buffer and
2640 size of the kernels receive buffer and what the 'watermarks' 2449 * what the 'watermarks' are set to for that buffer. See the n_ttys.c file for
2641 are set to for that buffer. See the n_ttys.c file for more 2450 * more details.
2642 details. 2451 */
2643______________________________________________________________________ */ 2452static void pc_throttle(struct tty_struct *tty)
2644/* --------------------- Begin throttle ----------------------- */ 2453{
2645
2646static void pc_throttle(struct tty_struct * tty)
2647{ /* Begin pc_throttle */
2648 struct channel *ch; 2454 struct channel *ch;
2649 unsigned long flags; 2455 unsigned long flags;
2650 /* --------------------------------------------------------- 2456 /*
2651 verifyChannel returns the channel from the tty struct 2457 * verifyChannel returns the channel from the tty struct if it is
2652 if it is valid. This serves as a sanity check. 2458 * valid. This serves as a sanity check.
2653 ------------------------------------------------------------- */ 2459 */
2654 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */ 2460 if ((ch = verifyChannel(tty)) != NULL) {
2655 spin_lock_irqsave(&epca_lock, flags); 2461 spin_lock_irqsave(&epca_lock, flags);
2656 if ((ch->statusflags & RXSTOPPED) == 0) { 2462 if ((ch->statusflags & RXSTOPPED) == 0) {
2657 globalwinon(ch); 2463 globalwinon(ch);
@@ -2660,20 +2466,18 @@ static void pc_throttle(struct tty_struct * tty)
2660 memoff(ch); 2466 memoff(ch);
2661 } 2467 }
2662 spin_unlock_irqrestore(&epca_lock, flags); 2468 spin_unlock_irqrestore(&epca_lock, flags);
2663 } /* End if channel valid */ 2469 }
2664} /* End pc_throttle */ 2470}
2665
2666/* --------------------- Begin unthrottle ----------------------- */
2667 2471
2668static void pc_unthrottle(struct tty_struct *tty) 2472static void pc_unthrottle(struct tty_struct *tty)
2669{ /* Begin pc_unthrottle */ 2473{
2670 struct channel *ch; 2474 struct channel *ch;
2671 unsigned long flags; 2475 unsigned long flags;
2672 /* --------------------------------------------------------- 2476 /*
2673 verifyChannel returns the channel from the tty struct 2477 * verifyChannel returns the channel from the tty struct if it is
2674 if it is valid. This serves as a sanity check. 2478 * valid. This serves as a sanity check.
2675 ------------------------------------------------------------- */ 2479 */
2676 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */ 2480 if ((ch = verifyChannel(tty)) != NULL) {
2677 /* Just in case output was resumed because of a change in Digi-flow */ 2481 /* Just in case output was resumed because of a change in Digi-flow */
2678 spin_lock_irqsave(&epca_lock, flags); 2482 spin_lock_irqsave(&epca_lock, flags);
2679 if (ch->statusflags & RXSTOPPED) { 2483 if (ch->statusflags & RXSTOPPED) {
@@ -2683,151 +2487,143 @@ static void pc_unthrottle(struct tty_struct *tty)
2683 memoff(ch); 2487 memoff(ch);
2684 } 2488 }
2685 spin_unlock_irqrestore(&epca_lock, flags); 2489 spin_unlock_irqrestore(&epca_lock, flags);
2686 } /* End if channel valid */ 2490 }
2687} /* End pc_unthrottle */ 2491}
2688
2689/* --------------------- Begin digi_send_break ----------------------- */
2690 2492
2691void digi_send_break(struct channel *ch, int msec) 2493void digi_send_break(struct channel *ch, int msec)
2692{ /* Begin digi_send_break */ 2494{
2693 unsigned long flags; 2495 unsigned long flags;
2694 2496
2695 spin_lock_irqsave(&epca_lock, flags); 2497 spin_lock_irqsave(&epca_lock, flags);
2696 globalwinon(ch); 2498 globalwinon(ch);
2697 /* -------------------------------------------------------------------- 2499 /*
2698 Maybe I should send an infinite break here, schedule() for 2500 * Maybe I should send an infinite break here, schedule() for msec
2699 msec amount of time, and then stop the break. This way, 2501 * amount of time, and then stop the break. This way, the user can't
2700 the user can't screw up the FEP by causing digi_send_break() 2502 * screw up the FEP by causing digi_send_break() to be called (i.e. via
2701 to be called (i.e. via an ioctl()) more than once in msec amount 2503 * an ioctl()) more than once in msec amount of time.
2702 of time. Try this for now... 2504 * Try this for now...
2703 ------------------------------------------------------------------------ */ 2505 */
2704 fepcmd(ch, SENDBREAK, msec, 0, 10, 0); 2506 fepcmd(ch, SENDBREAK, msec, 0, 10, 0);
2705 memoff(ch); 2507 memoff(ch);
2706 spin_unlock_irqrestore(&epca_lock, flags); 2508 spin_unlock_irqrestore(&epca_lock, flags);
2707} /* End digi_send_break */ 2509}
2708
2709/* --------------------- Begin setup_empty_event ----------------------- */
2710 2510
2711/* Caller MUST hold the lock */ 2511/* Caller MUST hold the lock */
2712
2713static void setup_empty_event(struct tty_struct *tty, struct channel *ch) 2512static void setup_empty_event(struct tty_struct *tty, struct channel *ch)
2714{ /* Begin setup_empty_event */ 2513{
2715
2716 struct board_chan __iomem *bc = ch->brdchan; 2514 struct board_chan __iomem *bc = ch->brdchan;
2717 2515
2718 globalwinon(ch); 2516 globalwinon(ch);
2719 ch->statusflags |= EMPTYWAIT; 2517 ch->statusflags |= EMPTYWAIT;
2720 /* ------------------------------------------------------------------ 2518 /*
2721 When set the iempty flag request a event to be generated when the 2519 * When set the iempty flag request a event to be generated when the
2722 transmit buffer is empty (If there is no BREAK in progress). 2520 * transmit buffer is empty (If there is no BREAK in progress).
2723 --------------------------------------------------------------------- */ 2521 */
2724 writeb(1, &bc->iempty); 2522 writeb(1, &bc->iempty);
2725 memoff(ch); 2523 memoff(ch);
2726} /* End setup_empty_event */ 2524}
2727 2525
2728/* ---------------------- Begin epca_setup -------------------------- */
2729void epca_setup(char *str, int *ints) 2526void epca_setup(char *str, int *ints)
2730{ /* Begin epca_setup */ 2527{
2731 struct board_info board; 2528 struct board_info board;
2732 int index, loop, last; 2529 int index, loop, last;
2733 char *temp, *t2; 2530 char *temp, *t2;
2734 unsigned len; 2531 unsigned len;
2735 2532
2736 /* ---------------------------------------------------------------------- 2533 /*
2737 If this routine looks a little strange it is because it is only called 2534 * If this routine looks a little strange it is because it is only
2738 if a LILO append command is given to boot the kernel with parameters. 2535 * called if a LILO append command is given to boot the kernel with
2739 In this way, we can provide the user a method of changing his board 2536 * parameters. In this way, we can provide the user a method of
2740 configuration without rebuilding the kernel. 2537 * changing his board configuration without rebuilding the kernel.
2741 ----------------------------------------------------------------------- */ 2538 */
2742 if (!liloconfig) 2539 if (!liloconfig)
2743 liloconfig = 1; 2540 liloconfig = 1;
2744 2541
2745 memset(&board, 0, sizeof(board)); 2542 memset(&board, 0, sizeof(board));
2746 2543
2747 /* Assume the data is int first, later we can change it */ 2544 /* Assume the data is int first, later we can change it */
2748 /* I think that array position 0 of ints holds the number of args */ 2545 /* I think that array position 0 of ints holds the number of args */
2749 for (last = 0, index = 1; index <= ints[0]; index++) 2546 for (last = 0, index = 1; index <= ints[0]; index++)
2750 switch(index) 2547 switch (index) { /* Begin parse switch */
2751 { /* Begin parse switch */ 2548 case 1:
2752 case 1: 2549 board.status = ints[index];
2753 board.status = ints[index]; 2550 /*
2754 /* --------------------------------------------------------- 2551 * We check for 2 (As opposed to 1; because 2 is a flag
2755 We check for 2 (As opposed to 1; because 2 is a flag 2552 * instructing the driver to ignore epcaconfig.) For
2756 instructing the driver to ignore epcaconfig.) For this 2553 * this reason we check for 2.
2757 reason we check for 2. 2554 */
2758 ------------------------------------------------------------ */ 2555 if (board.status == 2) { /* Begin ignore epcaconfig as well as lilo cmd line */
2759 if (board.status == 2) { /* Begin ignore epcaconfig as well as lilo cmd line */ 2556 nbdevs = 0;
2760 nbdevs = 0; 2557 num_cards = 0;
2761 num_cards = 0; 2558 return;
2762 return; 2559 } /* End ignore epcaconfig as well as lilo cmd line */
2763 } /* End ignore epcaconfig as well as lilo cmd line */ 2560
2764 2561 if (board.status > 2) {
2765 if (board.status > 2) { 2562 printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n", board.status);
2766 printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n", board.status); 2563 invalid_lilo_config = 1;
2767 invalid_lilo_config = 1; 2564 setup_error_code |= INVALID_BOARD_STATUS;
2768 setup_error_code |= INVALID_BOARD_STATUS; 2565 return;
2769 return; 2566 }
2770 } 2567 last = index;
2771 last = index; 2568 break;
2772 break; 2569 case 2:
2773 case 2: 2570 board.type = ints[index];
2774 board.type = ints[index]; 2571 if (board.type >= PCIXEM) {
2775 if (board.type >= PCIXEM) { 2572 printk(KERN_ERR "epca_setup: Invalid board type 0x%x\n", board.type);
2776 printk(KERN_ERR "epca_setup: Invalid board type 0x%x\n", board.type); 2573 invalid_lilo_config = 1;
2777 invalid_lilo_config = 1; 2574 setup_error_code |= INVALID_BOARD_TYPE;
2778 setup_error_code |= INVALID_BOARD_TYPE; 2575 return;
2779 return; 2576 }
2780 } 2577 last = index;
2781 last = index; 2578 break;
2782 break; 2579 case 3:
2783 case 3: 2580 board.altpin = ints[index];
2784 board.altpin = ints[index]; 2581 if (board.altpin > 1) {
2785 if (board.altpin > 1) { 2582 printk(KERN_ERR "epca_setup: Invalid board altpin 0x%x\n", board.altpin);
2786 printk(KERN_ERR "epca_setup: Invalid board altpin 0x%x\n", board.altpin); 2583 invalid_lilo_config = 1;
2787 invalid_lilo_config = 1; 2584 setup_error_code |= INVALID_ALTPIN;
2788 setup_error_code |= INVALID_ALTPIN; 2585 return;
2789 return; 2586 }
2790 } 2587 last = index;
2791 last = index; 2588 break;
2792 break; 2589
2793 2590 case 4:
2794 case 4: 2591 board.numports = ints[index];
2795 board.numports = ints[index]; 2592 if (board.numports < 2 || board.numports > 256) {
2796 if (board.numports < 2 || board.numports > 256) { 2593 printk(KERN_ERR "epca_setup: Invalid board numports 0x%x\n", board.numports);
2797 printk(KERN_ERR "epca_setup: Invalid board numports 0x%x\n", board.numports); 2594 invalid_lilo_config = 1;
2798 invalid_lilo_config = 1; 2595 setup_error_code |= INVALID_NUM_PORTS;
2799 setup_error_code |= INVALID_NUM_PORTS; 2596 return;
2800 return; 2597 }
2801 } 2598 nbdevs += board.numports;
2802 nbdevs += board.numports; 2599 last = index;
2803 last = index; 2600 break;
2804 break;
2805
2806 case 5:
2807 board.port = ints[index];
2808 if (ints[index] <= 0) {
2809 printk(KERN_ERR "epca_setup: Invalid io port 0x%x\n", (unsigned int)board.port);
2810 invalid_lilo_config = 1;
2811 setup_error_code |= INVALID_PORT_BASE;
2812 return;
2813 }
2814 last = index;
2815 break;
2816
2817 case 6:
2818 board.membase = ints[index];
2819 if (ints[index] <= 0) {
2820 printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n",(unsigned int)board.membase);
2821 invalid_lilo_config = 1;
2822 setup_error_code |= INVALID_MEM_BASE;
2823 return;
2824 }
2825 last = index;
2826 break;
2827 2601
2828 default: 2602 case 5:
2829 printk(KERN_ERR "<Error> - epca_setup: Too many integer parms\n"); 2603 board.port = ints[index];
2604 if (ints[index] <= 0) {
2605 printk(KERN_ERR "epca_setup: Invalid io port 0x%x\n", (unsigned int)board.port);
2606 invalid_lilo_config = 1;
2607 setup_error_code |= INVALID_PORT_BASE;
2830 return; 2608 return;
2609 }
2610 last = index;
2611 break;
2612
2613 case 6:
2614 board.membase = ints[index];
2615 if (ints[index] <= 0) {
2616 printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n",(unsigned int)board.membase);
2617 invalid_lilo_config = 1;
2618 setup_error_code |= INVALID_MEM_BASE;
2619 return;
2620 }
2621 last = index;
2622 break;
2623
2624 default:
2625 printk(KERN_ERR "<Error> - epca_setup: Too many integer parms\n");
2626 return;
2831 2627
2832 } /* End parse switch */ 2628 } /* End parse switch */
2833 2629
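The integer-argument switch above relies on the old boot-parameter convention: the handler receives an int array in which element 0 carries how many integers followed "epca=" on the command line, and the real values start at index 1 (the driver comment hedges with "I think", but that is the contract of the old-style setup handlers). A minimal userspace sketch of walking such an array, with invented field names, looks like this:

#include <stdio.h>

/*
 * Old-style setup handlers receive an int array filled by the boot-line
 * parser: ints[0] holds how many integers followed the option, and
 * ints[1..] hold the values.  Field names below are invented.
 */
static void parse_ints(const int *ints)
{
	int index;

	for (index = 1; index <= ints[0]; index++) {
		switch (index) {
		case 1:
			printf("status = %d\n", ints[index]);
			break;
		case 2:
			printf("type   = %d\n", ints[index]);
			break;
		case 3:
			printf("altpin = %d\n", ints[index]);
			break;
		default:
			printf("too many integer parms\n");
			return;
		}
	}
}

int main(void)
{
	int ints[] = { 3, 1, 0, 1 };	/* e.g. "epca=1,0,1" gives three ints */

	parse_ints(ints);
	return 0;
}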
@@ -2844,120 +2640,121 @@ void epca_setup(char *str, int *ints)
2844 /* Set index to the number of args + 1 */ 2640 /* Set index to the number of args + 1 */
2845 index = last + 1; 2641 index = last + 1;
2846 2642
2847 switch(index) 2643 switch (index) {
2848 { 2644 case 1:
2849 case 1: 2645 len = strlen(str);
2850 len = strlen(str); 2646 if (strncmp("Disable", str, len) == 0)
2851 if (strncmp("Disable", str, len) == 0) 2647 board.status = 0;
2852 board.status = 0; 2648 else if (strncmp("Enable", str, len) == 0)
2853 else if (strncmp("Enable", str, len) == 0) 2649 board.status = 1;
2854 board.status = 1; 2650 else {
2855 else { 2651 printk(KERN_ERR "epca_setup: Invalid status %s\n", str);
2856 printk(KERN_ERR "epca_setup: Invalid status %s\n", str); 2652 invalid_lilo_config = 1;
2857 invalid_lilo_config = 1; 2653 setup_error_code |= INVALID_BOARD_STATUS;
2858 setup_error_code |= INVALID_BOARD_STATUS; 2654 return;
2859 return; 2655 }
2860 } 2656 last = index;
2861 last = index; 2657 break;
2862 break;
2863
2864 case 2:
2865 for(loop = 0; loop < EPCA_NUM_TYPES; loop++)
2866 if (strcmp(board_desc[loop], str) == 0)
2867 break;
2868 /* ---------------------------------------------------------------
2869 If the index incremented above refers to a legitamate board
2870 type set it here.
2871 ------------------------------------------------------------------*/
2872 if (index < EPCA_NUM_TYPES)
2873 board.type = loop;
2874 else {
2875 printk(KERN_ERR "epca_setup: Invalid board type: %s\n", str);
2876 invalid_lilo_config = 1;
2877 setup_error_code |= INVALID_BOARD_TYPE;
2878 return;
2879 }
2880 last = index;
2881 break;
2882
2883 case 3:
2884 len = strlen(str);
2885 if (strncmp("Disable", str, len) == 0)
2886 board.altpin = 0;
2887 else if (strncmp("Enable", str, len) == 0)
2888 board.altpin = 1;
2889 else {
2890 printk(KERN_ERR "epca_setup: Invalid altpin %s\n", str);
2891 invalid_lilo_config = 1;
2892 setup_error_code |= INVALID_ALTPIN;
2893 return;
2894 }
2895 last = index;
2896 break;
2897
2898 case 4:
2899 t2 = str;
2900 while (isdigit(*t2))
2901 t2++;
2902
2903 if (*t2) {
2904 printk(KERN_ERR "epca_setup: Invalid port count %s\n", str);
2905 invalid_lilo_config = 1;
2906 setup_error_code |= INVALID_NUM_PORTS;
2907 return;
2908 }
2909 2658
2910 /* ------------------------------------------------------------ 2659 case 2:
2911 There is not a man page for simple_strtoul but the code can be 2660 for (loop = 0; loop < EPCA_NUM_TYPES; loop++)
2912 found in vsprintf.c. The first argument is the string to 2661 if (strcmp(board_desc[loop], str) == 0)
2913 translate (To an unsigned long obviously), the second argument 2662 break;
2914 can be the address of any character variable or a NULL. If a 2663 /*
2915 variable is given, the end pointer of the string will be stored 2664 * If the index incremented above refers to a
2916 in that variable; if a NULL is given the end pointer will 2665 * legitamate board type set it here.
2917 not be returned. The last argument is the base to use. If 2666 */
2918 a 0 is indicated, the routine will attempt to determine the 2667 if (index < EPCA_NUM_TYPES)
2919 proper base by looking at the values prefix (A '0' for octal, 2668 board.type = loop;
2920 a 'x' for hex, etc ... If a value is given it will use that 2669 else {
2921 value as the base. 2670 printk(KERN_ERR "epca_setup: Invalid board type: %s\n", str);
2922 ---------------------------------------------------------------- */ 2671 invalid_lilo_config = 1;
2923 board.numports = simple_strtoul(str, NULL, 0); 2672 setup_error_code |= INVALID_BOARD_TYPE;
2924 nbdevs += board.numports; 2673 return;
2925 last = index; 2674 }
2926 break; 2675 last = index;
2927 2676 break;
2928 case 5:
2929 t2 = str;
2930 while (isxdigit(*t2))
2931 t2++;
2932
2933 if (*t2) {
2934 printk(KERN_ERR "epca_setup: Invalid i/o address %s\n", str);
2935 invalid_lilo_config = 1;
2936 setup_error_code |= INVALID_PORT_BASE;
2937 return;
2938 }
2939 2677
2940 board.port = simple_strtoul(str, NULL, 16); 2678 case 3:
2941 last = index; 2679 len = strlen(str);
2942 break; 2680 if (strncmp("Disable", str, len) == 0)
2681 board.altpin = 0;
2682 else if (strncmp("Enable", str, len) == 0)
2683 board.altpin = 1;
2684 else {
2685 printk(KERN_ERR "epca_setup: Invalid altpin %s\n", str);
2686 invalid_lilo_config = 1;
2687 setup_error_code |= INVALID_ALTPIN;
2688 return;
2689 }
2690 last = index;
2691 break;
2943 2692
2944 case 6: 2693 case 4:
2945 t2 = str; 2694 t2 = str;
2946 while (isxdigit(*t2)) 2695 while (isdigit(*t2))
2947 t2++; 2696 t2++;
2948 2697
2949 if (*t2) { 2698 if (*t2) {
2950 printk(KERN_ERR "epca_setup: Invalid memory base %s\n",str); 2699 printk(KERN_ERR "epca_setup: Invalid port count %s\n", str);
2951 invalid_lilo_config = 1; 2700 invalid_lilo_config = 1;
2952 setup_error_code |= INVALID_MEM_BASE; 2701 setup_error_code |= INVALID_NUM_PORTS;
2953 return; 2702 return;
2954 } 2703 }
2955 board.membase = simple_strtoul(str, NULL, 16); 2704
2956 last = index; 2705 /*
2957 break; 2706 * There is not a man page for simple_strtoul but the
2958 default: 2707 * code can be found in vsprintf.c. The first argument
2959 printk(KERN_ERR "epca: Too many string parms\n"); 2708 * is the string to translate (To an unsigned long
2709 * obviously), the second argument can be the address
2710 * of any character variable or a NULL. If a variable
2711 * is given, the end pointer of the string will be
2712 * stored in that variable; if a NULL is given the end
2713 * pointer will not be returned. The last argument is
2714 * the base to use. If a 0 is indicated, the routine
2715 * will attempt to determine the proper base by looking
2716 * at the values prefix (A '0' for octal, a 'x' for
2717 * hex, etc ... If a value is given it will use that
2718 * value as the base.
2719 */
2720 board.numports = simple_strtoul(str, NULL, 0);
2721 nbdevs += board.numports;
2722 last = index;
2723 break;
2724
2725 case 5:
2726 t2 = str;
2727 while (isxdigit(*t2))
2728 t2++;
2729
2730 if (*t2) {
2731 printk(KERN_ERR "epca_setup: Invalid i/o address %s\n", str);
2732 invalid_lilo_config = 1;
2733 setup_error_code |= INVALID_PORT_BASE;
2734 return;
2735 }
2736
2737 board.port = simple_strtoul(str, NULL, 16);
2738 last = index;
2739 break;
2740
2741 case 6:
2742 t2 = str;
2743 while (isxdigit(*t2))
2744 t2++;
2745
2746 if (*t2) {
2747 printk(KERN_ERR "epca_setup: Invalid memory base %s\n",str);
2748 invalid_lilo_config = 1;
2749 setup_error_code |= INVALID_MEM_BASE;
2960 return; 2750 return;
2751 }
2752 board.membase = simple_strtoul(str, NULL, 16);
2753 last = index;
2754 break;
2755 default:
2756 printk(KERN_ERR "epca: Too many string parms\n");
2757 return;
2961 } 2758 }
2962 str = temp; 2759 str = temp;
2963 } /* End while there is a string arg */ 2760 } /* End while there is a string arg */
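The long comment kept in case 4 above describes simple_strtoul()'s base handling; userspace strtoul() selects the base the same way, so the base-0 versus base-16 distinction that epca_setup() depends on can be shown directly (the input strings below are arbitrary examples):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* base 0: the prefix selects the base ("0" octal, "0x" hex,
	 * otherwise decimal), as used for the port count */
	printf("%lu\n", strtoul("16",   NULL, 0));	/* 16 */
	printf("%lu\n", strtoul("020",  NULL, 0));	/* 16 */
	printf("%lu\n", strtoul("0x10", NULL, 0));	/* 16 */

	/* explicit base 16, as used for the i/o and memory addresses,
	 * so a bare "d0000" parses without an 0x prefix */
	printf("%#lx\n", strtoul("d0000", NULL, 16));	/* 0xd0000 */
	return 0;
}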
@@ -2966,19 +2763,16 @@ void epca_setup(char *str, int *ints)
2966 printk(KERN_ERR "epca: Insufficient parms specified\n"); 2763 printk(KERN_ERR "epca: Insufficient parms specified\n");
2967 return; 2764 return;
2968 } 2765 }
2969 2766
2970 /* I should REALLY validate the stuff here */ 2767 /* I should REALLY validate the stuff here */
2971 /* Copies our local copy of board into boards */ 2768 /* Copies our local copy of board into boards */
2972 memcpy((void *)&boards[num_cards],(void *)&board, sizeof(board)); 2769 memcpy((void *)&boards[num_cards],(void *)&board, sizeof(board));
2973 /* Does this get called once per lilo arg are what ? */ 2770 /* Does this get called once per lilo arg are what ? */
2974 printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n", 2771 printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n",
2975 num_cards, board_desc[board.type], 2772 num_cards, board_desc[board.type],
2976 board.numports, (int)board.port, (unsigned int) board.membase); 2773 board.numports, (int)board.port, (unsigned int) board.membase);
2977 num_cards++; 2774 num_cards++;
2978} /* End epca_setup */ 2775}
2979
2980
2981/* ------------------------ Begin init_PCI --------------------------- */
2982 2776
2983enum epic_board_types { 2777enum epic_board_types {
2984 brd_xr = 0, 2778 brd_xr = 0,
@@ -2987,7 +2781,6 @@ enum epic_board_types {
2987 brd_xrj, 2781 brd_xrj,
2988}; 2782};
2989 2783
2990
2991/* indexed directly by epic_board_types enum */ 2784/* indexed directly by epic_board_types enum */
2992static struct { 2785static struct {
2993 unsigned char board_type; 2786 unsigned char board_type;
@@ -2999,7 +2792,7 @@ static struct {
2999 { PCIXRJ, 2, }, 2792 { PCIXRJ, 2, },
3000}; 2793};
3001 2794
3002static int __devinit epca_init_one (struct pci_dev *pdev, 2795static int __devinit epca_init_one(struct pci_dev *pdev,
3003 const struct pci_device_id *ent) 2796 const struct pci_device_id *ent)
3004{ 2797{
3005 static int board_num = -1; 2798 static int board_num = -1;
@@ -3013,7 +2806,7 @@ static int __devinit epca_init_one (struct pci_dev *pdev,
3013 board_idx = board_num + num_cards; 2806 board_idx = board_num + num_cards;
3014 if (board_idx >= MAXBOARDS) 2807 if (board_idx >= MAXBOARDS)
3015 goto err_out; 2808 goto err_out;
3016 2809
3017 addr = pci_resource_start (pdev, epca_info_tbl[info_idx].bar_idx); 2810 addr = pci_resource_start (pdev, epca_info_tbl[info_idx].bar_idx);
3018 if (!addr) { 2811 if (!addr) {
3019 printk (KERN_ERR PFX "PCI region #%d not available (size 0)\n", 2812 printk (KERN_ERR PFX "PCI region #%d not available (size 0)\n",
@@ -3053,15 +2846,15 @@ static int __devinit epca_init_one (struct pci_dev *pdev,
3053 goto err_out_free_memregion; 2846 goto err_out_free_memregion;
3054 } 2847 }
3055 2848
3056 /* -------------------------------------------------------------- 2849 /*
3057 I don't know what the below does, but the hardware guys say 2850 * I don't know what the below does, but the hardware guys say its
3058 its required on everything except PLX (In this case XRJ). 2851 * required on everything except PLX (In this case XRJ).
3059 ---------------------------------------------------------------- */ 2852 */
3060 if (info_idx != brd_xrj) { 2853 if (info_idx != brd_xrj) {
3061 pci_write_config_byte(pdev, 0x40, 0); 2854 pci_write_config_byte(pdev, 0x40, 0);
3062 pci_write_config_byte(pdev, 0x46, 0); 2855 pci_write_config_byte(pdev, 0x46, 0);
3063 } 2856 }
3064 2857
3065 return 0; 2858 return 0;
3066 2859
3067err_out_free_memregion: 2860err_out_free_memregion:
@@ -3086,7 +2879,7 @@ static struct pci_device_id epca_pci_tbl[] = {
3086MODULE_DEVICE_TABLE(pci, epca_pci_tbl); 2879MODULE_DEVICE_TABLE(pci, epca_pci_tbl);
3087 2880
3088int __init init_PCI (void) 2881int __init init_PCI (void)
3089{ /* Begin init_PCI */ 2882{
3090 memset (&epca_driver, 0, sizeof (epca_driver)); 2883 memset (&epca_driver, 0, sizeof (epca_driver));
3091 epca_driver.name = "epca"; 2884 epca_driver.name = "epca";
3092 epca_driver.id_table = epca_pci_tbl; 2885 epca_driver.id_table = epca_pci_tbl;
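Most of the epca.c churn above is a mechanical conversion to the kernel coding style: banner comments become standard block comments, the brace moves onto the switch or function line, case labels sit at switch indentation, and the "Begin/End" markers disappear. A before/after sketch of the pattern, using an invented function rather than the driver's own code, is:

#include <stdio.h>

/* Before: banner comment, Allman-style braces and End markers.
   -------------------------------------------------------------------
   Handle one command.  See tty_ioctl.c for a good example.
   ---------------------------------------------------------------- */
static int old_style(int cmd)
{ /* Begin old_style */
	switch (cmd)
	{ /* Begin switch cmd */
		case 0:
			return 1;
		default:
			return -1;
	} /* End switch cmd */
} /* End old_style */

/*
 * After: kernel-style block comment, brace on the switch line, case
 * labels at switch indentation, no end-of-block markers.
 */
static int new_style(int cmd)
{
	switch (cmd) {
	case 0:
		return 1;
	default:
		return -1;
	}
}

int main(void)
{
	printf("%d %d\n", old_style(0), new_style(0));
	return 0;
}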
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 83c1151ec7a2..8252f8668538 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -69,6 +69,8 @@ static struct task_struct *hvc_task;
69/* Picks up late kicks after list walk but before schedule() */ 69/* Picks up late kicks after list walk but before schedule() */
70static int hvc_kicked; 70static int hvc_kicked;
71 71
72static int hvc_init(void);
73
72#ifdef CONFIG_MAGIC_SYSRQ 74#ifdef CONFIG_MAGIC_SYSRQ
73static int sysrq_pressed; 75static int sysrq_pressed;
74#endif 76#endif
@@ -754,6 +756,13 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
754 struct hvc_struct *hp; 756 struct hvc_struct *hp;
755 int i; 757 int i;
756 758
759 /* We wait until a driver actually comes along */
760 if (!hvc_driver) {
761 int err = hvc_init();
762 if (err)
763 return ERR_PTR(err);
764 }
765
757 hp = kmalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size, 766 hp = kmalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size,
758 GFP_KERNEL); 767 GFP_KERNEL);
759 if (!hp) 768 if (!hp)
@@ -829,16 +838,18 @@ int __devexit hvc_remove(struct hvc_struct *hp)
829 return 0; 838 return 0;
830} 839}
831 840
832/* Driver initialization. Follow console initialization. This is where the TTY 841/* Driver initialization: called as soon as someone uses hvc_alloc(). */
833 * interfaces start to become available. */ 842static int hvc_init(void)
834static int __init hvc_init(void)
835{ 843{
836 struct tty_driver *drv; 844 struct tty_driver *drv;
845 int err;
837 846
838 /* We need more than hvc_count adapters due to hotplug additions. */ 847 /* We need more than hvc_count adapters due to hotplug additions. */
839 drv = alloc_tty_driver(HVC_ALLOC_TTY_ADAPTERS); 848 drv = alloc_tty_driver(HVC_ALLOC_TTY_ADAPTERS);
840 if (!drv) 849 if (!drv) {
841 return -ENOMEM; 850 err = -ENOMEM;
851 goto out;
852 }
842 853
843 drv->owner = THIS_MODULE; 854 drv->owner = THIS_MODULE;
844 drv->driver_name = "hvc"; 855 drv->driver_name = "hvc";
@@ -854,30 +865,43 @@ static int __init hvc_init(void)
854 * added later. */ 865 * added later. */
855 hvc_task = kthread_run(khvcd, NULL, "khvcd"); 866 hvc_task = kthread_run(khvcd, NULL, "khvcd");
856 if (IS_ERR(hvc_task)) { 867 if (IS_ERR(hvc_task)) {
857 panic("Couldn't create kthread for console.\n"); 868 printk(KERN_ERR "Couldn't create kthread for console.\n");
858 put_tty_driver(drv); 869 err = PTR_ERR(hvc_task);
859 return -EIO; 870 goto put_tty;
860 } 871 }
861 872
862 if (tty_register_driver(drv)) 873 err = tty_register_driver(drv);
863 panic("Couldn't register hvc console driver\n"); 874 if (err) {
875 printk(KERN_ERR "Couldn't register hvc console driver\n");
876 goto stop_thread;
877 }
864 878
879 /* FIXME: This mb() seems completely random. Remove it. */
865 mb(); 880 mb();
866 hvc_driver = drv; 881 hvc_driver = drv;
867 return 0; 882 return 0;
883
884put_tty:
885 put_tty_driver(hvc_driver);
886stop_thread:
887 kthread_stop(hvc_task);
888 hvc_task = NULL;
889out:
890 return err;
868} 891}
869module_init(hvc_init);
870 892
871/* This isn't particularly necessary due to this being a console driver 893/* This isn't particularly necessary due to this being a console driver
872 * but it is nice to be thorough. 894 * but it is nice to be thorough.
873 */ 895 */
874static void __exit hvc_exit(void) 896static void __exit hvc_exit(void)
875{ 897{
876 kthread_stop(hvc_task); 898 if (hvc_driver) {
899 kthread_stop(hvc_task);
877 900
878 tty_unregister_driver(hvc_driver); 901 tty_unregister_driver(hvc_driver);
879 /* return tty_struct instances allocated in hvc_init(). */ 902 /* return tty_struct instances allocated in hvc_init(). */
880 put_tty_driver(hvc_driver); 903 put_tty_driver(hvc_driver);
881 unregister_console(&hvc_con_driver); 904 unregister_console(&hvc_con_driver);
905 }
882} 906}
883module_exit(hvc_exit); 907module_exit(hvc_exit);
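The hvc_console change above replaces module_init-time setup with initialisation on first use: hvc_alloc() calls hvc_init() the first time a backend registers, and failures unwind through labelled gotos instead of panicking. A small userspace analogue of that pattern follows; the allocation and registration helpers are stand-ins, not the real tty API, and the sketch releases the local pointer in its cleanup path.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static void *hvc_driver;	/* stand-in for the real driver pointer */

static void *alloc_driver(void) { return malloc(64); }
static int register_driver(void *drv) { (void)drv; return 0; }

static int lazy_init(void)
{
	void *drv;
	int err;

	drv = alloc_driver();
	if (!drv) {
		err = -ENOMEM;
		goto out;
	}
	err = register_driver(drv);
	if (err)
		goto put_drv;
	hvc_driver = drv;
	return 0;

put_drv:
	free(drv);
out:
	return err;
}

/* Mirrors the new hvc_alloc() entry check: the first real user triggers
 * initialisation instead of module_init(). */
static int use_console(void)
{
	if (!hvc_driver) {
		int err = lazy_init();
		if (err)
			return err;
	}
	return 0;	/* ... proceed to allocate the per-console struct ... */
}

int main(void)
{
	printf("use_console() -> %d\n", use_console());
	return 0;
}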
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index bd94d5f9e62b..2124dce38f2b 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -619,11 +619,7 @@ ip2_loadmain(int *iop, int *irqp, unsigned char *firmware, int firmsize)
619 ip2config.irq[i] = pci_dev_i->irq; 619 ip2config.irq[i] = pci_dev_i->irq;
620 } else { // ann error 620 } else { // ann error
621 ip2config.addr[i] = 0; 621 ip2config.addr[i] = 0;
622 if (status == PCIBIOS_DEVICE_NOT_FOUND) { 622 printk(KERN_ERR "IP2: PCI board %d not found\n", i);
623 printk( KERN_ERR "IP2: PCI board %d not found\n", i );
624 } else {
625 printk( KERN_ERR "IP2: PCI error 0x%x \n", status );
626 }
627 } 623 }
628 } 624 }
629#else 625#else
@@ -646,10 +642,9 @@ ip2_loadmain(int *iop, int *irqp, unsigned char *firmware, int firmsize)
646 642
647 for ( i = 0; i < IP2_MAX_BOARDS; ++i ) { 643 for ( i = 0; i < IP2_MAX_BOARDS; ++i ) {
648 if ( ip2config.addr[i] ) { 644 if ( ip2config.addr[i] ) {
649 pB = kmalloc( sizeof(i2eBordStr), GFP_KERNEL); 645 pB = kzalloc(sizeof(i2eBordStr), GFP_KERNEL);
650 if ( pB != NULL ) { 646 if (pB) {
651 i2BoardPtrTable[i] = pB; 647 i2BoardPtrTable[i] = pB;
652 memset( pB, 0, sizeof(i2eBordStr) );
653 iiSetAddress( pB, ip2config.addr[i], ii2DelayTimer ); 648 iiSetAddress( pB, ip2config.addr[i], ii2DelayTimer );
654 iiReset( pB ); 649 iiReset( pB );
655 } else { 650 } else {
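The ip2main hunk above folds kmalloc()+memset() into kzalloc(), which hands back already-zeroed memory. The userspace equivalent is replacing malloc()+memset() with calloc(), as in this small sketch (the struct and its fields are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct board {
	int nports;
	char name[32];
};

int main(void)
{
	/* old pattern: allocate, then zero by hand */
	struct board *a = malloc(sizeof(*a));
	if (a)
		memset(a, 0, sizeof(*a));

	/* new pattern: one call returns zeroed memory, the userspace
	 * counterpart of kmalloc()+memset() becoming kzalloc() */
	struct board *b = calloc(1, sizeof(*b));

	printf("%d %d\n", a ? a->nports : -1, b ? b->nports : -1);
	free(a);
	free(b);
	return 0;
}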
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 6a01dd9e43f8..8435fba73daf 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -4136,7 +4136,7 @@ static __exit void cleanup_ipmi(void)
4136 del_timer_sync(&ipmi_timer); 4136 del_timer_sync(&ipmi_timer);
4137 4137
4138#ifdef CONFIG_PROC_FS 4138#ifdef CONFIG_PROC_FS
4139 remove_proc_entry(proc_ipmi_root->name, &proc_root); 4139 remove_proc_entry(proc_ipmi_root->name, NULL);
4140#endif /* CONFIG_PROC_FS */ 4140#endif /* CONFIG_PROC_FS */
4141 4141
4142 driver_unregister(&ipmidriver); 4142 driver_unregister(&ipmidriver);
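The ipmi_msghandler change passes NULL as the parent to remove_proc_entry(), which names the /proc root without reaching for the proc_root symbol. A minimal module sketch of creating and removing an entry directly under /proc, assuming the classic procfs API of that era and an invented entry name:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/errno.h>

static int __init demo_init(void)
{
	/* NULL parent means "directly under /proc" */
	if (!proc_mkdir("demo", NULL))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");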
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a2894d425153..c1222e98525d 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1072,19 +1072,19 @@ static char *si_type[SI_MAX_PARMS];
1072#define MAX_SI_TYPE_STR 30 1072#define MAX_SI_TYPE_STR 30
1073static char si_type_str[MAX_SI_TYPE_STR]; 1073static char si_type_str[MAX_SI_TYPE_STR];
1074static unsigned long addrs[SI_MAX_PARMS]; 1074static unsigned long addrs[SI_MAX_PARMS];
1075static int num_addrs; 1075static unsigned int num_addrs;
1076static unsigned int ports[SI_MAX_PARMS]; 1076static unsigned int ports[SI_MAX_PARMS];
1077static int num_ports; 1077static unsigned int num_ports;
1078static int irqs[SI_MAX_PARMS]; 1078static int irqs[SI_MAX_PARMS];
1079static int num_irqs; 1079static unsigned int num_irqs;
1080static int regspacings[SI_MAX_PARMS]; 1080static int regspacings[SI_MAX_PARMS];
1081static int num_regspacings; 1081static unsigned int num_regspacings;
1082static int regsizes[SI_MAX_PARMS]; 1082static int regsizes[SI_MAX_PARMS];
1083static int num_regsizes; 1083static unsigned int num_regsizes;
1084static int regshifts[SI_MAX_PARMS]; 1084static int regshifts[SI_MAX_PARMS];
1085static int num_regshifts; 1085static unsigned int num_regshifts;
1086static int slave_addrs[SI_MAX_PARMS]; 1086static int slave_addrs[SI_MAX_PARMS];
1087static int num_slave_addrs; 1087static unsigned int num_slave_addrs;
1088 1088
1089#define IPMI_IO_ADDR_SPACE 0 1089#define IPMI_IO_ADDR_SPACE 0
1090#define IPMI_MEM_ADDR_SPACE 1 1090#define IPMI_MEM_ADDR_SPACE 1
@@ -1106,12 +1106,12 @@ MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1106 " interface separated by commas. The types are 'kcs'," 1106 " interface separated by commas. The types are 'kcs',"
1107 " 'smic', and 'bt'. For example si_type=kcs,bt will set" 1107 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
1108 " the first interface to kcs and the second to bt"); 1108 " the first interface to kcs and the second to bt");
1109module_param_array(addrs, long, &num_addrs, 0); 1109module_param_array(addrs, ulong, &num_addrs, 0);
1110MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the" 1110MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1111 " addresses separated by commas. Only use if an interface" 1111 " addresses separated by commas. Only use if an interface"
1112 " is in memory. Otherwise, set it to zero or leave" 1112 " is in memory. Otherwise, set it to zero or leave"
1113 " it blank."); 1113 " it blank.");
1114module_param_array(ports, int, &num_ports, 0); 1114module_param_array(ports, uint, &num_ports, 0);
1115MODULE_PARM_DESC(ports, "Sets the port address of each interface, the" 1115MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1116 " addresses separated by commas. Only use if an interface" 1116 " addresses separated by commas. Only use if an interface"
1117 " is a port. Otherwise, set it to zero or leave" 1117 " is a port. Otherwise, set it to zero or leave"
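The ipmi_si_intf hunk matches each module_param_array() type token to the real C type of the array (long becomes ulong, int becomes uint) and makes every count variable unsigned int, which is what the array-parameter helpers expect. A stripped-down module carrying just one such parameter, mirroring the corrected declarations:

#include <linux/module.h>
#include <linux/moduleparam.h>

#define MAX_PARMS 4

/* The element type token ("uint") matches the array's C type, and the
 * count is an unsigned int, as in the corrected ipmi_si declarations. */
static unsigned int ports[MAX_PARMS];
static unsigned int num_ports;
module_param_array(ports, uint, &num_ports, 0);
MODULE_PARM_DESC(ports, "Port address of each interface, comma separated");

static int __init demo_init(void)
{
	unsigned int i;

	for (i = 0; i < num_ports; i++)
		printk(KERN_INFO "demo: port[%u] = 0x%x\n", i, ports[i]);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");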
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index d95f316afb5a..212276affa1f 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -38,6 +38,7 @@
38#include <linux/kbd_kern.h> 38#include <linux/kbd_kern.h>
39#include <linux/kbd_diacr.h> 39#include <linux/kbd_diacr.h>
40#include <linux/vt_kern.h> 40#include <linux/vt_kern.h>
41#include <linux/consolemap.h>
41#include <linux/sysrq.h> 42#include <linux/sysrq.h>
42#include <linux/input.h> 43#include <linux/input.h>
43#include <linux/reboot.h> 44#include <linux/reboot.h>
@@ -403,9 +404,12 @@ static unsigned int handle_diacr(struct vc_data *vc, unsigned int ch)
403 return d; 404 return d;
404 405
405 if (kbd->kbdmode == VC_UNICODE) 406 if (kbd->kbdmode == VC_UNICODE)
406 to_utf8(vc, conv_8bit_to_uni(d)); 407 to_utf8(vc, d);
407 else if (d < 0x100) 408 else {
408 put_queue(vc, d); 409 int c = conv_uni_to_8bit(d);
410 if (c != -1)
411 put_queue(vc, c);
412 }
409 413
410 return ch; 414 return ch;
411} 415}
@@ -417,9 +421,12 @@ static void fn_enter(struct vc_data *vc)
417{ 421{
418 if (diacr) { 422 if (diacr) {
419 if (kbd->kbdmode == VC_UNICODE) 423 if (kbd->kbdmode == VC_UNICODE)
420 to_utf8(vc, conv_8bit_to_uni(diacr)); 424 to_utf8(vc, diacr);
421 else if (diacr < 0x100) 425 else {
422 put_queue(vc, diacr); 426 int c = conv_uni_to_8bit(diacr);
427 if (c != -1)
428 put_queue(vc, c);
429 }
423 diacr = 0; 430 diacr = 0;
424 } 431 }
425 put_queue(vc, 13); 432 put_queue(vc, 13);
@@ -627,9 +634,12 @@ static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
627 return; 634 return;
628 } 635 }
629 if (kbd->kbdmode == VC_UNICODE) 636 if (kbd->kbdmode == VC_UNICODE)
630 to_utf8(vc, conv_8bit_to_uni(value)); 637 to_utf8(vc, value);
631 else if (value < 0x100) 638 else {
632 put_queue(vc, value); 639 int c = conv_uni_to_8bit(value);
640 if (c != -1)
641 put_queue(vc, c);
642 }
633} 643}
634 644
635/* 645/*
@@ -646,7 +656,12 @@ static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag)
646 656
647static void k_self(struct vc_data *vc, unsigned char value, char up_flag) 657static void k_self(struct vc_data *vc, unsigned char value, char up_flag)
648{ 658{
649 k_unicode(vc, value, up_flag); 659 unsigned int uni;
660 if (kbd->kbdmode == VC_UNICODE)
661 uni = value;
662 else
663 uni = conv_8bit_to_uni(value);
664 k_unicode(vc, uni, up_flag);
650} 665}
651 666
652static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag) 667static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag)
@@ -1366,7 +1381,7 @@ int __init kbd_init(void)
1366 kbd_table[i].lockstate = KBD_DEFLOCK; 1381 kbd_table[i].lockstate = KBD_DEFLOCK;
1367 kbd_table[i].slockstate = 0; 1382 kbd_table[i].slockstate = 0;
1368 kbd_table[i].modeflags = KBD_DEFMODE; 1383 kbd_table[i].modeflags = KBD_DEFMODE;
1369 kbd_table[i].kbdmode = VC_XLATE; 1384 kbd_table[i].kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE;
1370 } 1385 }
1371 1386
1372 error = input_register_handler(&kbd_handler); 1387 error = input_register_handler(&kbd_handler);
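The keyboard.c hunks route everything through Unicode internally: in VC_UNICODE mode the code point goes out as UTF-8, otherwise conv_uni_to_8bit() translates it into the console's 8-bit set and returns -1 when no translation exists, in which case the key is dropped rather than queued raw. A userspace sketch of that decision follows; the trivial below-0x100 mapping stands in for the real console-map lookup.

#include <stdio.h>

/* Illustrative stand-in for conv_uni_to_8bit(): -1 when the code point
 * has no representation in the current 8-bit console map. */
static int uni_to_8bit(unsigned int uni)
{
	return uni < 0x100 ? (int)uni : -1;
}

static void put_queue(unsigned char c)
{
	printf("queued 0x%02x\n", c);
}

static void emit(unsigned int value, int unicode_mode)
{
	if (unicode_mode) {
		printf("would emit UTF-8 for U+%04X\n", value);	/* to_utf8() in the driver */
	} else {
		int c = uni_to_8bit(value);
		if (c != -1)
			put_queue((unsigned char)c);
		/* dropped when unmappable, as in the patched driver */
	}
}

int main(void)
{
	emit(0xE9, 0);		/* e-acute: representable, queued */
	emit(0x20AC, 0);	/* euro sign: not representable in 8-bit mode, dropped */
	emit(0x20AC, 1);	/* unicode mode: sent as UTF-8 */
	return 0;
}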
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index c59e2a0996cc..81674d7c56c7 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -144,7 +144,7 @@ static unsigned int lp_count = 0;
144static struct class *lp_class; 144static struct class *lp_class;
145 145
146#ifdef CONFIG_LP_CONSOLE 146#ifdef CONFIG_LP_CONSOLE
147static struct parport *console_registered; // initially NULL 147static struct parport *console_registered;
148#endif /* CONFIG_LP_CONSOLE */ 148#endif /* CONFIG_LP_CONSOLE */
149 149
150#undef LP_DEBUG 150#undef LP_DEBUG
@@ -749,8 +749,8 @@ static struct console lpcons = {
749/* --- initialisation code ------------------------------------- */ 749/* --- initialisation code ------------------------------------- */
750 750
751static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC }; 751static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC };
752static char *parport[LP_NO] = { NULL, }; 752static char *parport[LP_NO];
753static int reset = 0; 753static int reset;
754 754
755module_param_array(parport, charp, NULL, 0); 755module_param_array(parport, charp, NULL, 0);
756module_param(reset, bool, 0); 756module_param(reset, bool, 0);
@@ -758,10 +758,10 @@ module_param(reset, bool, 0);
758#ifndef MODULE 758#ifndef MODULE
759static int __init lp_setup (char *str) 759static int __init lp_setup (char *str)
760{ 760{
761 static int parport_ptr; // initially zero 761 static int parport_ptr;
762 int x; 762 int x;
763 763
764 if (get_option (&str, &x)) { 764 if (get_option(&str, &x)) {
765 if (x == 0) { 765 if (x == 0) {
766 /* disable driver on "lp=" or "lp=0" */ 766 /* disable driver on "lp=" or "lp=0" */
767 parport_nr[0] = LP_PARPORT_OFF; 767 parport_nr[0] = LP_PARPORT_OFF;
@@ -807,7 +807,7 @@ static int lp_register(int nr, struct parport *port)
807#ifdef CONFIG_LP_CONSOLE 807#ifdef CONFIG_LP_CONSOLE
808 if (!nr) { 808 if (!nr) {
809 if (port->modes & PARPORT_MODE_SAFEININT) { 809 if (port->modes & PARPORT_MODE_SAFEININT) {
810 register_console (&lpcons); 810 register_console(&lpcons);
811 console_registered = port; 811 console_registered = port;
812 printk (KERN_INFO "lp%d: console ready\n", CONSOLE_LP); 812 printk (KERN_INFO "lp%d: console ready\n", CONSOLE_LP);
813 } else 813 } else
@@ -823,8 +823,7 @@ static void lp_attach (struct parport *port)
823{ 823{
824 unsigned int i; 824 unsigned int i;
825 825
826 switch (parport_nr[0]) 826 switch (parport_nr[0]) {
827 {
828 case LP_PARPORT_UNSPEC: 827 case LP_PARPORT_UNSPEC:
829 case LP_PARPORT_AUTO: 828 case LP_PARPORT_AUTO:
830 if (parport_nr[0] == LP_PARPORT_AUTO && 829 if (parport_nr[0] == LP_PARPORT_AUTO &&
@@ -855,7 +854,7 @@ static void lp_detach (struct parport *port)
855 /* Write this some day. */ 854 /* Write this some day. */
856#ifdef CONFIG_LP_CONSOLE 855#ifdef CONFIG_LP_CONSOLE
857 if (console_registered == port) { 856 if (console_registered == port) {
858 unregister_console (&lpcons); 857 unregister_console(&lpcons);
859 console_registered = NULL; 858 console_registered = NULL;
860 } 859 }
861#endif /* CONFIG_LP_CONSOLE */ 860#endif /* CONFIG_LP_CONSOLE */
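lp_setup() above parses the lp= boot string with get_option(), and the driver comment notes that "lp=" or "lp=0" disables the driver outright. A loose userspace analogue of just the numeric branch, with strtol() standing in for get_option(), invented sentinel values, and the keyword cases (port names, "auto") left out:

#include <stdio.h>
#include <stdlib.h>

#define LP_NO 3
/* sentinel values invented for the sketch */
#define LP_PARPORT_UNSPEC (-4)
#define LP_PARPORT_OFF    (-2)

static int parport_nr[LP_NO] = {
	LP_PARPORT_UNSPEC, LP_PARPORT_UNSPEC, LP_PARPORT_UNSPEC
};

/* strtol() stands in for the kernel's get_option(): returns 1 when an
 * integer was consumed and advances the cursor. */
static int get_int(char **str, int *val)
{
	char *end;
	long v = strtol(*str, &end, 0);

	if (end == *str)
		return 0;
	*val = (int)v;
	*str = end;
	return 1;
}

static void lp_setup(char *str)
{
	static int parport_ptr;
	int x;

	if (get_int(&str, &x)) {
		if (x == 0)
			parport_nr[0] = LP_PARPORT_OFF;	/* lp=0 disables the driver */
		else if (parport_ptr < LP_NO)
			parport_nr[parport_ptr++] = x;
	}
}

int main(void)
{
	char arg[] = "0";

	lp_setup(arg);
	printf("parport_nr[0] = %d\n", parport_nr[0]);
	return 0;
}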
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 7ee5d9444926..3c5802ae1716 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -22,7 +22,6 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/fs.h>
26#include <linux/uio.h> 25#include <linux/uio.h>
27#include <asm/io.h> 26#include <asm/io.h>
28#include <asm/uaccess.h> 27#include <asm/uaccess.h>
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index bbee97ff355f..0e937f64a789 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -625,65 +625,10 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
625 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null); 625 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
626} 626}
627 627
628#ifdef CONFIG_MMU
629/*
630 * For fun, we are using the MMU for this.
631 */
632static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
633{
634 struct mm_struct *mm;
635 struct vm_area_struct * vma;
636 unsigned long addr=(unsigned long)buf;
637
638 mm = current->mm;
639 /* Oops, this was forgotten before. -ben */
640 down_read(&mm->mmap_sem);
641
642 /* For private mappings, just map in zero pages. */
643 for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
644 unsigned long count;
645
646 if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
647 goto out_up;
648 if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
649 break;
650 count = vma->vm_end - addr;
651 if (count > size)
652 count = size;
653
654 zap_page_range(vma, addr, count, NULL);
655 if (zeromap_page_range(vma, addr, count, PAGE_COPY))
656 break;
657
658 size -= count;
659 buf += count;
660 addr += count;
661 if (size == 0)
662 goto out_up;
663 }
664
665 up_read(&mm->mmap_sem);
666
667 /* The shared case is hard. Let's do the conventional zeroing. */
668 do {
669 unsigned long unwritten = clear_user(buf, PAGE_SIZE);
670 if (unwritten)
671 return size + unwritten - PAGE_SIZE;
672 cond_resched();
673 buf += PAGE_SIZE;
674 size -= PAGE_SIZE;
675 } while (size);
676
677 return size;
678out_up:
679 up_read(&mm->mmap_sem);
680 return size;
681}
682
683static ssize_t read_zero(struct file * file, char __user * buf, 628static ssize_t read_zero(struct file * file, char __user * buf,
684 size_t count, loff_t *ppos) 629 size_t count, loff_t *ppos)
685{ 630{
686 unsigned long left, unwritten, written = 0; 631 size_t written;
687 632
688 if (!count) 633 if (!count)
689 return 0; 634 return 0;
@@ -691,69 +636,33 @@ static ssize_t read_zero(struct file * file, char __user * buf,
 	if (!access_ok(VERIFY_WRITE, buf, count))
 		return -EFAULT;
 
-	left = count;
-
-	/* do we want to be clever? Arbitrary cut-off */
-	if (count >= PAGE_SIZE*4) {
-		unsigned long partial;
+	written = 0;
+	while (count) {
+		unsigned long unwritten;
+		size_t chunk = count;
 
-		/* How much left of the page? */
-		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
-		unwritten = clear_user(buf, partial);
-		written = partial - unwritten;
-		if (unwritten)
-			goto out;
-		left -= partial;
-		buf += partial;
-		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
-		written += (left & PAGE_MASK) - unwritten;
+		if (chunk > PAGE_SIZE)
+			chunk = PAGE_SIZE;	/* Just for latency reasons */
+		unwritten = clear_user(buf, chunk);
+		written += chunk - unwritten;
 		if (unwritten)
-			goto out;
-		buf += left & PAGE_MASK;
-		left &= ~PAGE_MASK;
-	}
-	unwritten = clear_user(buf, left);
-	written += left - unwritten;
-out:
-	return written ? written : -EFAULT;
-}
-
-static int mmap_zero(struct file * file, struct vm_area_struct * vma)
-{
-	int err;
-
-	if (vma->vm_flags & VM_SHARED)
-		return shmem_zero_setup(vma);
-	err = zeromap_page_range(vma, vma->vm_start,
-			vma->vm_end - vma->vm_start, vma->vm_page_prot);
-	BUG_ON(err == -EEXIST);
-	return err;
-}
-#else /* CONFIG_MMU */
-static ssize_t read_zero(struct file * file, char * buf,
-			 size_t count, loff_t *ppos)
-{
-	size_t todo = count;
-
-	while (todo) {
-		size_t chunk = todo;
-
-		if (chunk > 4096)
-			chunk = 4096;	/* Just for latency reasons */
-		if (clear_user(buf, chunk))
-			return -EFAULT;
+			break;
 		buf += chunk;
-		todo -= chunk;
+		count -= chunk;
 		cond_resched();
 	}
-	return count;
+	return written ? written : -EFAULT;
 }
 
 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
 {
+#ifndef CONFIG_MMU
 	return -ENOSYS;
+#endif
+	if (vma->vm_flags & VM_SHARED)
+		return shmem_zero_setup(vma);
+	return 0;
 }
-#endif /* CONFIG_MMU */
 
 static ssize_t write_full(struct file * file, const char __user * buf,
 			  size_t count, loff_t *ppos)
@@ -984,6 +893,11 @@ static struct class *mem_class;
 static int __init chr_dev_init(void)
 {
 	int i;
+	int err;
+
+	err = bdi_init(&zero_bdi);
+	if (err)
+		return err;
 
 	if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
 		printk("unable to get major %d for memory devs\n", MEM_MAJOR);
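For readers following the mem.c change above: the rewrite drops the page-table trickery of read_zero_pagealigned() and simply clears the user buffer one page-sized chunk at a time. Below is a minimal userspace sketch of that loop shape, where fill_chunk() is a hypothetical stand-in for clear_user() and CHUNK for PAGE_SIZE; it is an illustration, not the kernel code itself.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define CHUNK 4096			/* stand-in for PAGE_SIZE */

/* Stand-in for clear_user(): returns the number of bytes it could NOT
 * zero (always 0 here, since plain memset cannot fault). */
static size_t fill_chunk(char *buf, size_t chunk)
{
	memset(buf, 0, chunk);
	return 0;
}

/* Shape of the rewritten read_zero() loop: clear at most one page per
 * iteration and report how much was actually written. */
static ssize_t read_zero_like(char *buf, size_t count)
{
	size_t written = 0;

	while (count) {
		size_t chunk = count;
		size_t unwritten;

		if (chunk > CHUNK)
			chunk = CHUNK;		/* just for latency reasons */
		unwritten = fill_chunk(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		buf += chunk;
		count -= chunk;
	}
	return written ? (ssize_t)written : -EFAULT;
}

int main(void)
{
	static char buf[3 * CHUNK + 123];

	memset(buf, 0xff, sizeof(buf));
	printf("filled %zd bytes\n", read_zero_like(buf, sizeof(buf)));
	return 0;
}

As in the kernel loop, a short write ends the loop and whatever was written so far is returned; only a completely failed write turns into -EFAULT.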
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 04ac155d3a07..82f2e27dca7d 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -362,7 +362,7 @@ mspec_init(void)
 		is_sn2 = 1;
 		if (is_shub2()) {
 			ret = -ENOMEM;
-			for_each_online_node(nid) {
+			for_each_node_state(nid, N_ONLINE) {
 				int actual_nid;
 				int nasid;
 				unsigned long phys;
diff --git a/drivers/char/mxser.h b/drivers/char/mxser.h
index 9fe28497eae9..1f4aa45ec004 100644
--- a/drivers/char/mxser.h
+++ b/drivers/char/mxser.h
@@ -30,8 +30,7 @@
 
 #define TTY_THRESHOLD_THROTTLE		128
 
-#define LO_WATER	(TTY_FLIPBUF_SIZE)
-#define HI_WATER	(TTY_FLIPBUF_SIZE*2*3/4)
+#define HI_WATER	768
 
 // added by James. 03-11-2004.
 #define MOXA_SDS_GETICOUNTER	(MOXA + 68)
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index e8332f305d72..82bcfb9c839a 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -229,7 +229,7 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
 	wake_up_interruptible (&tty->read_wait);
 	wake_up_interruptible (&tty->write_wait);
 
-	if (tty != NULL && tty->disc_data == n_hdlc)
+	if (tty->disc_data == n_hdlc)
 		tty->disc_data = NULL;	/* Break the tty->n_hdlc link */
 
 	/* Release transmit and receive buffers */
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 84ac64fc48a1..efe2f5c55b91 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -66,7 +66,6 @@
 #include <linux/poll.h>
 #include <linux/major.h>
 #include <linux/ppdev.h>
-#include <linux/device.h>
 #include <asm/uaccess.h>
 
 #define PP_VERSION "ppdev: user-space parallel port driver"
diff --git a/drivers/char/rio/host.h b/drivers/char/rio/host.h
index 23d0681fe491..78f24540c224 100644
--- a/drivers/char/rio/host.h
+++ b/drivers/char/rio/host.h
@@ -99,7 +99,7 @@ struct Host {
 	struct UnixRup UnixRups[MAX_RUP + LINKS_PER_UNIT];
 	int timeout_id;		/* For calling 100 ms delays */
 	int timeout_sem;	/* For calling 100 ms delays */
-	long locks;		/* long req'd for set_bit --RR */
+	unsigned long locks;	/* long req'd for set_bit --RR */
 	char ____end_marker____;
 };
 #define Control CardP->DpControl
diff --git a/drivers/char/riscom8.h b/drivers/char/riscom8.h
index 6317aade201a..9cc1313d5e67 100644
--- a/drivers/char/riscom8.h
+++ b/drivers/char/riscom8.h
@@ -71,7 +71,7 @@ struct riscom_port {
 	struct tty_struct * tty;
 	int count;
 	int blocked_open;
-	long event;		/* long req'd for set_bit --RR */
+	unsigned long event;	/* long req'd for set_bit --RR */
 	int timeout;
 	int close_delay;
 	unsigned char * xmit_buf;
diff --git a/drivers/char/sx.h b/drivers/char/sx.h
index 432aad0a2ddd..70d9783c7323 100644
--- a/drivers/char/sx.h
+++ b/drivers/char/sx.h
@@ -27,7 +27,7 @@ struct sx_port {
 	int c_dcd;
 	struct sx_board *board;
 	int line;
-	long locks;
+	unsigned long locks;
 };
 
 struct sx_board {
@@ -45,7 +45,7 @@ struct sx_board {
 	int poll;
 	int ta_type;
 	struct timer_list timer;
-	long locks;
+	unsigned long locks;
 };
 
 struct vpd_prom {
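The host.h, riscom8.h and sx.h hunks above all make the same change: flag words used with the kernel's bit operations become unsigned long, because set_bit() and friends are defined over words of that type. A small userspace sketch with hand-rolled stand-ins (set_bit_ul()/test_bit_ul() are not kernel APIs):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Hand-rolled stand-ins for the kernel's set_bit()/test_bit(); the real
 * ones likewise operate on arrays of unsigned long. */
static void set_bit_ul(unsigned int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit_ul(unsigned int nr, const unsigned long *addr)
{
	return !!(addr[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG)));
}

int main(void)
{
	unsigned long locks = 0;	/* matches the new field type */

	set_bit_ul(3, &locks);
	printf("bit 3 set: %d\n", test_bit_ul(3, &locks));
	return 0;
}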
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index fdc256b380b8..905d1f51a7bf 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -81,13 +81,9 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-
 #include <linux/netdevice.h>
-
 #include <linux/vmalloc.h>
 #include <linux/init.h>
-
-#include <linux/delay.h>
 #include <linux/ioctl.h>
 
 #include <asm/system.h>
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 2f97d2f8f916..64e835f62438 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -206,10 +206,10 @@ static void flush_cond_wait(struct cond_wait **head);
  */
 struct slgt_desc
 {
-	unsigned short count;
-	unsigned short status;
-	unsigned int pbuf;  /* physical address of data buffer */
-	unsigned int next;  /* physical address of next descriptor */
+	__le16 count;
+	__le16 status;
+	__le32 pbuf;  /* physical address of data buffer */
+	__le32 next;  /* physical address of next descriptor */
 
 	/* driver book keeping */
 	char *buf;          /* virtual address of data buffer */
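The synclink_gt.c hunk marks the DMA descriptor fields as little-endian (__le16/__le32), documenting that the CPU must byte-swap them on big-endian machines. A userspace sketch of the same idea, using htole16()/le16toh() from <endian.h> as assumed stand-ins for the kernel's cpu_to_le16()/le16_to_cpu():

#include <endian.h>	/* htole16()/le16toh() etc., a glibc/BSD extension */
#include <stdint.h>
#include <stdio.h>

/* Userspace analogue of a descriptor whose fields the device reads in
 * little-endian byte order, as the __le16/__le32 annotations document. */
struct fake_desc {
	uint16_t count;		/* stored little-endian, like __le16 */
	uint32_t pbuf;		/* stored little-endian, like __le32 */
};

int main(void)
{
	struct fake_desc d;

	d.count = htole16(512);			/* CPU -> device byte order */
	d.pbuf = htole32(0x12345678u);

	printf("count seen by CPU: %u\n", (unsigned)le16toh(d.count));
	printf("pbuf seen by CPU:  0x%x\n", (unsigned)le32toh(d.pbuf));
	return 0;
}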
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 39cc318011ea..78d14935f2b8 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -36,6 +36,7 @@
 #include <linux/kexec.h>
 #include <linux/irq.h>
 #include <linux/hrtimer.h>
+#include <linux/oom.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq_regs.h>
@@ -107,12 +108,12 @@ static void sysrq_handle_unraw(int key, struct tty_struct *tty)
 	struct kbd_struct *kbd = &kbd_table[fg_console];
 
 	if (kbd)
-		kbd->kbdmode = VC_XLATE;
+		kbd->kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE;
 }
 static struct sysrq_key_op sysrq_unraw_op = {
 	.handler	= sysrq_handle_unraw,
 	.help_msg	= "unRaw",
-	.action_msg	= "Keyboard mode set to XLATE",
+	.action_msg	= "Keyboard mode set to system default",
 	.enable_mask	= SYSRQ_ENABLE_KEYBOARD,
 };
 #else
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 23fa18a6654c..a8e808461377 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -435,17 +435,12 @@ module_param(interrupts, bool, 0444);
 MODULE_PARM_DESC(interrupts, "Enable interrupts");
 
 static int tpm_tis_init(struct device *dev, resource_size_t start,
-			resource_size_t len)
+			resource_size_t len, unsigned int irq)
 {
 	u32 vendor, intfcaps, intmask;
 	int rc, i;
 	struct tpm_chip *chip;
 
-	if (!start)
-		start = TIS_MEM_BASE;
-	if (!len)
-		len = TIS_MEM_LEN;
-
 	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
 		return -ENODEV;
 
@@ -512,7 +507,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
 	iowrite32(intmask,
 		  chip->vendor.iobase +
 		  TPM_INT_ENABLE(chip->vendor.locality));
-	if (interrupts) {
+	if (interrupts)
+		chip->vendor.irq = irq;
+	if (interrupts && !chip->vendor.irq) {
 		chip->vendor.irq =
 		    ioread8(chip->vendor.iobase +
 			    TPM_INT_VECTOR(chip->vendor.locality));
@@ -597,10 +594,17 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
 				      const struct pnp_device_id *pnp_id)
 {
 	resource_size_t start, len;
+	unsigned int irq = 0;
+
 	start = pnp_mem_start(pnp_dev, 0);
 	len = pnp_mem_len(pnp_dev, 0);
 
-	return tpm_tis_init(&pnp_dev->dev, start, len);
+	if (pnp_irq_valid(pnp_dev, 0))
+		irq = pnp_irq(pnp_dev, 0);
+	else
+		interrupts = 0;
+
+	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
 }
 
 static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
@@ -660,7 +664,7 @@ static int __init init_tis(void)
 		return rc;
 	if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
 		return PTR_ERR(pdev);
-	if((rc=tpm_tis_init(&pdev->dev, 0, 0)) != 0) {
+	if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
 		platform_device_unregister(pdev);
 		driver_unregister(&tis_drv);
 	}
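The tpm_tis.c hunks change the IRQ policy: the PNP probe now passes any firmware-described IRQ into tpm_tis_init(), the chip's interrupt vector register is consulted only when no IRQ was handed in, and a PNP probe without a valid IRQ turns interrupts off. A sketch of just that decision, with read_int_vector() as a hypothetical stand-in for the TPM_INT_VECTOR read:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for reading TPM_INT_VECTOR for the active locality. */
static unsigned int read_int_vector(void)
{
	return 9;	/* pretend the chip reports IRQ 9 */
}

/* Decision order after the change: use the firmware-provided IRQ when the
 * "interrupts" option is on, and only probe the chip when none was given. */
static unsigned int pick_irq(bool interrupts, unsigned int fw_irq)
{
	unsigned int irq = 0;

	if (interrupts)
		irq = fw_irq;
	if (interrupts && !irq)
		irq = read_int_vector();
	return irq;
}

int main(void)
{
	printf("PNP reports IRQ 11 -> %u\n", pick_irq(true, 11));
	printf("PNP reports none   -> %u\n", pick_irq(true, 0));
	printf("interrupts off     -> %u\n", pick_irq(false, 11));
	return 0;
}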
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index 3ee73cf64bd2..745d552620bf 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -206,8 +206,6 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
 
 EXPORT_SYMBOL(tty_termios_input_baud_rate);
 
-#ifdef BOTHER
-
 /**
  *	tty_termios_encode_baud_rate
  *	@termios: ktermios structure holding user requested state
@@ -225,6 +223,9 @@ EXPORT_SYMBOL(tty_termios_input_baud_rate);
  *
  *	Locking: Caller should hold termios lock. This is already held
  *	when calling this function from the driver termios handler.
+ *
+ *	The ifdefs deal with platforms whose owners have yet to update them
+ *	and will all go away once this is done.
  */
 
 void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud, speed_t obaud)
@@ -234,9 +235,13 @@ void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud, speed
 	int iclose = ibaud/50, oclose = obaud/50;
 	int ibinput = 0;
 
+	if (obaud == 0)			/* CD dropped */
+		ibaud = 0;		/* Clear ibaud to be sure */
+
 	termios->c_ispeed = ibaud;
 	termios->c_ospeed = obaud;
 
+#ifdef BOTHER
 	/* If the user asked for a precise weird speed give a precise weird
 	   answer. If they asked for a Bfoo speed they many have problems
 	   digesting non-exact replies so fuzz a bit */
@@ -247,32 +252,60 @@ void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud, speed
 		iclose = 0;
 	if ((termios->c_cflag >> IBSHIFT) & CBAUD)
 		ibinput = 1;	/* An input speed was specified */
-
+#endif
 	termios->c_cflag &= ~CBAUD;
 
+	/*
+	 *	Our goal is to find a close match to the standard baud rate
+	 *	returned. Walk the baud rate table and if we get a very close
+	 *	match then report back the speed as a POSIX Bxxxx value by
+	 *	preference
+	 */
+
 	do {
 		if (obaud - oclose >= baud_table[i] && obaud + oclose <= baud_table[i]) {
 			termios->c_cflag |= baud_bits[i];
 			ofound = i;
 		}
 		if (ibaud - iclose >= baud_table[i] && ibaud + iclose <= baud_table[i]) {
-			/* For the case input == output don't set IBAUD bits if the user didn't do so */
-			if (ofound != i || ibinput)
+			if (ofound == i && !ibinput)
+				ifound = i;
+#ifdef IBSHIFT
+			else {
+				ifound = i;
 				termios->c_cflag |= (baud_bits[i] << IBSHIFT);
-			ifound = i;
+			}
+#endif
 		}
 	} while (++i < n_baud_table);
+
+	/*
+	 *	If we found no match then use BOTHER if provided or warn
+	 *	the user their platform maintainer needs to wake up if not.
+	 */
+#ifdef BOTHER
 	if (ofound == -1)
 		termios->c_cflag |= BOTHER;
 	/* Set exact input bits only if the input and output differ or the
 	   user already did */
 	if (ifound == -1 && (ibaud != obaud || ibinput))
 		termios->c_cflag |= (BOTHER << IBSHIFT);
+#else
+	if (ifound == -1 || ofound == -1) {
+		static int warned;
+		if (!warned++)
+			printk(KERN_WARNING "tty: Unable to return correct "
+			  "speed data as your architecture needs updating.\n");
+	}
+#endif
 }
-
 EXPORT_SYMBOL_GPL(tty_termios_encode_baud_rate);
 
-#endif
+void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, speed_t obaud)
+{
+	tty_termios_encode_baud_rate(tty->termios, ibaud, obaud);
+}
+EXPORT_SYMBOL_GPL(tty_encode_baud_rate);
 
 /**
  *	tty_get_baud_rate	-	get tty bit rates
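The loop added in the hunk above walks baud_table[] and prefers reporting a standard POSIX Bxxxx rate whenever the requested rate is within roughly 2% (rate/50) of one, falling back to BOTHER otherwise. A standalone sketch of that matching idea, using a small assumed table and with the window test written in the "rate lies within close of the entry" direction:

#include <stdio.h>

/* A small assumed subset of rates; the kernel walks its own baud_table[]
 * and baud_bits[] instead. */
static const int rates[] = { 9600, 19200, 38400, 57600, 115200 };
static const char *names[] = { "B9600", "B19200", "B38400", "B57600", "B115200" };

static const char *closest_baud(int rate)
{
	int close = rate / 50;		/* +/- 2% fuzz, as in the kernel loop */
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		if (rate - close <= rates[i] && rate + close >= rates[i])
			return names[i];
	return "BOTHER";		/* no standard rate is close enough */
}

int main(void)
{
	printf("115000 -> %s\n", closest_baud(115000));	/* close to B115200 */
	printf("123456 -> %s\n", closest_baud(123456));	/* nothing close */
	return 0;
}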
@@ -304,6 +337,29 @@ speed_t tty_get_baud_rate(struct tty_struct *tty)
 EXPORT_SYMBOL(tty_get_baud_rate);
 
 /**
+ *	tty_termios_copy_hw	-	copy hardware settings
+ *	@new: New termios
+ *	@old: Old termios
+ *
+ *	Propogate the hardware specific terminal setting bits from
+ *	the old termios structure to the new one. This is used in cases
+ *	where the hardware does not support reconfiguration or as a helper
+ *	in some cases where only minimal reconfiguration is supported
+ */
+
+void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old)
+{
+	/* The bits a dumb device handles in software. Smart devices need
+	   to always provide a set_termios method */
+	new->c_cflag &= HUPCL | CREAD | CLOCAL;
+	new->c_cflag |= old->c_cflag & ~(HUPCL | CREAD | CLOCAL);
+	new->c_ispeed = old->c_ispeed;
+	new->c_ospeed = old->c_ospeed;
+}
+
+EXPORT_SYMBOL(tty_termios_copy_hw);
+
+/**
  *	change_termios	-	update termios values
  *	@tty: tty to update
  *	@new_termios: desired new value
@@ -340,13 +396,12 @@ static void change_termios(struct tty_struct * tty, struct ktermios * new_termio
 		tty->erasing = 0;
 	}
 
-
+	/* This bit should be in the ldisc code */
 	if (canon_change && !L_ICANON(tty) && tty->read_cnt)
 		/* Get characters left over from canonical mode. */
 		wake_up_interruptible(&tty->read_wait);
 
 	/* See if packet mode change of state. */
-
 	if (tty->link && tty->link->packet) {
 		int old_flow = ((old_termios.c_iflag & IXON) &&
 			(old_termios.c_cc[VSTOP] == '\023') &&
@@ -366,6 +421,8 @@ static void change_termios(struct tty_struct * tty, struct ktermios * new_termio
 
 	if (tty->driver->set_termios)
 		(*tty->driver->set_termios)(tty, &old_termios);
+	else
+		tty_termios_copy_hw(tty->termios, &old_termios);
 
 	ld = tty_ldisc_ref(tty);
 	if (ld != NULL) {
@@ -440,6 +497,11 @@ static int set_termios(struct tty_struct * tty, void __user *arg, int opt)
 	}
 
 	change_termios(tty, &tmp_termios);
+
+	/* FIXME: Arguably if tmp_termios == tty->termios AND the
+	   actual requested termios was not tmp_termios then we may
+	   want to return an error as no user requested change has
+	   succeeded */
 	return 0;
 }
 
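tty_termios_copy_hw(), added in the hunks above, lets drivers without a set_termios method keep their hardware settings: the caller's termios keeps only HUPCL, CREAD and CLOCAL, while every other c_cflag bit plus the speeds come back from the old termios. A userspace sketch of the same masking using <termios.h> (an analogue, not the kernel helper itself):

#include <stdio.h>
#include <termios.h>

/* Userspace analogue of tty_termios_copy_hw(): keep only the caller's
 * HUPCL/CREAD/CLOCAL bits and take every other hardware setting, including
 * the speeds, from the old termios. */
static void termios_copy_hw(struct termios *new, const struct termios *old)
{
	new->c_cflag &= HUPCL | CREAD | CLOCAL;
	new->c_cflag |= old->c_cflag & ~(HUPCL | CREAD | CLOCAL);
	cfsetispeed(new, cfgetispeed(old));
	cfsetospeed(new, cfgetospeed(old));
}

int main(void)
{
	struct termios old = { 0 }, req = { 0 };

	old.c_cflag = CS8 | CREAD;	/* what the "hardware" is running */
	cfsetospeed(&old, B115200);
	req.c_cflag = CLOCAL | CS7;	/* caller asked for 7 data bits... */

	termios_copy_hw(&req, &old);
	/* ...but a dumb device keeps CS8 and the old speed; CLOCAL survives. */
	printf("CS8 kept: %d, CLOCAL kept: %d, ospeed is B115200: %d\n",
	       (req.c_cflag & CSIZE) == CS8, !!(req.c_cflag & CLOCAL),
	       cfgetospeed(&req) == B115200);
	return 0;
}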
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index edb7002a3216..645ad9808982 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -158,7 +158,11 @@ static void blank_screen_t(unsigned long dummy);
 static void set_palette(struct vc_data *vc);
 
 static int printable;		/* Is console ready for printing? */
-static int default_utf8;
+#ifdef CONFIG_VT_UNICODE
+int default_utf8 = 1;
+#else
+int default_utf8;
+#endif
 module_param(default_utf8, int, S_IRUGO | S_IWUSR);
 
 /*
@@ -750,13 +754,15 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
 	return 0;
 }
 
-static inline int resize_screen(struct vc_data *vc, int width, int height)
+static inline int resize_screen(struct vc_data *vc, int width, int height,
+				int user)
 {
 	/* Resizes the resolution of the display adapater */
 	int err = 0;
 
 	if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize)
-		err = vc->vc_sw->con_resize(vc, width, height);
+		err = vc->vc_sw->con_resize(vc, width, height, user);
+
 	return err;
 }
 
@@ -772,7 +778,7 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
 	unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0;
 	unsigned int old_cols, old_rows, old_row_size, old_screen_size;
 	unsigned int new_cols, new_rows, new_row_size, new_screen_size;
-	unsigned int end;
+	unsigned int end, user;
 	unsigned short *newscreen;
 
 	WARN_CONSOLE_UNLOCKED();
@@ -780,6 +786,9 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
 	if (!vc)
 		return -ENXIO;
 
+	user = vc->vc_resize_user;
+	vc->vc_resize_user = 0;
+
 	if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW)
 		return -EINVAL;
 
@@ -800,7 +809,7 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
 	old_row_size = vc->vc_size_row;
 	old_screen_size = vc->vc_screenbuf_size;
 
-	err = resize_screen(vc, new_cols, new_rows);
+	err = resize_screen(vc, new_cols, new_rows, user);
 	if (err) {
 		kfree(newscreen);
 		return err;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 7a61a2a9aafe..e6f89e8b9258 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -23,6 +23,7 @@
 #include <linux/major.h>
 #include <linux/fs.h>
 #include <linux/console.h>
+#include <linux/consolemap.h>
 #include <linux/signal.h>
 #include <linux/timex.h>
 
@@ -582,10 +583,27 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 	case KDGKBDIACR:
 	{
 		struct kbdiacrs __user *a = up;
+		struct kbdiacr diacr;
+		int i;
 
 		if (put_user(accent_table_size, &a->kb_cnt))
 			return -EFAULT;
-		if (copy_to_user(a->kbdiacr, accent_table, accent_table_size*sizeof(struct kbdiacr)))
+		for (i = 0; i < accent_table_size; i++) {
+			diacr.diacr = conv_uni_to_8bit(accent_table[i].diacr);
+			diacr.base = conv_uni_to_8bit(accent_table[i].base);
+			diacr.result = conv_uni_to_8bit(accent_table[i].result);
+			if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr)))
+				return -EFAULT;
+		}
+		return 0;
+	}
+	case KDGKBDIACRUC:
+	{
+		struct kbdiacrsuc __user *a = up;
+
+		if (put_user(accent_table_size, &a->kb_cnt))
+			return -EFAULT;
+		if (copy_to_user(a->kbdiacruc, accent_table, accent_table_size*sizeof(struct kbdiacruc)))
 			return -EFAULT;
 		return 0;
 	}
@@ -593,6 +611,30 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 	case KDSKBDIACR:
 	{
 		struct kbdiacrs __user *a = up;
+		struct kbdiacr diacr;
+		unsigned int ct;
+		int i;
+
+		if (!perm)
+			return -EPERM;
+		if (get_user(ct,&a->kb_cnt))
+			return -EFAULT;
+		if (ct >= MAX_DIACR)
+			return -EINVAL;
+		accent_table_size = ct;
+		for (i = 0; i < ct; i++) {
+			if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr)))
+				return -EFAULT;
+			accent_table[i].diacr = conv_8bit_to_uni(diacr.diacr);
+			accent_table[i].base = conv_8bit_to_uni(diacr.base);
+			accent_table[i].result = conv_8bit_to_uni(diacr.result);
+		}
+		return 0;
+	}
+
+	case KDSKBDIACRUC:
+	{
+		struct kbdiacrsuc __user *a = up;
 		unsigned int ct;
 
 		if (!perm)
@@ -602,7 +644,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 		if (ct >= MAX_DIACR)
 			return -EINVAL;
 		accent_table_size = ct;
-		if (copy_from_user(accent_table, a->kbdiacr, ct*sizeof(struct kbdiacr)))
+		if (copy_from_user(accent_table, a->kbdiacruc, ct*sizeof(struct kbdiacruc)))
 			return -EFAULT;
 		return 0;
 	}
@@ -847,14 +889,24 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 	case VT_RESIZE:
 	{
 		struct vt_sizes __user *vtsizes = up;
+		struct vc_data *vc;
+
 		ushort ll,cc;
 		if (!perm)
 			return -EPERM;
 		if (get_user(ll, &vtsizes->v_rows) ||
 		    get_user(cc, &vtsizes->v_cols))
 			return -EFAULT;
-		for (i = 0; i < MAX_NR_CONSOLES; i++)
-			vc_lock_resize(vc_cons[i].d, cc, ll);
+
+		for (i = 0; i < MAX_NR_CONSOLES; i++) {
+			vc = vc_cons[i].d;
+
+			if (vc) {
+				vc->vc_resize_user = 1;
+				vc_lock_resize(vc_cons[i].d, cc, ll);
+			}
+		}
+
 		return 0;
 	}
 
@@ -900,6 +952,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 			vc_cons[i].d->vc_scan_lines = vlin;
 		if (clin)
 			vc_cons[i].d->vc_font.height = clin;
+		vc_cons[i].d->vc_resize_user = 1;
 		vc_resize(vc_cons[i].d, cc, ll);
 		release_console_sem();
 	}
@@ -1072,7 +1125,7 @@ int vt_waitactive(int vt)
 void reset_vc(struct vc_data *vc)
 {
 	vc->vc_mode = KD_TEXT;
-	kbd_table[vc->vc_num].kbdmode = VC_XLATE;
+	kbd_table[vc->vc_num].kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE;
 	vc->vt_mode.mode = VT_AUTO;
 	vc->vt_mode.waitv = 0;
 	vc->vt_mode.relsig = 0;
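The KDGKBDIACR/KDSKBDIACR changes above copy the (now Unicode) accent table through a per-entry bounce struct, converting each field between 8-bit and Unicode values, while the new *UC ioctls copy the table verbatim. A userspace sketch of the copy-out direction, where conv_uni_to_8bit() is a hypothetical Latin-1-only stand-in for the kernel helper:

#include <stdio.h>

struct diacr_uni  { unsigned int diacr, base, result; };	/* kernel-side table */
struct diacr_8bit { unsigned char diacr, base, result; };	/* legacy ioctl view */

/* Hypothetical stand-in: only Latin-1 code points survive the conversion. */
static unsigned char conv_uni_to_8bit(unsigned int uc)
{
	return uc < 0x100 ? (unsigned char)uc : '?';
}

int main(void)
{
	struct diacr_uni table[] = { { 0x00b4, 'e', 0x00e9 } };	/* acute + e */
	struct diacr_8bit out[1];
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		out[i].diacr  = conv_uni_to_8bit(table[i].diacr);
		out[i].base   = conv_uni_to_8bit(table[i].base);
		out[i].result = conv_uni_to_8bit(table[i].result);
	}
	printf("diacr 0x%02x, base '%c', result 0x%02x\n",
	       (unsigned)out[0].diacr, out[0].base, (unsigned)out[0].result);
	return 0;
}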
diff --git a/drivers/char/watchdog/mpc5200_wdt.c b/drivers/char/watchdog/mpc5200_wdt.c
index 564143d40610..9cfb97576623 100644
--- a/drivers/char/watchdog/mpc5200_wdt.c
+++ b/drivers/char/watchdog/mpc5200_wdt.c
@@ -81,7 +81,7 @@ static int mpc5200_wdt_stop(struct mpc5200_wdt *wdt)
 
 
 /* file operations */
-static ssize_t mpc5200_wdt_write(struct file *file, const char *data,
+static ssize_t mpc5200_wdt_write(struct file *file, const char __user *data,
 				 size_t len, loff_t *ppos)
 {
 	struct mpc5200_wdt *wdt = file->private_data;