path: root/drivers
author     Linus Torvalds <torvalds@linux-foundation.org>  2009-06-20 13:15:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-20 13:15:30 -0400
commit     43813f399c72aa22e01a680559c1cb5274bf2140 (patch)
tree       933c0e7c445b9c3478b5a0db06a162d0d39f00f2 /drivers
parent     a552f0af753eb4b5bbbe9eff205fe874b04c4583 (diff)
parent     0b7af262aba912f52bc6ef76f1bc0960b01b8502 (diff)
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (24 commits)
  agp/intel: Make intel_i965_mask_memory use dma_addr_t for physical addresses
  agp: add user mapping support to ATI AGP bridge.
  drm/i915: enable GEM on PAE.
  drm/radeon: fix unused variables warning
  agp: switch AGP to use page array instead of unsigned long array
  agpgart: detected ALi M???? chipset with M1621
  drm/radeon: command stream checker for r3xx-r5xx hardware
  drm/radeon: Fully initialize LVDS info also when we can't get it from the ROM.
  radeon: Fix CP byte order on big endian architectures with KMS.
  agp/uninorth: Handle user memory types.
  drm/ttm: Add some powerpc cache flush code.
  radeon: Enable modesetting on non-x86.
  drm/radeon: Respect AGP cant_use_aperture flag.
  drm: EDID endianness fixes.
  drm/radeon: this VRAM vs aperture test is wrong, just remove it.
  drm/ttm: fix an error path to exit function correctly
  drm: Apply "Memory fragmentation from lost alignment blocks"
  ttm: Return -ERESTART when a signal interrupts bo eviction.
  drm: Remove memory debugging infrastructure.
  drm/i915: Clear fence register on tiling stride change.
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/agp/agp.h | 12
-rw-r--r--  drivers/char/agp/ali-agp.c | 28
-rw-r--r--  drivers/char/agp/amd-k7-agp.c | 2
-rw-r--r--  drivers/char/agp/amd64-agp.c | 2
-rw-r--r--  drivers/char/agp/ati-agp.c | 23
-rw-r--r--  drivers/char/agp/backend.c | 8
-rw-r--r--  drivers/char/agp/efficeon-agp.c | 5
-rw-r--r--  drivers/char/agp/generic.c | 69
-rw-r--r--  drivers/char/agp/hp-agp.c | 9
-rw-r--r--  drivers/char/agp/i460-agp.c | 47
-rw-r--r--  drivers/char/agp/intel-agp.c | 49
-rw-r--r--  drivers/char/agp/nvidia-agp.c | 2
-rw-r--r--  drivers/char/agp/parisc-agp.c | 20
-rw-r--r--  drivers/char/agp/sgi-agp.c | 9
-rw-r--r--  drivers/char/agp/sworks-agp.c | 2
-rw-r--r--  drivers/char/agp/uninorth-agp.c | 30
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 14
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 4
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 140
-rw-r--r--  drivers/gpu/drm/drm_context.c | 4
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 9
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 31
-rw-r--r--  drivers/gpu/drm/drm_drawable.c | 25
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 18
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 100
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 8
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 8
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 6
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 14
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 44
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 33
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 48
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 53
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 8
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 33
-rw-r--r--  drivers/gpu/drm/drm_sman.c | 29
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 19
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 12
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 6
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 86
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 67
-rw-r--r--  drivers/gpu/drm/i915/i915_mem.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 11
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 14
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 12
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 84
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 85
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 478
-rw-r--r--  drivers/gpu/drm/radeon/r300.h | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mem.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 58
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 21
-rw-r--r--  drivers/gpu/drm/savage/savage_state.c | 17
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 11
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 8
78 files changed, 1332 insertions(+), 883 deletions(-)
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 46f507531177..178e2e9e9f09 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -107,7 +107,7 @@ struct agp_bridge_driver {
 	void (*agp_enable)(struct agp_bridge_data *, u32);
 	void (*cleanup)(void);
 	void (*tlb_flush)(struct agp_memory *);
-	unsigned long (*mask_memory)(struct agp_bridge_data *, unsigned long, int);
+	unsigned long (*mask_memory)(struct agp_bridge_data *, struct page *, int);
 	void (*cache_flush)(void);
 	int (*create_gatt_table)(struct agp_bridge_data *);
 	int (*free_gatt_table)(struct agp_bridge_data *);
@@ -115,9 +115,9 @@ struct agp_bridge_driver {
 	int (*remove_memory)(struct agp_memory *, off_t, int);
 	struct agp_memory *(*alloc_by_type) (size_t, int);
 	void (*free_by_type)(struct agp_memory *);
-	void *(*agp_alloc_page)(struct agp_bridge_data *);
+	struct page *(*agp_alloc_page)(struct agp_bridge_data *);
 	int (*agp_alloc_pages)(struct agp_bridge_data *, struct agp_memory *, size_t);
-	void (*agp_destroy_page)(void *, int flags);
+	void (*agp_destroy_page)(struct page *, int flags);
 	void (*agp_destroy_pages)(struct agp_memory *);
 	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
 	void (*chipset_flush)(struct agp_bridge_data *);
@@ -278,10 +278,10 @@ int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
 int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
 void agp_generic_free_by_type(struct agp_memory *curr);
-void *agp_generic_alloc_page(struct agp_bridge_data *bridge);
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge);
 int agp_generic_alloc_pages(struct agp_bridge_data *agp_bridge,
 			    struct agp_memory *memory, size_t page_count);
-void agp_generic_destroy_page(void *addr, int flags);
+void agp_generic_destroy_page(struct page *page, int flags);
 void agp_generic_destroy_pages(struct agp_memory *memory);
 void agp_free_key(int key);
 int agp_num_entries(void);
@@ -291,7 +291,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge);
 void global_cache_flush(void);
 void get_agp_version(struct agp_bridge_data *bridge);
 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
-				      unsigned long addr, int type);
+				      struct page *page, int type);
 int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
 				  int type);
 struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
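
The mask_memory() signature change above is the core of this series: every
implementation now takes a struct page * and derives the bus address itself.
A minimal sketch of the pattern the drivers below all adopt (my_mask_memory
is a hypothetical callback name, not part of this patch; the real
implementations are hp_zx1_mask_memory, sgi_tioca_mask_memory,
intel_i810_mask_memory, and friends):

    /* Sketch only: the shape each driver's mask_memory now takes. */
    static unsigned long my_mask_memory(struct agp_bridge_data *bridge,
                                        struct page *page, int type)
    {
            /* struct page -> physical address -> GART bus address */
            unsigned long addr = phys_to_gart(page_to_phys(page));

            /* OR in whatever valid/type bits the chipset needs */
            return addr | bridge->driver->masks[0].mask;
    }
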
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index dc8d1a90971f..201ef3ffd484 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -141,37 +141,37 @@ static void m1541_cache_flush(void)
 	}
 }
 
-static void *m1541_alloc_page(struct agp_bridge_data *bridge)
+static struct page *m1541_alloc_page(struct agp_bridge_data *bridge)
 {
-	void *addr = agp_generic_alloc_page(agp_bridge);
+	struct page *page = agp_generic_alloc_page(agp_bridge);
 	u32 temp;
 
-	if (!addr)
+	if (!page)
 		return NULL;
 
 	pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
 	pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
 			(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
-			  virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN ));
-	return addr;
+			  phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN ));
+	return page;
 }
 
-static void ali_destroy_page(void * addr, int flags)
+static void ali_destroy_page(struct page *page, int flags)
 {
-	if (addr) {
+	if (page) {
 		if (flags & AGP_PAGE_DESTROY_UNMAP) {
 			global_cache_flush();	/* is this really needed?  --hch */
-			agp_generic_destroy_page(addr, flags);
+			agp_generic_destroy_page(page, flags);
 		} else
-			agp_generic_destroy_page(addr, flags);
+			agp_generic_destroy_page(page, flags);
 	}
 }
 
-static void m1541_destroy_page(void * addr, int flags)
+static void m1541_destroy_page(struct page *page, int flags)
 {
 	u32 temp;
 
-	if (addr == NULL)
+	if (page == NULL)
 		return;
 
 	if (flags & AGP_PAGE_DESTROY_UNMAP) {
@@ -180,9 +180,9 @@ static void m1541_destroy_page(void * addr, int flags)
 		pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
 		pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
 				(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
-				  virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN));
+				  phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN));
 	}
-	agp_generic_destroy_page(addr, flags);
+	agp_generic_destroy_page(page, flags);
 }
 
 
@@ -346,7 +346,7 @@ found:
 		devs[j].chipset_name = "M1641";
 		break;
 	case 0x43:
-		devs[j].chipset_name = "M????";
+		devs[j].chipset_name = "M1621";
 		break;
 	case 0x47:
 		devs[j].chipset_name = "M1647";
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 3f98254b911f..ba9bde71eaaf 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -325,7 +325,7 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = GET_GATT(addr);
 		writel(agp_generic_mask_memory(agp_bridge,
-			mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+			mem->pages[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
 		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
 	}
 	amd_irongate_tlbflush(mem);
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index d765afda9c2a..3bf5dda90f4a 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -79,7 +79,7 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		tmp = agp_bridge->driver->mask_memory(agp_bridge,
-						      mem->memory[i], mask_type);
+						      mem->pages[i], mask_type);
 
 		BUG_ON(tmp & 0xffffff0000000ffcULL);
 		pte = (tmp & 0x000000ff00000000ULL) >> 28;
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index f1537eece07f..33656e144cc5 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -269,12 +269,17 @@ static int ati_insert_memory(struct agp_memory * mem,
 	int i, j, num_entries;
 	unsigned long __iomem *cur_gatt;
 	unsigned long addr;
+	int mask_type;
 
 	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
 
-	if (type != 0 || mem->type != 0)
+	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+	if (mask_type != 0 || type != mem->type)
 		return -EINVAL;
 
+	if (mem->page_count == 0)
+		return 0;
+
 	if ((pg_start + mem->page_count) > num_entries)
 		return -EINVAL;
 
@@ -296,10 +301,11 @@ static int ati_insert_memory(struct agp_memory * mem,
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = GET_GATT(addr);
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
-		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
+			mem->pages[i], mem->type),
+			cur_gatt+GET_GATT_OFF(addr));
 	}
+	readl(GET_GATT(agp_bridge->gart_bus_addr));	/* PCI posting */
 	agp_bridge->driver->tlb_flush(mem);
 	return 0;
 }
@@ -310,17 +316,22 @@ static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
 	int i;
 	unsigned long __iomem *cur_gatt;
 	unsigned long addr;
+	int mask_type;
 
-	if (type != 0 || mem->type != 0)
+	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+	if (mask_type != 0 || type != mem->type)
 		return -EINVAL;
 
+	if (mem->page_count == 0)
+		return 0;
+
 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
 		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = GET_GATT(addr);
 		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
-		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
 	}
 
+	readl(GET_GATT(agp_bridge->gart_bus_addr));	/* PCI posting */
 	agp_bridge->driver->tlb_flush(mem);
 	return 0;
 }
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 8c617ad7497f..cfa5a649dfe7 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -141,17 +141,17 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 	bridge->version = &agp_current_version;
 
 	if (bridge->driver->needs_scratch_page) {
-		void *addr = bridge->driver->agp_alloc_page(bridge);
+		struct page *page = bridge->driver->agp_alloc_page(bridge);
 
-		if (!addr) {
+		if (!page) {
 			dev_err(&bridge->dev->dev,
 			        "can't get memory for scratch page\n");
 			return -ENOMEM;
 		}
 
-		bridge->scratch_page_real = virt_to_gart(addr);
+		bridge->scratch_page_real = phys_to_gart(page_to_phys(page));
 		bridge->scratch_page =
-		    bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
+		    bridge->driver->mask_memory(bridge, page, 0);
 	}
 
 	size_value = bridge->driver->fetch_size();
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 453543a1f293..35d50f2861b6 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -65,8 +65,9 @@ static const struct gatt_mask efficeon_generic_masks[] =
 };
 
 /* This function does the same thing as mask_memory() for this chipset... */
-static inline unsigned long efficeon_mask_memory(unsigned long addr)
+static inline unsigned long efficeon_mask_memory(struct page *page)
 {
+	unsigned long addr = phys_to_gart(page_to_phys(page));
 	return addr | 0x00000001;
 }
 
@@ -257,7 +258,7 @@ static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int t
 	last_page = NULL;
 	for (i = 0; i < count; i++) {
 		int index = pg_start + i;
-		unsigned long insert = efficeon_mask_memory(mem->memory[i]);
+		unsigned long insert = efficeon_mask_memory(mem->pages[i]);
 
 		page = (unsigned int *) efficeon_private.l1_table[index >> 10];
 
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 2224b762b7fb..1e8b461b91f1 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -95,13 +95,13 @@ EXPORT_SYMBOL(agp_flush_chipset);
 
 void agp_alloc_page_array(size_t size, struct agp_memory *mem)
 {
-	mem->memory = NULL;
+	mem->pages = NULL;
 	mem->vmalloc_flag = false;
 
 	if (size <= 2*PAGE_SIZE)
-		mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
-	if (mem->memory == NULL) {
-		mem->memory = vmalloc(size);
+		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
+	if (mem->pages == NULL) {
+		mem->pages = vmalloc(size);
 		mem->vmalloc_flag = true;
 	}
 }
@@ -110,9 +110,9 @@ EXPORT_SYMBOL(agp_alloc_page_array);
 void agp_free_page_array(struct agp_memory *mem)
 {
 	if (mem->vmalloc_flag) {
-		vfree(mem->memory);
+		vfree(mem->pages);
 	} else {
-		kfree(mem->memory);
+		kfree(mem->pages);
 	}
 }
 EXPORT_SYMBOL(agp_free_page_array);
@@ -136,7 +136,7 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
 
 	agp_alloc_page_array(alloc_size, new);
 
-	if (new->memory == NULL) {
+	if (new->pages == NULL) {
 		agp_free_key(new->key);
 		kfree(new);
 		return NULL;
@@ -162,7 +162,7 @@ struct agp_memory *agp_create_memory(int scratch_pages)
 
 	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
 
-	if (new->memory == NULL) {
+	if (new->pages == NULL) {
 		agp_free_key(new->key);
 		kfree(new);
 		return NULL;
@@ -206,15 +206,13 @@ void agp_free_memory(struct agp_memory *curr)
 	} else {
 
 		for (i = 0; i < curr->page_count; i++) {
-			curr->memory[i] = (unsigned long)gart_to_virt(
-				curr->memory[i]);
 			curr->bridge->driver->agp_destroy_page(
-				(void *)curr->memory[i],
+				curr->pages[i],
 				AGP_PAGE_DESTROY_UNMAP);
 		}
 		for (i = 0; i < curr->page_count; i++) {
 			curr->bridge->driver->agp_destroy_page(
-				(void *)curr->memory[i],
+				curr->pages[i],
 				AGP_PAGE_DESTROY_FREE);
 		}
 	}
@@ -282,13 +280,13 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
 	}
 
 	for (i = 0; i < page_count; i++) {
-		void *addr = bridge->driver->agp_alloc_page(bridge);
+		struct page *page = bridge->driver->agp_alloc_page(bridge);
 
-		if (addr == NULL) {
+		if (page == NULL) {
 			agp_free_memory(new);
 			return NULL;
 		}
-		new->memory[i] = virt_to_gart(addr);
+		new->pages[i] = page;
 		new->page_count++;
 	}
 	new->bridge = bridge;
@@ -1134,7 +1132,7 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type),
+		writel(bridge->driver->mask_memory(bridge, mem->pages[i], mask_type),
 		       bridge->gatt_table+j);
 	}
 	readl(bridge->gatt_table+j-1);	/* PCI Posting. */
@@ -1204,7 +1202,7 @@ struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
 		return NULL;
 
 	for (i = 0; i < page_count; i++)
-		new->memory[i] = 0;
+		new->pages[i] = 0;
 	new->page_count = 0;
 	new->type = type;
 	new->num_scratch_pages = pages;
@@ -1237,23 +1235,20 @@ int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *m
 		get_page(page);
 		atomic_inc(&agp_bridge->current_memory_agp);
 
-		/* set_memory_array_uc() needs virtual address */
-		mem->memory[i] = (unsigned long)page_address(page);
+		mem->pages[i] = page;
 		mem->page_count++;
 	}
 
 #ifdef CONFIG_X86
-	set_memory_array_uc(mem->memory, num_pages);
+	set_pages_array_uc(mem->pages, num_pages);
 #endif
 	ret = 0;
 out:
-	for (i = 0; i < mem->page_count; i++)
-		mem->memory[i] = virt_to_gart((void *)mem->memory[i]);
 	return ret;
 }
 EXPORT_SYMBOL(agp_generic_alloc_pages);
 
-void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
 {
 	struct page * page;
 
@@ -1265,56 +1260,47 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
 
 	get_page(page);
 	atomic_inc(&agp_bridge->current_memory_agp);
-	return page_address(page);
+	return page;
 }
 EXPORT_SYMBOL(agp_generic_alloc_page);
 
 void agp_generic_destroy_pages(struct agp_memory *mem)
 {
 	int i;
-	void *addr;
 	struct page *page;
 
 	if (!mem)
 		return;
 
-	for (i = 0; i < mem->page_count; i++)
-		mem->memory[i] = (unsigned long)gart_to_virt(mem->memory[i]);
-
 #ifdef CONFIG_X86
-	set_memory_array_wb(mem->memory, mem->page_count);
+	set_pages_array_wb(mem->pages, mem->page_count);
 #endif
 
 	for (i = 0; i < mem->page_count; i++) {
-		addr = (void *)mem->memory[i];
-		page = virt_to_page(addr);
+		page = mem->pages[i];
 
 #ifndef CONFIG_X86
 		unmap_page_from_agp(page);
 #endif
-
 		put_page(page);
-		free_page((unsigned long)addr);
+		__free_page(page);
 		atomic_dec(&agp_bridge->current_memory_agp);
-		mem->memory[i] = 0;
+		mem->pages[i] = NULL;
 	}
 }
 EXPORT_SYMBOL(agp_generic_destroy_pages);
 
-void agp_generic_destroy_page(void *addr, int flags)
+void agp_generic_destroy_page(struct page *page, int flags)
 {
-	struct page *page;
-
-	if (addr == NULL)
+	if (page == NULL)
 		return;
 
-	page = virt_to_page(addr);
 	if (flags & AGP_PAGE_DESTROY_UNMAP)
 		unmap_page_from_agp(page);
 
 	if (flags & AGP_PAGE_DESTROY_FREE) {
 		put_page(page);
-		free_page((unsigned long)addr);
+		__free_page(page);
 		atomic_dec(&agp_bridge->current_memory_agp);
 	}
 }
@@ -1361,8 +1347,9 @@ void global_cache_flush(void)
 EXPORT_SYMBOL(global_cache_flush);
 
 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
-				      unsigned long addr, int type)
+				      struct page *page, int type)
 {
+	unsigned long addr = phys_to_gart(page_to_phys(page));
 	/* memory type is ignored in the generic routine */
 	if (bridge->driver->masks)
 		return addr | bridge->driver->masks[0].mask;
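
Worth noting in the agp_alloc_page_array() hunk above: page tables up to two
pages come from kmalloc() with __GFP_NORETRY so a failed physically-contiguous
allocation doesn't thrash reclaim, larger or failed requests fall back to
vmalloc(), and vmalloc_flag records which free routine to use later. A
standalone sketch of the same idiom (variable names are illustrative):

    /* kmalloc-then-vmalloc fallback, as in agp_alloc_page_array() */
    void *table = NULL;
    bool table_is_vmalloc = false;

    if (size <= 2 * PAGE_SIZE)
            table = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
    if (table == NULL) {
            table = vmalloc(size);      /* virtually contiguous is enough */
            table_is_vmalloc = true;    /* must use vfree(), not kfree() */
    }
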
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 9c7e2343c399..8f3d4c184914 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -361,13 +361,11 @@ hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
 	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
 		unsigned long paddr;
 
-		paddr = mem->memory[i];
+		paddr = page_to_phys(mem->pages[i]);
 		for (k = 0;
 		     k < hp->io_pages_per_kpage;
 		     k++, j++, paddr += hp->io_page_size) {
-			hp->gatt[j] =
-				agp_bridge->driver->mask_memory(agp_bridge,
-					paddr, type);
+			hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
 		}
 	}
 
@@ -397,8 +395,9 @@ hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
 
 static unsigned long
 hp_zx1_mask_memory (struct agp_bridge_data *bridge,
-		    unsigned long addr, int type)
+		    struct page *page, int type)
 {
+	unsigned long addr = phys_to_gart(page_to_phys(page));
 	return HP_ZX1_PDIR_VALID_BIT | addr;
 }
 
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index 10da687d131a..60cc35bb5db7 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -60,6 +60,9 @@
  */
 #define WR_FLUSH_GATT(index)	RD_GATT(index)
 
+static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
+				       unsigned long addr, int type);
+
 static struct {
 	void *gatt;				/* ioremap'd GATT area */
 
@@ -74,6 +77,7 @@ static struct {
 		unsigned long *alloced_map;	/* bitmap of kernel-pages in use */
 		int refcount;			/* number of kernel pages using the large page */
 		u64 paddr;			/* physical address of large page */
+		struct page *page;		/* page pointer */
 	} *lp_desc;
 } i460;
 
@@ -294,7 +298,7 @@ static int i460_insert_memory_small_io_page (struct agp_memory *mem,
 	void *temp;
 
 	pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
-		 mem, pg_start, type, mem->memory[0]);
+		 mem, pg_start, type, page_to_phys(mem->pages[0]));
 
 	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
 		return -EINVAL;
@@ -321,10 +325,9 @@ static int i460_insert_memory_small_io_page (struct agp_memory *mem,
 
 	io_page_size = 1UL << I460_IO_PAGE_SHIFT;
 	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
-		paddr = mem->memory[i];
+		paddr = phys_to_gart(page_to_phys(mem->pages[i]));
 		for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
-			WR_GATT(j, agp_bridge->driver->mask_memory(agp_bridge,
-								   paddr, mem->type));
+			WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
 	}
 	WR_FLUSH_GATT(j - 1);
 	return 0;
@@ -364,10 +367,9 @@ static int i460_alloc_large_page (struct lp_desc *lp)
 {
 	unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
 	size_t map_size;
-	void *lpage;
 
-	lpage = (void *) __get_free_pages(GFP_KERNEL, order);
-	if (!lpage) {
+	lp->page = alloc_pages(GFP_KERNEL, order);
+	if (!lp->page) {
 		printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
 		return -ENOMEM;
 	}
@@ -375,12 +377,12 @@ static int i460_alloc_large_page (struct lp_desc *lp)
 	map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
 	lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
 	if (!lp->alloced_map) {
-		free_pages((unsigned long) lpage, order);
+		__free_pages(lp->page, order);
 		printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
 		return -ENOMEM;
 	}
 
-	lp->paddr = virt_to_gart(lpage);
+	lp->paddr = phys_to_gart(page_to_phys(lp->page));
 	lp->refcount = 0;
 	atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
 	return 0;
@@ -391,7 +393,7 @@ static void i460_free_large_page (struct lp_desc *lp)
 	kfree(lp->alloced_map);
 	lp->alloced_map = NULL;
 
-	free_pages((unsigned long) gart_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
+	__free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT);
 	atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
 }
 
@@ -439,8 +441,8 @@ static int i460_insert_memory_large_io_page (struct agp_memory *mem,
 		if (i460_alloc_large_page(lp) < 0)
 			return -ENOMEM;
 		pg = lp - i460.lp_desc;
-		WR_GATT(pg, agp_bridge->driver->mask_memory(agp_bridge,
-							    lp->paddr, 0));
+		WR_GATT(pg, i460_mask_memory(agp_bridge,
+					     lp->paddr, 0));
 		WR_FLUSH_GATT(pg);
 	}
 
@@ -448,7 +450,7 @@ static int i460_insert_memory_large_io_page (struct agp_memory *mem,
 	     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
 	     idx++, i++)
 	{
-		mem->memory[i] = lp->paddr + idx*PAGE_SIZE;
+		mem->pages[i] = lp->page;
 		__set_bit(idx, lp->alloced_map);
 		++lp->refcount;
 	}
@@ -463,7 +465,7 @@ static int i460_remove_memory_large_io_page (struct agp_memory *mem,
 	struct lp_desc *start, *end, *lp;
 	void *temp;
 
-	temp = agp_bridge->driver->current_size;
+	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_8(temp)->num_entries;
 
 	/* Figure out what pg_start means in terms of our large GART pages */
@@ -477,7 +479,7 @@ static int i460_remove_memory_large_io_page (struct agp_memory *mem,
 	     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
 	     idx++, i++)
 	{
-		mem->memory[i] = 0;
+		mem->pages[i] = NULL;
 		__clear_bit(idx, lp->alloced_map);
 		--lp->refcount;
 	}
@@ -521,7 +523,7 @@ static int i460_remove_memory (struct agp_memory *mem,
  * Let's just hope nobody counts on the allocated AGP memory being there before bind time
  * (I don't think current drivers do)...
  */
-static void *i460_alloc_page (struct agp_bridge_data *bridge)
+static struct page *i460_alloc_page (struct agp_bridge_data *bridge)
 {
 	void *page;
 
@@ -534,7 +536,7 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge)
 	return page;
 }
 
-static void i460_destroy_page (void *page, int flags)
+static void i460_destroy_page (struct page *page, int flags)
 {
 	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
 		agp_generic_destroy_page(page, flags);
@@ -544,13 +546,20 @@ static void i460_destroy_page (void *page, int flags)
 #endif /* I460_LARGE_IO_PAGES */
 
 static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
 				       unsigned long addr, int type)
 {
 	/* Make sure the returned address is a valid GATT entry */
 	return bridge->driver->masks[0].mask
 		| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
 }
 
+static unsigned long i460_page_mask_memory(struct agp_bridge_data *bridge,
+					   struct page *page, int type)
+{
+	unsigned long addr = phys_to_gart(page_to_phys(page));
+	return i460_mask_memory(bridge, addr, type);
+}
+
 const struct agp_bridge_driver intel_i460_driver = {
 	.owner = THIS_MODULE,
 	.aperture_sizes = i460_sizes,
@@ -560,7 +569,7 @@ const struct agp_bridge_driver intel_i460_driver = {
 	.fetch_size = i460_fetch_size,
 	.cleanup = i460_cleanup,
 	.tlb_flush = i460_tlb_flush,
-	.mask_memory = i460_mask_memory,
+	.mask_memory = i460_page_mask_memory,
 	.masks = i460_masks,
 	.agp_enable = agp_generic_enable,
 	.cache_flush = global_cache_flush,
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 7a748fa0dfce..8c9d50db5c3a 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -257,7 +257,7 @@ static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 }
 
 /* Exists to support ARGB cursors */
-static void *i8xx_alloc_pages(void)
+static struct page *i8xx_alloc_pages(void)
 {
 	struct page *page;
 
@@ -272,17 +272,14 @@ static void *i8xx_alloc_pages(void)
 	}
 	get_page(page);
 	atomic_inc(&agp_bridge->current_memory_agp);
-	return page_address(page);
+	return page;
 }
 
-static void i8xx_destroy_pages(void *addr)
+static void i8xx_destroy_pages(struct page *page)
 {
-	struct page *page;
-
-	if (addr == NULL)
+	if (page == NULL)
 		return;
 
-	page = virt_to_page(addr);
 	set_pages_wb(page, 4);
 	put_page(page);
 	__free_pages(page, 2);
@@ -346,7 +343,7 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 	global_cache_flush();
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-				mem->memory[i],
+				mem->pages[i],
 				mask_type),
 		       intel_private.registers+I810_PTE_BASE+(j*4));
 	}
@@ -389,37 +386,37 @@ static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
 {
 	struct agp_memory *new;
-	void *addr;
+	struct page *page;
 
 	switch (pg_count) {
-	case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
+	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
 		break;
 	case 4:
 		/* kludge to get 4 physical pages for ARGB cursor */
-		addr = i8xx_alloc_pages();
+		page = i8xx_alloc_pages();
 		break;
 	default:
 		return NULL;
 	}
 
-	if (addr == NULL)
+	if (page == NULL)
 		return NULL;
 
 	new = agp_create_memory(pg_count);
 	if (new == NULL)
 		return NULL;
 
-	new->memory[0] = virt_to_gart(addr);
+	new->pages[0] = page;
 	if (pg_count == 4) {
 		/* kludge to get 4 physical pages for ARGB cursor */
-		new->memory[1] = new->memory[0] + PAGE_SIZE;
-		new->memory[2] = new->memory[1] + PAGE_SIZE;
-		new->memory[3] = new->memory[2] + PAGE_SIZE;
+		new->pages[1] = new->pages[0] + 1;
+		new->pages[2] = new->pages[1] + 1;
+		new->pages[3] = new->pages[2] + 1;
 	}
 	new->page_count = pg_count;
 	new->num_scratch_pages = pg_count;
 	new->type = AGP_PHYS_MEMORY;
-	new->physical = new->memory[0];
+	new->physical = page_to_phys(new->pages[0]);
 	return new;
 }
 
@@ -451,13 +448,11 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
 	agp_free_key(curr->key);
 	if (curr->type == AGP_PHYS_MEMORY) {
 		if (curr->page_count == 4)
-			i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
+			i8xx_destroy_pages(curr->pages[0]);
 		else {
-			void *va = gart_to_virt(curr->memory[0]);
-
-			agp_bridge->driver->agp_destroy_page(va,
+			agp_bridge->driver->agp_destroy_page(curr->pages[0],
 					     AGP_PAGE_DESTROY_UNMAP);
-			agp_bridge->driver->agp_destroy_page(va,
+			agp_bridge->driver->agp_destroy_page(curr->pages[0],
 					     AGP_PAGE_DESTROY_FREE);
 		}
 		agp_free_page_array(curr);
@@ -466,8 +461,9 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
 }
 
 static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
-					    unsigned long addr, int type)
+					    struct page *page, int type)
 {
+	unsigned long addr = phys_to_gart(page_to_phys(page));
 	/* Type checking must be done elsewhere */
 	return addr | bridge->driver->masks[type].mask;
 }
@@ -855,7 +851,7 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-				mem->memory[i], mask_type),
+				mem->pages[i], mask_type),
 		       intel_private.registers+I810_PTE_BASE+(j*4));
 	}
 	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
@@ -1085,7 +1081,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-				mem->memory[i], mask_type), intel_private.gtt+j);
+				mem->pages[i], mask_type), intel_private.gtt+j);
 	}
 
 	readl(intel_private.gtt+j-1);
@@ -1200,8 +1196,9 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
 	 * this conditional.
 	 */
 static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
-					    unsigned long addr, int type)
+					    struct page *page, int type)
 {
+	dma_addr_t addr = phys_to_gart(page_to_phys(page));
 	/* Shift high bits down */
 	addr |= (addr >> 28) & 0xf0;
 
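
The dma_addr_t change in intel_i965_mask_memory() above is what the
"enable GEM on PAE" commit needs: on a 32-bit PAE kernel, unsigned long
truncates physical addresses above 4GB, while the i965 GTT can address them
by folding bits 32-35 into PTE bits 4-7. A worked example, assuming a page
at physical address 0x123456000:

    /* Sketch: i965 PTE packing for an address above 4GB (PAE) */
    dma_addr_t addr = 0x123456000ULL;  /* bit 32 set; would not fit in a
                                          32-bit unsigned long */

    addr |= (addr >> 28) & 0xf0;       /* bits 32-35 -> PTE bits 4-7,
                                          giving 0x123456010 */
    /* the low 32 bits, 0x23456010, are what land in the GTT entry */
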
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 16acee2de117..263d71dd441c 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -225,7 +225,7 @@ static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type
 	}
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			mem->memory[i], mask_type),
+			mem->pages[i], mask_type),
 			agp_bridge->gatt_table+nvidia_private.pg_offset+j);
 	}
 
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index 699e3422ad93..f4bb43fb8016 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -31,6 +31,10 @@
 #define AGP8X_MODE_BIT		3
 #define AGP8X_MODE		(1 << AGP8X_MODE_BIT)
 
+static unsigned long
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, unsigned long addr,
+		       int type);
+
 static struct _parisc_agp_info {
 	void __iomem *ioc_regs;
 	void __iomem *lba_regs;
@@ -149,12 +153,12 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
 		unsigned long paddr;
 
-		paddr = mem->memory[i];
+		paddr = page_to_phys(mem->pages[i]);
 		for (k = 0;
 		     k < info->io_pages_per_kpage;
 		     k++, j++, paddr += info->io_page_size) {
 			info->gatt[j] =
-				agp_bridge->driver->mask_memory(agp_bridge,
+				parisc_agp_mask_memory(agp_bridge,
 					paddr, type);
 		}
 	}
@@ -185,9 +189,17 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 }
 
 static unsigned long
-parisc_agp_mask_memory(struct agp_bridge_data *bridge,
-		       unsigned long addr, int type)
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, unsigned long addr,
+		       int type)
+{
+	return SBA_PDIR_VALID_BIT | addr;
+}
+
+static unsigned long
+parisc_agp_page_mask_memory(struct agp_bridge_data *bridge, struct page *page,
+			    int type)
 {
+	unsigned long addr = phys_to_gart(page_to_phys(page));
 	return SBA_PDIR_VALID_BIT | addr;
 }
 
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index b972d83bb1b2..d3ea2e4226b5 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -38,7 +38,7 @@ static struct aper_size_info_fixed sgi_tioca_sizes[] = {
 	{0, 0, 0},
 };
 
-static void *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
+static struct page *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
 {
 	struct page *page;
 	int nid;
@@ -52,7 +52,7 @@ static void *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
 
 	get_page(page);
 	atomic_inc(&agp_bridge->current_memory_agp);
-	return page_address(page);
+	return page;
 }
 
 /*
@@ -71,8 +71,9 @@ static void sgi_tioca_tlbflush(struct agp_memory *mem)
  */
 static unsigned long
 sgi_tioca_mask_memory(struct agp_bridge_data *bridge,
-		      unsigned long addr, int type)
+		      struct page *page, int type)
 {
+	unsigned long addr = phys_to_gart(page_to_phys(page));
 	return tioca_physpage_to_gart(addr);
 }
 
@@ -189,7 +190,7 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		table[j] =
-		    bridge->driver->mask_memory(bridge, mem->memory[i],
+		    bridge->driver->mask_memory(bridge, mem->pages[i],
 						mem->type);
 	}
 
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 6224df8b7f0a..b964a2199329 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -349,7 +349,7 @@ static int serverworks_insert_memory(struct agp_memory *mem,
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = SVRWRKS_GET_GATT(addr);
-		writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+		writel(agp_bridge->driver->mask_memory(agp_bridge, mem->pages[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
 	}
 	serverworks_tlbflush(mem);
 	return 0;
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 03f95ec08f59..f192c3b9ad41 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -146,13 +146,20 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start,
 {
 	int i, j, num_entries;
 	void *temp;
+	int mask_type;
 
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_32(temp)->num_entries;
 
-	if (type != 0 || mem->type != 0)
+	if (type != mem->type)
+		return -EINVAL;
+
+	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+	if (mask_type != 0) {
 		/* We know nothing of memory types */
 		return -EINVAL;
+	}
+
 	if ((pg_start + mem->page_count) > num_entries)
 		return -EINVAL;
 
@@ -166,9 +173,9 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start,
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		agp_bridge->gatt_table[j] =
-		    cpu_to_le32((mem->memory[i] & 0xFFFFF000UL) | 0x1UL);
-		flush_dcache_range((unsigned long)__va(mem->memory[i]),
-			(unsigned long)__va(mem->memory[i])+0x1000);
+		    cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | 0x1UL);
+		flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
+			(unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
 	}
 	(void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]);
 	mb();
@@ -184,13 +191,20 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 	int i, num_entries;
 	void *temp;
 	u32 *gp;
+	int mask_type;
 
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_32(temp)->num_entries;
 
-	if (type != 0 || mem->type != 0)
+	if (type != mem->type)
+		return -EINVAL;
+
+	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+	if (mask_type != 0) {
 		/* We know nothing of memory types */
 		return -EINVAL;
+	}
+
 	if ((pg_start + mem->page_count) > num_entries)
 		return -EINVAL;
 
@@ -205,9 +219,9 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 	}
 
 	for (i = 0; i < mem->page_count; i++) {
-		gp[i] = (mem->memory[i] >> PAGE_SHIFT) | 0x80000000UL;
-		flush_dcache_range((unsigned long)__va(mem->memory[i]),
-			(unsigned long)__va(mem->memory[i])+0x1000);
+		gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL;
+		flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
+			(unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
 	}
 	mb();
 	flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
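
For reference, the U3 GATT entry the last hunk builds is just a page frame
number with a valid flag in the top bit. A worked example, assuming 4 KiB
pages and a page at physical address 0x40000000:

    /* Sketch: U3 GATT entry encoding, per the hunk above */
    unsigned long phys = 0x40000000UL;               /* page_to_phys(page) */
    u32 entry = (phys >> PAGE_SHIFT) | 0x80000000UL; /* PFN | valid bit */
    /* with PAGE_SHIFT == 12, entry == 0x80040000 */
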
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 14796594e5d9..d68888fe3df9 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -203,7 +203,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
-	if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
+	if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
 		return -ENOMEM;
 
 	memset(entry, 0, sizeof(*entry));
@@ -211,7 +211,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
 	type = (u32) request->type;
 	if (!(memory = drm_alloc_agp(dev, pages, type))) {
-		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+		kfree(entry);
 		return -ENOMEM;
 	}
 
@@ -369,7 +369,7 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
 	list_del(&entry->head);
 
 	drm_free_agp(entry->memory, entry->pages);
-	drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+	kfree(entry);
 	return 0;
 }
 EXPORT_SYMBOL(drm_agp_free);
@@ -397,13 +397,13 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 {
 	struct drm_agp_head *head = NULL;
 
-	if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
+	if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
 		return NULL;
 	memset((void *)head, 0, sizeof(*head));
 	head->bridge = agp_find_bridge(dev->pdev);
 	if (!head->bridge) {
 		if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
-			drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
+			kfree(head);
 			return NULL;
 		}
 		agp_copy_info(head->bridge, &head->agp_info);
@@ -412,7 +412,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
412 agp_copy_info(head->bridge, &head->agp_info); 412 agp_copy_info(head->bridge, &head->agp_info);
413 } 413 }
414 if (head->agp_info.chipset == NOT_SUPPORTED) { 414 if (head->agp_info.chipset == NOT_SUPPORTED) {
415 drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); 415 kfree(head);
416 return NULL; 416 return NULL;
417 } 417 }
418 INIT_LIST_HEAD(&head->memory); 418 INIT_LIST_HEAD(&head->memory);
@@ -482,7 +482,7 @@ drm_agp_bind_pages(struct drm_device *dev,
482 } 482 }
483 483
484 for (i = 0; i < num_pages; i++) 484 for (i = 0; i < num_pages; i++)
485 mem->memory[i] = phys_to_gart(page_to_phys(pages[i])); 485 mem->pages[i] = pages[i];
486 mem->page_count = num_pages; 486 mem->page_count = num_pages;
487 487
488 mem->is_flushed = true; 488 mem->is_flushed = true;
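
Editor's note: everything from here on is dominated by one mechanical conversion — drm_alloc()/drm_free() (and the drm_calloc()/drm_realloc() variants) give way to the plain kernel allocators. The old wrappers existed to feed a DRM-private memory-accounting layer; in the non-debug build they reduced to something like the sketch below (an illustration of the semantics, not the exact historical source), which is why the size and area arguments can be dropped without changing behaviour:

    /* Approximate non-debug shape of the removed wrappers. */
    static inline void *drm_alloc(size_t size, int area)
    {
        return kmalloc(size, GFP_KERNEL); /* area only fed debug accounting */
    }

    static inline void drm_free(void *pt, size_t size, int area)
    {
        kfree(pt); /* size/area likewise unused outside DEBUG_MEMORY */
    }

A natural follow-up, already visible in a few hunks below, is collapsing kmalloc() plus memset() pairs into kzalloc().
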
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index ca7a9ef5007b..932b5aa96a67 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -79,7 +79,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
 	struct drm_device *dev = master->minor->dev;
 	DRM_DEBUG("%d\n", magic);
 
-	entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
 	memset(entry, 0, sizeof(*entry));
@@ -120,7 +120,7 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
 	list_del(&pt->head);
 	mutex_unlock(&dev->struct_mutex);
 
-	drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+	kfree(pt);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 80a257554b30..6246e3f3dad7 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -151,7 +151,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 	unsigned long user_token;
 	int ret;
 
-	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
+	map = kmalloc(sizeof(*map), GFP_KERNEL);
 	if (!map)
 		return -ENOMEM;
 
@@ -165,7 +165,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 	 * when processes fork.
 	 */
 	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
-		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+		kfree(map);
 		return -EINVAL;
 	}
 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
@@ -179,7 +179,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 	map->size = PAGE_ALIGN(map->size);
 
 	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
-		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+		kfree(map);
 		return -EINVAL;
 	}
 	map->mtrr = -1;
@@ -191,7 +191,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
 		if (map->offset + (map->size-1) < map->offset ||
 		    map->offset < virt_to_phys(high_memory)) {
-			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			kfree(map);
 			return -EINVAL;
 		}
 #endif
@@ -212,7 +212,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 				list->map->size = map->size;
 			}
 
-			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			kfree(map);
 			*maplist = list;
 			return 0;
 		}
@@ -227,7 +227,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 		if (map->type == _DRM_REGISTERS) {
 			map->handle = ioremap(map->offset, map->size);
 			if (!map->handle) {
-				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+				kfree(map);
 				return -ENOMEM;
 			}
 		}
@@ -243,7 +243,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 				list->map->size = map->size;
 			}
 
-			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			kfree(map);
 			*maplist = list;
 			return 0;
 		}
@@ -251,7 +251,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 		DRM_DEBUG("%lu %d %p\n",
 			  map->size, drm_order(map->size), map->handle);
 		if (!map->handle) {
-			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			kfree(map);
 			return -ENOMEM;
 		}
 		map->offset = (unsigned long)map->handle;
@@ -259,7 +259,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 			/* Prevent a 2nd X Server from creating a 2nd lock */
 			if (dev->primary->master->lock.hw_lock != NULL) {
 				vfree(map->handle);
-				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+				kfree(map);
 				return -EBUSY;
 			}
 			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
@@ -270,7 +270,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 		int valid = 0;
 
 		if (!drm_core_has_AGP(dev)) {
-			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			kfree(map);
 			return -EINVAL;
 		}
 #ifdef __alpha__
@@ -303,7 +303,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 			}
 		}
 		if (!list_empty(&dev->agp->memory) && !valid) {
-			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			kfree(map);
 			return -EPERM;
 		}
 		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
@@ -316,7 +316,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 		}
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
-			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			kfree(map);
 			return -EINVAL;
 		}
 		map->offset += (unsigned long)dev->sg->virtual;
@@ -328,7 +328,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 		 * need to point to a 64bit variable first. */
 		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
 		if (!dmah) {
-			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			kfree(map);
 			return -ENOMEM;
 		}
 		map->handle = dmah->vaddr;
@@ -336,15 +336,15 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 		kfree(dmah);
 		break;
 	default:
-		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+		kfree(map);
 		return -EINVAL;
 	}
 
-	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
+	list = kmalloc(sizeof(*list), GFP_KERNEL);
 	if (!list) {
 		if (map->type == _DRM_REGISTERS)
 			iounmap(map->handle);
-		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+		kfree(map);
 		return -EINVAL;
 	}
 	memset(list, 0, sizeof(*list));
@@ -362,8 +362,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 	if (ret) {
 		if (map->type == _DRM_REGISTERS)
 			iounmap(map->handle);
-		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
-		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+		kfree(map);
+		kfree(list);
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
@@ -448,7 +448,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 			list_del(&r_list->head);
 			drm_ht_remove_key(&dev->map_hash,
 					  r_list->user_token >> PAGE_SHIFT);
-			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
+			kfree(r_list);
 			found = 1;
 			break;
 		}
@@ -491,7 +491,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 		DRM_ERROR("tried to rmmap GEM object\n");
 		break;
 	}
-	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+	kfree(map);
 
 	return 0;
 }
@@ -582,24 +582,16 @@ static void drm_cleanup_buf_error(struct drm_device * dev,
 				drm_pci_free(dev, entry->seglist[i]);
 			}
 		}
-		drm_free(entry->seglist,
-			 entry->seg_count *
-			 sizeof(*entry->seglist), DRM_MEM_SEGS);
+		kfree(entry->seglist);
 
 		entry->seg_count = 0;
 	}
 
 	if (entry->buf_count) {
 		for (i = 0; i < entry->buf_count; i++) {
-			if (entry->buflist[i].dev_private) {
-				drm_free(entry->buflist[i].dev_private,
-					 entry->buflist[i].dev_priv_size,
-					 DRM_MEM_BUFS);
-			}
+			kfree(entry->buflist[i].dev_private);
 		}
-		drm_free(entry->buflist,
-			 entry->buf_count *
-			 sizeof(*entry->buflist), DRM_MEM_BUFS);
+		kfree(entry->buflist);
 
 		entry->buf_count = 0;
 	}
@@ -698,8 +690,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 		return -EINVAL;
 	}
 
-	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
-				   DRM_MEM_BUFS);
+	entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
 	if (!entry->buflist) {
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
@@ -729,7 +720,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 		buf->file_priv = NULL;
 
 		buf->dev_priv_size = dev->driver->dev_priv_size;
-		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+		buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
 		if (!buf->dev_private) {
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
@@ -749,10 +740,9 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 
 	DRM_DEBUG("byte_count: %d\n", byte_count);
 
-	temp_buflist = drm_realloc(dma->buflist,
-				   dma->buf_count * sizeof(*dma->buflist),
-				   (dma->buf_count + entry->buf_count)
-				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	temp_buflist = krealloc(dma->buflist,
+				(dma->buf_count + entry->buf_count) *
+				sizeof(*dma->buflist), GFP_KERNEL);
 	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
@@ -854,8 +844,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 		return -EINVAL;
 	}
 
-	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
-				   DRM_MEM_BUFS);
+	entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
 	if (!entry->buflist) {
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
@@ -863,11 +852,9 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 	}
 	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
 
-	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
-				   DRM_MEM_SEGS);
+	entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
 	if (!entry->seglist) {
-		drm_free(entry->buflist,
-			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
+		kfree(entry->buflist);
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
@@ -877,13 +864,11 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 	/* Keep the original pagelist until we know all the allocations
 	 * have succeeded
 	 */
-	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
-				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
+				sizeof(*dma->pagelist), GFP_KERNEL);
 	if (!temp_pagelist) {
-		drm_free(entry->buflist,
-			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
-		drm_free(entry->seglist,
-			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
+		kfree(entry->buflist);
+		kfree(entry->seglist);
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
@@ -907,9 +892,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 			entry->buf_count = count;
 			entry->seg_count = count;
 			drm_cleanup_buf_error(dev, entry);
-			drm_free(temp_pagelist,
-				 (dma->page_count + (count << page_order))
-				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+			kfree(temp_pagelist);
 			mutex_unlock(&dev->struct_mutex);
 			atomic_dec(&dev->buf_alloc);
 			return -ENOMEM;
@@ -940,18 +923,14 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 			buf->file_priv = NULL;
 
 			buf->dev_priv_size = dev->driver->dev_priv_size;
-			buf->dev_private = drm_alloc(buf->dev_priv_size,
-						     DRM_MEM_BUFS);
+			buf->dev_private = kmalloc(buf->dev_priv_size,
+						   GFP_KERNEL);
 			if (!buf->dev_private) {
 				/* Set count correctly so we free the proper amount. */
 				entry->buf_count = count;
 				entry->seg_count = count;
 				drm_cleanup_buf_error(dev, entry);
-				drm_free(temp_pagelist,
-					 (dma->page_count +
-					  (count << page_order))
-					 * sizeof(*dma->pagelist),
-					 DRM_MEM_PAGES);
+				kfree(temp_pagelist);
 				mutex_unlock(&dev->struct_mutex);
 				atomic_dec(&dev->buf_alloc);
 				return -ENOMEM;
@@ -964,16 +943,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 		byte_count += PAGE_SIZE << page_order;
 	}
 
-	temp_buflist = drm_realloc(dma->buflist,
-				   dma->buf_count * sizeof(*dma->buflist),
-				   (dma->buf_count + entry->buf_count)
-				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	temp_buflist = krealloc(dma->buflist,
+				(dma->buf_count + entry->buf_count) *
+				sizeof(*dma->buflist), GFP_KERNEL);
 	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
-		drm_free(temp_pagelist,
-			 (dma->page_count + (count << page_order))
-			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+		kfree(temp_pagelist);
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
@@ -988,9 +964,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 	 * with the new one.
 	 */
 	if (dma->page_count) {
-		drm_free(dma->pagelist,
-			 dma->page_count * sizeof(*dma->pagelist),
-			 DRM_MEM_PAGES);
+		kfree(dma->pagelist);
 	}
 	dma->pagelist = temp_pagelist;
 
@@ -1086,8 +1060,8 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 		return -EINVAL;
 	}
 
-	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
-				   DRM_MEM_BUFS);
+	entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+				 GFP_KERNEL);
 	if (!entry->buflist) {
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
@@ -1118,7 +1092,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 		buf->file_priv = NULL;
 
 		buf->dev_priv_size = dev->driver->dev_priv_size;
-		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+		buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
 		if (!buf->dev_private) {
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
@@ -1139,10 +1113,9 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 
 	DRM_DEBUG("byte_count: %d\n", byte_count);
 
-	temp_buflist = drm_realloc(dma->buflist,
-				   dma->buf_count * sizeof(*dma->buflist),
-				   (dma->buf_count + entry->buf_count)
-				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	temp_buflist = krealloc(dma->buflist,
+				(dma->buf_count + entry->buf_count) *
+				sizeof(*dma->buflist), GFP_KERNEL);
 	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
@@ -1248,8 +1221,8 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
 		return -EINVAL;
 	}
 
-	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
-				   DRM_MEM_BUFS);
+	entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+				 GFP_KERNEL);
 	if (!entry->buflist) {
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
@@ -1279,7 +1252,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
 		buf->file_priv = NULL;
 
 		buf->dev_priv_size = dev->driver->dev_priv_size;
-		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+		buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
 		if (!buf->dev_private) {
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
@@ -1299,10 +1272,9 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
 
 	DRM_DEBUG("byte_count: %d\n", byte_count);
 
-	temp_buflist = drm_realloc(dma->buflist,
-				   dma->buf_count * sizeof(*dma->buflist),
-				   (dma->buf_count + entry->buf_count)
-				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	temp_buflist = krealloc(dma->buflist,
+				(dma->buf_count + entry->buf_count) *
+				sizeof(*dma->buflist), GFP_KERNEL);
 	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
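
Editor's note: the drm_realloc() to krealloc() changes above also drop the old-size argument, because krealloc() knows the size of the allocation it is growing and preserves its contents. What it does not do is free the original block on failure, which is why every call site keeps the temp_buflist idiom. The same contract holds for user-space realloc(), sketched here:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t count = 4, grow = 4;
        int *buflist = calloc(count, sizeof(*buflist));
        int *temp;

        if (!buflist)
            return 1;

        /* Land the result in a temporary: on failure the old block is
         * still valid and still owned by us. */
        temp = realloc(buflist, (count + grow) * sizeof(*buflist));
        if (!temp) {
            free(buflist);
            return 1;
        }
        buflist = temp;
        memset(buflist + count, 0, grow * sizeof(*buflist));

        printf("grew to %zu entries\n", count + grow);
        free(buflist);
        return 0;
    }
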
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 7d1e53c10d4b..2607753a320b 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -341,7 +341,7 @@ int drm_addctx(struct drm_device *dev, void *data,
 		}
 	}
 
-	ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST);
+	ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
 	if (!ctx_entry) {
 		DRM_DEBUG("out of memory\n");
 		return -ENOMEM;
@@ -456,7 +456,7 @@ int drm_rmctx(struct drm_device *dev, void *data,
 	list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
 		if (pos->handle == ctx->handle) {
 			list_del(&pos->head);
-			drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
+			kfree(pos);
 			--dev->ctx_count;
 		}
 	}
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6ce0e2667a85..2960b6d73456 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -100,15 +100,13 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
 		    (dev->driver->driver_features & features) != features)
 			continue;
 
-		tmp = drm_alloc(sizeof(struct drm_info_node),
-				_DRM_DRIVER);
+		tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
 		ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
 					  root, tmp, &drm_debugfs_fops);
 		if (!ent) {
 			DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
 				  name, files[i].name);
-			drm_free(tmp, sizeof(struct drm_info_node),
-				 _DRM_DRIVER);
+			kfree(tmp);
 			ret = -1;
 			goto fail;
 		}
@@ -196,8 +194,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
 			if (tmp->info_ent == &files[i]) {
 				debugfs_remove(tmp->dent);
 				list_del(pos);
-				drm_free(tmp, sizeof(struct drm_info_node),
-					 _DRM_DRIVER);
+				kfree(tmp);
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 7a8e2fba4678..13f1537413fb 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -47,7 +47,7 @@ int drm_dma_setup(struct drm_device *dev)
 {
 	int i;
 
-	dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
+	dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
 	if (!dev->dma)
 		return -ENOMEM;
 
@@ -88,36 +88,19 @@ void drm_dma_takedown(struct drm_device *dev)
 					drm_pci_free(dev, dma->bufs[i].seglist[j]);
 				}
 			}
-			drm_free(dma->bufs[i].seglist,
-				 dma->bufs[i].seg_count
-				 * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS);
+			kfree(dma->bufs[i].seglist);
 		}
 		if (dma->bufs[i].buf_count) {
 			for (j = 0; j < dma->bufs[i].buf_count; j++) {
-				if (dma->bufs[i].buflist[j].dev_private) {
-					drm_free(dma->bufs[i].buflist[j].
-						 dev_private,
-						 dma->bufs[i].buflist[j].
-						 dev_priv_size, DRM_MEM_BUFS);
-				}
+				kfree(dma->bufs[i].buflist[j].dev_private);
 			}
-			drm_free(dma->bufs[i].buflist,
-				 dma->bufs[i].buf_count *
-				 sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS);
+			kfree(dma->bufs[i].buflist);
 		}
 	}
 
-	if (dma->buflist) {
-		drm_free(dma->buflist,
-			 dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS);
-	}
-
-	if (dma->pagelist) {
-		drm_free(dma->pagelist,
-			 dma->page_count * sizeof(*dma->pagelist),
-			 DRM_MEM_PAGES);
-	}
-	drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
+	kfree(dma->buflist);
+	kfree(dma->pagelist);
+	kfree(dev->dma);
 	dev->dma = NULL;
 }
 
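
Editor's note: drm_dma_takedown() loses not just the wrappers but also the NULL checks around them. kfree(NULL) is defined as a no-op, exactly like free(NULL) in C, so guards such as "if (dma->buflist)" and the per-buffer "if (...dev_private)" tests are dead weight once the sized wrapper is gone. A one-line user-space illustration of the same guarantee:

    #include <stdlib.h>

    int main(void)
    {
        int *p = NULL;

        free(p); /* defined as a no-op for NULL, like kfree(NULL) */
        return 0;
    }
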
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
index 80be1cab62af..c53c9768cc11 100644
--- a/drivers/gpu/drm/drm_drawable.c
+++ b/drivers/gpu/drm/drm_drawable.c
@@ -85,9 +85,8 @@ int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
 		return -EINVAL;
 	}
-	drm_free(info->rects, info->num_rects * sizeof(struct drm_clip_rect),
-		 DRM_MEM_BUFS);
-	drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
+	kfree(info->rects);
+	kfree(info);
 
 	idr_remove(&dev->drw_idr, draw->handle);
 
@@ -106,12 +105,12 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
 
 	info = idr_find(&dev->drw_idr, update->handle);
 	if (!info) {
-		info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
+		info = kzalloc(sizeof(*info), GFP_KERNEL);
 		if (!info)
 			return -ENOMEM;
 		if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
 			DRM_ERROR("No such drawable %d\n", update->handle);
-			drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+			kfree(info);
 			return -EINVAL;
 		}
 	}
@@ -121,8 +120,9 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
 	if (update->num == 0)
 		rects = NULL;
 	else if (update->num != info->num_rects) {
-		rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
-				  DRM_MEM_BUFS);
+		rects = kmalloc(update->num *
+				sizeof(struct drm_clip_rect),
+				GFP_KERNEL);
 	} else
 		rects = info->rects;
 
@@ -145,8 +145,7 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
 	spin_lock_irqsave(&dev->drw_lock, irqflags);
 
 	if (rects != info->rects) {
-		drm_free(info->rects, info->num_rects *
-			 sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
+		kfree(info->rects);
 	}
 
 	info->rects = rects;
@@ -166,8 +165,7 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
 
 error:
 	if (rects != info->rects)
-		drm_free(rects, update->num * sizeof(struct drm_clip_rect),
-			 DRM_MEM_BUFS);
+		kfree(rects);
 
 	return err;
 }
@@ -186,9 +184,8 @@ static int drm_drawable_free(int idr, void *p, void *data)
 	struct drm_drawable_info *info = p;
 
 	if (info) {
-		drm_free(info->rects, info->num_rects *
-			 sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
-		drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+		kfree(info->rects);
+		kfree(info);
 	}
 
 	return 0;
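
Editor's note: drm_update_drawable_info() above also swaps drm_calloc(1, sizeof(*info), ...) for kzalloc(sizeof(*info), GFP_KERNEL); for a single zeroed object, kzalloc is the idiomatic spelling of kmalloc plus memset. A user-space sketch of the equivalence (zalloc is a hypothetical helper name):

    #include <stdlib.h>
    #include <string.h>

    /* kzalloc(size, GFP_KERNEL) behaves like this helper. */
    static void *zalloc(size_t size)
    {
        void *p = malloc(size);

        if (p)
            memset(p, 0, size);
        return p;
    }

    int main(void)
    {
        struct info { int num_rects; void *rects; };
        struct info *info = zalloc(sizeof(*info)); /* all fields start at 0 */

        free(info);
        return 0;
    }
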
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 1bf7efd8d334..b39d7bfc0c9c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -189,7 +189,7 @@ int drm_lastclose(struct drm_device * dev)
 		if (entry->bound)
 			drm_unbind_agp(entry->memory);
 		drm_free_agp(entry->memory, entry->pages);
-		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+		kfree(entry);
 	}
 	INIT_LIST_HEAD(&dev->agp->memory);
 
@@ -208,21 +208,15 @@ int drm_lastclose(struct drm_device * dev)
 	/* Clear vma list (only built for debugging) */
 	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
 		list_del(&vma->head);
-		drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+		kfree(vma);
 	}
 
 	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
 		for (i = 0; i < dev->queue_count; i++) {
-			if (dev->queuelist[i]) {
-				drm_free(dev->queuelist[i],
-					 sizeof(*dev->queuelist[0]),
-					 DRM_MEM_QUEUES);
-				dev->queuelist[i] = NULL;
-			}
+			kfree(dev->queuelist[i]);
+			dev->queuelist[i] = NULL;
 		}
-		drm_free(dev->queuelist,
-			 dev->queue_slots * sizeof(*dev->queuelist),
-			 DRM_MEM_QUEUES);
+		kfree(dev->queuelist);
 		dev->queuelist = NULL;
 	}
 	dev->queue_count = 0;
@@ -344,8 +338,6 @@ static int __init drm_core_init(void)
 		goto err_p3;
 	}
 
-	drm_mem_init();
-
 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
 	return 0;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 801a0d0e0810..7d0835226f6e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -252,16 +252,18 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 {
 	struct drm_display_mode *mode;
 	int hsize = t->hsize * 8 + 248, vsize;
+	unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
+		>> EDID_TIMING_ASPECT_SHIFT;
 
 	mode = drm_mode_create(dev);
 	if (!mode)
 		return NULL;
 
-	if (t->aspect_ratio == 0)
+	if (aspect_ratio == 0)
 		vsize = (hsize * 10) / 16;
-	else if (t->aspect_ratio == 1)
+	else if (aspect_ratio == 1)
 		vsize = (hsize * 3) / 4;
-	else if (t->aspect_ratio == 2)
+	else if (aspect_ratio == 2)
 		vsize = (hsize * 4) / 5;
 	else
 		vsize = (hsize * 9) / 16;
@@ -288,17 +290,24 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 {
 	struct drm_display_mode *mode;
 	struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+	unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
+	unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
+	unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
+	unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
+	unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 8 | pt->hsync_offset_lo;
+	unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 6 | pt->hsync_pulse_width_lo;
+	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) | (pt->vsync_offset_pulse_width_lo & 0xf);
+	unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
 
 	/* ignore tiny modes */
-	if (((pt->hactive_hi << 8) | pt->hactive_lo) < 64 ||
-	    ((pt->vactive_hi << 8) | pt->hactive_lo) < 64)
+	if (hactive < 64 || vactive < 64)
 		return NULL;
 
-	if (pt->stereo) {
+	if (pt->misc & DRM_EDID_PT_STEREO) {
 		printk(KERN_WARNING "stereo mode not supported\n");
 		return NULL;
 	}
-	if (!pt->separate_sync) {
+	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
 		printk(KERN_WARNING "integrated sync not supported\n");
 		return NULL;
 	}
@@ -310,41 +319,36 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 	mode->type = DRM_MODE_TYPE_DRIVER;
 
 	if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
-		timing->pixel_clock = 1088;
+		timing->pixel_clock = cpu_to_le16(1088);
 
-	mode->clock = timing->pixel_clock * 10;
+	mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
 
-	mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo;
-	mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) |
-					      pt->hsync_offset_lo);
-	mode->hsync_end = mode->hsync_start +
-		((pt->hsync_pulse_width_hi << 8) |
-		 pt->hsync_pulse_width_lo);
-	mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo);
-
-	mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo;
-	mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 4) |
-					      pt->vsync_offset_lo);
-	mode->vsync_end = mode->vsync_start +
-		((pt->vsync_pulse_width_hi << 4) |
-		 pt->vsync_pulse_width_lo);
-	mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
+	mode->hdisplay = hactive;
+	mode->hsync_start = mode->hdisplay + hsync_offset;
+	mode->hsync_end = mode->hsync_start + hsync_pulse_width;
+	mode->htotal = mode->hdisplay + hblank;
+
+	mode->vdisplay = vactive;
+	mode->vsync_start = mode->vdisplay + vsync_offset;
+	mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+	mode->vtotal = mode->vdisplay + vblank;
 
 	drm_mode_set_name(mode);
 
-	if (pt->interlaced)
+	if (pt->misc & DRM_EDID_PT_INTERLACED)
 		mode->flags |= DRM_MODE_FLAG_INTERLACE;
 
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
-		pt->hsync_positive = 1;
-		pt->vsync_positive = 1;
+		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
 	}
 
-	mode->flags |= pt->hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
-	mode->flags |= pt->vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+	mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+		DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+	mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+		DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 
-	mode->width_mm = pt->width_mm_lo | (pt->width_mm_hi << 8);
-	mode->height_mm = pt->height_mm_lo | (pt->height_mm_hi << 8);
+	mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+	mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
 
 	if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
 		mode->width_mm *= 10;
@@ -465,7 +469,7 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
 		struct drm_display_mode *newmode;
 
 		/* If std timings bytes are 1, 1 it's empty */
-		if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1)
+		if (t->hsize == 1 && t->vfreq_aspect == 1)
 			continue;
 
 		newmode = drm_mode_std(dev, &edid->standard_timings[i]);
@@ -509,7 +513,7 @@ static int add_detailed_info(struct drm_connector *connector,
 			continue;
 
 		/* First detailed mode is preferred */
-		if (i == 0 && edid->preferred_timing)
+		if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
 			newmode->type |= DRM_MODE_TYPE_PREFERRED;
 		drm_mode_probed_add(connector, newmode);
 
@@ -767,22 +771,22 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
 		edid_fixup_preferred(connector, quirks);
 
-	connector->display_info.serration_vsync = edid->serration_vsync;
-	connector->display_info.sync_on_green = edid->sync_on_green;
-	connector->display_info.composite_sync = edid->composite_sync;
-	connector->display_info.separate_syncs = edid->separate_syncs;
-	connector->display_info.blank_to_black = edid->blank_to_black;
-	connector->display_info.video_level = edid->video_level;
-	connector->display_info.digital = edid->digital;
+	connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0;
+	connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0;
+	connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0;
+	connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0;
+	connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 1 : 0;
+	connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5;
+	connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0;
 	connector->display_info.width_mm = edid->width_cm * 10;
 	connector->display_info.height_mm = edid->height_cm * 10;
 	connector->display_info.gamma = edid->gamma;
-	connector->display_info.gtf_supported = edid->default_gtf;
-	connector->display_info.standard_color = edid->standard_color;
-	connector->display_info.display_type = edid->display_type;
-	connector->display_info.active_off_supported = edid->pm_active_off;
-	connector->display_info.suspend_supported = edid->pm_suspend;
-	connector->display_info.standby_supported = edid->pm_standby;
+	connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0;
+	connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0;
+	connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3;
+	connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0;
+	connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0;
+	connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0;
 	connector->display_info.gamma = edid->gamma;
 
 	return num_modes;
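
Editor's note: the drm_edid.c rewrite is the "EDID endianness fixes" entry from the merge summary. C bitfields, whose bit order and padding are compiler- and endian-dependent, are replaced by explicit mask-and-shift access to the raw descriptor bytes, and the 16-bit pixel clock, stored little-endian in the EDID block, now goes through le16_to_cpu(). A runnable sketch of the decoding style, with made-up byte values:

    #include <stdio.h>

    int main(void)
    {
        /* hactive is 12 bits: a low byte plus the high nibble of a byte
         * shared with hblank, as in the hunk above. */
        unsigned char hactive_lo = 0x80;
        unsigned char hactive_hblank_hi = 0x51;
        unsigned hactive = (hactive_hblank_hi & 0xf0) << 4 | hactive_lo;

        /* The pixel clock is a little-endian 16-bit value in 10 kHz units. */
        unsigned char clock_le[2] = { 0x1d, 0x36 };
        unsigned clock = clock_le[0] | clock_le[1] << 8;

        printf("hactive = %u\n", hactive);        /* 1408 */
        printf("clock = %u kHz\n", clock * 10);   /* 138530 */
        return 0;
    }

Decoding this way yields the same result on any host, which is the point of the patch.
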
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 09a3571c9908..251bc0e3b5ec 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -240,7 +240,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
 
-	priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
@@ -328,7 +328,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 	return 0;
       out_free:
-	drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
+	kfree(priv);
 	filp->private_data = NULL;
 	return ret;
 }
@@ -471,7 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)
 			drm_ctxbitmap_free(dev, pos->handle);
 
 			list_del(&pos->head);
-			drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
+			kfree(pos);
 			--dev->ctx_count;
 		}
 	}
@@ -516,7 +516,7 @@ int drm_release(struct inode *inode, struct file *filp)
 
 	if (dev->driver->postclose)
 		dev->driver->postclose(dev, file_priv);
-	drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES);
+	kfree(file_priv);
 
 	/* ========================================================
 	 * End inline drm_release
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ec43005100d9..8104ecaea26f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -89,7 +89,7 @@ drm_gem_init(struct drm_device *dev)
 	atomic_set(&dev->gtt_count, 0);
 	atomic_set(&dev->gtt_memory, 0);
 
-	mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
 	if (!mm) {
 		DRM_ERROR("out of memory\n");
 		return -ENOMEM;
@@ -98,14 +98,14 @@ drm_gem_init(struct drm_device *dev)
 	dev->mm_private = mm;
 
 	if (drm_ht_create(&mm->offset_hash, 19)) {
-		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+		kfree(mm);
 		return -ENOMEM;
 	}
 
 	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
 			DRM_FILE_PAGE_OFFSET_SIZE)) {
 		drm_ht_remove(&mm->offset_hash);
-		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+		kfree(mm);
 		return -ENOMEM;
 	}
 
@@ -119,7 +119,7 @@ drm_gem_destroy(struct drm_device *dev)
 
 	drm_mm_takedown(&mm->offset_manager);
 	drm_ht_remove(&mm->offset_hash);
-	drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+	kfree(mm);
 	dev->mm_private = NULL;
 }
 
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index ac35145c3e20..f36b21c5b2e1 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -46,8 +46,7 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
 	ht->table = NULL;
 	ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
 	if (!ht->use_vmalloc) {
-		ht->table = drm_calloc(ht->size, sizeof(*ht->table),
-				       DRM_MEM_HASHTAB);
+		ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
 	}
 	if (!ht->table) {
 		ht->use_vmalloc = 1;
@@ -200,8 +199,7 @@ void drm_ht_remove(struct drm_open_hash *ht)
 		if (ht->use_vmalloc)
 			vfree(ht->table);
 		else
-			drm_free(ht->table, ht->size * sizeof(*ht->table),
-				 DRM_MEM_HASHTAB);
+			kfree(ht->table);
 		ht->table = NULL;
 	}
 }
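
Editor's note: drm_hashtab.c keeps its dual allocation strategy through the conversion — bucket tables that fit in one page come from kcalloc(), larger ones from vmalloc(), and drm_ht_remove() must free with the matching call. The threshold arithmetic, assuming 4 KiB pages and pointer-sized buckets for illustration:

    #include <stdio.h>

    #define PAGE_SIZE 4096 /* assumption: 4 KiB pages */

    int main(void)
    {
        unsigned int order;

        for (order = 7; order <= 11; order++) {
            unsigned long buckets = 1UL << order;
            unsigned long bytes = buckets * sizeof(void *);

            printf("order %2u: %6lu bytes -> %s\n", order, bytes,
                   bytes > PAGE_SIZE ? "vmalloc" : "kcalloc");
        }
        return 0;
    }
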
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 1fad76289e66..9b9ff46c2378 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -93,7 +93,7 @@ int drm_setunique(struct drm_device *dev, void *data,
 
 	master->unique_len = u->unique_len;
 	master->unique_size = u->unique_len + 1;
-	master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER);
+	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
 	if (!master->unique)
 		return -ENOMEM;
 	if (copy_from_user(master->unique, u->unique, master->unique_len))
@@ -101,9 +101,8 @@ int drm_setunique(struct drm_device *dev, void *data,
 
 	master->unique[master->unique_len] = '\0';
 
-	dev->devname =
-	    drm_alloc(strlen(dev->driver->pci_driver.name) +
-		      strlen(master->unique) + 2, DRM_MEM_DRIVER);
+	dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
+			       strlen(master->unique) + 2, GFP_KERNEL);
 	if (!dev->devname)
 		return -ENOMEM;
 
@@ -138,7 +137,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
 
 	master->unique_len = 40;
 	master->unique_size = master->unique_len;
-	master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER);
+	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
 	if (master->unique == NULL)
 		return -ENOMEM;
 
@@ -152,9 +151,8 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
 	else
 		master->unique_len = len;
 
-	dev->devname =
-	    drm_alloc(strlen(dev->driver->pci_driver.name) + master->unique_len +
-		      2, DRM_MEM_DRIVER);
+	dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
+			       master->unique_len + 2, GFP_KERNEL);
 	if (dev->devname == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index fc8e5acd9d9a..b4a3dbcebe9b 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -104,21 +104,13 @@ void drm_vblank_cleanup(struct drm_device *dev)
 
 	vblank_disable_fn((unsigned long)dev);
 
-	drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
-		 DRM_MEM_DRIVER);
-	drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
-		 dev->num_crtcs, DRM_MEM_DRIVER);
-	drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
-		 dev->num_crtcs, DRM_MEM_DRIVER);
-	drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
-		 dev->num_crtcs, DRM_MEM_DRIVER);
-	drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
-		 DRM_MEM_DRIVER);
-	drm_free(dev->last_vblank_wait,
-		 sizeof(*dev->last_vblank_wait) * dev->num_crtcs,
-		 DRM_MEM_DRIVER);
-	drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
-		 dev->num_crtcs, DRM_MEM_DRIVER);
+	kfree(dev->vbl_queue);
+	kfree(dev->_vblank_count);
+	kfree(dev->vblank_refcount);
+	kfree(dev->vblank_enabled);
+	kfree(dev->last_vblank);
+	kfree(dev->last_vblank_wait);
+	kfree(dev->vblank_inmodeset);
 
 	dev->num_crtcs = 0;
 }
@@ -132,37 +124,33 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 	spin_lock_init(&dev->vbl_lock);
 	dev->num_crtcs = num_crtcs;
 
-	dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
-				   DRM_MEM_DRIVER);
+	dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
+				 GFP_KERNEL);
 	if (!dev->vbl_queue)
 		goto err;
 
-	dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
-				       DRM_MEM_DRIVER);
+	dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
 	if (!dev->_vblank_count)
 		goto err;
 
-	dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
-					 DRM_MEM_DRIVER);
+	dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
+				       GFP_KERNEL);
 	if (!dev->vblank_refcount)
 		goto err;
 
-	dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
-					 DRM_MEM_DRIVER);
+	dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
 	if (!dev->vblank_enabled)
 		goto err;
 
-	dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
+	dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
 	if (!dev->last_vblank)
 		goto err;
 
-	dev->last_vblank_wait = drm_calloc(num_crtcs, sizeof(u32),
-					   DRM_MEM_DRIVER);
+	dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
 	if (!dev->last_vblank_wait)
 		goto err;
 
-	dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
-					   DRM_MEM_DRIVER);
+	dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
 	if (!dev->vblank_inmodeset)
 		goto err;
 
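
Editor's note: the drm_irq.c hunks preserve a distinction worth noticing. Arrays that must start zeroed (vblank_enabled, last_vblank, and so on) move to kcalloc(), which both zeroes the memory and rejects an n * size multiplication that would overflow, while the kmalloc(sizeof(x) * num_crtcs, ...) calls get neither guarantee. A user-space sketch of the overflow check kcalloc performs:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *checked_calloc(size_t n, size_t size)
    {
        if (size != 0 && n > SIZE_MAX / size)
            return NULL; /* n * size would wrap */
        return calloc(n, size);
    }

    int main(void)
    {
        size_t num_crtcs = 4; /* hypothetical CRTC count */
        uint32_t *last_vblank = checked_calloc(num_crtcs, sizeof(*last_vblank));

        printf("%s\n", last_vblank ? "ok" : "overflow or ENOMEM");
        free(last_vblank);
        return 0;
    }
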
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 0c707f533eab..e4865f99989c 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -36,15 +36,6 @@
 #include <linux/highmem.h>
 #include "drmP.h"
 
-#ifdef DEBUG_MEMORY
-#include "drm_memory_debug.h"
-#else
-
-/** No-op. */
-void drm_mem_init(void)
-{
-}
-
 /**
  * Called when "/proc/dri/%dev%/mem" is read.
  *
@@ -64,28 +55,15 @@ int drm_mem_info(char *buf, char **start, off_t offset,
 	return 0;
 }
 
-/** Wrapper around kmalloc() and kfree() */
-void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
-{
-	void *pt;
-
-	if (!(pt = kmalloc(size, GFP_KERNEL)))
-		return NULL;
-	if (oldpt && oldsize) {
-		memcpy(pt, oldpt, oldsize);
-		kfree(oldpt);
-	}
-	return pt;
-}
-
 #if __OS_HAS_AGP
 static void *agp_remap(unsigned long offset, unsigned long size,
 		       struct drm_device * dev)
 {
-	unsigned long *phys_addr_map, i, num_pages =
+	unsigned long i, num_pages =
 	    PAGE_ALIGN(size) / PAGE_SIZE;
 	struct drm_agp_mem *agpmem;
 	struct page **page_map;
+	struct page **phys_page_map;
 	void *addr;
 
 	size = PAGE_ALIGN(size);
@@ -112,10 +90,9 @@ static void *agp_remap(unsigned long offset, unsigned long size,
 	if (!page_map)
 		return NULL;
 
-	phys_addr_map =
-	    agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+	phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
 	for (i = 0; i < num_pages; ++i)
-		page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+		page_map[i] = phys_page_map[i];
 	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
 	vfree(page_map);
 
@@ -157,8 +134,6 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
 
 #endif	/* agp */
 
-#endif	/* debug_memory */
-
 void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
 {
 	if (drm_core_has_AGP(dev) &&
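
Editor's note: agp_remap() shows the payoff of the page-array change on the DRM side — instead of reconstructing struct page pointers from stored physical addresses, it copies them straight out of the AGP backing store. Side by side (schematic, trimmed from the hunk above):

    /* before: physical addresses had to be converted back to pages */
    page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);

    /* after: the backing store already holds struct page pointers */
    page_map[i] = phys_page_map[i];

Besides being simpler, the new form avoids assuming that whatever the bridge stored is a valid system-memory frame number, which is presumably what the non-x86 and cant_use_aperture work elsewhere in this merge relies on.
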
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a912a0ff11cc..3e47869d6dae 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -187,9 +187,10 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
 }
 
 
-
-struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
-				     unsigned long size, unsigned alignment)
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+					     unsigned long size,
+					     unsigned alignment,
+					     int atomic)
 {
 
 	struct drm_mm_node *align_splitoff = NULL;
@@ -200,7 +201,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
 
 	if (tmp) {
 		align_splitoff =
-		    drm_mm_split_at_start(node, alignment - tmp, 0);
+		    drm_mm_split_at_start(node, alignment - tmp, atomic);
 		if (unlikely(align_splitoff == NULL))
 			return NULL;
 	}
@@ -209,7 +210,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
 		list_del_init(&node->fl_entry);
 		node->free = 0;
 	} else {
-		node = drm_mm_split_at_start(node, size, 0);
+		node = drm_mm_split_at_start(node, size, atomic);
 	}
 
 	if (align_splitoff)
@@ -217,42 +218,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
 
 	return node;
 }
-
-EXPORT_SYMBOL(drm_mm_get_block);
-
-struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
-					    unsigned long size,
-					    unsigned alignment)
-{
-
-	struct drm_mm_node *align_splitoff = NULL;
-	struct drm_mm_node *child;
-	unsigned tmp = 0;
-
-	if (alignment)
-		tmp = parent->start % alignment;
-
-	if (tmp) {
-		align_splitoff =
-		    drm_mm_split_at_start(parent, alignment - tmp, 1);
-		if (unlikely(align_splitoff == NULL))
-			return NULL;
-	}
-
-	if (parent->size == size) {
-		list_del_init(&parent->fl_entry);
-		parent->free = 0;
-		return parent;
-	} else {
-		child = drm_mm_split_at_start(parent, size, 1);
-	}
-
-	if (align_splitoff)
-		drm_mm_put_block(align_splitoff);
-
-	return child;
-}
-EXPORT_SYMBOL(drm_mm_get_block_atomic);
+EXPORT_SYMBOL(drm_mm_get_block_generic);
 
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index b55d5bc6ea61..577094fb1995 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -55,17 +55,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
 	unsigned long addr;
 	size_t sz;
 #endif
-#ifdef DRM_DEBUG_MEMORY
-	int area = DRM_MEM_DMA;
-
-	spin_lock(&drm_mem_lock);
-	if ((drm_ram_used >> PAGE_SHIFT)
-	    > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
-		spin_unlock(&drm_mem_lock);
-		return 0;
-	}
-	spin_unlock(&drm_mem_lock);
-#endif
 
 	/* pci_alloc_consistent only guarantees alignment to the smallest
 	 * PAGE_SIZE order which is greater than or equal to the requested size.
@@ -86,26 +75,10 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
 	dmah->size = size;
 	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
 
-#ifdef DRM_DEBUG_MEMORY
-	if (dmah->vaddr == NULL) {
-		spin_lock(&drm_mem_lock);
-		++drm_mem_stats[area].fail_count;
-		spin_unlock(&drm_mem_lock);
-		kfree(dmah);
-		return NULL;
-	}
-
-	spin_lock(&drm_mem_lock);
-	++drm_mem_stats[area].succeed_count;
-	drm_mem_stats[area].bytes_allocated += size;
-	drm_ram_used += size;
-	spin_unlock(&drm_mem_lock);
-#else
 	if (dmah->vaddr == NULL) {
 		kfree(dmah);
 		return NULL;
 	}
-#endif
 
 	memset(dmah->vaddr, 0, size);
 
@@ -132,17 +105,8 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 	unsigned long addr;
 	size_t sz;
 #endif
-#ifdef DRM_DEBUG_MEMORY
-	int area = DRM_MEM_DMA;
-	int alloc_count;
-	int free_count;
-#endif
 
-	if (!dmah->vaddr) {
-#ifdef DRM_DEBUG_MEMORY
-		DRM_MEM_ERROR(area, "Attempt to free address 0\n");
-#endif
-	} else {
+	if (dmah->vaddr) {
 		/* XXX - Is virt_to_page() legal for consistent mem? */
 		/* Unreserve */
 		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
@@ -152,21 +116,6 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
 				  dmah->busaddr);
 	}
-
-#ifdef DRM_DEBUG_MEMORY
-	spin_lock(&drm_mem_lock);
-	free_count = ++drm_mem_stats[area].free_count;
-	alloc_count = drm_mem_stats[area].succeed_count;
-	drm_mem_stats[area].bytes_freed += size;
-	drm_ram_used -= size;
-	spin_unlock(&drm_mem_lock);
-	if (free_count > alloc_count) {
-		DRM_MEM_ERROR(area,
-			      "Excess frees: %d frees, %d allocs\n",
-			      free_count, alloc_count);
-	}
-#endif
-
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index bae5391165ac..bbd4b3d1074a 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -105,13 +105,12 @@ int drm_proc_create_files(struct drm_info_list *files, int count,
 		    (dev->driver->driver_features & features) != features)
 			continue;
 
-		tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER);
+		tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
 		ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root);
 		if (!ent) {
 			DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
 				  name, files[i].name);
-			drm_free(tmp, sizeof(struct drm_info_node),
-				 _DRM_DRIVER);
+			kfree(tmp);
 			ret = -1;
 			goto fail;
 		}
@@ -192,8 +191,7 @@ int drm_proc_remove_files(struct drm_info_list *files, int count,
 				remove_proc_entry(files[i].name,
 						  minor->proc_root);
 				list_del(pos);
-				drm_free(tmp, sizeof(struct drm_info_node),
-					 _DRM_DRIVER);
+				kfree(tmp);
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index b2b0f3d41714..c7823c863d4f 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -58,11 +58,9 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
 
 	vfree(entry->virtual);
 
-	drm_free(entry->busaddr,
-		 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
-	drm_free(entry->pagelist,
-		 entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
-	drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+	kfree(entry->busaddr);
+	kfree(entry->pagelist);
+	kfree(entry);
 }
 
 #ifdef _LP64
@@ -84,7 +82,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
 	if (dev->sg)
 		return -EINVAL;
 
-	entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
 
@@ -93,34 +91,27 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
 	DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
 
 	entry->pages = pages;
-	entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
-				    DRM_MEM_PAGES);
+	entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL);
 	if (!entry->pagelist) {
-		drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+		kfree(entry);
 		return -ENOMEM;
 	}
 
 	memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
 
-	entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr),
-				   DRM_MEM_PAGES);
+	entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL);
 	if (!entry->busaddr) {
-		drm_free(entry->pagelist,
-			 entry->pages * sizeof(*entry->pagelist),
-			 DRM_MEM_PAGES);
-		drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+		kfree(entry->pagelist);
+		kfree(entry);
 		return -ENOMEM;
 	}
 	memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
 
 	entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
 	if (!entry->virtual) {
-		drm_free(entry->busaddr,
-			 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
-		drm_free(entry->pagelist,
-			 entry->pages * sizeof(*entry->pagelist),
-			 DRM_MEM_PAGES);
-		drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+		kfree(entry->busaddr);
+		kfree(entry->pagelist);
+		kfree(entry);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
index 926f146390ce..463aed9403db 100644
--- a/drivers/gpu/drm/drm_sman.c
+++ b/drivers/gpu/drm/drm_sman.c
@@ -48,9 +48,7 @@ void drm_sman_takedown(struct drm_sman * sman)
 {
 	drm_ht_remove(&sman->user_hash_tab);
 	drm_ht_remove(&sman->owner_hash_tab);
-	if (sman->mm)
-		drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
-			 DRM_MEM_MM);
+	kfree(sman->mm);
 }
 
 EXPORT_SYMBOL(drm_sman_takedown);
@@ -61,8 +59,9 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
 {
 	int ret = 0;
 
-	sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm),
-						     DRM_MEM_MM);
+	sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
+						  sizeof(*sman->mm),
+						  GFP_KERNEL);
 	if (!sman->mm) {
 		ret = -ENOMEM;
 		goto out;
@@ -78,7 +77,7 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
 
 	drm_ht_remove(&sman->owner_hash_tab);
 out1:
-	drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM);
+	kfree(sman->mm);
 out:
 	return ret;
 }
@@ -110,7 +109,7 @@ static void drm_sman_mm_destroy(void *private)
 {
 	struct drm_mm *mm = (struct drm_mm *) private;
 	drm_mm_takedown(mm);
-	drm_free(mm, sizeof(*mm), DRM_MEM_MM);
+	kfree(mm);
 }
 
 static unsigned long drm_sman_mm_offset(void *private, void *ref)
@@ -130,7 +129,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
 	BUG_ON(manager >= sman->num_managers);
 
 	sman_mm = &sman->mm[manager];
-	mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM);
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
 	if (!mm) {
 		return -ENOMEM;
 	}
@@ -138,7 +137,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
 	ret = drm_mm_init(mm, start, size);
 
 	if (ret) {
-		drm_free(mm, sizeof(*mm), DRM_MEM_MM);
+		kfree(mm);
 		return ret;
 	}
 
@@ -176,7 +175,7 @@ static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
 				      owner_hash);
 	}
 
-	owner_item = drm_calloc(1, sizeof(*owner_item), DRM_MEM_MM);
+	owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
 	if (!owner_item)
 		goto out;
 
@@ -189,7 +188,7 @@ static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
 	return owner_item;
 
 out1:
-	drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
+	kfree(owner_item);
 out:
 	return NULL;
 }
@@ -212,7 +211,7 @@ struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int man
 		return NULL;
 	}
 
-	memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM);
+	memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
 
 	if (!memblock)
 		goto out;
@@ -237,7 +236,7 @@ struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int man
 out2:
 	drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
 out1:
-	drm_free(memblock, sizeof(*memblock), DRM_MEM_MM);
+	kfree(memblock);
 out:
 	sman_mm->free(sman_mm->private, tmp);
 
@@ -253,7 +252,7 @@ static void drm_sman_free(struct drm_memblock_item *item)
 	list_del(&item->owner_list);
 	drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
-	drm_free(item, sizeof(*item), DRM_MEM_MM);
+	kfree(item);
 }
 
 int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
@@ -277,7 +276,7 @@ static void drm_sman_remove_owner(struct drm_sman *sman,
 {
 	list_del(&owner_item->sman_list);
 	drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
-	drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
+	kfree(owner_item);
 }
 
 int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 387a8de1bc7e..155a5bbce680 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -107,7 +107,7 @@ struct drm_master *drm_master_create(struct drm_minor *minor)
 {
 	struct drm_master *master;
 
-	master = drm_calloc(1, sizeof(*master), DRM_MEM_DRIVER);
+	master = kzalloc(sizeof(*master), GFP_KERNEL);
 	if (!master)
 		return NULL;
 
@@ -149,7 +149,7 @@ static void drm_master_destroy(struct kref *kref)
 	}
 
 	if (master->unique) {
-		drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER);
+		kfree(master->unique);
 		master->unique = NULL;
 		master->unique_len = 0;
 	}
@@ -157,12 +157,12 @@ static void drm_master_destroy(struct kref *kref)
 	list_for_each_entry_safe(pt, next, &master->magicfree, head) {
 		list_del(&pt->head);
 		drm_ht_remove_item(&master->magiclist, &pt->hash_item);
-		drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+		kfree(pt);
 	}
 
 	drm_ht_remove(&master->magiclist);
 
-	drm_free(master, sizeof(*master), DRM_MEM_DRIVER);
+	kfree(master);
 }
 
 void drm_master_put(struct drm_master **master)
@@ -390,7 +390,7 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 
 	DRM_DEBUG("\n");
 
-	dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
 
@@ -443,7 +443,7 @@ err_g3:
 err_g2:
 	pci_disable_device(pdev);
 err_g1:
-	drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
+	kfree(dev);
 	return ret;
 }
 EXPORT_SYMBOL(drm_get_dev);
@@ -516,7 +516,7 @@ void drm_put_dev(struct drm_device *dev)
 		dev->driver->unload(dev);
 
 	if (drm_core_has_AGP(dev) && dev->agp) {
-		drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+		kfree(dev->agp);
 		dev->agp = NULL;
 	}
 
@@ -535,10 +535,9 @@ void drm_put_dev(struct drm_device *dev)
 	drm_put_minor(&dev->primary);
 
 	if (dev->devname) {
-		drm_free(dev->devname, strlen(dev->devname) + 1,
-			 DRM_MEM_DRIVER);
+		kfree(dev->devname);
 		dev->devname = NULL;
 	}
-	drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
+	kfree(dev);
 }
 EXPORT_SYMBOL(drm_put_dev);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 22f76567ac7d..7e1fbe5d4779 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -144,14 +144,14 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * Get the page, inc the use count, and return it
 	 */
 	offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
-	page = virt_to_page(__va(agpmem->memory->memory[offset]));
+	page = agpmem->memory->pages[offset];
 	get_page(page);
 	vmf->page = page;
 
 	DRM_DEBUG
 	    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
 	     (unsigned long long)baddr,
-	     __va(agpmem->memory->memory[offset]),
+	     agpmem->memory->pages[offset],
 	     (unsigned long long)offset,
 	     page_count(page));
 	return 0;
@@ -227,7 +227,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 		found_maps++;
 		if (pt->vma == vma) {
 			list_del(&pt->head);
-			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+			kfree(pt);
 		}
 	}
 
@@ -273,7 +273,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 				DRM_ERROR("tried to rmmap GEM object\n");
 				break;
 			}
-			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			kfree(map);
 		}
 	}
 	mutex_unlock(&dev->struct_mutex);
@@ -414,7 +414,7 @@ void drm_vm_open_locked(struct vm_area_struct *vma)
 		  vma->vm_start, vma->vm_end - vma->vm_start);
 	atomic_inc(&dev->vma_count);
 
-	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
+	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
 	if (vma_entry) {
 		vma_entry->vma = vma;
 		vma_entry->pid = current->pid;
@@ -454,7 +454,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
 	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
 		if (pt->vma == vma) {
 			list_del(&pt->head);
-			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+			kfree(pt);
 			break;
 		}
 	}
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index e5de8ea41544..7d1d88cdf2dc 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -227,8 +227,7 @@ static int i810_dma_cleanup(struct drm_device * dev)
 			/* Need to rewrite hardware status page */
 			I810_WRITE(0x02080, 0x1ffff000);
 		}
-		drm_free(dev->dev_private, sizeof(drm_i810_private_t),
-			 DRM_MEM_DRIVER);
+		kfree(dev->dev_private);
 		dev->dev_private = NULL;
 
 		for (i = 0; i < dma->buf_count; i++) {
@@ -439,8 +438,7 @@ static int i810_dma_init(struct drm_device *dev, void *data,
 	switch (init->func) {
 	case I810_INIT_DMA_1_4:
 		DRM_INFO("Using v1.4 init.\n");
-		dev_priv = drm_alloc(sizeof(drm_i810_private_t),
-				     DRM_MEM_DRIVER);
+		dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL);
 		if (dev_priv == NULL)
 			return -ENOMEM;
 		retcode = i810_dma_initialize(dev, dev_priv, init);
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index a86ab30b4620..877bf6cb14a4 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -232,8 +232,7 @@ static int i830_dma_cleanup(struct drm_device * dev)
 		I830_WRITE(0x02080, 0x1ffff000);
 	}
 
-	drm_free(dev->dev_private, sizeof(drm_i830_private_t),
-		 DRM_MEM_DRIVER);
+	kfree(dev->dev_private);
 	dev->dev_private = NULL;
 
 	for (i = 0; i < dma->buf_count; i++) {
@@ -459,8 +458,7 @@ static int i830_dma_init(struct drm_device *dev, void *data,
 
 	switch (init->func) {
 	case I830_INIT_DMA:
-		dev_priv = drm_alloc(sizeof(drm_i830_private_t),
-				     DRM_MEM_DRIVER);
+		dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL);
 		if (dev_priv == NULL)
 			return -ENOMEM;
 		retcode = i830_dma_initialize(dev, dev_priv, init);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1a60626f6803..f112c769d533 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -643,9 +643,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 		return -EINVAL;
 
 	if (batch->num_cliprects) {
-		cliprects = drm_calloc(batch->num_cliprects,
-				       sizeof(struct drm_clip_rect),
-				       DRM_MEM_DRIVER);
+		cliprects = kcalloc(batch->num_cliprects,
+				    sizeof(struct drm_clip_rect),
+				    GFP_KERNEL);
 		if (cliprects == NULL)
 			return -ENOMEM;
 
@@ -664,9 +664,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
 fail_free:
-	drm_free(cliprects,
-		 batch->num_cliprects * sizeof(struct drm_clip_rect),
-		 DRM_MEM_DRIVER);
+	kfree(cliprects);
 
 	return ret;
 }
@@ -692,7 +690,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	if (cmdbuf->num_cliprects < 0)
 		return -EINVAL;
 
-	batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
+	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
 	if (batch_data == NULL)
 		return -ENOMEM;
 
@@ -701,9 +699,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 		goto fail_batch_free;
 
 	if (cmdbuf->num_cliprects) {
-		cliprects = drm_calloc(cmdbuf->num_cliprects,
-				       sizeof(struct drm_clip_rect),
-				       DRM_MEM_DRIVER);
+		cliprects = kcalloc(cmdbuf->num_cliprects,
+				    sizeof(struct drm_clip_rect), GFP_KERNEL);
 		if (cliprects == NULL)
 			goto fail_batch_free;
 
@@ -726,11 +723,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
 fail_clip_free:
-	drm_free(cliprects,
-		 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
-		 DRM_MEM_DRIVER);
+	kfree(cliprects);
 fail_batch_free:
-	drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
+	kfree(batch_data);
 
 	return ret;
 }
@@ -1067,7 +1062,7 @@ int i915_master_create(struct drm_device *dev, struct drm_master *master)
 {
 	struct drm_i915_master_private *master_priv;
 
-	master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
 	if (!master_priv)
 		return -ENOMEM;
 
@@ -1082,7 +1077,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 	if (!master_priv)
 		return;
 
-	drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+	kfree(master_priv);
 
 	master->driver_priv = NULL;
 }
@@ -1111,12 +1106,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev->types[8] = _DRM_STAT_SECONDARY;
 	dev->types[9] = _DRM_STAT_DMA;
 
-	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
+	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
-	memset(dev_priv, 0, sizeof(drm_i915_private_t));
-
 	dev->dev_private = (void *)dev_priv;
 	dev_priv->dev = dev;
 
@@ -1153,13 +1146,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 			    "performance may suffer.\n");
 	}
 
-#ifdef CONFIG_HIGHMEM64G
-	/* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
-	dev_priv->has_gem = 0;
-#else
 	/* enable GEM by default */
 	dev_priv->has_gem = 1;
-#endif
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -1221,7 +1209,7 @@ out_iomapfree:
 out_rmmap:
 	iounmap(dev_priv->regs);
 free_priv:
-	drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
+	kfree(dev_priv);
 	return ret;
 }
 
@@ -1261,8 +1249,7 @@ int i915_driver_unload(struct drm_device *dev)
 		i915_gem_lastclose(dev);
 	}
 
-	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
-		 DRM_MEM_DRIVER);
+	kfree(dev->dev_private);
 
 	return 0;
 }
@@ -1273,7 +1260,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 
 	DRM_DEBUG_DRIVER(I915_DRV, "\n");
 	i915_file_priv = (struct drm_i915_file_private *)
-	    drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
+	    kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
 
 	if (!i915_file_priv)
 		return -ENOMEM;
@@ -1326,7 +1313,7 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
 
-	drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
+	kfree(i915_file_priv);
 }
 
 struct drm_ioctl_desc i915_ioctls[] = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8ef6bcec211b..7a84f04e8439 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -646,6 +646,8 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 uint32_t i915_get_gem_seqno(struct drm_device *dev);
+int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c0ae6bbbd9b5..fd2b8bdffe3f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,6 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					   unsigned alignment);
-static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
@@ -1158,7 +1157,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	/* Need a new fence register? */
 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
 	    obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj, write);
+		ret = i915_gem_object_get_fence_reg(obj);
 		if (ret) {
 			mutex_unlock(&dev->struct_mutex);
 			return VM_FAULT_SIGBUS;
@@ -1208,8 +1207,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 
 	/* Set the object up for mmap'ing */
 	list = &obj->map_list;
-	list->map = drm_calloc(1, sizeof(struct drm_map_list),
-			       DRM_MEM_DRIVER);
+	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
 	if (!list->map)
 		return -ENOMEM;
 
@@ -1249,7 +1247,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 out_free_mm:
 	drm_mm_put_block(list->file_offset_node);
 out_free_list:
-	drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
+	kfree(list->map);
 
 	return ret;
 }
@@ -1271,7 +1269,7 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
 	}
 
 	if (list->map) {
-		drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+		kfree(list->map);
 		list->map = NULL;
 	}
 
@@ -1494,7 +1492,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	if (file_priv != NULL)
 		i915_file_priv = file_priv->driver_priv;
 
-	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
 	if (request == NULL)
 		return 0;
 
@@ -1676,7 +1674,7 @@ i915_gem_retire_requests(struct drm_device *dev)
 
 			list_del(&request->list);
 			list_del(&request->client_list);
-			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+			kfree(request);
 		} else
 			break;
 	}
@@ -2163,13 +2161,11 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 	val |= I830_FENCE_REG_VALID;
 
 	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
-
 }
 
 /**
  * i915_gem_object_get_fence_reg - set up a fence reg for an object
  * @obj: object to map through a fence reg
- * @write: object is about to be written
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
@@ -2180,8 +2176,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
  * It then sets up the reg based on the object's properties: address, pitch
  * and tiling format.
  */
-static int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
+int
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2331,6 +2327,42 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 }
 
 /**
+ * i915_gem_object_put_fence_reg - waits on outstanding fenced access
+ * to the buffer to finish, and then resets the fence register.
+ * @obj: tiled object holding a fence register.
+ *
+ * Zeroes out the fence register itself and clears out the associated
+ * data structures in dev_priv and obj_priv.
+ */
+int
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
+		return 0;
+
+	/* On the i915, GPU access to tiled buffers is via a fence,
+	 * therefore we must wait for any outstanding access to complete
+	 * before clearing the fence.
+	 */
+	if (!IS_I965G(dev)) {
+		int ret;
+
+		i915_gem_object_flush_gpu_write_domain(obj);
+		i915_gem_object_flush_gtt_write_domain(obj);
+		ret = i915_gem_object_wait_rendering(obj);
+		if (ret != 0)
+			return ret;
+	}
+
+	i915_gem_clear_fence_reg (obj);
+
+	return 0;
+}
+
+/**
  * Finds free space in the GTT aperture and binds the object there.
  */
 static int
@@ -2800,8 +2832,7 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 	/* Free the page_cpu_valid mappings which are now stale, whether
 	 * or not we've got I915_GEM_DOMAIN_CPU.
 	 */
-	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
-		 DRM_MEM_DRIVER);
+	kfree(obj_priv->page_cpu_valid);
 	obj_priv->page_cpu_valid = NULL;
 }
 
@@ -2843,8 +2874,8 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	 * newly adding I915_GEM_DOMAIN_CPU
 	 */
 	if (obj_priv->page_cpu_valid == NULL) {
-		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
-						      DRM_MEM_DRIVER);
+		obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
+						   GFP_KERNEL);
 		if (obj_priv->page_cpu_valid == NULL)
 			return -ENOMEM;
 	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
@@ -3267,8 +3298,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	if (args->num_cliprects != 0) {
-		cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
-				       DRM_MEM_DRIVER);
+		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
+				    GFP_KERNEL);
 		if (cliprects == NULL)
 			goto pre_mutex_err;
 
@@ -3521,8 +3552,7 @@ err:
 pre_mutex_err:
 	drm_free_large(object_list);
 	drm_free_large(exec_list);
-	drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
-		 DRM_MEM_DRIVER);
+	kfree(cliprects);
 
 	return ret;
 }
@@ -3550,7 +3580,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 	if (!IS_I965G(dev) &&
 	    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
 	    obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj, true);
+		ret = i915_gem_object_get_fence_reg(obj);
 		if (ret != 0) {
 			if (ret != -EBUSY && ret != -ERESTARTSYS)
 				DRM_ERROR("Failure to install fence: %d\n",
@@ -3739,7 +3769,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv;
 
-	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+	obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
 	if (obj_priv == NULL)
 		return -ENOMEM;
 
@@ -3777,9 +3807,9 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_free_mmap_offset(obj);
 
-	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+	kfree(obj_priv->page_cpu_valid);
 	kfree(obj_priv->bit_17);
-	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
+	kfree(obj->driver_private);
 }
 
 /** Unbinds all objects that are on the given buffer list. */
@@ -4233,7 +4263,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
 	if (dev_priv->mm.phys_objs[id - 1] || !size)
 		return 0;
 
-	phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
 	if (!phys_obj)
 		return -ENOMEM;
 
@@ -4252,7 +4282,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
 
 	return 0;
 kfree_obj:
-	drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+	kfree(phys_obj);
 	return ret;
 }
 
@@ -4312,6 +4342,8 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	}
 	drm_clflush_pages(obj_priv->pages, page_count);
 	drm_agp_chipset_flush(dev);
+
+	i915_gem_object_put_pages(obj);
 out:
 	obj_priv->phys_obj->cur_obj = NULL;
 	obj_priv->phys_obj = NULL;
@@ -4369,6 +4401,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 		kunmap_atomic(src, KM_USER0);
 	}
 
+	i915_gem_object_put_pages(obj);
+
 	return 0;
 out:
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 986f1082c596..28146e405e87 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -104,7 +104,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 		if (obj->name)
 			seq_printf(m, " (name: %d)", obj->name);
 		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-			seq_printf(m, " (fence: %d\n", obj_priv->fence_reg);
+			seq_printf(m, " (fence: %d)\n", obj_priv->fence_reg);
 		seq_printf(m, "\n");
 	}
 
@@ -318,7 +318,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	seq_printf(m, "RingTail : %08x\n", tail);
 	seq_printf(m, "RingMask : %08x\n", mask);
 	seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
 	seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 9a05cadaa4ad..5c1ceec49f5b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -408,7 +408,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 	if (stride & (stride - 1))
 		return false;
 
-	/* We don't handle the aperture area covered by the fence being bigger
+	/* We don't 0handle the aperture area covered by the fence being bigger
 	 * than the object size.
 	 */
 	if (i915_get_fence_size(dev, size) != size)
@@ -417,6 +417,33 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 	return true;
 }
 
+static bool
+i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	if (obj_priv->gtt_space == NULL)
+		return true;
+
+	if (tiling_mode == I915_TILING_NONE)
+		return true;
+
+	if (!IS_I965G(dev)) {
+		if (obj_priv->gtt_offset & (obj->size - 1))
+			return false;
+		if (IS_I9XX(dev)) {
+			if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+				return false;
+		} else {
+			if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+				return false;
+		}
+	}
+
+	return true;
+}
+
 /**
  * Sets the tiling mode of an object, returning the required swizzling of
  * bit 6 of addresses in the object.
@@ -429,6 +456,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
+	int ret = 0;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
@@ -436,14 +464,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	obj_priv = obj->driver_private;
 
 	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
+		mutex_lock(&dev->struct_mutex);
 		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
-	mutex_lock(&dev->struct_mutex);
-
 	if (args->tiling_mode == I915_TILING_NONE) {
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+		args->stride = 0;
 	} else {
 		if (args->tiling_mode == I915_TILING_X)
 			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
@@ -466,32 +495,38 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
 			args->tiling_mode = I915_TILING_NONE;
 			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+			args->stride = 0;
 		}
 	}
-	if (args->tiling_mode != obj_priv->tiling_mode) {
-		int ret;
 
-		/* Unbind the object, as switching tiling means we're
-		 * switching the cache organization due to fencing, probably.
+	mutex_lock(&dev->struct_mutex);
+	if (args->tiling_mode != obj_priv->tiling_mode ||
+	    args->stride != obj_priv->stride) {
+		/* We need to rebind the object if its current allocation
+		 * no longer meets the alignment restrictions for its new
+		 * tiling mode. Otherwise we can just leave it alone, but
+		 * need to ensure that any fence register is cleared.
 		 */
-		ret = i915_gem_object_unbind(obj);
+		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
+			ret = i915_gem_object_unbind(obj);
+		else
+			ret = i915_gem_object_put_fence_reg(obj);
 		if (ret != 0) {
 			WARN(ret != -ERESTARTSYS,
-			     "failed to unbind object for tiling switch");
+			     "failed to reset object for tiling switch");
 			args->tiling_mode = obj_priv->tiling_mode;
-			mutex_unlock(&dev->struct_mutex);
-			drm_gem_object_unreference(obj);
-
-			return ret;
+			args->stride = obj_priv->stride;
+			goto err;
 		}
+
 		obj_priv->tiling_mode = args->tiling_mode;
+		obj_priv->stride = args->stride;
 	}
-	obj_priv->stride = args->stride;
-
+err:
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 
-	return 0;
+	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
index 96e271986d2a..83b7b81bb2b8 100644
--- a/drivers/gpu/drm/i915/i915_mem.c
+++ b/drivers/gpu/drm/i915/i915_mem.c
@@ -94,8 +94,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
 {
 	/* Maybe cut off the start of an existing block */
 	if (start > p->start) {
-		struct mem_block *newblock =
-		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
+		struct mem_block *newblock = kmalloc(sizeof(*newblock),
+						     GFP_KERNEL);
 		if (!newblock)
 			goto out;
 		newblock->start = start;
@@ -111,8 +111,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
 
 	/* Maybe cut off the end of an existing block */
 	if (size < p->size) {
-		struct mem_block *newblock =
-		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
+		struct mem_block *newblock = kmalloc(sizeof(*newblock),
+						     GFP_KERNEL);
 		if (!newblock)
 			goto out;
 		newblock->start = start + size;
@@ -169,7 +169,7 @@ static void free_block(struct mem_block *p)
 		p->size += q->size;
 		p->next = q->next;
 		p->next->prev = p;
-		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+		kfree(q);
 	}
 
 	if (p->prev->file_priv == NULL) {
@@ -177,7 +177,7 @@ static void free_block(struct mem_block *p)
 		q->size += p->size;
 		q->next = p->next;
 		q->next->prev = q;
-		drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
+		kfree(p);
 	}
 }
 
@@ -185,14 +185,14 @@ static void free_block(struct mem_block *p)
  */
 static int init_heap(struct mem_block **heap, int start, int size)
 {
-	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
+	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
 
 	if (!blocks)
 		return -ENOMEM;
 
-	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
+	*heap = kmalloc(sizeof(**heap), GFP_KERNEL);
 	if (!*heap) {
-		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
+		kfree(blocks);
 		return -ENOMEM;
 	}
 
@@ -233,7 +233,7 @@ void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
 			p->size += q->size;
 			p->next = q->next;
 			p->next->prev = p;
-			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+			kfree(q);
 		}
 	}
 }
@@ -250,10 +250,10 @@ void i915_mem_takedown(struct mem_block **heap)
 	for (p = (*heap)->next; p != *heap;) {
 		struct mem_block *q = p;
 		p = p->next;
-		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+		kfree(q);
 	}
 
-	drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
+	kfree(*heap);
 	*heap = NULL;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 754dd22fdd77..cdd126d068a7 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -124,8 +124,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 	entry = &lvds_lfp_data->data[lvds_options->panel_type];
 	dvo_timing = &entry->dvo_timing;
 
-	panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
-				      DRM_MEM_DRIVER);
+	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
 
 	fill_detail_timing_data(panel_fixed_mode, dvo_timing);
 
@@ -156,8 +155,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
 	if (!dvo_timing)
 		return;
 
-	panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
-				      DRM_MEM_DRIVER);
+	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
 
 	if (!panel_fixed_mode)
 		return;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 028f5b66e3d8..3e1c78162119 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -828,19 +828,31 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_object_pin(intel_fb->obj, alignment);
+	ret = i915_gem_object_pin(obj, alignment);
 	if (ret != 0) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
-	ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
+	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 	if (ret != 0) {
-		i915_gem_object_unpin(intel_fb->obj);
+		i915_gem_object_unpin(obj);
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
+	/* Pre-i965 needs to install a fence for tiled scan-out */
+	if (!IS_I965G(dev) &&
+	    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+	    obj_priv->tiling_mode != I915_TILING_NONE) {
+		ret = i915_gem_object_get_fence_reg(obj);
+		if (ret != 0) {
+			i915_gem_object_unpin(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+	}
+
 	dspcntr = I915_READ(dspcntr_reg);
 	/* Mask out pixel format bits in case we change it */
 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -860,7 +872,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		break;
 	default:
 		DRM_ERROR("Unknown color depth\n");
-		i915_gem_object_unpin(intel_fb->obj);
+		i915_gem_object_unpin(obj);
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 8e28e5993df5..1af7d68e3807 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -870,7 +870,11 @@ static int intelfb_single_fb_probe(struct drm_device *dev)
  */
 void intelfb_restore(void)
 {
-	drm_crtc_helper_set_config(&kernelfb_mode);
+	int ret;
+	if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) {
+		printk(KERN_ERR "Failed to restore crtc configuration: %d\n",
+		       ret);
+	}
 }
 
 static void intelfb_restore_work_fn(struct work_struct *ignored)
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 50d7ed70b338..ea68992e4416 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1561,8 +1561,7 @@ intel_tv_destroy (struct drm_connector *connector)
1561 1561
1562 drm_sysfs_connector_remove(connector); 1562 drm_sysfs_connector_remove(connector);
1563 drm_connector_cleanup(connector); 1563 drm_connector_cleanup(connector);
1564 drm_free(intel_output, sizeof(struct intel_output) + sizeof(struct intel_tv_priv), 1564 kfree(intel_output);
1565 DRM_MEM_DRIVER);
1566} 1565}
1567 1566
1568 1567
@@ -1695,8 +1694,8 @@ intel_tv_init(struct drm_device *dev)
1695 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) 1694 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
1696 return; 1695 return;
1697 1696
1698 intel_output = drm_calloc(1, sizeof(struct intel_output) + 1697 intel_output = kzalloc(sizeof(struct intel_output) +
1699 sizeof(struct intel_tv_priv), DRM_MEM_DRIVER); 1698 sizeof(struct intel_tv_priv), GFP_KERNEL);
1700 if (!intel_output) { 1699 if (!intel_output) {
1701 return; 1700 return;
1702 } 1701 }
@@ -1730,8 +1729,8 @@ intel_tv_init(struct drm_device *dev)
1730 connector->doublescan_allowed = false; 1729 connector->doublescan_allowed = false;
1731 1730
1732 /* Create TV properties then attach current values */ 1731 /* Create TV properties then attach current values */
1733 tv_format_names = drm_alloc(sizeof(char *) * NUM_TV_MODES, 1732 tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES,
1734 DRM_MEM_DRIVER); 1733 GFP_KERNEL);
1735 if (!tv_format_names) 1734 if (!tv_format_names)
1736 goto out; 1735 goto out;
1737 for (i = 0; i < NUM_TV_MODES; i++) 1736 for (i = 0; i < NUM_TV_MODES; i++)
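
Editor's note: the allocator changes here, and in the MGA, r128 and radeon hunks below, all follow one mechanical mapping from the removed DRM memory-debugging wrappers to the plain kernel allocators; the DRM_MEM_* accounting argument simply drops out. Roughly (a sketch of the correspondence, not the wrappers' exact old definitions):

        /* drm_alloc(size, area)              ->  kmalloc(size, GFP_KERNEL)
         * drm_calloc(nmemb, size, area)      ->  kzalloc(nmemb * size, GFP_KERNEL)
         * drm_alloc() + memset(p, 0, size)   ->  kzalloc(size, GFP_KERNEL)
         * drm_free(ptr, size, area)          ->  kfree(ptr)
         */

Where a driver zeroed the buffer by hand after drm_alloc(), the conversion folds the memset() into kzalloc(), as in mga_freelist_init() below.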
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 7a6bf9ffc5a3..6c67a02910c8 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -254,23 +254,20 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
254 int i; 254 int i;
255 DRM_DEBUG("count=%d\n", dma->buf_count); 255 DRM_DEBUG("count=%d\n", dma->buf_count);
256 256
257 dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 257 dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
258 if (dev_priv->head == NULL) 258 if (dev_priv->head == NULL)
259 return -ENOMEM; 259 return -ENOMEM;
260 260
261 memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
262 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); 261 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
263 262
264 for (i = 0; i < dma->buf_count; i++) { 263 for (i = 0; i < dma->buf_count; i++) {
265 buf = dma->buflist[i]; 264 buf = dma->buflist[i];
266 buf_priv = buf->dev_private; 265 buf_priv = buf->dev_private;
267 266
268 entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 267 entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
269 if (entry == NULL) 268 if (entry == NULL)
270 return -ENOMEM; 269 return -ENOMEM;
271 270
272 memset(entry, 0, sizeof(drm_mga_freelist_t));
273
274 entry->next = dev_priv->head->next; 271 entry->next = dev_priv->head->next;
275 entry->prev = dev_priv->head; 272 entry->prev = dev_priv->head;
276 SET_AGE(&entry->age, MGA_BUFFER_FREE, 0); 273 SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
@@ -301,7 +298,7 @@ static void mga_freelist_cleanup(struct drm_device * dev)
301 entry = dev_priv->head; 298 entry = dev_priv->head;
302 while (entry) { 299 while (entry) {
303 next = entry->next; 300 next = entry->next;
304 drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 301 kfree(entry);
305 entry = next; 302 entry = next;
306 } 303 }
307 304
@@ -399,12 +396,11 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
399 drm_mga_private_t *dev_priv; 396 drm_mga_private_t *dev_priv;
400 int ret; 397 int ret;
401 398
402 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 399 dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
403 if (!dev_priv) 400 if (!dev_priv)
404 return -ENOMEM; 401 return -ENOMEM;
405 402
406 dev->dev_private = (void *)dev_priv; 403 dev->dev_private = (void *)dev_priv;
407 memset(dev_priv, 0, sizeof(drm_mga_private_t));
408 404
409 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; 405 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
410 dev_priv->chipset = flags; 406 dev_priv->chipset = flags;
@@ -1150,7 +1146,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
1150 */ 1146 */
1151int mga_driver_unload(struct drm_device * dev) 1147int mga_driver_unload(struct drm_device * dev)
1152{ 1148{
1153 drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 1149 kfree(dev->dev_private);
1154 dev->dev_private = NULL; 1150 dev->dev_private = NULL;
1155 1151
1156 return 0; 1152 return 0;
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index 077c0455a6b9..c75fd3564040 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -353,12 +353,10 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
353 353
354 DRM_DEBUG("\n"); 354 DRM_DEBUG("\n");
355 355
356 dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); 356 dev_priv = kzalloc(sizeof(drm_r128_private_t), GFP_KERNEL);
357 if (dev_priv == NULL) 357 if (dev_priv == NULL)
358 return -ENOMEM; 358 return -ENOMEM;
359 359
360 memset(dev_priv, 0, sizeof(drm_r128_private_t));
361
362 dev_priv->is_pci = init->is_pci; 360 dev_priv->is_pci = init->is_pci;
363 361
364 if (dev_priv->is_pci && !dev->sg) { 362 if (dev_priv->is_pci && !dev->sg) {
@@ -619,8 +617,7 @@ int r128_do_cleanup_cce(struct drm_device * dev)
619 ("failed to cleanup PCI GART!\n"); 617 ("failed to cleanup PCI GART!\n");
620 } 618 }
621 619
622 drm_free(dev->dev_private, sizeof(drm_r128_private_t), 620 kfree(dev->dev_private);
623 DRM_MEM_DRIVER);
624 dev->dev_private = NULL; 621 dev->dev_private = NULL;
625 } 622 }
626 623
@@ -768,18 +765,17 @@ static int r128_freelist_init(struct drm_device * dev)
768 drm_r128_freelist_t *entry; 765 drm_r128_freelist_t *entry;
769 int i; 766 int i;
770 767
771 dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 768 dev_priv->head = kzalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL);
772 if (dev_priv->head == NULL) 769 if (dev_priv->head == NULL)
773 return -ENOMEM; 770 return -ENOMEM;
774 771
775 memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
776 dev_priv->head->age = R128_BUFFER_USED; 772 dev_priv->head->age = R128_BUFFER_USED;
777 773
778 for (i = 0; i < dma->buf_count; i++) { 774 for (i = 0; i < dma->buf_count; i++) {
779 buf = dma->buflist[i]; 775 buf = dma->buflist[i];
780 buf_priv = buf->dev_private; 776 buf_priv = buf->dev_private;
781 777
782 entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 778 entry = kmalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL);
783 if (!entry) 779 if (!entry)
784 return -ENOMEM; 780 return -ENOMEM;
785 781
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index f7a5b5740764..026a48c95c8f 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -910,24 +910,24 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
910 } 910 }
911 911
912 buffer_size = depth->n * sizeof(u32); 912 buffer_size = depth->n * sizeof(u32);
913 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); 913 buffer = kmalloc(buffer_size, GFP_KERNEL);
914 if (buffer == NULL) 914 if (buffer == NULL)
915 return -ENOMEM; 915 return -ENOMEM;
916 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 916 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
917 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 917 kfree(buffer);
918 return -EFAULT; 918 return -EFAULT;
919 } 919 }
920 920
921 mask_size = depth->n * sizeof(u8); 921 mask_size = depth->n * sizeof(u8);
922 if (depth->mask) { 922 if (depth->mask) {
923 mask = drm_alloc(mask_size, DRM_MEM_BUFS); 923 mask = kmalloc(mask_size, GFP_KERNEL);
924 if (mask == NULL) { 924 if (mask == NULL) {
925 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 925 kfree(buffer);
926 return -ENOMEM; 926 return -ENOMEM;
927 } 927 }
928 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 928 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
929 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 929 kfree(buffer);
930 drm_free(mask, mask_size, DRM_MEM_BUFS); 930 kfree(mask);
931 return -EFAULT; 931 return -EFAULT;
932 } 932 }
933 933
@@ -954,7 +954,7 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
954 } 954 }
955 } 955 }
956 956
957 drm_free(mask, mask_size, DRM_MEM_BUFS); 957 kfree(mask);
958 } else { 958 } else {
959 for (i = 0; i < count; i++, x++) { 959 for (i = 0; i < count; i++, x++) {
960 BEGIN_RING(6); 960 BEGIN_RING(6);
@@ -978,7 +978,7 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
978 } 978 }
979 } 979 }
980 980
981 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 981 kfree(buffer);
982 982
983 return 0; 983 return 0;
984} 984}
@@ -1000,54 +1000,54 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1000 1000
1001 xbuf_size = count * sizeof(*x); 1001 xbuf_size = count * sizeof(*x);
1002 ybuf_size = count * sizeof(*y); 1002 ybuf_size = count * sizeof(*y);
1003 x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1003 x = kmalloc(xbuf_size, GFP_KERNEL);
1004 if (x == NULL) { 1004 if (x == NULL) {
1005 return -ENOMEM; 1005 return -ENOMEM;
1006 } 1006 }
1007 y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1007 y = kmalloc(ybuf_size, GFP_KERNEL);
1008 if (y == NULL) { 1008 if (y == NULL) {
1009 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1009 kfree(x);
1010 return -ENOMEM; 1010 return -ENOMEM;
1011 } 1011 }
1012 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1012 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
1013 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1013 kfree(x);
1014 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1014 kfree(y);
1015 return -EFAULT; 1015 return -EFAULT;
1016 } 1016 }
1017 if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { 1017 if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
1018 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1018 kfree(x);
1019 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1019 kfree(y);
1020 return -EFAULT; 1020 return -EFAULT;
1021 } 1021 }
1022 1022
1023 buffer_size = depth->n * sizeof(u32); 1023 buffer_size = depth->n * sizeof(u32);
1024 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); 1024 buffer = kmalloc(buffer_size, GFP_KERNEL);
1025 if (buffer == NULL) { 1025 if (buffer == NULL) {
1026 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1026 kfree(x);
1027 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1027 kfree(y);
1028 return -ENOMEM; 1028 return -ENOMEM;
1029 } 1029 }
1030 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 1030 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
1031 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1031 kfree(x);
1032 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1032 kfree(y);
1033 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1033 kfree(buffer);
1034 return -EFAULT; 1034 return -EFAULT;
1035 } 1035 }
1036 1036
1037 if (depth->mask) { 1037 if (depth->mask) {
1038 mask_size = depth->n * sizeof(u8); 1038 mask_size = depth->n * sizeof(u8);
1039 mask = drm_alloc(mask_size, DRM_MEM_BUFS); 1039 mask = kmalloc(mask_size, GFP_KERNEL);
1040 if (mask == NULL) { 1040 if (mask == NULL) {
1041 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1041 kfree(x);
1042 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1042 kfree(y);
1043 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1043 kfree(buffer);
1044 return -ENOMEM; 1044 return -ENOMEM;
1045 } 1045 }
1046 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 1046 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
1047 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1047 kfree(x);
1048 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1048 kfree(y);
1049 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1049 kfree(buffer);
1050 drm_free(mask, mask_size, DRM_MEM_BUFS); 1050 kfree(mask);
1051 return -EFAULT; 1051 return -EFAULT;
1052 } 1052 }
1053 1053
@@ -1074,7 +1074,7 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1074 } 1074 }
1075 } 1075 }
1076 1076
1077 drm_free(mask, mask_size, DRM_MEM_BUFS); 1077 kfree(mask);
1078 } else { 1078 } else {
1079 for (i = 0; i < count; i++) { 1079 for (i = 0; i < count; i++) {
1080 BEGIN_RING(6); 1080 BEGIN_RING(6);
@@ -1098,9 +1098,9 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1098 } 1098 }
1099 } 1099 }
1100 1100
1101 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1101 kfree(x);
1102 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1102 kfree(y);
1103 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1103 kfree(buffer);
1104 1104
1105 return 0; 1105 return 0;
1106} 1106}
@@ -1167,23 +1167,23 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
1167 1167
1168 xbuf_size = count * sizeof(*x); 1168 xbuf_size = count * sizeof(*x);
1169 ybuf_size = count * sizeof(*y); 1169 ybuf_size = count * sizeof(*y);
1170 x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1170 x = kmalloc(xbuf_size, GFP_KERNEL);
1171 if (x == NULL) { 1171 if (x == NULL) {
1172 return -ENOMEM; 1172 return -ENOMEM;
1173 } 1173 }
1174 y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1174 y = kmalloc(ybuf_size, GFP_KERNEL);
1175 if (y == NULL) { 1175 if (y == NULL) {
1176 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1176 kfree(x);
1177 return -ENOMEM; 1177 return -ENOMEM;
1178 } 1178 }
1179 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1179 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
1180 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1180 kfree(x);
1181 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1181 kfree(y);
1182 return -EFAULT; 1182 return -EFAULT;
1183 } 1183 }
1184 if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { 1184 if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
1185 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1185 kfree(x);
1186 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1186 kfree(y);
1187 return -EFAULT; 1187 return -EFAULT;
1188 } 1188 }
1189 1189
@@ -1210,8 +1210,8 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
1210 ADVANCE_RING(); 1210 ADVANCE_RING();
1211 } 1211 }
1212 1212
1213 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1213 kfree(x);
1214 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1214 kfree(y);
1215 1215
1216 return 0; 1216 return 0;
1217} 1217}
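
Editor's note: each failure in the converted functions above still frees every earlier buffer by hand, so the kfree() ladders grow with each allocation. Since kfree(NULL) is a no-op, the usual kernel idiom collapses them into one exit label; a hedged rework of the allocation phase of r128_cce_dispatch_write_pixels() (sketch only, same variables as the hunk):

        int ret = -ENOMEM;

        x = kmalloc(xbuf_size, GFP_KERNEL);
        y = kmalloc(ybuf_size, GFP_KERNEL);
        buffer = kmalloc(buffer_size, GFP_KERNEL);
        if (!x || !y || !buffer)
                goto out;
        ret = -EFAULT;
        if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size) ||
            DRM_COPY_FROM_USER(y, depth->y, ybuf_size) ||
            DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size))
                goto out;
        /* ... ring emission as before, then ret = 0 ... */
out:
        kfree(x);       /* kfree(NULL) is safe */
        kfree(y);
        kfree(buffer);
        return ret;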
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5225f5be7ea7..c550932a108f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -551,6 +551,9 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
551 /* cp setup */ 551 /* cp setup */
552 WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); 552 WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
553 WREG32(RADEON_CP_RB_CNTL, 553 WREG32(RADEON_CP_RB_CNTL,
554#ifdef __BIG_ENDIAN
555 RADEON_BUF_SWAP_32BIT |
556#endif
554 REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | 557 REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
555 REG_SET(RADEON_RB_BLKSZ, rb_blksz) | 558 REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
556 REG_SET(RADEON_MAX_FETCH, max_fetch) | 559 REG_SET(RADEON_MAX_FETCH, max_fetch) |
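
Editor's note: RADEON_BUF_SWAP_32BIT asks the CP to byte-swap each 32-bit word it fetches from the ring, so big-endian hosts can keep writing native-endian values. The alternative, sketched below for comparison only, would be to convert on every CPU-side write instead:

        /* Not what the driver does; shown to motivate the CP-side swap */
        static inline void ring_write_le(__le32 *ring, unsigned idx, u32 v)
        {
                ring[idx] = cpu_to_le32(v);     /* no-op on little-endian hosts */
        }

Letting the CP swap keeps the hot ring-write path free of conversions.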
@@ -644,7 +647,7 @@ int r100_cp_reset(struct radeon_device *rdev)
644 */ 647 */
645int r100_cs_parse_packet0(struct radeon_cs_parser *p, 648int r100_cs_parse_packet0(struct radeon_cs_parser *p,
646 struct radeon_cs_packet *pkt, 649 struct radeon_cs_packet *pkt,
647 unsigned *auth, unsigned n, 650 const unsigned *auth, unsigned n,
648 radeon_packet0_check_t check) 651 radeon_packet0_check_t check)
649{ 652{
650 unsigned reg; 653 unsigned reg;
@@ -654,6 +657,10 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
654 657
655 idx = pkt->idx + 1; 658 idx = pkt->idx + 1;
656 reg = pkt->reg; 659 reg = pkt->reg;
 660 /* Check that the register falls into the register range
 661 * determined by the number of entries (n) in the
 662 * safe register bitmap.
 663 */
657 if (pkt->one_reg_wr) { 664 if (pkt->one_reg_wr) {
658 if ((reg >> 7) > n) { 665 if ((reg >> 7) > n) {
659 return -EINVAL; 666 return -EINVAL;
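
Editor's note: expanded, the bound check works on a bitmap where registers sit 4 bytes apart, so each 32-bit word covers 128 bytes of register space, which is where reg >> 7 comes from. A sketch of the assumed addressing (note the parser tests (reg >> 7) > n, where >= n would be the tighter bound for an n-word table):

        static bool reg_is_safe(const unsigned *bm, unsigned n, unsigned reg)
        {
                unsigned word = reg >> 7;          /* (reg >> 2) / 32 */
                unsigned bit = (reg >> 2) & 31;    /* register index within word */

                if (word >= n)
                        return false;
                return (bm[word] & (1u << bit)) != 0;
        }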
@@ -683,24 +690,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
683 return 0; 690 return 0;
684} 691}
685 692
686int r100_cs_parse_packet3(struct radeon_cs_parser *p,
687 struct radeon_cs_packet *pkt,
688 unsigned *auth, unsigned n,
689 radeon_packet3_check_t check)
690{
691 unsigned i, m;
692
693 if ((pkt->opcode >> 5) > n) {
694 return -EINVAL;
695 }
696 i = pkt->opcode >> 5;
697 m = 1 << (pkt->opcode & 31);
698 if (auth[i] & m) {
699 return check(p, pkt);
700 }
701 return 0;
702}
703
704void r100_cs_dump_packet(struct radeon_cs_parser *p, 693void r100_cs_dump_packet(struct radeon_cs_parser *p,
705 struct radeon_cs_packet *pkt) 694 struct radeon_cs_packet *pkt)
706{ 695{
@@ -901,6 +890,25 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
901 return 0; 890 return 0;
902} 891}
903 892
893int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
894 struct radeon_cs_packet *pkt,
895 struct radeon_object *robj)
896{
897 struct radeon_cs_chunk *ib_chunk;
898 unsigned idx;
899
900 ib_chunk = &p->chunks[p->chunk_ib_idx];
901 idx = pkt->idx + 1;
902 if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
903 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
904 "(need %u have %lu) !\n",
905 ib_chunk->kdata[idx+2] + 1,
906 radeon_object_size(robj));
907 return -EINVAL;
908 }
909 return 0;
910}
911
904static int r100_packet3_check(struct radeon_cs_parser *p, 912static int r100_packet3_check(struct radeon_cs_parser *p,
905 struct radeon_cs_packet *pkt) 913 struct radeon_cs_packet *pkt)
906{ 914{
@@ -954,6 +962,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
954 return r; 962 return r;
955 } 963 }
956 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 964 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
965 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
966 if (r) {
967 return r;
968 }
957 break; 969 break;
958 case 0x23: 970 case 0x23:
959 /* FIXME: cleanup */ 971 /* FIXME: cleanup */
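
Editor's note: the new call site feeds PACKET3_INDX_BUFFER through r100_cs_track_check_pkt3_indx_buffer(), which compares the packet's third body dword, plus one, against the bound object's size; from the check itself, kdata[idx+2] evidently holds the last byte offset the index fetch may touch (an inference, not a documented field layout). Worked through:

        /* robj is 4096 bytes, kdata[idx+2] = 4095:
         *      4095 + 1 = 4096, not > 4096     -> accepted, fetch stays in bounds
         * robj is 4096 bytes, kdata[idx+2] = 4096:
         *      4096 + 1 = 4097 > 4096          -> -EINVAL, fetch would overrun
         */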
@@ -999,18 +1011,18 @@ int r100_cs_parse(struct radeon_cs_parser *p)
999 } 1011 }
1000 p->idx += pkt.count + 2; 1012 p->idx += pkt.count + 2;
1001 switch (pkt.type) { 1013 switch (pkt.type) {
1002 case PACKET_TYPE0: 1014 case PACKET_TYPE0:
1003 r = r100_packet0_check(p, &pkt); 1015 r = r100_packet0_check(p, &pkt);
1004 break; 1016 break;
1005 case PACKET_TYPE2: 1017 case PACKET_TYPE2:
1006 break; 1018 break;
1007 case PACKET_TYPE3: 1019 case PACKET_TYPE3:
1008 r = r100_packet3_check(p, &pkt); 1020 r = r100_packet3_check(p, &pkt);
1009 break; 1021 break;
1010 default: 1022 default:
1011 DRM_ERROR("Unknown packet type %d !\n", 1023 DRM_ERROR("Unknown packet type %d !\n",
1012 pkt.type); 1024 pkt.type);
1013 return -EINVAL; 1025 return -EINVAL;
1014 } 1026 }
1015 if (r) { 1027 if (r) {
1016 return r; 1028 return r;
@@ -1267,12 +1279,6 @@ void r100_vram_info(struct radeon_device *rdev)
1267 1279
1268 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 1280 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1269 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 1281 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1270 if (rdev->mc.aper_size > rdev->mc.vram_size) {
1271 /* Why does some hw doesn't have CONFIG_MEMSIZE properly
1272 * setup ? */
1273 rdev->mc.vram_size = rdev->mc.aper_size;
1274 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
1275 }
1276} 1282}
1277 1283
1278 1284
@@ -1352,6 +1358,11 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1352 } 1358 }
1353} 1359}
1354 1360
1361int r100_init(struct radeon_device *rdev)
1362{
1363 return 0;
1364}
1365
1355/* 1366/*
1356 * Debugfs info 1367 * Debugfs info
1357 */ 1368 */
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index f5870a099d4f..e2ed5bc08170 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -48,14 +48,13 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
48 struct radeon_cs_reloc **cs_reloc); 48 struct radeon_cs_reloc **cs_reloc);
49int r100_cs_parse_packet0(struct radeon_cs_parser *p, 49int r100_cs_parse_packet0(struct radeon_cs_parser *p,
50 struct radeon_cs_packet *pkt, 50 struct radeon_cs_packet *pkt,
51 unsigned *auth, unsigned n, 51 const unsigned *auth, unsigned n,
52 radeon_packet0_check_t check); 52 radeon_packet0_check_t check);
53int r100_cs_parse_packet3(struct radeon_cs_parser *p,
54 struct radeon_cs_packet *pkt,
55 unsigned *auth, unsigned n,
56 radeon_packet3_check_t check);
57void r100_cs_dump_packet(struct radeon_cs_parser *p, 53void r100_cs_dump_packet(struct radeon_cs_parser *p,
58 struct radeon_cs_packet *pkt); 54 struct radeon_cs_packet *pkt);
55int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
56 struct radeon_cs_packet *pkt,
57 struct radeon_object *robj);
59 58
 60/* This file gathers functions specific to: 59/* This file gathers functions specific to:
61 * r300,r350,rv350,rv370,rv380 60 * r300,r350,rv350,rv370,rv380
@@ -288,7 +287,7 @@ int r300_copy_dma(struct radeon_device *rdev,
288 return r; 287 return r;
289 } 288 }
290 /* Must wait for 2D idle & clean before DMA or hangs might happen */ 289 /* Must wait for 2D idle & clean before DMA or hangs might happen */
291 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 290 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
292 radeon_ring_write(rdev, (1 << 16)); 291 radeon_ring_write(rdev, (1 << 16));
293 for (i = 0; i < num_loops; i++) { 292 for (i = 0; i < num_loops; i++) {
294 cur_size = size; 293 cur_size = size;
@@ -319,7 +318,7 @@ void r300_ring_start(struct radeon_device *rdev)
319 318
320 /* Sub pixel 1/12 so we can have 4K rendering according to doc */ 319 /* Sub pixel 1/12 so we can have 4K rendering according to doc */
321 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); 320 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
322 switch (rdev->num_gb_pipes) { 321 switch(rdev->num_gb_pipes) {
323 case 2: 322 case 2:
324 gb_tile_config |= R300_PIPE_COUNT_R300; 323 gb_tile_config |= R300_PIPE_COUNT_R300;
325 break; 324 break;
@@ -452,8 +451,8 @@ void r300_gpu_init(struct radeon_device *rdev)
452 case 4: 451 case 4:
453 gb_tile_config |= R300_PIPE_COUNT_R420; 452 gb_tile_config |= R300_PIPE_COUNT_R420;
454 break; 453 break;
455 case 1:
456 default: 454 default:
455 case 1:
457 gb_tile_config |= R300_PIPE_COUNT_RV350; 456 gb_tile_config |= R300_PIPE_COUNT_RV350;
458 break; 457 break;
459 } 458 }
@@ -725,18 +724,120 @@ struct r300_cs_track_cb {
725 unsigned offset; 724 unsigned offset;
726}; 725};
727 726
727struct r300_cs_track_array {
728 struct radeon_object *robj;
729 unsigned esize;
730};
731
732struct r300_cs_track_texture {
733 struct radeon_object *robj;
734 unsigned pitch;
735 unsigned width;
736 unsigned height;
737 unsigned num_levels;
738 unsigned cpp;
739 unsigned tex_coord_type;
740 unsigned txdepth;
741 unsigned width_11;
742 unsigned height_11;
743 bool use_pitch;
744 bool enabled;
745 bool roundup_w;
746 bool roundup_h;
747};
748
728struct r300_cs_track { 749struct r300_cs_track {
729 unsigned num_cb; 750 unsigned num_cb;
730 unsigned maxy; 751 unsigned maxy;
731 struct r300_cs_track_cb cb[4]; 752 unsigned vtx_size;
732 struct r300_cs_track_cb zb; 753 unsigned vap_vf_cntl;
733 bool z_enabled; 754 unsigned immd_dwords;
755 unsigned num_arrays;
756 unsigned max_indx;
757 struct r300_cs_track_array arrays[11];
758 struct r300_cs_track_cb cb[4];
759 struct r300_cs_track_cb zb;
760 struct r300_cs_track_texture textures[16];
761 bool z_enabled;
734}; 762};
735 763
764static inline void r300_cs_track_texture_print(struct r300_cs_track_texture *t)
765{
766 DRM_ERROR("pitch %d\n", t->pitch);
767 DRM_ERROR("width %d\n", t->width);
768 DRM_ERROR("height %d\n", t->height);
769 DRM_ERROR("num levels %d\n", t->num_levels);
770 DRM_ERROR("depth %d\n", t->txdepth);
771 DRM_ERROR("bpp %d\n", t->cpp);
772 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
773 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
774 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
775}
776
777static inline int r300_cs_track_texture_check(struct radeon_device *rdev,
778 struct r300_cs_track *track)
779{
780 struct radeon_object *robj;
781 unsigned long size;
782 unsigned u, i, w, h;
783
784 for (u = 0; u < 16; u++) {
785 if (!track->textures[u].enabled)
786 continue;
787 robj = track->textures[u].robj;
788 if (robj == NULL) {
789 DRM_ERROR("No texture bound to unit %u\n", u);
790 return -EINVAL;
791 }
792 size = 0;
793 for (i = 0; i <= track->textures[u].num_levels; i++) {
794 if (track->textures[u].use_pitch) {
795 w = track->textures[u].pitch / (1 << i);
796 } else {
797 w = track->textures[u].width / (1 << i);
798 if (rdev->family >= CHIP_RV515)
799 w |= track->textures[u].width_11;
800 if (track->textures[u].roundup_w)
801 w = roundup_pow_of_two(w);
802 }
803 h = track->textures[u].height / (1 << i);
804 if (rdev->family >= CHIP_RV515)
805 h |= track->textures[u].height_11;
806 if (track->textures[u].roundup_h)
807 h = roundup_pow_of_two(h);
808 size += w * h;
809 }
810 size *= track->textures[u].cpp;
811 switch (track->textures[u].tex_coord_type) {
812 case 0:
813 break;
814 case 1:
815 size *= (1 << track->textures[u].txdepth);
816 break;
817 case 2:
818 size *= 6;
819 break;
820 default:
821 DRM_ERROR("Invalid texture coordinate type %u for unit "
822 "%u\n", track->textures[u].tex_coord_type, u);
823 return -EINVAL;
824 }
825 if (size > radeon_object_size(robj)) {
826 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
827 "%lu\n", u, size, radeon_object_size(robj));
828 r300_cs_track_texture_print(&track->textures[u]);
829 return -EINVAL;
830 }
831 }
832 return 0;
833}
834
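
Editor's note: r300_cs_track_texture_check() sums the whole mip chain of every enabled unit, then scales by texel size and coordinate type (x6 for cube maps, x(1 << txdepth) for 3D). A worked instance with illustrative numbers:

        /* 128x128 cube map, num_levels = 1 (so levels 0 and 1), cpp = 4,
         * width taken from TX_FORMAT0 rather than the pitch override:
         *      level 0: 128 * 128 = 16384 texels
         *      level 1:  64 *  64 =  4096 texels
         *      (16384 + 4096) * 4 bytes * 6 faces = 491520 bytes (480 KiB)
         * A bound radeon_object smaller than that is rejected with -EINVAL.
         */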
736int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track) 835int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
737{ 836{
738 unsigned i; 837 unsigned i;
739 unsigned long size; 838 unsigned long size;
839 unsigned prim_walk;
840 unsigned nverts;
740 841
741 for (i = 0; i < track->num_cb; i++) { 842 for (i = 0; i < track->num_cb; i++) {
742 if (track->cb[i].robj == NULL) { 843 if (track->cb[i].robj == NULL) {
@@ -769,7 +870,59 @@ int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
769 return -EINVAL; 870 return -EINVAL;
770 } 871 }
771 } 872 }
772 return 0; 873 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
874 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
875 switch (prim_walk) {
876 case 1:
877 for (i = 0; i < track->num_arrays; i++) {
878 size = track->arrays[i].esize * track->max_indx * 4;
879 if (track->arrays[i].robj == NULL) {
880 DRM_ERROR("(PW %u) Vertex array %u no buffer "
881 "bound\n", prim_walk, i);
882 return -EINVAL;
883 }
884 if (size > radeon_object_size(track->arrays[i].robj)) {
885 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
886 "have %lu dwords\n", prim_walk, i,
887 size >> 2,
888 radeon_object_size(track->arrays[i].robj) >> 2);
889 DRM_ERROR("Max indices %u\n", track->max_indx);
890 return -EINVAL;
891 }
892 }
893 break;
894 case 2:
895 for (i = 0; i < track->num_arrays; i++) {
896 size = track->arrays[i].esize * (nverts - 1) * 4;
897 if (track->arrays[i].robj == NULL) {
898 DRM_ERROR("(PW %u) Vertex array %u no buffer "
899 "bound\n", prim_walk, i);
900 return -EINVAL;
901 }
902 if (size > radeon_object_size(track->arrays[i].robj)) {
903 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
904 "have %lu dwords\n", prim_walk, i, size >> 2,
905 radeon_object_size(track->arrays[i].robj) >> 2);
906 return -EINVAL;
907 }
908 }
909 break;
910 case 3:
911 size = track->vtx_size * nverts;
912 if (size != track->immd_dwords) {
913 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
914 track->immd_dwords, size);
915 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
916 nverts, track->vtx_size);
917 return -EINVAL;
918 }
919 break;
920 default:
921 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
922 prim_walk);
923 return -EINVAL;
924 }
925 return r300_cs_track_texture_check(rdev, track);
773} 926}
774 927
775static inline void r300_cs_track_clear(struct r300_cs_track *track) 928static inline void r300_cs_track_clear(struct r300_cs_track *track)
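
Editor's note: the vertex-array half of the check above sizes each bound array from its element size and from how the draw walks it: an indexed draw (PRIM_WALK 1) can reach up to max_indx, a vertex list (PRIM_WALK 2) consumes nverts in order. The units are an assumption read off the code, esize apparently counting dwords per vertex, hence the final multiply by 4:

        /* PRIM_WALK 1 (indexed):  bytes = esize * max_indx * 4
         * PRIM_WALK 2 (list):     bytes = esize * (nverts - 1) * 4
         * either must fit in radeon_object_size(arrays[i].robj)
         */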
@@ -789,9 +942,33 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track)
789 track->zb.pitch = 8192; 942 track->zb.pitch = 8192;
790 track->zb.cpp = 4; 943 track->zb.cpp = 4;
791 track->zb.offset = 0; 944 track->zb.offset = 0;
945 track->vtx_size = 0x7F;
946 track->immd_dwords = 0xFFFFFFFFUL;
947 track->num_arrays = 11;
948 track->max_indx = 0x00FFFFFFUL;
949 for (i = 0; i < track->num_arrays; i++) {
950 track->arrays[i].robj = NULL;
951 track->arrays[i].esize = 0x7F;
952 }
953 for (i = 0; i < 16; i++) {
954 track->textures[i].pitch = 16536;
955 track->textures[i].width = 16536;
956 track->textures[i].height = 16536;
957 track->textures[i].width_11 = 1 << 11;
958 track->textures[i].height_11 = 1 << 11;
959 track->textures[i].num_levels = 12;
960 track->textures[i].txdepth = 16;
961 track->textures[i].cpp = 64;
962 track->textures[i].tex_coord_type = 1;
963 track->textures[i].robj = NULL;
 964 /* CS IB emission code makes sure texture units are disabled */
965 track->textures[i].enabled = false;
966 track->textures[i].roundup_w = true;
967 track->textures[i].roundup_h = true;
968 }
792} 969}
793 970
794static unsigned r300_auth_reg[] = { 971static const unsigned r300_reg_safe_bm[159] = {
795 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 972 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
796 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, 973 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
797 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 974 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
@@ -808,7 +985,7 @@ static unsigned r300_auth_reg[] = {
808 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 985 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
809 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F, 986 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
810 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 987 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
811 0xFFFFFFFF, 0xFFFFCFCC, 0xF00E9FFF, 0x007C0000, 988 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
812 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF, 989 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
813 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 990 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
814 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 991 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
@@ -824,9 +1001,9 @@ static unsigned r300_auth_reg[] = {
824 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 1001 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
825 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 1002 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
826 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 1003 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
827 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFC, 0xFFFFFFFF, 1004 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
828 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF, 1005 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
829 0x00000000, 0x00000000, 0xFFFF0000, 0x00000000, 1006 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
830 0x00000000, 0x0000C100, 0x00000000, 0x00000000, 1007 0x00000000, 0x0000C100, 0x00000000, 0x00000000,
831 0x00000000, 0x00000000, 0x00000000, 0x00000000, 1008 0x00000000, 0x00000000, 0x00000000, 0x00000000,
832 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF, 1009 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
@@ -848,8 +1025,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
848 1025
849 ib = p->ib->ptr; 1026 ib = p->ib->ptr;
850 ib_chunk = &p->chunks[p->chunk_ib_idx]; 1027 ib_chunk = &p->chunks[p->chunk_ib_idx];
851 track = (struct r300_cs_track *)p->track; 1028 track = (struct r300_cs_track*)p->track;
852 switch (reg) { 1029 switch(reg) {
853 case RADEON_DST_PITCH_OFFSET: 1030 case RADEON_DST_PITCH_OFFSET:
854 case RADEON_SRC_PITCH_OFFSET: 1031 case RADEON_SRC_PITCH_OFFSET:
855 r = r100_cs_packet_next_reloc(p, &reloc); 1032 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -907,6 +1084,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
907 case R300_TX_OFFSET_0+52: 1084 case R300_TX_OFFSET_0+52:
908 case R300_TX_OFFSET_0+56: 1085 case R300_TX_OFFSET_0+56:
909 case R300_TX_OFFSET_0+60: 1086 case R300_TX_OFFSET_0+60:
1087 i = (reg - R300_TX_OFFSET_0) >> 2;
910 r = r100_cs_packet_next_reloc(p, &reloc); 1088 r = r100_cs_packet_next_reloc(p, &reloc);
911 if (r) { 1089 if (r) {
912 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1090 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -915,11 +1093,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
915 return r; 1093 return r;
916 } 1094 }
917 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1095 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1096 track->textures[i].robj = reloc->robj;
918 break; 1097 break;
919 /* Tracked registers */ 1098 /* Tracked registers */
1099 case 0x2084:
1100 /* VAP_VF_CNTL */
1101 track->vap_vf_cntl = ib_chunk->kdata[idx];
1102 break;
1103 case 0x20B4:
1104 /* VAP_VTX_SIZE */
1105 track->vtx_size = ib_chunk->kdata[idx] & 0x7F;
1106 break;
1107 case 0x2134:
1108 /* VAP_VF_MAX_VTX_INDX */
1109 track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
1110 break;
920 case 0x43E4: 1111 case 0x43E4:
921 /* SC_SCISSOR1 */ 1112 /* SC_SCISSOR1 */
922
923 track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1; 1113 track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
924 if (p->rdev->family < CHIP_RV515) { 1114 if (p->rdev->family < CHIP_RV515) {
925 track->maxy -= 1440; 1115 track->maxy -= 1440;
@@ -994,8 +1184,166 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
994 /* ZB_DEPTHPITCH */ 1184 /* ZB_DEPTHPITCH */
995 track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; 1185 track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
996 break; 1186 break;
1187 case 0x4104:
1188 for (i = 0; i < 16; i++) {
1189 bool enabled;
1190
1191 enabled = !!(ib_chunk->kdata[idx] & (1 << i));
1192 track->textures[i].enabled = enabled;
1193 }
1194 break;
1195 case 0x44C0:
1196 case 0x44C4:
1197 case 0x44C8:
1198 case 0x44CC:
1199 case 0x44D0:
1200 case 0x44D4:
1201 case 0x44D8:
1202 case 0x44DC:
1203 case 0x44E0:
1204 case 0x44E4:
1205 case 0x44E8:
1206 case 0x44EC:
1207 case 0x44F0:
1208 case 0x44F4:
1209 case 0x44F8:
1210 case 0x44FC:
1211 /* TX_FORMAT1_[0-15] */
1212 i = (reg - 0x44C0) >> 2;
1213 tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
1214 track->textures[i].tex_coord_type = tmp;
1215 switch ((ib_chunk->kdata[idx] & 0x1F)) {
1216 case 0:
1217 case 2:
1218 case 5:
1219 case 18:
1220 case 20:
1221 case 21:
1222 track->textures[i].cpp = 1;
1223 break;
1224 case 1:
1225 case 3:
1226 case 6:
1227 case 7:
1228 case 10:
1229 case 11:
1230 case 19:
1231 case 22:
1232 case 24:
1233 track->textures[i].cpp = 2;
1234 break;
1235 case 4:
1236 case 8:
1237 case 9:
1238 case 12:
1239 case 13:
1240 case 23:
1241 case 25:
1242 case 27:
1243 case 30:
1244 track->textures[i].cpp = 4;
1245 break;
1246 case 14:
1247 case 26:
1248 case 28:
1249 track->textures[i].cpp = 8;
1250 break;
1251 case 29:
1252 track->textures[i].cpp = 16;
1253 break;
1254 default:
1255 DRM_ERROR("Invalid texture format %u\n",
1256 (ib_chunk->kdata[idx] & 0x1F));
1257 return -EINVAL;
1258 break;
1259 }
1260 break;
1261 case 0x4400:
1262 case 0x4404:
1263 case 0x4408:
1264 case 0x440C:
1265 case 0x4410:
1266 case 0x4414:
1267 case 0x4418:
1268 case 0x441C:
1269 case 0x4420:
1270 case 0x4424:
1271 case 0x4428:
1272 case 0x442C:
1273 case 0x4430:
1274 case 0x4434:
1275 case 0x4438:
1276 case 0x443C:
1277 /* TX_FILTER0_[0-15] */
1278 i = (reg - 0x4400) >> 2;
1279 tmp = ib_chunk->kdata[idx] & 0x7;;
1280 if (tmp == 2 || tmp == 4 || tmp == 6) {
1281 track->textures[i].roundup_w = false;
1282 }
1283 tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;;
1284 if (tmp == 2 || tmp == 4 || tmp == 6) {
1285 track->textures[i].roundup_h = false;
1286 }
1287 break;
1288 case 0x4500:
1289 case 0x4504:
1290 case 0x4508:
1291 case 0x450C:
1292 case 0x4510:
1293 case 0x4514:
1294 case 0x4518:
1295 case 0x451C:
1296 case 0x4520:
1297 case 0x4524:
1298 case 0x4528:
1299 case 0x452C:
1300 case 0x4530:
1301 case 0x4534:
1302 case 0x4538:
1303 case 0x453C:
1304 /* TX_FORMAT2_[0-15] */
1305 i = (reg - 0x4500) >> 2;
1306 tmp = ib_chunk->kdata[idx] & 0x3FFF;
1307 track->textures[i].pitch = tmp + 1;
1308 if (p->rdev->family >= CHIP_RV515) {
1309 tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11;
1310 track->textures[i].width_11 = tmp;
1311 tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11;
1312 track->textures[i].height_11 = tmp;
1313 }
1314 break;
1315 case 0x4480:
1316 case 0x4484:
1317 case 0x4488:
1318 case 0x448C:
1319 case 0x4490:
1320 case 0x4494:
1321 case 0x4498:
1322 case 0x449C:
1323 case 0x44A0:
1324 case 0x44A4:
1325 case 0x44A8:
1326 case 0x44AC:
1327 case 0x44B0:
1328 case 0x44B4:
1329 case 0x44B8:
1330 case 0x44BC:
1331 /* TX_FORMAT0_[0-15] */
1332 i = (reg - 0x4480) >> 2;
1333 tmp = ib_chunk->kdata[idx] & 0x7FF;
1334 track->textures[i].width = tmp + 1;
1335 tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF;
1336 track->textures[i].height = tmp + 1;
1337 tmp = (ib_chunk->kdata[idx] >> 26) & 0xF;
1338 track->textures[i].num_levels = tmp;
1339 tmp = ib_chunk->kdata[idx] & (1 << 31);
1340 track->textures[i].use_pitch = !!tmp;
1341 tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
1342 track->textures[i].txdepth = tmp;
1343 break;
997 default: 1344 default:
998 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", reg, idx); 1345 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1346 reg, idx);
999 return -EINVAL; 1347 return -EINVAL;
1000 } 1348 }
1001 return 0; 1349 return 0;
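
Editor's note: each of the three 16-way case ladders above covers one contiguous, 4-byte-strided register bank, so a range test would recover the unit index with far less text. The explicit cases may well be deliberate, since they keep every unlisted register falling through to the forbidden-register default, but for comparison (sketch):

        if (reg >= 0x44C0 && reg <= 0x44FC) {   /* TX_FORMAT1_[0-15] */
                i = (reg - 0x44C0) >> 2;
                /* decode as in the case body above */
        }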
@@ -1015,11 +1363,12 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1015 ib = p->ib->ptr; 1363 ib = p->ib->ptr;
1016 ib_chunk = &p->chunks[p->chunk_ib_idx]; 1364 ib_chunk = &p->chunks[p->chunk_ib_idx];
1017 idx = pkt->idx + 1; 1365 idx = pkt->idx + 1;
1018 track = (struct r300_cs_track *)p->track; 1366 track = (struct r300_cs_track*)p->track;
1019 switch (pkt->opcode) { 1367 switch(pkt->opcode) {
1020 case PACKET3_3D_LOAD_VBPNTR: 1368 case PACKET3_3D_LOAD_VBPNTR:
1021 c = ib_chunk->kdata[idx++]; 1369 c = ib_chunk->kdata[idx++] & 0x1F;
1022 for (i = 0; i < (c - 1); i += 2, idx += 3) { 1370 track->num_arrays = c;
1371 for (i = 0; i < (c - 1); i+=2, idx+=3) {
1023 r = r100_cs_packet_next_reloc(p, &reloc); 1372 r = r100_cs_packet_next_reloc(p, &reloc);
1024 if (r) { 1373 if (r) {
1025 DRM_ERROR("No reloc for packet3 %d\n", 1374 DRM_ERROR("No reloc for packet3 %d\n",
@@ -1028,6 +1377,9 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1028 return r; 1377 return r;
1029 } 1378 }
1030 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1379 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1380 track->arrays[i + 0].robj = reloc->robj;
1381 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1382 track->arrays[i + 0].esize &= 0x7F;
1031 r = r100_cs_packet_next_reloc(p, &reloc); 1383 r = r100_cs_packet_next_reloc(p, &reloc);
1032 if (r) { 1384 if (r) {
1033 DRM_ERROR("No reloc for packet3 %d\n", 1385 DRM_ERROR("No reloc for packet3 %d\n",
@@ -1036,6 +1388,9 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1036 return r; 1388 return r;
1037 } 1389 }
1038 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset); 1390 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1391 track->arrays[i + 1].robj = reloc->robj;
1392 track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
1393 track->arrays[i + 1].esize &= 0x7F;
1039 } 1394 }
1040 if (c & 1) { 1395 if (c & 1) {
1041 r = r100_cs_packet_next_reloc(p, &reloc); 1396 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1046,6 +1401,9 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1046 return r; 1401 return r;
1047 } 1402 }
1048 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1403 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1404 track->arrays[i + 0].robj = reloc->robj;
1405 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1406 track->arrays[i + 0].esize &= 0x7F;
1049 } 1407 }
1050 break; 1408 break;
1051 case PACKET3_INDX_BUFFER: 1409 case PACKET3_INDX_BUFFER:
@@ -1056,14 +1414,65 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1056 return r; 1414 return r;
1057 } 1415 }
1058 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1416 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1417 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1418 if (r) {
1419 return r;
1420 }
1059 break; 1421 break;
1060 /* Draw packet */ 1422 /* Draw packet */
1061 case PACKET3_3D_DRAW_VBUF:
1062 case PACKET3_3D_DRAW_IMMD: 1423 case PACKET3_3D_DRAW_IMMD:
1063 case PACKET3_3D_DRAW_INDX: 1424 /* Number of dwords is vtx_size * (num_vertices - 1)
 1064 case PACKET3_3D_DRAW_VBUF_2: 1425 * PRIM_WALK must be equal to 3, vertex data is embedded
 1426 * in the cmd stream */
1427 if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
1428 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1429 return -EINVAL;
1430 }
1431 track->vap_vf_cntl = ib_chunk->kdata[idx+1];
1432 track->immd_dwords = pkt->count - 1;
1433 r = r300_cs_track_check(p->rdev, track);
1434 if (r) {
1435 return r;
1436 }
1437 break;
1065 case PACKET3_3D_DRAW_IMMD_2: 1438 case PACKET3_3D_DRAW_IMMD_2:
1439 /* Number of dwords is vtx_size * (num_vertices - 1)
 1440 * PRIM_WALK must be equal to 3, vertex data is embedded
 1441 * in the cmd stream */
1442 if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
1443 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1444 return -EINVAL;
1445 }
1446 track->vap_vf_cntl = ib_chunk->kdata[idx];
1447 track->immd_dwords = pkt->count;
1448 r = r300_cs_track_check(p->rdev, track);
1449 if (r) {
1450 return r;
1451 }
1452 break;
1453 case PACKET3_3D_DRAW_VBUF:
1454 track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
1455 r = r300_cs_track_check(p->rdev, track);
1456 if (r) {
1457 return r;
1458 }
1459 break;
1460 case PACKET3_3D_DRAW_VBUF_2:
1461 track->vap_vf_cntl = ib_chunk->kdata[idx];
1462 r = r300_cs_track_check(p->rdev, track);
1463 if (r) {
1464 return r;
1465 }
1466 break;
1467 case PACKET3_3D_DRAW_INDX:
1468 track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
1469 r = r300_cs_track_check(p->rdev, track);
1470 if (r) {
1471 return r;
1472 }
1473 break;
1066 case PACKET3_3D_DRAW_INDX_2: 1474 case PACKET3_3D_DRAW_INDX_2:
1475 track->vap_vf_cntl = ib_chunk->kdata[idx];
1067 r = r300_cs_track_check(p->rdev, track); 1476 r = r300_cs_track_check(p->rdev, track);
1068 if (r) { 1477 if (r) {
1069 return r; 1478 return r;
@@ -1095,8 +1504,8 @@ int r300_cs_parse(struct radeon_cs_parser *p)
1095 switch (pkt.type) { 1504 switch (pkt.type) {
1096 case PACKET_TYPE0: 1505 case PACKET_TYPE0:
1097 r = r100_cs_parse_packet0(p, &pkt, 1506 r = r100_cs_parse_packet0(p, &pkt,
1098 r300_auth_reg, 1507 p->rdev->config.r300.reg_safe_bm,
1099 ARRAY_SIZE(r300_auth_reg), 1508 p->rdev->config.r300.reg_safe_bm_size,
1100 &r300_packet0_check); 1509 &r300_packet0_check);
1101 break; 1510 break;
1102 case PACKET_TYPE2: 1511 case PACKET_TYPE2:
@@ -1114,3 +1523,10 @@ int r300_cs_parse(struct radeon_cs_parser *p)
1114 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 1523 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1115 return 0; 1524 return 0;
1116} 1525}
1526
1527int r300_init(struct radeon_device *rdev)
1528{
1529 rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
1530 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
1531 return 0;
1532}
diff --git a/drivers/gpu/drm/radeon/r300.h b/drivers/gpu/drm/radeon/r300.h
new file mode 100644
index 000000000000..8486b4da9d69
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef R300_H
29#define R300_H
30
31struct r300_asic {
32 const unsigned *reg_safe_bm;
33 unsigned reg_safe_bm_size;
34};
35
36#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c3f24cc56009..d61f2fc61df5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -51,7 +51,7 @@
51 51
52#include "radeon_mode.h" 52#include "radeon_mode.h"
53#include "radeon_reg.h" 53#include "radeon_reg.h"
54 54#include "r300.h"
55 55
56/* 56/*
57 * Modules parameters. 57 * Modules parameters.
@@ -496,6 +496,7 @@ int r100_debugfs_cp_init(struct radeon_device *rdev);
496 * ASIC specific functions. 496 * ASIC specific functions.
497 */ 497 */
498struct radeon_asic { 498struct radeon_asic {
499 int (*init)(struct radeon_device *rdev);
499 void (*errata)(struct radeon_device *rdev); 500 void (*errata)(struct radeon_device *rdev);
500 void (*vram_info)(struct radeon_device *rdev); 501 void (*vram_info)(struct radeon_device *rdev);
501 int (*gpu_reset)(struct radeon_device *rdev); 502 int (*gpu_reset)(struct radeon_device *rdev);
@@ -536,6 +537,10 @@ struct radeon_asic {
536 void (*set_clock_gating)(struct radeon_device *rdev, int enable); 537 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
537}; 538};
538 539
540union radeon_asic_config {
541 struct r300_asic r300;
542};
543
539 544
540/* 545/*
541 * IOCTL. 546 * IOCTL.
@@ -573,6 +578,7 @@ struct radeon_device {
573 struct drm_device *ddev; 578 struct drm_device *ddev;
574 struct pci_dev *pdev; 579 struct pci_dev *pdev;
575 /* ASIC */ 580 /* ASIC */
581 union radeon_asic_config config;
576 enum radeon_family family; 582 enum radeon_family family;
577 unsigned long flags; 583 unsigned long flags;
578 int usec_timeout; 584 int usec_timeout;
@@ -763,6 +769,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
763/* 769/*
764 * ASICs macro. 770 * ASICs macro.
765 */ 771 */
772#define radeon_init(rdev) (rdev)->asic->init((rdev))
766#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) 773#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
767#define radeon_errata(rdev) (rdev)->asic->errata((rdev)) 774#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
768#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) 775#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
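
Editor's note: with the init hook added to struct radeon_asic and wrapped by the macro above, per-family setup such as installing the r300 safe-register bitmap flows through the same table as every other ASIC callback. A hedged sketch of the call path; the family switch is assumed from context and is not part of this diff:

        /* At device bring-up (assumed):
         *      rdev->asic = &r300_asic;        selected from rdev->family
         * then, as radeon_device.c below now does:
         *      r = radeon_init(rdev);          -> rdev->asic->init(rdev)
         *                                      -> r300_init(rdev), which installs
         *                                         config.r300.reg_safe_bm
         */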
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e57d8a784e9f..e2e567395df8 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -41,6 +41,7 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
41/* 41/*
42 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 42 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
43 */ 43 */
44int r100_init(struct radeon_device *rdev);
44uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); 45uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
45void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 46void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
46void r100_errata(struct radeon_device *rdev); 47void r100_errata(struct radeon_device *rdev);
@@ -72,6 +73,7 @@ int r100_copy_blit(struct radeon_device *rdev,
72 struct radeon_fence *fence); 73 struct radeon_fence *fence);
73 74
74static struct radeon_asic r100_asic = { 75static struct radeon_asic r100_asic = {
76 .init = &r100_init,
75 .errata = &r100_errata, 77 .errata = &r100_errata,
76 .vram_info = &r100_vram_info, 78 .vram_info = &r100_vram_info,
77 .gpu_reset = &r100_gpu_reset, 79 .gpu_reset = &r100_gpu_reset,
@@ -104,6 +106,7 @@ static struct radeon_asic r100_asic = {
104/* 106/*
105 * r300,r350,rv350,rv380 107 * r300,r350,rv350,rv380
106 */ 108 */
109int r300_init(struct radeon_device *rdev);
107void r300_errata(struct radeon_device *rdev); 110void r300_errata(struct radeon_device *rdev);
108void r300_vram_info(struct radeon_device *rdev); 111void r300_vram_info(struct radeon_device *rdev);
109int r300_gpu_reset(struct radeon_device *rdev); 112int r300_gpu_reset(struct radeon_device *rdev);
@@ -126,6 +129,7 @@ int r300_copy_dma(struct radeon_device *rdev,
126 unsigned num_pages, 129 unsigned num_pages,
127 struct radeon_fence *fence); 130 struct radeon_fence *fence);
128static struct radeon_asic r300_asic = { 131static struct radeon_asic r300_asic = {
132 .init = &r300_init,
129 .errata = &r300_errata, 133 .errata = &r300_errata,
130 .vram_info = &r300_vram_info, 134 .vram_info = &r300_vram_info,
131 .gpu_reset = &r300_gpu_reset, 135 .gpu_reset = &r300_gpu_reset,
@@ -162,6 +166,7 @@ void r420_vram_info(struct radeon_device *rdev);
162int r420_mc_init(struct radeon_device *rdev); 166int r420_mc_init(struct radeon_device *rdev);
163void r420_mc_fini(struct radeon_device *rdev); 167void r420_mc_fini(struct radeon_device *rdev);
164static struct radeon_asic r420_asic = { 168static struct radeon_asic r420_asic = {
169 .init = &r300_init,
165 .errata = &r420_errata, 170 .errata = &r420_errata,
166 .vram_info = &r420_vram_info, 171 .vram_info = &r420_vram_info,
167 .gpu_reset = &r300_gpu_reset, 172 .gpu_reset = &r300_gpu_reset,
@@ -205,6 +210,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
205uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); 210uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
206void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 211void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
207static struct radeon_asic rs400_asic = { 212static struct radeon_asic rs400_asic = {
213 .init = &r300_init,
208 .errata = &rs400_errata, 214 .errata = &rs400_errata,
209 .vram_info = &rs400_vram_info, 215 .vram_info = &rs400_vram_info,
210 .gpu_reset = &r300_gpu_reset, 216 .gpu_reset = &r300_gpu_reset,
@@ -249,6 +255,7 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
249uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 255uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
250void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 256void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
251static struct radeon_asic rs600_asic = { 257static struct radeon_asic rs600_asic = {
258 .init = &r300_init,
252 .errata = &rs600_errata, 259 .errata = &rs600_errata,
253 .vram_info = &rs600_vram_info, 260 .vram_info = &rs600_vram_info,
254 .gpu_reset = &r300_gpu_reset, 261 .gpu_reset = &r300_gpu_reset,
@@ -288,6 +295,7 @@ void rs690_mc_fini(struct radeon_device *rdev);
288uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); 295uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
289void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 296void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
290static struct radeon_asic rs690_asic = { 297static struct radeon_asic rs690_asic = {
298 .init = &r300_init,
291 .errata = &rs690_errata, 299 .errata = &rs690_errata,
292 .vram_info = &rs690_vram_info, 300 .vram_info = &rs690_vram_info,
293 .gpu_reset = &r300_gpu_reset, 301 .gpu_reset = &r300_gpu_reset,
@@ -320,6 +328,7 @@ static struct radeon_asic rs690_asic = {
320/* 328/*
321 * rv515 329 * rv515
322 */ 330 */
331int rv515_init(struct radeon_device *rdev);
323void rv515_errata(struct radeon_device *rdev); 332void rv515_errata(struct radeon_device *rdev);
324void rv515_vram_info(struct radeon_device *rdev); 333void rv515_vram_info(struct radeon_device *rdev);
325int rv515_gpu_reset(struct radeon_device *rdev); 334int rv515_gpu_reset(struct radeon_device *rdev);
@@ -331,6 +340,7 @@ void rv515_ring_start(struct radeon_device *rdev);
331uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 340uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
332void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 341void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
333static struct radeon_asic rv515_asic = { 342static struct radeon_asic rv515_asic = {
343 .init = &rv515_init,
334 .errata = &rv515_errata, 344 .errata = &rv515_errata,
335 .vram_info = &rv515_vram_info, 345 .vram_info = &rv515_vram_info,
336 .gpu_reset = &rv515_gpu_reset, 346 .gpu_reset = &rv515_gpu_reset,
@@ -349,7 +359,7 @@ static struct radeon_asic rv515_asic = {
349 .irq_set = &r100_irq_set, 359 .irq_set = &r100_irq_set,
350 .irq_process = &r100_irq_process, 360 .irq_process = &r100_irq_process,
351 .fence_ring_emit = &r300_fence_ring_emit, 361 .fence_ring_emit = &r300_fence_ring_emit,
352 .cs_parse = &r100_cs_parse, 362 .cs_parse = &r300_cs_parse,
353 .copy_blit = &r100_copy_blit, 363 .copy_blit = &r100_copy_blit,
354 .copy_dma = &r300_copy_dma, 364 .copy_dma = &r300_copy_dma,
355 .copy = &r100_copy_blit, 365 .copy = &r100_copy_blit,
@@ -368,6 +378,7 @@ void r520_vram_info(struct radeon_device *rdev);
368int r520_mc_init(struct radeon_device *rdev); 378int r520_mc_init(struct radeon_device *rdev);
369void r520_mc_fini(struct radeon_device *rdev); 379void r520_mc_fini(struct radeon_device *rdev);
370static struct radeon_asic r520_asic = { 380static struct radeon_asic r520_asic = {
381 .init = &rv515_init,
371 .errata = &r520_errata, 382 .errata = &r520_errata,
372 .vram_info = &r520_vram_info, 383 .vram_info = &r520_vram_info,
373 .gpu_reset = &rv515_gpu_reset, 384 .gpu_reset = &rv515_gpu_reset,
@@ -386,7 +397,7 @@ static struct radeon_asic r520_asic = {
386 .irq_set = &r100_irq_set, 397 .irq_set = &r100_irq_set,
387 .irq_process = &r100_irq_process, 398 .irq_process = &r100_irq_process,
388 .fence_ring_emit = &r300_fence_ring_emit, 399 .fence_ring_emit = &r300_fence_ring_emit,
389 .cs_parse = &r100_cs_parse, 400 .cs_parse = &r300_cs_parse,
390 .copy_blit = &r100_copy_blit, 401 .copy_blit = &r100_copy_blit,
391 .copy_dma = &r300_copy_dma, 402 .copy_dma = &r300_copy_dma,
392 .copy = &r100_copy_blit, 403 .copy = &r100_copy_blit,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 786632d3e378..1f5a1a490984 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -835,7 +835,6 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
835 struct _COMPASSIONATE_DATA *dac_info; 835 struct _COMPASSIONATE_DATA *dac_info;
836 uint8_t frev, crev; 836 uint8_t frev, crev;
837 uint8_t bg, dac; 837 uint8_t bg, dac;
838 int i;
839 struct radeon_encoder_primary_dac *p_dac = NULL; 838 struct radeon_encoder_primary_dac *p_dac = NULL;
840 839
841 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); 840 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
@@ -867,7 +866,6 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
867 struct _COMPASSIONATE_DATA *dac_info; 866 struct _COMPASSIONATE_DATA *dac_info;
868 uint8_t frev, crev; 867 uint8_t frev, crev;
869 uint8_t bg, dac; 868 uint8_t bg, dac;
870 int i;
871 struct radeon_encoder_tv_dac *tv_dac = NULL; 869 struct radeon_encoder_tv_dac *tv_dac = NULL;
872 870
873 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); 871 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 06e8038bc4ac..afc4db280b94 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -799,6 +799,7 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
 	struct radeon_encoder_lvds *lvds = NULL;
 	uint32_t fp_vert_stretch, fp_horz_stretch;
 	uint32_t ppll_div_sel, ppll_val;
+	uint32_t lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
 
 	lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
 
@@ -808,6 +809,14 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
 	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH);
 	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH);
 
+	/* These should be fail-safe defaults, fingers crossed */
+	lvds->panel_pwr_delay = 200;
+	lvds->panel_vcc_delay = 2000;
+
+	lvds->lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	lvds->panel_digon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) & 0xf;
+	lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf;
+
 	if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
 		lvds->native_mode.panel_yres =
 			((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
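Note on the hunk above: the two power-sequencing delays are read out of LVDS_SS_GEN_CNTL as 4-bit fields. A minimal sketch of that extraction, assuming only the shift-and-mask pattern shown in the diff (the helper name is illustrative):

/* Each delay is a 4-bit field inside LVDS_SS_GEN_CNTL; the 0xf mask
 * width is taken from the hunk above. */
static inline unsigned int lvds_pwrseq_delay(u32 lvds_ss_gen_cntl, unsigned int shift)
{
	return (lvds_ss_gen_cntl >> shift) & 0xf;
}

With that helper, panel_digon_delay above would be lvds_pwrseq_delay(lvds_ss_gen_cntl, RADEON_LVDS_PWRSEQ_DELAY1_SHIFT).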
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 89c4c44169f7..d8356827ef17 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2045,11 +2045,10 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
 	drm_radeon_private_t *dev_priv;
 	int ret = 0;
 
-	dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
+	dev_priv = kzalloc(sizeof(drm_radeon_private_t), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
-	memset(dev_priv, 0, sizeof(drm_radeon_private_t));
 	dev->dev_private = (void *)dev_priv;
 	dev_priv->flags = flags;
 
@@ -2103,7 +2102,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
 	unsigned long sareapage;
 	int ret;
 
-	master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
 	if (!master_priv)
 		return -ENOMEM;
 
@@ -2137,7 +2136,7 @@ void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
 	if (master_priv->sarea)
 		drm_rmmap_locked(dev, master_priv->sarea);
 
-	drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+	kfree(master_priv);
 
 	master->driver_priv = NULL;
 }
@@ -2171,7 +2170,7 @@ int radeon_driver_unload(struct drm_device *dev)
 
 	drm_rmmap(dev, dev_priv->mmio);
 
-	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+	kfree(dev_priv);
 
 	dev->dev_private = NULL;
 	return 0;
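Note: the conversions in this file follow one mechanical rule, repeated across the series: drm_alloc() becomes kmalloc(), drm_calloc() becomes kzalloc(), and drm_free() becomes kfree(). The size and DRM_MEM_* arguments disappear because the slab allocator tracks object sizes itself. A minimal sketch of the pattern (foo_priv and the function names are placeholders):

#include <linux/slab.h>

struct foo_priv { unsigned long flags; };

static struct foo_priv *foo_load(void)
{
	/* was: drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER) */
	struct foo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	return priv;	/* NULL on allocation failure, as before */
}

static void foo_unload(struct foo_priv *priv)
{
	/* was: drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER) */
	kfree(priv);
}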
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5fd2b639bf66..f30aa7274a54 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -470,6 +470,10 @@ int radeon_device_init(struct radeon_device *rdev,
 	if (r) {
 		return r;
 	}
+	r = radeon_init(rdev);
+	if (r) {
+		return r;
+	}
 
 	/* Report DMA addressing limitation */
 	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
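Note: the added radeon_init() call gives each asic family an init hook during device bring-up; rv515_init() later in this diff is one implementation, wired in through the asic tables patched above. A sketch of the dispatch, assuming the usual function-pointer-table form (the macro itself is not shown in this diff):

/* Hypothetical dispatch through the asic vtable. */
#define radeon_init(rdev) ((rdev)->asic->init((rdev)))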
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5452bb9d925e..3efcf1a526be 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -351,7 +351,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 	radeon_i2c_do_lock(radeon_connector, 0);
 	if (edid) {
 		/* update digital bits here */
-		if (edid->digital)
+		if (edid->input & DRM_EDID_INPUT_DIGITAL)
 			radeon_connector->use_digital = 1;
 		else
 			radeon_connector->use_digital = 0;
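Note: testing the raw input byte against DRM_EDID_INPUT_DIGITAL, rather than reading a C bitfield member, avoids depending on compiler bitfield layout, which varies with architecture and endianness. In an EDID 1.3 block, byte 20 is the video input definition and bit 7 flags a digital sink; a minimal standalone sketch of the same check (the names here are illustrative):

#define EDID_INPUT_OFFSET	20
#define EDID_INPUT_DIGITAL	(1 << 7)	/* mirrors DRM_EDID_INPUT_DIGITAL */

/* Nonzero when a raw 128-byte EDID block describes a digital display. */
static int edid_is_digital(const unsigned char *edid_block)
{
	return edid_block[EDID_INPUT_OFFSET] & EDID_INPUT_DIGITAL;
}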
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index c815a2cbf7b3..09c9fb9f6210 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -313,7 +313,7 @@ static int __init radeon_init(void)
 {
 	driver = &driver_old;
 	driver->num_ioctls = radeon_max_ioctl;
-#if defined(CONFIG_DRM_RADEON_KMS) && defined(CONFIG_X86)
+#if defined(CONFIG_DRM_RADEON_KMS)
 	/* if enabled by default */
 	if (radeon_modeset == -1) {
 		DRM_INFO("radeon default to kernel modesetting.\n");
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 71465ed2688a..dd438d32e5c0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -162,7 +162,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 	struct radeon_i2c_chan *i2c;
 	int ret;
 
-	i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
+	i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
 	if (i2c == NULL)
 		return NULL;
 
@@ -189,7 +189,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 
 	return i2c;
 out_free:
-	drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
+	kfree(i2c);
 	return NULL;
 
 }
@@ -200,7 +200,7 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
 		return;
 
 	i2c_del_adapter(&i2c->adapter);
-	drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
+	kfree(i2c);
 }
 
 struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 64f42b19cbfa..4612a7c146d1 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -169,7 +169,7 @@ int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
 	unsigned long sareapage;
 	int ret;
 
-	master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
 	if (master_priv == NULL) {
 		return -ENOMEM;
 	}
@@ -199,7 +199,7 @@ void radeon_master_destroy_kms(struct drm_device *dev,
 	if (master_priv->sarea) {
 		drm_rmmap_locked(dev, master_priv->sarea);
 	}
-	drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+	kfree(master_priv);
 	master->driver_priv = NULL;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index 4af5286a36fb..ed95155c4b1d 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -43,8 +43,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
 {
 	/* Maybe cut off the start of an existing block */
 	if (start > p->start) {
-		struct mem_block *newblock =
-		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
+		struct mem_block *newblock = kmalloc(sizeof(*newblock),
+						     GFP_KERNEL);
 		if (!newblock)
 			goto out;
 		newblock->start = start;
@@ -60,8 +60,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
 
 	/* Maybe cut off the end of an existing block */
 	if (size < p->size) {
-		struct mem_block *newblock =
-		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
+		struct mem_block *newblock = kmalloc(sizeof(*newblock),
+						     GFP_KERNEL);
 		if (!newblock)
 			goto out;
 		newblock->start = start + size;
@@ -118,7 +118,7 @@ static void free_block(struct mem_block *p)
 		p->size += q->size;
 		p->next = q->next;
 		p->next->prev = p;
-		drm_free(q, sizeof(*q), DRM_MEM_BUFS);
+		kfree(q);
 	}
 
 	if (p->prev->file_priv == NULL) {
@@ -126,7 +126,7 @@ static void free_block(struct mem_block *p)
 		q->size += p->size;
 		q->next = p->next;
 		q->next->prev = q;
-		drm_free(p, sizeof(*q), DRM_MEM_BUFS);
+		kfree(p);
 	}
 }
 
@@ -134,14 +134,14 @@ static void free_block(struct mem_block *p)
  */
 static int init_heap(struct mem_block **heap, int start, int size)
 {
-	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
+	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
 
 	if (!blocks)
 		return -ENOMEM;
 
-	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
+	*heap = kmalloc(sizeof(**heap), GFP_KERNEL);
 	if (!*heap) {
-		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
+		kfree(blocks);
 		return -ENOMEM;
 	}
 
@@ -179,7 +179,7 @@ void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
 			p->size += q->size;
 			p->next = q->next;
 			p->next->prev = p;
-			drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
+			kfree(q);
 		}
 	}
 }
@@ -196,10 +196,10 @@ void radeon_mem_takedown(struct mem_block **heap)
 	for (p = (*heap)->next; p != *heap;) {
 		struct mem_block *q = p;
 		p = p->next;
-		drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
+		kfree(q);
 	}
 
-	drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
+	kfree(*heap);
 	*heap = NULL;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 6d3d90406a24..e1b618574461 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3184,6 +3184,7 @@
 # define RADEON_RB_BUFSZ_MASK (0x3f << 0)
 # define RADEON_RB_BLKSZ_SHIFT 8
 # define RADEON_RB_BLKSZ_MASK (0x3f << 8)
+# define RADEON_BUF_SWAP_32BIT (1 << 17)
 # define RADEON_MAX_FETCH_SHIFT 18
 # define RADEON_MAX_FETCH_MASK (0x3 << 18)
 # define RADEON_RB_NO_UPDATE (1 << 27)
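Note: RADEON_BUF_SWAP_32BIT sits in CP_RB_CNTL and asks the command processor to byte-swap each 32-bit dword it fetches, so a little-endian command stream can be consumed on a big-endian host. A hedged sketch of how the bit would be folded in when programming the ring (the register and bit come from this header; the function is illustrative):

static void foo_program_rb_cntl(struct radeon_device *rdev, u32 tmp)
{
#ifdef __BIG_ENDIAN
	/* CP fetches are byte-swapped per 32-bit dword on big endian. */
	tmp |= RADEON_BUF_SWAP_32BIT;
#endif
	WREG32(RADEON_CP_RB_CNTL, tmp);
}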
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index fa728ec6ed34..46645f3e0328 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -2866,12 +2866,12 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
 	 */
 	orig_bufsz = cmdbuf->bufsz;
 	if (orig_bufsz != 0) {
-		kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER);
+		kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL);
 		if (kbuf == NULL)
 			return -ENOMEM;
 		if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
 				       cmdbuf->bufsz)) {
-			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+			kfree(kbuf);
 			return -EFAULT;
 		}
 		cmdbuf->buf = kbuf;
@@ -2884,7 +2884,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
 		temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
 
 		if (orig_bufsz != 0)
-			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+			kfree(kbuf);
 
 		return temp;
 	}
@@ -2991,7 +2991,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
 	}
 
 	if (orig_bufsz != 0)
-		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+		kfree(kbuf);
 
 	DRM_DEBUG("DONE\n");
 	COMMIT_RING();
@@ -2999,7 +2999,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
 
  err:
 	if (orig_bufsz != 0)
-		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+		kfree(kbuf);
 	return -EINVAL;
 }
 
@@ -3175,9 +3175,7 @@ int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	struct drm_radeon_driver_file_fields *radeon_priv;
 
 	DRM_DEBUG("\n");
-	radeon_priv =
-	    (struct drm_radeon_driver_file_fields *)
-	    drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
+	radeon_priv = kmalloc(sizeof(*radeon_priv), GFP_KERNEL);
 
 	if (!radeon_priv)
 		return -ENOMEM;
@@ -3196,7 +3194,7 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
 	struct drm_radeon_driver_file_fields *radeon_priv =
 		file_priv->driver_priv;
 
-	drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
+	kfree(radeon_priv);
 }
 
 struct drm_ioctl_desc radeon_ioctls[] = {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 4c087c1510d7..1227a97f5169 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -133,6 +133,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->gpu_offset = 0;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
 #if __OS_HAS_AGP
 		if (rdev->flags & RADEON_IS_AGP) {
 			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
@@ -143,8 +144,9 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			man->io_offset = rdev->mc.agp_base;
 			man->io_size = rdev->mc.gtt_size;
 			man->io_addr = NULL;
-			man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
-				     TTM_MEMTYPE_FLAG_MAPPABLE;
+			if (!rdev->ddev->agp->cant_use_aperture)
+				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
+					     TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED |
 						 TTM_PL_FLAG_WC;
 			man->default_caching = TTM_PL_FLAG_WC;
@@ -154,8 +156,6 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			man->io_offset = 0;
 			man->io_size = 0;
 			man->io_addr = NULL;
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-				     TTM_MEMTYPE_FLAG_CMA;
 		}
 		break;
 	case TTM_PL_VRAM:
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 7eab95db58ac..ffea37b1b3e2 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -225,6 +225,8 @@ void rv515_ring_start(struct radeon_device *rdev)
 	radeon_ring_write(rdev,
 			  R300_GEOMETRY_ROUND_NEAREST |
 			  R300_COLOR_ROUND_NEAREST);
+	radeon_ring_write(rdev, PACKET0(0x20C8, 0));
+	radeon_ring_write(rdev, 0);
 	radeon_ring_unlock_commit(rdev);
 }
 
@@ -502,3 +504,59 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
 	return 0;
 #endif
 }
+
+
+/*
+ * Asic initialization
+ */
+static const unsigned r500_reg_safe_bm[159] = {
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
+	0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0x1FFFFC78, 0xFFFFE000, 0xFFFFFFFE, 0xFFFFFFFF,
+	0x38CF8F50, 0xFFF88082, 0xFF0000FC, 0xFAE009FF,
+	0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
+	0xFFFF8CFC, 0xFFFFC1FF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x0003FC01, 0x3FFFFCF8, 0xFE800B19,
+};
+
+
+
+int rv515_init(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = r500_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm);
+	return 0;
+}
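Note: r500_reg_safe_bm packs one bit per register dword, so 159 words x 32 bits cover register offsets 0 through 0x4F7C. Assuming a set bit marks a register that command streams may touch without further checking (the helper below is a sketch, not the checker from this series):

/* Registers are 4-byte aligned: reg >> 2 is the dword index; the high
 * bits select the bitmap word, the low five bits select the bit. */
static int r500_reg_is_safe(const unsigned *bm, unsigned reg)
{
	unsigned idx = reg >> 2;

	return (bm[idx >> 5] >> (idx & 31)) & 1;
}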
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 456cd040f31a..bff6fc2524c8 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -298,8 +298,8 @@ static int savage_dma_init(drm_savage_private_t * dev_priv)
 
 	dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
 		(SAVAGE_DMA_PAGE_SIZE * 4);
-	dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
-					dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
+	dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) *
+				      dev_priv->nr_dma_pages, GFP_KERNEL);
 	if (dev_priv->dma_pages == NULL)
 		return -ENOMEM;
 
@@ -539,7 +539,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	drm_savage_private_t *dev_priv;
 
-	dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
+	dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
@@ -671,7 +671,7 @@ int savage_driver_unload(struct drm_device *dev)
 {
 	drm_savage_private_t *dev_priv = dev->dev_private;
 
-	drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
+	kfree(dev_priv);
 
 	return 0;
 }
@@ -804,8 +804,8 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
 		dev_priv->fake_dma.offset = 0;
 		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
 		dev_priv->fake_dma.type = _DRM_SHM;
-		dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
-						      DRM_MEM_DRIVER);
+		dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE,
+						    GFP_KERNEL);
 		if (!dev_priv->fake_dma.handle) {
 			DRM_ERROR("could not allocate faked DMA buffer!\n");
 			savage_do_cleanup_bci(dev);
@@ -903,9 +903,7 @@ static int savage_do_cleanup_bci(struct drm_device * dev)
 	drm_savage_private_t *dev_priv = dev->dev_private;
 
 	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
-		if (dev_priv->fake_dma.handle)
-			drm_free(dev_priv->fake_dma.handle,
-				 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
+		kfree(dev_priv->fake_dma.handle);
 	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
 		   dev_priv->cmd_dma->type == _DRM_AGP &&
 		   dev_priv->dma_type == SAVAGE_DMA_AGP)
@@ -920,10 +918,7 @@ static int savage_do_cleanup_bci(struct drm_device * dev)
 		dev->agp_buffer_map = NULL;
 	}
 
-	if (dev_priv->dma_pages)
-		drm_free(dev_priv->dma_pages,
-			 sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages,
-			 DRM_MEM_DRIVER);
+	kfree(dev_priv->dma_pages);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 5f6238fdf1fa..8a3e31599c94 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -988,20 +988,20 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 	 * for locking on FreeBSD.
 	 */
 	if (cmdbuf->size) {
-		kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
+		kcmd_addr = kmalloc(cmdbuf->size * 8, GFP_KERNEL);
 		if (kcmd_addr == NULL)
 			return -ENOMEM;
 
 		if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
 				       cmdbuf->size * 8))
 		{
-			drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
+			kfree(kcmd_addr);
 			return -EFAULT;
 		}
 		cmdbuf->cmd_addr = kcmd_addr;
 	}
 	if (cmdbuf->vb_size) {
-		kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
+		kvb_addr = kmalloc(cmdbuf->vb_size, GFP_KERNEL);
 		if (kvb_addr == NULL) {
 			ret = -ENOMEM;
 			goto done;
@@ -1015,8 +1015,8 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 		cmdbuf->vb_addr = kvb_addr;
 	}
 	if (cmdbuf->nbox) {
-		kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
-				      DRM_MEM_DRIVER);
+		kbox_addr = kmalloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
+				    GFP_KERNEL);
 		if (kbox_addr == NULL) {
 			ret = -ENOMEM;
 			goto done;
@@ -1154,10 +1154,9 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 
 done:
 	/* If we didn't need to allocate them, these'll be NULL */
-	drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
-	drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER);
-	drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect),
-		 DRM_MEM_DRIVER);
+	kfree(kcmd_addr);
+	kfree(kvb_addr);
+	kfree(kbox_addr);
 
 	return ret;
 }
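Note: the unconditional kfree() calls at the done: label are correct because kfree(NULL) is defined as a no-op; buffers that were never allocated stay NULL and need no guard. The savage_do_cleanup_bci() hunks above drop their if checks for the same reason. A minimal sketch of the idiom (foo_work is a placeholder):

static int foo_work(bool need_buf, size_t len)
{
	char *buf = NULL;

	if (need_buf) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	}

	/* ... use buf when present ... */

	kfree(buf);	/* no-op when buf is still NULL */
	return 0;
}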
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 7dacc64e9b56..e725cc0b1155 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -40,7 +40,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
 	drm_sis_private_t *dev_priv;
 	int ret;
 
-	dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
+	dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
@@ -48,7 +48,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->chipset = chipset;
 	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
 	if (ret) {
-		drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER);
+		kfree(dev_priv);
 	}
 
 	return ret;
@@ -59,7 +59,7 @@ static int sis_driver_unload(struct drm_device *dev)
 	drm_sis_private_t *dev_priv = dev->dev_private;
 
 	drm_sman_takedown(&dev_priv->sman);
-	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+	kfree(dev_priv);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index e8f6d2229d8c..4648ed2f0143 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -63,8 +63,7 @@ static int ttm_agp_populate(struct ttm_backend *backend,
 		if (!page)
 			page = dummy_read_page;
 
-		mem->memory[mem->page_count++] =
-		    phys_to_gart(page_to_phys(page));
+		mem->pages[mem->page_count++] = page;
 	}
 	agp_be->mem = mem;
 	return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1587aeca7bea..c1c407f7cca3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -282,7 +282,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
 		if (ret)
-			return ret;
+			goto out_err;
 
 		if (mem->mem_type != TTM_PL_SYSTEM) {
 			ret = ttm_tt_bind(bo->ttm, mem);
@@ -527,9 +527,12 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
 	spin_unlock(&bo->lock);
 
-	if (ret && ret != -ERESTART) {
-		printk(KERN_ERR TTM_PFX "Failed to expire sync object before "
-		       "buffer eviction.\n");
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTART) {
+			printk(KERN_ERR TTM_PFX
+			       "Failed to expire sync object before "
+			       "buffer eviction.\n");
+		}
 		goto out;
 	}
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index c27ab3a877ad..0331fa74cd3f 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -68,7 +68,7 @@ static void ttm_tt_cache_flush_clflush(struct page *pages[],
 		ttm_tt_clflush_page(*pages++);
 	mb();
 }
-#else
+#elif !defined(__powerpc__)
 static void ttm_tt_ipi_handler(void *null)
 {
 	;
@@ -83,6 +83,15 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
 		ttm_tt_cache_flush_clflush(pages, num_pages);
 		return;
 	}
+#elif defined(__powerpc__)
+	unsigned long i;
+
+	for (i = 0; i < num_pages; ++i) {
+		if (pages[i]) {
+			unsigned long start = (unsigned long)page_address(pages[i]);
+			flush_dcache_range(start, start + PAGE_SIZE);
+		}
+	}
 #else
 	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
 		printk(KERN_ERR TTM_PFX
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 2c4f0b485792..6e6f91591639 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -96,7 +96,7 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
 	drm_via_private_t *dev_priv;
 	int ret = 0;
 
-	dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+	dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
@@ -106,14 +106,14 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
 	if (ret) {
-		drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+		kfree(dev_priv);
 		return ret;
 	}
 
 	ret = drm_vblank_init(dev, 1);
 	if (ret) {
 		drm_sman_takedown(&dev_priv->sman);
-		drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+		kfree(dev_priv);
 		return ret;
 	}
 
@@ -126,7 +126,7 @@ int via_driver_unload(struct drm_device *dev)
 
 	drm_sman_takedown(&dev_priv->sman);
 
-	drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+	kfree(dev_priv);
 
 	return 0;
 }