author    Dave Airlie <airlied@linux.ie>  2006-04-07 02:55:43 -0400
committer Dave Airlie <airlied@linux.ie>  2006-04-07 02:55:43 -0400
commit    31f64bd101ea256f9fc4a7f1f1706d6417d5550a (patch)
tree      87f5c3355f891b75656e61acacd5971b6fb672b8 /drivers/char/drm
parent    11bab7d2c86fe486e3581ac3dcdb349478ffb899 (diff)
drm: de-inline a few large inlines in DRM code
This patch moves a few large functions from drm_memory.h to drm_memory.c,
with the following effect:

   text    data   bss    dec   hex  filename
  46305    1304    20  47629  ba0d  new/drm.ko
  46367    1304    20  47691  ba4b  org/drm.ko
  12969    1372     0  14341  3805  new/i810.ko
  14712    1372     0  16084  3ed4  org/i810.ko
  16447    1364     0  17811  4593  new/i830.ko
  18198    1364     0  19562  4c6a  org/i830.ko
  11875    1324     0  13199  338f  new/i915.ko
  13025    1324     0  14349  380d  org/i915.ko
  23936   29288     0  53224  cfe8  new/mga.ko
  27280   29288     0  56568  dcf8  org/mga.ko

Please apply.

Signed-off-by: Denis Vlasenko <vda@ilport.com.ua>
Signed-off-by: Dave Airlie <airlied@linux.ie>
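For context, this is the standard de-inlining pattern: the header keeps only declarations, the single definition is compiled once in a .c file, and every module that includes the header stops carrying its own copy of the function bodies, which is where the per-module text savings above come from. A minimal userspace sketch of the pattern follows; it uses a hypothetical range_lookup() helper rather than the actual DRM types.

/* range.h -- before the change, the whole body would sit here as a
 * `static inline`, duplicated into every translation unit that
 * includes the header; afterwards only the declaration remains. */
#ifndef RANGE_H
#define RANGE_H

struct range {
        unsigned long start;
        unsigned long len;
        struct range *next;
};

/* declaration only; the body is compiled exactly once, in range.c */
struct range *range_lookup(struct range *head,
                           unsigned long offset, unsigned long size);

#endif /* RANGE_H */

/* range.c -- the single copy of the function body */
#include <stddef.h>
#include "range.h"

struct range *range_lookup(struct range *head,
                           unsigned long offset, unsigned long size)
{
        struct range *r;

        /* return the first range that fully covers [offset, offset + size) */
        for (r = head; r; r = r->next)
                if (r->start <= offset && offset + size <= r->start + r->len)
                        return r;
        return NULL;
}

Callers keep building against the unchanged declaration; only where the body is emitted changes, which is why the move needs no edits outside the two files in the diffstat below.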
Diffstat (limited to 'drivers/char/drm')
-rw-r--r--  drivers/char/drm/drm_memory.c  117
-rw-r--r--  drivers/char/drm/drm_memory.h  116
2 files changed, 127 insertions, 106 deletions
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index dddf8de66143..7ea00e3372fd 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -80,6 +80,71 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
 }
 
 #if __OS_HAS_AGP
+/*
+ * Find the drm_map that covers the range [offset, offset+size).
+ */
+drm_map_t *drm_lookup_map(unsigned long offset,
+                          unsigned long size, drm_device_t * dev)
+{
+        struct list_head *list;
+        drm_map_list_t *r_list;
+        drm_map_t *map;
+
+        list_for_each(list, &dev->maplist->head) {
+                r_list = (drm_map_list_t *) list;
+                map = r_list->map;
+                if (!map)
+                        continue;
+                if (map->offset <= offset
+                    && (offset + size) <= (map->offset + map->size))
+                        return map;
+        }
+        return NULL;
+}
+
+void *agp_remap(unsigned long offset, unsigned long size,
+                drm_device_t * dev)
+{
+        unsigned long *phys_addr_map, i, num_pages =
+            PAGE_ALIGN(size) / PAGE_SIZE;
+        struct drm_agp_mem *agpmem;
+        struct page **page_map;
+        void *addr;
+
+        size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+        offset -= dev->hose->mem_space->start;
+#endif
+
+        for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
+                if (agpmem->bound <= offset
+                    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
+                    (offset + size))
+                        break;
+        if (!agpmem)
+                return NULL;
+
+        /*
+         * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+         * the CPU do not get remapped by the GART. We fix this by using the kernel's
+         * page-table instead (that's probably faster anyhow...).
+         */
+        /* note: use vmalloc() because num_pages could be large... */
+        page_map = vmalloc(num_pages * sizeof(struct page *));
+        if (!page_map)
+                return NULL;
+
+        phys_addr_map =
+            agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+        for (i = 0; i < num_pages; ++i)
+                page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+        addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+        vfree(page_map);
+
+        return addr;
+}
+
 /** Wrapper around agp_allocate_memory() */
 DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
 {
@@ -104,4 +169,56 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
         return drm_agp_unbind_memory(handle);
 }
 #endif                          /* agp */
+
+void *drm_ioremap(unsigned long offset, unsigned long size,
+                  drm_device_t * dev)
+{
+        if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
+                drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+                if (map && map->type == _DRM_AGP)
+                        return agp_remap(offset, size, dev);
+        }
+        return ioremap(offset, size);
+}
+EXPORT_SYMBOL(drm_ioremap);
+
+void *drm_ioremap_nocache(unsigned long offset,
+                          unsigned long size, drm_device_t * dev)
+{
+        if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
+                drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+                if (map && map->type == _DRM_AGP)
+                        return agp_remap(offset, size, dev);
+        }
+        return ioremap_nocache(offset, size);
+}
+
+void drm_ioremapfree(void *pt, unsigned long size,
+                     drm_device_t * dev)
+{
+        /*
+         * This is a bit ugly. It would be much cleaner if the DRM API would use separate
+         * routines for handling mappings in the AGP space. Hopefully this can be done in
+         * a future revision of the interface...
+         */
+        if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
+            && ((unsigned long)pt >= VMALLOC_START
+                && (unsigned long)pt < VMALLOC_END)) {
+                unsigned long offset;
+                drm_map_t *map;
+
+                offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
+                map = drm_lookup_map(offset, size, dev);
+                if (map && map->type == _DRM_AGP) {
+                        vunmap(pt);
+                        return;
+                }
+        }
+
+        iounmap(pt);
+}
+EXPORT_SYMBOL(drm_ioremapfree);
+
 #endif                          /* debug_memory */
diff --git a/drivers/char/drm/drm_memory.h b/drivers/char/drm/drm_memory.h
index 3732a61c3762..645a08878e55 100644
--- a/drivers/char/drm/drm_memory.h
+++ b/drivers/char/drm/drm_memory.h
@@ -60,67 +60,11 @@
 /*
  * Find the drm_map that covers the range [offset, offset+size).
  */
-static inline drm_map_t *drm_lookup_map(unsigned long offset,
-                                        unsigned long size, drm_device_t * dev)
-{
-        struct list_head *list;
-        drm_map_list_t *r_list;
-        drm_map_t *map;
-
-        list_for_each(list, &dev->maplist->head) {
-                r_list = (drm_map_list_t *) list;
-                map = r_list->map;
-                if (!map)
-                        continue;
-                if (map->offset <= offset
-                    && (offset + size) <= (map->offset + map->size))
-                        return map;
-        }
-        return NULL;
-}
-
-static inline void *agp_remap(unsigned long offset, unsigned long size,
-                              drm_device_t * dev)
-{
-        unsigned long *phys_addr_map, i, num_pages =
-            PAGE_ALIGN(size) / PAGE_SIZE;
-        struct drm_agp_mem *agpmem;
-        struct page **page_map;
-        void *addr;
-
-        size = PAGE_ALIGN(size);
-
-#ifdef __alpha__
-        offset -= dev->hose->mem_space->start;
-#endif
+drm_map_t *drm_lookup_map(unsigned long offset,
+                          unsigned long size, drm_device_t * dev);
 
-        for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
-                if (agpmem->bound <= offset
-                    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
-                    (offset + size))
-                        break;
-        if (!agpmem)
-                return NULL;
-
-        /*
-         * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
-         * the CPU do not get remapped by the GART. We fix this by using the kernel's
-         * page-table instead (that's probably faster anyhow...).
-         */
-        /* note: use vmalloc() because num_pages could be large... */
-        page_map = vmalloc(num_pages * sizeof(struct page *));
-        if (!page_map)
-                return NULL;
-
-        phys_addr_map =
-            agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
-        for (i = 0; i < num_pages; ++i)
-                page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
-        addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
-        vfree(page_map);
-
-        return addr;
-}
+void *agp_remap(unsigned long offset, unsigned long size,
+                drm_device_t * dev);
 
 static inline unsigned long drm_follow_page(void *vaddr)
 {
@@ -152,51 +96,11 @@ static inline unsigned long drm_follow_page(void *vaddr)
 
 #endif
 
-static inline void *drm_ioremap(unsigned long offset, unsigned long size,
-                                drm_device_t * dev)
-{
-        if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
-                drm_map_t *map = drm_lookup_map(offset, size, dev);
+void *drm_ioremap(unsigned long offset, unsigned long size,
+                  drm_device_t * dev);
 
-                if (map && map->type == _DRM_AGP)
-                        return agp_remap(offset, size, dev);
-        }
-        return ioremap(offset, size);
-}
+void *drm_ioremap_nocache(unsigned long offset,
+                          unsigned long size, drm_device_t * dev);
 
-static inline void *drm_ioremap_nocache(unsigned long offset,
-                                        unsigned long size, drm_device_t * dev)
-{
-        if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
-                drm_map_t *map = drm_lookup_map(offset, size, dev);
-
-                if (map && map->type == _DRM_AGP)
-                        return agp_remap(offset, size, dev);
-        }
-        return ioremap_nocache(offset, size);
-}
-
-static inline void drm_ioremapfree(void *pt, unsigned long size,
-                                   drm_device_t * dev)
-{
-        /*
-         * This is a bit ugly. It would be much cleaner if the DRM API would use separate
-         * routines for handling mappings in the AGP space. Hopefully this can be done in
-         * a future revision of the interface...
-         */
-        if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
-            && ((unsigned long)pt >= VMALLOC_START
-                && (unsigned long)pt < VMALLOC_END)) {
-                unsigned long offset;
-                drm_map_t *map;
-
-                offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
-                map = drm_lookup_map(offset, size, dev);
-                if (map && map->type == _DRM_AGP) {
-                        vunmap(pt);
-                        return;
-                }
-        }
-
-        iounmap(pt);
-}
+void drm_ioremapfree(void *pt, unsigned long size,
+                     drm_device_t * dev);