author | Dave Airlie <airlied@redhat.com> | 2008-11-27 23:22:24 -0500
committer | Dave Airlie <airlied@linux.ie> | 2008-12-29 02:47:22 -0500
commit | 7c1c2871a6a3a114853ec6836e9035ac1c0c7f7a (patch)
tree | 1b5debcc86ff20bd5e11b42ea5c52da42214e376
parent | e7f7ab45ebcb54fd5f814ea15ea079e079662f67 (diff)
drm: move to kref per-master structures.
This is step one towards having multiple masters sharing a drm
device, which is needed to make fast user switching work.

It splits the information associated with the drm master out into a
separate kref-counted structure, allocated when a master opens the
device node. It also allows the current master to abdicate (for
example while VT-switched away) and a new master to take over the
hardware.

It moves the Intel and radeon drivers over to using the sarea from
within the new master structure.
Signed-off-by: Dave Airlie <airlied@redhat.com>
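For orientation, the sketch below is a toy userspace model of the ownership scheme this patch introduces: the first fd to open a minor creates the master, every open fd holds its own reference, and the master is torn down when the last reference is dropped. It is not kernel code; the names mirror drm_master_create()/drm_master_get()/drm_master_put() and the drm_file/drm_minor fields added in the drm_stub.c and drm_fops.c hunks below, while the real code's struct kref, dev->struct_mutex locking, magic hashing and driver master_create/master_destroy callbacks are all omitted.

```c
/* Toy model (userspace, not kernel code) of the per-master refcounting
 * added by this patch. Plain int stands in for struct kref. */
#include <stdio.h>
#include <stdlib.h>

struct drm_master {
	int refcount;                /* stands in for struct kref */
	struct drm_minor *minor;
	char *unique;                /* busid, now owned per-master */
};

struct drm_minor {
	struct drm_master *master;   /* current master, NULL if none */
};

struct drm_file {
	struct drm_master *master;   /* reference held by this open fd */
	int is_master;
};

static struct drm_master *drm_master_create(struct drm_minor *minor)
{
	struct drm_master *master = calloc(1, sizeof(*master));
	if (!master)
		return NULL;
	master->refcount = 1;        /* reference held by the minor */
	master->minor = minor;
	return master;
}

static struct drm_master *drm_master_get(struct drm_master *master)
{
	master->refcount++;
	return master;
}

static void drm_master_put(struct drm_master **master)
{
	if (--(*master)->refcount == 0) {
		free((*master)->unique);
		free(*master);
	}
	*master = NULL;
}

/* First opener becomes master; later openers just take a reference. */
static void drm_open_helper(struct drm_minor *minor, struct drm_file *priv)
{
	if (!minor->master) {
		minor->master = drm_master_create(minor);
		priv->is_master = 1;
		/* second reference for the fd's own copy */
		priv->master = drm_master_get(minor->master);
	} else {
		priv->master = drm_master_get(minor->master);
	}
}

static void drm_release(struct drm_minor *minor, struct drm_file *priv)
{
	/* the master fd drops the minor's reference when it closes */
	if (priv->is_master && minor->master == priv->master)
		drm_master_put(&minor->master);
	/* every fd drops its own reference */
	drm_master_put(&priv->master);
}

int main(void)
{
	struct drm_minor minor = { 0 };
	struct drm_file x = { 0 }, client = { 0 };

	drm_open_helper(&minor, &x);       /* X server: creates the master */
	drm_open_helper(&minor, &client);  /* DRI client: refcount now 3   */
	drm_release(&minor, &x);           /* minor loses its master       */
	drm_release(&minor, &client);      /* last reference, master freed */
	printf("master torn down: %s\n", minor.master ? "no" : "yes");
	return 0;
}
```

In this model, as in the patch, per-device state such as the hardware lock, the busid and the magic list lives in the master rather than in struct drm_device, which is what lets a new master take over after the old one drops out.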
-rw-r--r-- | drivers/gpu/drm/drm_auth.c | 29
-rw-r--r-- | drivers/gpu/drm/drm_bufs.c | 20
-rw-r--r-- | drivers/gpu/drm/drm_context.c | 10
-rw-r--r-- | drivers/gpu/drm/drm_drv.c | 33
-rw-r--r-- | drivers/gpu/drm/drm_fops.c | 201
-rw-r--r-- | drivers/gpu/drm/drm_ioctl.c | 57
-rw-r--r-- | drivers/gpu/drm/drm_lock.c | 42
-rw-r--r-- | drivers/gpu/drm/drm_proc.c | 8
-rw-r--r-- | drivers/gpu/drm/drm_stub.c | 105
-rw-r--r-- | drivers/gpu/drm/i915/i915_dma.c | 81
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 10
-rw-r--r-- | drivers/gpu/drm/i915/i915_irq.c | 30
-rw-r--r-- | drivers/gpu/drm/i915/i915_mem.c | 3
-rw-r--r-- | drivers/gpu/drm/radeon/r300_cmdbuf.c | 11
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_cp.c | 73
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_drv.h | 14
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_state.c | 166
-rw-r--r-- | include/drm/drm.h | 3
-rw-r--r-- | include/drm/drmP.h | 64
-rw-r--r-- | include/drm/drm_sarea.h | 6
21 files changed, 617 insertions, 351 deletions
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index a73462723d2d..ca7a9ef5007b 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -45,14 +45,15 @@ | |||
45 | * the one with matching magic number, while holding the drm_device::struct_mutex | 45 | * the one with matching magic number, while holding the drm_device::struct_mutex |
46 | * lock. | 46 | * lock. |
47 | */ | 47 | */ |
48 | static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic) | 48 | static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic) |
49 | { | 49 | { |
50 | struct drm_file *retval = NULL; | 50 | struct drm_file *retval = NULL; |
51 | struct drm_magic_entry *pt; | 51 | struct drm_magic_entry *pt; |
52 | struct drm_hash_item *hash; | 52 | struct drm_hash_item *hash; |
53 | struct drm_device *dev = master->minor->dev; | ||
53 | 54 | ||
54 | mutex_lock(&dev->struct_mutex); | 55 | mutex_lock(&dev->struct_mutex); |
55 | if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { | 56 | if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { |
56 | pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); | 57 | pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); |
57 | retval = pt->priv; | 58 | retval = pt->priv; |
58 | } | 59 | } |
@@ -71,11 +72,11 @@ static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic | |||
71 | * associated the magic number hash key in drm_device::magiclist, while holding | 72 | * associated the magic number hash key in drm_device::magiclist, while holding |
72 | * the drm_device::struct_mutex lock. | 73 | * the drm_device::struct_mutex lock. |
73 | */ | 74 | */ |
74 | static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, | 75 | static int drm_add_magic(struct drm_master *master, struct drm_file *priv, |
75 | drm_magic_t magic) | 76 | drm_magic_t magic) |
76 | { | 77 | { |
77 | struct drm_magic_entry *entry; | 78 | struct drm_magic_entry *entry; |
78 | 79 | struct drm_device *dev = master->minor->dev; | |
79 | DRM_DEBUG("%d\n", magic); | 80 | DRM_DEBUG("%d\n", magic); |
80 | 81 | ||
81 | entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); | 82 | entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); |
@@ -83,11 +84,10 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, | |||
83 | return -ENOMEM; | 84 | return -ENOMEM; |
84 | memset(entry, 0, sizeof(*entry)); | 85 | memset(entry, 0, sizeof(*entry)); |
85 | entry->priv = priv; | 86 | entry->priv = priv; |
86 | |||
87 | entry->hash_item.key = (unsigned long)magic; | 87 | entry->hash_item.key = (unsigned long)magic; |
88 | mutex_lock(&dev->struct_mutex); | 88 | mutex_lock(&dev->struct_mutex); |
89 | drm_ht_insert_item(&dev->magiclist, &entry->hash_item); | 89 | drm_ht_insert_item(&master->magiclist, &entry->hash_item); |
90 | list_add_tail(&entry->head, &dev->magicfree); | 90 | list_add_tail(&entry->head, &master->magicfree); |
91 | mutex_unlock(&dev->struct_mutex); | 91 | mutex_unlock(&dev->struct_mutex); |
92 | 92 | ||
93 | return 0; | 93 | return 0; |
@@ -102,20 +102,21 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, | |||
102 | * Searches and unlinks the entry in drm_device::magiclist with the magic | 102 | * Searches and unlinks the entry in drm_device::magiclist with the magic |
103 | * number hash key, while holding the drm_device::struct_mutex lock. | 103 | * number hash key, while holding the drm_device::struct_mutex lock. |
104 | */ | 104 | */ |
105 | static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) | 105 | static int drm_remove_magic(struct drm_master *master, drm_magic_t magic) |
106 | { | 106 | { |
107 | struct drm_magic_entry *pt; | 107 | struct drm_magic_entry *pt; |
108 | struct drm_hash_item *hash; | 108 | struct drm_hash_item *hash; |
109 | struct drm_device *dev = master->minor->dev; | ||
109 | 110 | ||
110 | DRM_DEBUG("%d\n", magic); | 111 | DRM_DEBUG("%d\n", magic); |
111 | 112 | ||
112 | mutex_lock(&dev->struct_mutex); | 113 | mutex_lock(&dev->struct_mutex); |
113 | if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { | 114 | if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { |
114 | mutex_unlock(&dev->struct_mutex); | 115 | mutex_unlock(&dev->struct_mutex); |
115 | return -EINVAL; | 116 | return -EINVAL; |
116 | } | 117 | } |
117 | pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); | 118 | pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); |
118 | drm_ht_remove_item(&dev->magiclist, hash); | 119 | drm_ht_remove_item(&master->magiclist, hash); |
119 | list_del(&pt->head); | 120 | list_del(&pt->head); |
120 | mutex_unlock(&dev->struct_mutex); | 121 | mutex_unlock(&dev->struct_mutex); |
121 | 122 | ||
@@ -153,9 +154,9 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
153 | ++sequence; /* reserve 0 */ | 154 | ++sequence; /* reserve 0 */ |
154 | auth->magic = sequence++; | 155 | auth->magic = sequence++; |
155 | spin_unlock(&lock); | 156 | spin_unlock(&lock); |
156 | } while (drm_find_file(dev, auth->magic)); | 157 | } while (drm_find_file(file_priv->master, auth->magic)); |
157 | file_priv->magic = auth->magic; | 158 | file_priv->magic = auth->magic; |
158 | drm_add_magic(dev, file_priv, auth->magic); | 159 | drm_add_magic(file_priv->master, file_priv, auth->magic); |
159 | } | 160 | } |
160 | 161 | ||
161 | DRM_DEBUG("%u\n", auth->magic); | 162 | DRM_DEBUG("%u\n", auth->magic); |
@@ -181,9 +182,9 @@ int drm_authmagic(struct drm_device *dev, void *data, | |||
181 | struct drm_file *file; | 182 | struct drm_file *file; |
182 | 183 | ||
183 | DRM_DEBUG("%u\n", auth->magic); | 184 | DRM_DEBUG("%u\n", auth->magic); |
184 | if ((file = drm_find_file(dev, auth->magic))) { | 185 | if ((file = drm_find_file(file_priv->master, auth->magic))) { |
185 | file->authenticated = 1; | 186 | file->authenticated = 1; |
186 | drm_remove_magic(dev, auth->magic); | 187 | drm_remove_magic(file_priv->master, auth->magic); |
187 | return 0; | 188 | return 0; |
188 | } | 189 | } |
189 | return -EINVAL; | 190 | return -EINVAL; |
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index bde64b84166e..dc3ce3e0a0a4 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -54,9 +54,9 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, | |||
54 | { | 54 | { |
55 | struct drm_map_list *entry; | 55 | struct drm_map_list *entry; |
56 | list_for_each_entry(entry, &dev->maplist, head) { | 56 | list_for_each_entry(entry, &dev->maplist, head) { |
57 | if (entry->map && map->type == entry->map->type && | 57 | if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) && |
58 | ((entry->map->offset == map->offset) || | 58 | ((entry->map->offset == map->offset) || |
59 | (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) { | 59 | ((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) { |
60 | return entry; | 60 | return entry; |
61 | } | 61 | } |
62 | } | 62 | } |
@@ -210,12 +210,12 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset, | |||
210 | map->offset = (unsigned long)map->handle; | 210 | map->offset = (unsigned long)map->handle; |
211 | if (map->flags & _DRM_CONTAINS_LOCK) { | 211 | if (map->flags & _DRM_CONTAINS_LOCK) { |
212 | /* Prevent a 2nd X Server from creating a 2nd lock */ | 212 | /* Prevent a 2nd X Server from creating a 2nd lock */ |
213 | if (dev->lock.hw_lock != NULL) { | 213 | if (dev->primary->master->lock.hw_lock != NULL) { |
214 | vfree(map->handle); | 214 | vfree(map->handle); |
215 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | 215 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
216 | return -EBUSY; | 216 | return -EBUSY; |
217 | } | 217 | } |
218 | dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */ | 218 | dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ |
219 | } | 219 | } |
220 | break; | 220 | break; |
221 | case _DRM_AGP: { | 221 | case _DRM_AGP: { |
@@ -319,6 +319,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset, | |||
319 | list->user_token = list->hash.key << PAGE_SHIFT; | 319 | list->user_token = list->hash.key << PAGE_SHIFT; |
320 | mutex_unlock(&dev->struct_mutex); | 320 | mutex_unlock(&dev->struct_mutex); |
321 | 321 | ||
322 | list->master = dev->primary->master; | ||
322 | *maplist = list; | 323 | *maplist = list; |
323 | return 0; | 324 | return 0; |
324 | } | 325 | } |
@@ -345,7 +346,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data, | |||
345 | struct drm_map_list *maplist; | 346 | struct drm_map_list *maplist; |
346 | int err; | 347 | int err; |
347 | 348 | ||
348 | if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP)) | 349 | if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM)) |
349 | return -EPERM; | 350 | return -EPERM; |
350 | 351 | ||
351 | err = drm_addmap_core(dev, map->offset, map->size, map->type, | 352 | err = drm_addmap_core(dev, map->offset, map->size, map->type, |
@@ -380,10 +381,12 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) | |||
380 | struct drm_map_list *r_list = NULL, *list_t; | 381 | struct drm_map_list *r_list = NULL, *list_t; |
381 | drm_dma_handle_t dmah; | 382 | drm_dma_handle_t dmah; |
382 | int found = 0; | 383 | int found = 0; |
384 | struct drm_master *master; | ||
383 | 385 | ||
384 | /* Find the list entry for the map and remove it */ | 386 | /* Find the list entry for the map and remove it */ |
385 | list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { | 387 | list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { |
386 | if (r_list->map == map) { | 388 | if (r_list->map == map) { |
389 | master = r_list->master; | ||
387 | list_del(&r_list->head); | 390 | list_del(&r_list->head); |
388 | drm_ht_remove_key(&dev->map_hash, | 391 | drm_ht_remove_key(&dev->map_hash, |
389 | r_list->user_token >> PAGE_SHIFT); | 392 | r_list->user_token >> PAGE_SHIFT); |
@@ -409,6 +412,13 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) | |||
409 | break; | 412 | break; |
410 | case _DRM_SHM: | 413 | case _DRM_SHM: |
411 | vfree(map->handle); | 414 | vfree(map->handle); |
415 | if (master) { | ||
416 | if (dev->sigdata.lock == master->lock.hw_lock) | ||
417 | dev->sigdata.lock = NULL; | ||
418 | master->lock.hw_lock = NULL; /* SHM removed */ | ||
419 | master->lock.file_priv = NULL; | ||
420 | wake_up_interruptible(&master->lock.lock_queue); | ||
421 | } | ||
412 | break; | 422 | break; |
413 | case _DRM_AGP: | 423 | case _DRM_AGP: |
414 | case _DRM_SCATTER_GATHER: | 424 | case _DRM_SCATTER_GATHER: |
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index d505f695421f..809ec0f03452 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -256,12 +256,13 @@ static int drm_context_switch(struct drm_device * dev, int old, int new) | |||
256 | * hardware lock is held, clears the drm_device::context_flag and wakes up | 256 | * hardware lock is held, clears the drm_device::context_flag and wakes up |
257 | * drm_device::context_wait. | 257 | * drm_device::context_wait. |
258 | */ | 258 | */ |
259 | static int drm_context_switch_complete(struct drm_device * dev, int new) | 259 | static int drm_context_switch_complete(struct drm_device *dev, |
260 | struct drm_file *file_priv, int new) | ||
260 | { | 261 | { |
261 | dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ | 262 | dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ |
262 | dev->last_switch = jiffies; | 263 | dev->last_switch = jiffies; |
263 | 264 | ||
264 | if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { | 265 | if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { |
265 | DRM_ERROR("Lock isn't held after context switch\n"); | 266 | DRM_ERROR("Lock isn't held after context switch\n"); |
266 | } | 267 | } |
267 | 268 | ||
@@ -420,7 +421,7 @@ int drm_newctx(struct drm_device *dev, void *data, | |||
420 | struct drm_ctx *ctx = data; | 421 | struct drm_ctx *ctx = data; |
421 | 422 | ||
422 | DRM_DEBUG("%d\n", ctx->handle); | 423 | DRM_DEBUG("%d\n", ctx->handle); |
423 | drm_context_switch_complete(dev, ctx->handle); | 424 | drm_context_switch_complete(dev, file_priv, ctx->handle); |
424 | 425 | ||
425 | return 0; | 426 | return 0; |
426 | } | 427 | } |
@@ -442,9 +443,6 @@ int drm_rmctx(struct drm_device *dev, void *data, | |||
442 | struct drm_ctx *ctx = data; | 443 | struct drm_ctx *ctx = data; |
443 | 444 | ||
444 | DRM_DEBUG("%d\n", ctx->handle); | 445 | DRM_DEBUG("%d\n", ctx->handle); |
445 | if (ctx->handle == DRM_KERNEL_CONTEXT + 1) { | ||
446 | file_priv->remove_auth_on_close = 1; | ||
447 | } | ||
448 | if (ctx->handle != DRM_KERNEL_CONTEXT) { | 446 | if (ctx->handle != DRM_KERNEL_CONTEXT) { |
449 | if (dev->driver->context_dtor) | 447 | if (dev->driver->context_dtor) |
450 | dev->driver->context_dtor(dev, ctx->handle); | 448 | dev->driver->context_dtor(dev, ctx->handle); |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3cb87a932b33..9f04ca37df6d 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -74,6 +74,9 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
74 | DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 74 | DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
75 | DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), | 75 | DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), |
76 | 76 | ||
77 | DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), | ||
78 | DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), | ||
79 | |||
77 | DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), | 80 | DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), |
78 | DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 81 | DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
79 | DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 82 | DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
@@ -138,8 +141,6 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
138 | */ | 141 | */ |
139 | int drm_lastclose(struct drm_device * dev) | 142 | int drm_lastclose(struct drm_device * dev) |
140 | { | 143 | { |
141 | struct drm_magic_entry *pt, *next; | ||
142 | struct drm_map_list *r_list, *list_t; | ||
143 | struct drm_vma_entry *vma, *vma_temp; | 144 | struct drm_vma_entry *vma, *vma_temp; |
144 | int i; | 145 | int i; |
145 | 146 | ||
@@ -149,12 +150,6 @@ int drm_lastclose(struct drm_device * dev) | |||
149 | dev->driver->lastclose(dev); | 150 | dev->driver->lastclose(dev); |
150 | DRM_DEBUG("driver lastclose completed\n"); | 151 | DRM_DEBUG("driver lastclose completed\n"); |
151 | 152 | ||
152 | if (dev->unique) { | ||
153 | drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); | ||
154 | dev->unique = NULL; | ||
155 | dev->unique_len = 0; | ||
156 | } | ||
157 | |||
158 | if (dev->irq_enabled) | 153 | if (dev->irq_enabled) |
159 | drm_irq_uninstall(dev); | 154 | drm_irq_uninstall(dev); |
160 | 155 | ||
@@ -164,16 +159,6 @@ int drm_lastclose(struct drm_device * dev) | |||
164 | drm_drawable_free_all(dev); | 159 | drm_drawable_free_all(dev); |
165 | del_timer(&dev->timer); | 160 | del_timer(&dev->timer); |
166 | 161 | ||
167 | /* Clear pid list */ | ||
168 | if (dev->magicfree.next) { | ||
169 | list_for_each_entry_safe(pt, next, &dev->magicfree, head) { | ||
170 | list_del(&pt->head); | ||
171 | drm_ht_remove_item(&dev->magiclist, &pt->hash_item); | ||
172 | drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); | ||
173 | } | ||
174 | drm_ht_remove(&dev->magiclist); | ||
175 | } | ||
176 | |||
177 | /* Clear AGP information */ | 162 | /* Clear AGP information */ |
178 | if (drm_core_has_AGP(dev) && dev->agp) { | 163 | if (drm_core_has_AGP(dev) && dev->agp) { |
179 | struct drm_agp_mem *entry, *tempe; | 164 | struct drm_agp_mem *entry, *tempe; |
@@ -205,13 +190,6 @@ int drm_lastclose(struct drm_device * dev) | |||
205 | drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); | 190 | drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); |
206 | } | 191 | } |
207 | 192 | ||
208 | list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { | ||
209 | if (!(r_list->map->flags & _DRM_DRIVER)) { | ||
210 | drm_rmmap_locked(dev, r_list->map); | ||
211 | r_list = NULL; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { | 193 | if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { |
216 | for (i = 0; i < dev->queue_count; i++) { | 194 | for (i = 0; i < dev->queue_count; i++) { |
217 | if (dev->queuelist[i]) { | 195 | if (dev->queuelist[i]) { |
@@ -231,11 +209,6 @@ int drm_lastclose(struct drm_device * dev) | |||
231 | if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | 209 | if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
232 | drm_dma_takedown(dev); | 210 | drm_dma_takedown(dev); |
233 | 211 | ||
234 | if (dev->lock.hw_lock) { | ||
235 | dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ | ||
236 | dev->lock.file_priv = NULL; | ||
237 | wake_up_interruptible(&dev->lock.lock_queue); | ||
238 | } | ||
239 | mutex_unlock(&dev->struct_mutex); | 212 | mutex_unlock(&dev->struct_mutex); |
240 | 213 | ||
241 | DRM_DEBUG("lastclose completed\n"); | 214 | DRM_DEBUG("lastclose completed\n"); |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 78eeed5caaff..f2285237df49 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -44,10 +44,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
44 | 44 | ||
45 | static int drm_setup(struct drm_device * dev) | 45 | static int drm_setup(struct drm_device * dev) |
46 | { | 46 | { |
47 | drm_local_map_t *map; | ||
48 | int i; | 47 | int i; |
49 | int ret; | 48 | int ret; |
50 | u32 sareapage; | ||
51 | 49 | ||
52 | if (dev->driver->firstopen) { | 50 | if (dev->driver->firstopen) { |
53 | ret = dev->driver->firstopen(dev); | 51 | ret = dev->driver->firstopen(dev); |
@@ -55,14 +53,6 @@ static int drm_setup(struct drm_device * dev) | |||
55 | return ret; | 53 | return ret; |
56 | } | 54 | } |
57 | 55 | ||
58 | dev->magicfree.next = NULL; | ||
59 | |||
60 | /* prebuild the SAREA */ | ||
61 | sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE); | ||
62 | i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map); | ||
63 | if (i != 0) | ||
64 | return i; | ||
65 | |||
66 | atomic_set(&dev->ioctl_count, 0); | 56 | atomic_set(&dev->ioctl_count, 0); |
67 | atomic_set(&dev->vma_count, 0); | 57 | atomic_set(&dev->vma_count, 0); |
68 | dev->buf_use = 0; | 58 | dev->buf_use = 0; |
@@ -77,16 +67,12 @@ static int drm_setup(struct drm_device * dev) | |||
77 | for (i = 0; i < ARRAY_SIZE(dev->counts); i++) | 67 | for (i = 0; i < ARRAY_SIZE(dev->counts); i++) |
78 | atomic_set(&dev->counts[i], 0); | 68 | atomic_set(&dev->counts[i], 0); |
79 | 69 | ||
80 | drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER); | ||
81 | INIT_LIST_HEAD(&dev->magicfree); | ||
82 | |||
83 | dev->sigdata.lock = NULL; | 70 | dev->sigdata.lock = NULL; |
84 | init_waitqueue_head(&dev->lock.lock_queue); | 71 | |
85 | dev->queue_count = 0; | 72 | dev->queue_count = 0; |
86 | dev->queue_reserved = 0; | 73 | dev->queue_reserved = 0; |
87 | dev->queue_slots = 0; | 74 | dev->queue_slots = 0; |
88 | dev->queuelist = NULL; | 75 | dev->queuelist = NULL; |
89 | dev->irq_enabled = 0; | ||
90 | dev->context_flag = 0; | 76 | dev->context_flag = 0; |
91 | dev->interrupt_flag = 0; | 77 | dev->interrupt_flag = 0; |
92 | dev->dma_flag = 0; | 78 | dev->dma_flag = 0; |
@@ -265,10 +251,42 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
265 | goto out_free; | 251 | goto out_free; |
266 | } | 252 | } |
267 | 253 | ||
254 | |||
255 | /* if there is no current master make this fd it */ | ||
268 | mutex_lock(&dev->struct_mutex); | 256 | mutex_lock(&dev->struct_mutex); |
269 | if (list_empty(&dev->filelist)) | 257 | if (!priv->minor->master) { |
270 | priv->master = 1; | 258 | /* create a new master */ |
259 | priv->minor->master = drm_master_create(priv->minor); | ||
260 | if (!priv->minor->master) { | ||
261 | ret = -ENOMEM; | ||
262 | goto out_free; | ||
263 | } | ||
264 | |||
265 | priv->is_master = 1; | ||
266 | /* take another reference for the copy in the local file priv */ | ||
267 | priv->master = drm_master_get(priv->minor->master); | ||
268 | |||
269 | priv->authenticated = 1; | ||
270 | |||
271 | mutex_unlock(&dev->struct_mutex); | ||
272 | if (dev->driver->master_create) { | ||
273 | ret = dev->driver->master_create(dev, priv->master); | ||
274 | if (ret) { | ||
275 | mutex_lock(&dev->struct_mutex); | ||
276 | /* drop both references if this fails */ | ||
277 | drm_master_put(&priv->minor->master); | ||
278 | drm_master_put(&priv->master); | ||
279 | mutex_unlock(&dev->struct_mutex); | ||
280 | goto out_free; | ||
281 | } | ||
282 | } | ||
283 | } else { | ||
284 | /* get a reference to the master */ | ||
285 | priv->master = drm_master_get(priv->minor->master); | ||
286 | mutex_unlock(&dev->struct_mutex); | ||
287 | } | ||
271 | 288 | ||
289 | mutex_lock(&dev->struct_mutex); | ||
272 | list_add(&priv->lhead, &dev->filelist); | 290 | list_add(&priv->lhead, &dev->filelist); |
273 | mutex_unlock(&dev->struct_mutex); | 291 | mutex_unlock(&dev->struct_mutex); |
274 | 292 | ||
@@ -314,6 +332,74 @@ int drm_fasync(int fd, struct file *filp, int on) | |||
314 | } | 332 | } |
315 | EXPORT_SYMBOL(drm_fasync); | 333 | EXPORT_SYMBOL(drm_fasync); |
316 | 334 | ||
335 | /* | ||
336 | * Reclaim locked buffers; note that this may be a bad idea if the current | ||
337 | * context doesn't have the hw lock... | ||
338 | */ | ||
339 | static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f) | ||
340 | { | ||
341 | struct drm_file *file_priv = f->private_data; | ||
342 | |||
343 | if (drm_i_have_hw_lock(dev, file_priv)) { | ||
344 | dev->driver->reclaim_buffers_locked(dev, file_priv); | ||
345 | } else { | ||
346 | unsigned long _end = jiffies + 3 * DRM_HZ; | ||
347 | int locked = 0; | ||
348 | |||
349 | drm_idlelock_take(&file_priv->master->lock); | ||
350 | |||
351 | /* | ||
352 | * Wait for a while. | ||
353 | */ | ||
354 | do { | ||
355 | spin_lock_bh(&file_priv->master->lock.spinlock); | ||
356 | locked = file_priv->master->lock.idle_has_lock; | ||
357 | spin_unlock_bh(&file_priv->master->lock.spinlock); | ||
358 | if (locked) | ||
359 | break; | ||
360 | schedule(); | ||
361 | } while (!time_after_eq(jiffies, _end)); | ||
362 | |||
363 | if (!locked) { | ||
364 | DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n" | ||
365 | "\tdriver to use reclaim_buffers_idlelocked() instead.\n" | ||
366 | "\tI will go on reclaiming the buffers anyway.\n"); | ||
367 | } | ||
368 | |||
369 | dev->driver->reclaim_buffers_locked(dev, file_priv); | ||
370 | drm_idlelock_release(&file_priv->master->lock); | ||
371 | } | ||
372 | } | ||
373 | |||
374 | static void drm_master_release(struct drm_device *dev, struct file *filp) | ||
375 | { | ||
376 | struct drm_file *file_priv = filp->private_data; | ||
377 | |||
378 | if (dev->driver->reclaim_buffers_locked && | ||
379 | file_priv->master->lock.hw_lock) | ||
380 | drm_reclaim_locked_buffers(dev, filp); | ||
381 | |||
382 | if (dev->driver->reclaim_buffers_idlelocked && | ||
383 | file_priv->master->lock.hw_lock) { | ||
384 | drm_idlelock_take(&file_priv->master->lock); | ||
385 | dev->driver->reclaim_buffers_idlelocked(dev, file_priv); | ||
386 | drm_idlelock_release(&file_priv->master->lock); | ||
387 | } | ||
388 | |||
389 | |||
390 | if (drm_i_have_hw_lock(dev, file_priv)) { | ||
391 | DRM_DEBUG("File %p released, freeing lock for context %d\n", | ||
392 | filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); | ||
393 | drm_lock_free(&file_priv->master->lock, | ||
394 | _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); | ||
395 | } | ||
396 | |||
397 | if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && | ||
398 | !dev->driver->reclaim_buffers_locked) { | ||
399 | dev->driver->reclaim_buffers(dev, file_priv); | ||
400 | } | ||
401 | } | ||
402 | |||
317 | /** | 403 | /** |
318 | * Release file. | 404 | * Release file. |
319 | * | 405 | * |
@@ -348,60 +434,9 @@ int drm_release(struct inode *inode, struct file *filp) | |||
348 | (long)old_encode_dev(file_priv->minor->device), | 434 | (long)old_encode_dev(file_priv->minor->device), |
349 | dev->open_count); | 435 | dev->open_count); |
350 | 436 | ||
351 | if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { | 437 | /* if the master has gone away we can't do anything with the lock */ |
352 | if (drm_i_have_hw_lock(dev, file_priv)) { | 438 | if (file_priv->minor->master) |
353 | dev->driver->reclaim_buffers_locked(dev, file_priv); | 439 | drm_master_release(dev, filp); |
354 | } else { | ||
355 | unsigned long endtime = jiffies + 3 * DRM_HZ; | ||
356 | int locked = 0; | ||
357 | |||
358 | drm_idlelock_take(&dev->lock); | ||
359 | |||
360 | /* | ||
361 | * Wait for a while. | ||
362 | */ | ||
363 | |||
364 | do{ | ||
365 | spin_lock_bh(&dev->lock.spinlock); | ||
366 | locked = dev->lock.idle_has_lock; | ||
367 | spin_unlock_bh(&dev->lock.spinlock); | ||
368 | if (locked) | ||
369 | break; | ||
370 | schedule(); | ||
371 | } while (!time_after_eq(jiffies, endtime)); | ||
372 | |||
373 | if (!locked) { | ||
374 | DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n" | ||
375 | "\tdriver to use reclaim_buffers_idlelocked() instead.\n" | ||
376 | "\tI will go on reclaiming the buffers anyway.\n"); | ||
377 | } | ||
378 | |||
379 | dev->driver->reclaim_buffers_locked(dev, file_priv); | ||
380 | drm_idlelock_release(&dev->lock); | ||
381 | } | ||
382 | } | ||
383 | |||
384 | if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) { | ||
385 | |||
386 | drm_idlelock_take(&dev->lock); | ||
387 | dev->driver->reclaim_buffers_idlelocked(dev, file_priv); | ||
388 | drm_idlelock_release(&dev->lock); | ||
389 | |||
390 | } | ||
391 | |||
392 | if (drm_i_have_hw_lock(dev, file_priv)) { | ||
393 | DRM_DEBUG("File %p released, freeing lock for context %d\n", | ||
394 | filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); | ||
395 | |||
396 | drm_lock_free(&dev->lock, | ||
397 | _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); | ||
398 | } | ||
399 | |||
400 | |||
401 | if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && | ||
402 | !dev->driver->reclaim_buffers_locked) { | ||
403 | dev->driver->reclaim_buffers(dev, file_priv); | ||
404 | } | ||
405 | 440 | ||
406 | if (dev->driver->driver_features & DRIVER_GEM) | 441 | if (dev->driver->driver_features & DRIVER_GEM) |
407 | drm_gem_release(dev, file_priv); | 442 | drm_gem_release(dev, file_priv); |
@@ -428,12 +463,24 @@ int drm_release(struct inode *inode, struct file *filp) | |||
428 | mutex_unlock(&dev->ctxlist_mutex); | 463 | mutex_unlock(&dev->ctxlist_mutex); |
429 | 464 | ||
430 | mutex_lock(&dev->struct_mutex); | 465 | mutex_lock(&dev->struct_mutex); |
431 | if (file_priv->remove_auth_on_close == 1) { | 466 | |
467 | if (file_priv->is_master) { | ||
432 | struct drm_file *temp; | 468 | struct drm_file *temp; |
469 | list_for_each_entry(temp, &dev->filelist, lhead) { | ||
470 | if ((temp->master == file_priv->master) && | ||
471 | (temp != file_priv)) | ||
472 | temp->authenticated = 0; | ||
473 | } | ||
433 | 474 | ||
434 | list_for_each_entry(temp, &dev->filelist, lhead) | 475 | if (file_priv->minor->master == file_priv->master) { |
435 | temp->authenticated = 0; | 476 | /* drop the reference held by the minor */ |
477 | drm_master_put(&file_priv->minor->master); | ||
478 | } | ||
436 | } | 479 | } |
480 | |||
481 | /* drop the reference held by the file priv */ ||
482 | drm_master_put(&file_priv->master); | ||
483 | file_priv->is_master = 0; | ||
437 | list_del(&file_priv->lhead); | 484 | list_del(&file_priv->lhead); |
438 | mutex_unlock(&dev->struct_mutex); | 485 | mutex_unlock(&dev->struct_mutex); |
439 | 486 | ||
@@ -448,9 +495,9 @@ int drm_release(struct inode *inode, struct file *filp) | |||
448 | atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); | 495 | atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); |
449 | spin_lock(&dev->count_lock); | 496 | spin_lock(&dev->count_lock); |
450 | if (!--dev->open_count) { | 497 | if (!--dev->open_count) { |
451 | if (atomic_read(&dev->ioctl_count) || dev->blocked) { | 498 | if (atomic_read(&dev->ioctl_count)) { |
452 | DRM_ERROR("Device busy: %d %d\n", | 499 | DRM_ERROR("Device busy: %d\n", |
453 | atomic_read(&dev->ioctl_count), dev->blocked); | 500 | atomic_read(&dev->ioctl_count)); |
454 | spin_unlock(&dev->count_lock); | 501 | spin_unlock(&dev->count_lock); |
455 | unlock_kernel(); | 502 | unlock_kernel(); |
456 | return -EBUSY; | 503 | return -EBUSY; |
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 16829fb3089d..e35126a35093 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -53,12 +53,13 @@ int drm_getunique(struct drm_device *dev, void *data, | |||
53 | struct drm_file *file_priv) | 53 | struct drm_file *file_priv) |
54 | { | 54 | { |
55 | struct drm_unique *u = data; | 55 | struct drm_unique *u = data; |
56 | struct drm_master *master = file_priv->master; | ||
56 | 57 | ||
57 | if (u->unique_len >= dev->unique_len) { | 58 | if (u->unique_len >= master->unique_len) { |
58 | if (copy_to_user(u->unique, dev->unique, dev->unique_len)) | 59 | if (copy_to_user(u->unique, master->unique, master->unique_len)) |
59 | return -EFAULT; | 60 | return -EFAULT; |
60 | } | 61 | } |
61 | u->unique_len = dev->unique_len; | 62 | u->unique_len = master->unique_len; |
62 | 63 | ||
63 | return 0; | 64 | return 0; |
64 | } | 65 | } |
@@ -81,36 +82,37 @@ int drm_setunique(struct drm_device *dev, void *data, | |||
81 | struct drm_file *file_priv) | 82 | struct drm_file *file_priv) |
82 | { | 83 | { |
83 | struct drm_unique *u = data; | 84 | struct drm_unique *u = data; |
85 | struct drm_master *master = file_priv->master; | ||
84 | int domain, bus, slot, func, ret; | 86 | int domain, bus, slot, func, ret; |
85 | 87 | ||
86 | if (dev->unique_len || dev->unique) | 88 | if (master->unique_len || master->unique) |
87 | return -EBUSY; | 89 | return -EBUSY; |
88 | 90 | ||
89 | if (!u->unique_len || u->unique_len > 1024) | 91 | if (!u->unique_len || u->unique_len > 1024) |
90 | return -EINVAL; | 92 | return -EINVAL; |
91 | 93 | ||
92 | dev->unique_len = u->unique_len; | 94 | master->unique_len = u->unique_len; |
93 | dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER); | 95 | master->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER); |
94 | if (!dev->unique) | 96 | if (!master->unique) |
95 | return -ENOMEM; | 97 | return -ENOMEM; |
96 | if (copy_from_user(dev->unique, u->unique, dev->unique_len)) | 98 | if (copy_from_user(master->unique, u->unique, master->unique_len)) |
97 | return -EFAULT; | 99 | return -EFAULT; |
98 | 100 | ||
99 | dev->unique[dev->unique_len] = '\0'; | 101 | master->unique[master->unique_len] = '\0'; |
100 | 102 | ||
101 | dev->devname = | 103 | dev->devname = |
102 | drm_alloc(strlen(dev->driver->pci_driver.name) + | 104 | drm_alloc(strlen(dev->driver->pci_driver.name) + |
103 | strlen(dev->unique) + 2, DRM_MEM_DRIVER); | 105 | strlen(master->unique) + 2, DRM_MEM_DRIVER); |
104 | if (!dev->devname) | 106 | if (!dev->devname) |
105 | return -ENOMEM; | 107 | return -ENOMEM; |
106 | 108 | ||
107 | sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, | 109 | sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, |
108 | dev->unique); | 110 | master->unique); |
109 | 111 | ||
110 | /* Return error if the busid submitted doesn't match the device's actual | 112 | /* Return error if the busid submitted doesn't match the device's actual |
111 | * busid. | 113 | * busid. |
112 | */ | 114 | */ |
113 | ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); | 115 | ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func); |
114 | if (ret != 3) | 116 | if (ret != 3) |
115 | return -EINVAL; | 117 | return -EINVAL; |
116 | domain = bus >> 8; | 118 | domain = bus >> 8; |
@@ -125,34 +127,35 @@ int drm_setunique(struct drm_device *dev, void *data, | |||
125 | return 0; | 127 | return 0; |
126 | } | 128 | } |
127 | 129 | ||
128 | static int drm_set_busid(struct drm_device * dev) | 130 | static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) |
129 | { | 131 | { |
132 | struct drm_master *master = file_priv->master; | ||
130 | int len; | 133 | int len; |
131 | 134 | ||
132 | if (dev->unique != NULL) | 135 | if (master->unique != NULL) |
133 | return 0; | 136 | return -EBUSY; |
134 | 137 | ||
135 | dev->unique_len = 40; | 138 | master->unique_len = 40; |
136 | dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); | 139 | master->unique = drm_alloc(master->unique_len + 1, DRM_MEM_DRIVER); |
137 | if (dev->unique == NULL) | 140 | if (master->unique == NULL) |
138 | return -ENOMEM; | 141 | return -ENOMEM; |
139 | 142 | ||
140 | len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", | 143 | len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d", |
141 | drm_get_pci_domain(dev), dev->pdev->bus->number, | 144 | drm_get_pci_domain(dev), |
145 | dev->pdev->bus->number, | ||
142 | PCI_SLOT(dev->pdev->devfn), | 146 | PCI_SLOT(dev->pdev->devfn), |
143 | PCI_FUNC(dev->pdev->devfn)); | 147 | PCI_FUNC(dev->pdev->devfn)); |
144 | 148 | if (len > master->unique_len) | |
145 | if (len > dev->unique_len) | 149 | DRM_ERROR("buffer overflow"); |
146 | DRM_ERROR("Unique buffer overflowed\n"); | ||
147 | 150 | ||
148 | dev->devname = | 151 | dev->devname = |
149 | drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + | 152 | drm_alloc(strlen(dev->driver->pci_driver.name) + master->unique_len + |
150 | 2, DRM_MEM_DRIVER); | 153 | 2, DRM_MEM_DRIVER); |
151 | if (dev->devname == NULL) | 154 | if (dev->devname == NULL) |
152 | return -ENOMEM; | 155 | return -ENOMEM; |
153 | 156 | ||
154 | sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, | 157 | sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, |
155 | dev->unique); | 158 | master->unique); |
156 | 159 | ||
157 | return 0; | 160 | return 0; |
158 | } | 161 | } |
@@ -276,7 +279,7 @@ int drm_getstats(struct drm_device *dev, void *data, | |||
276 | for (i = 0; i < dev->counters; i++) { | 279 | for (i = 0; i < dev->counters; i++) { |
277 | if (dev->types[i] == _DRM_STAT_LOCK) | 280 | if (dev->types[i] == _DRM_STAT_LOCK) |
278 | stats->data[i].value = | 281 | stats->data[i].value = |
279 | (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); | 282 | (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0); |
280 | else | 283 | else |
281 | stats->data[i].value = atomic_read(&dev->counts[i]); | 284 | stats->data[i].value = atomic_read(&dev->counts[i]); |
282 | stats->data[i].type = dev->types[i]; | 285 | stats->data[i].type = dev->types[i]; |
@@ -318,7 +321,7 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri | |||
318 | /* | 321 | /* |
319 | * Version 1.1 includes tying of DRM to specific device | 322 | * Version 1.1 includes tying of DRM to specific device |
320 | */ | 323 | */ |
321 | drm_set_busid(dev); | 324 | drm_set_busid(dev, file_priv); |
322 | } | 325 | } |
323 | } | 326 | } |
324 | 327 | ||
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 1cfa72031f8f..46e7b28f0707 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -52,6 +52,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
52 | { | 52 | { |
53 | DECLARE_WAITQUEUE(entry, current); | 53 | DECLARE_WAITQUEUE(entry, current); |
54 | struct drm_lock *lock = data; | 54 | struct drm_lock *lock = data; |
55 | struct drm_master *master = file_priv->master; | ||
55 | int ret = 0; | 56 | int ret = 0; |
56 | 57 | ||
57 | ++file_priv->lock_count; | 58 | ++file_priv->lock_count; |
@@ -64,26 +65,27 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
64 | 65 | ||
65 | DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", | 66 | DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", |
66 | lock->context, task_pid_nr(current), | 67 | lock->context, task_pid_nr(current), |
67 | dev->lock.hw_lock->lock, lock->flags); | 68 | master->lock.hw_lock->lock, lock->flags); |
68 | 69 | ||
69 | if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) | 70 | if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) |
70 | if (lock->context < 0) | 71 | if (lock->context < 0) |
71 | return -EINVAL; | 72 | return -EINVAL; |
72 | 73 | ||
73 | add_wait_queue(&dev->lock.lock_queue, &entry); | 74 | add_wait_queue(&master->lock.lock_queue, &entry); |
74 | spin_lock_bh(&dev->lock.spinlock); | 75 | spin_lock_bh(&master->lock.spinlock); |
75 | dev->lock.user_waiters++; | 76 | master->lock.user_waiters++; |
76 | spin_unlock_bh(&dev->lock.spinlock); | 77 | spin_unlock_bh(&master->lock.spinlock); |
78 | |||
77 | for (;;) { | 79 | for (;;) { |
78 | __set_current_state(TASK_INTERRUPTIBLE); | 80 | __set_current_state(TASK_INTERRUPTIBLE); |
79 | if (!dev->lock.hw_lock) { | 81 | if (!master->lock.hw_lock) { |
80 | /* Device has been unregistered */ | 82 | /* Device has been unregistered */ |
81 | ret = -EINTR; | 83 | ret = -EINTR; |
82 | break; | 84 | break; |
83 | } | 85 | } |
84 | if (drm_lock_take(&dev->lock, lock->context)) { | 86 | if (drm_lock_take(&master->lock, lock->context)) { |
85 | dev->lock.file_priv = file_priv; | 87 | master->lock.file_priv = file_priv; |
86 | dev->lock.lock_time = jiffies; | 88 | master->lock.lock_time = jiffies; |
87 | atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); | 89 | atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); |
88 | break; /* Got lock */ | 90 | break; /* Got lock */ |
89 | } | 91 | } |
@@ -95,11 +97,11 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
95 | break; | 97 | break; |
96 | } | 98 | } |
97 | } | 99 | } |
98 | spin_lock_bh(&dev->lock.spinlock); | 100 | spin_lock_bh(&master->lock.spinlock); |
99 | dev->lock.user_waiters--; | 101 | master->lock.user_waiters--; |
100 | spin_unlock_bh(&dev->lock.spinlock); | 102 | spin_unlock_bh(&master->lock.spinlock); |
101 | __set_current_state(TASK_RUNNING); | 103 | __set_current_state(TASK_RUNNING); |
102 | remove_wait_queue(&dev->lock.lock_queue, &entry); | 104 | remove_wait_queue(&master->lock.lock_queue, &entry); |
103 | 105 | ||
104 | DRM_DEBUG("%d %s\n", lock->context, | 106 | DRM_DEBUG("%d %s\n", lock->context, |
105 | ret ? "interrupted" : "has lock"); | 107 | ret ? "interrupted" : "has lock"); |
@@ -108,14 +110,14 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
108 | /* don't set the block all signals on the master process for now | 110 | /* don't set the block all signals on the master process for now |
109 | * really probably not the correct answer but lets us debug xkb | 111 | * really probably not the correct answer but lets us debug xkb |
110 | * xserver for now */ | 112 | * xserver for now */ |
111 | if (!file_priv->master) { | 113 | if (!file_priv->is_master) { |
112 | sigemptyset(&dev->sigmask); | 114 | sigemptyset(&dev->sigmask); |
113 | sigaddset(&dev->sigmask, SIGSTOP); | 115 | sigaddset(&dev->sigmask, SIGSTOP); |
114 | sigaddset(&dev->sigmask, SIGTSTP); | 116 | sigaddset(&dev->sigmask, SIGTSTP); |
115 | sigaddset(&dev->sigmask, SIGTTIN); | 117 | sigaddset(&dev->sigmask, SIGTTIN); |
116 | sigaddset(&dev->sigmask, SIGTTOU); | 118 | sigaddset(&dev->sigmask, SIGTTOU); |
117 | dev->sigdata.context = lock->context; | 119 | dev->sigdata.context = lock->context; |
118 | dev->sigdata.lock = dev->lock.hw_lock; | 120 | dev->sigdata.lock = master->lock.hw_lock; |
119 | block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); | 121 | block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); |
120 | } | 122 | } |
121 | 123 | ||
@@ -154,6 +156,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
154 | int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) | 156 | int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) |
155 | { | 157 | { |
156 | struct drm_lock *lock = data; | 158 | struct drm_lock *lock = data; |
159 | struct drm_master *master = file_priv->master; | ||
157 | 160 | ||
158 | if (lock->context == DRM_KERNEL_CONTEXT) { | 161 | if (lock->context == DRM_KERNEL_CONTEXT) { |
159 | DRM_ERROR("Process %d using kernel context %d\n", | 162 | DRM_ERROR("Process %d using kernel context %d\n", |
@@ -169,7 +172,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
169 | if (dev->driver->kernel_context_switch_unlock) | 172 | if (dev->driver->kernel_context_switch_unlock) |
170 | dev->driver->kernel_context_switch_unlock(dev); | 173 | dev->driver->kernel_context_switch_unlock(dev); |
171 | else { | 174 | else { |
172 | if (drm_lock_free(&dev->lock,lock->context)) { | 175 | if (drm_lock_free(&master->lock, lock->context)) { |
173 | /* FIXME: Should really bail out here. */ | 176 | /* FIXME: Should really bail out here. */ |
174 | } | 177 | } |
175 | } | 178 | } |
@@ -379,9 +382,10 @@ EXPORT_SYMBOL(drm_idlelock_release); | |||
379 | 382 | ||
380 | int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) | 383 | int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) |
381 | { | 384 | { |
382 | return (file_priv->lock_count && dev->lock.hw_lock && | 385 | struct drm_master *master = file_priv->master; |
383 | _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && | 386 | return (file_priv->lock_count && master->lock.hw_lock && |
384 | dev->lock.file_priv == file_priv); | 387 | _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && |
388 | master->lock.file_priv == file_priv); | ||
385 | } | 389 | } |
386 | 390 | ||
387 | EXPORT_SYMBOL(drm_i_have_hw_lock); | 391 | EXPORT_SYMBOL(drm_i_have_hw_lock); |
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index ae73b7f7249a..7dbaa1a19ea4 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -195,6 +195,7 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request, | |||
195 | int *eof, void *data) | 195 | int *eof, void *data) |
196 | { | 196 | { |
197 | struct drm_minor *minor = (struct drm_minor *) data; | 197 | struct drm_minor *minor = (struct drm_minor *) data; |
198 | struct drm_master *master = minor->master; | ||
198 | struct drm_device *dev = minor->dev; | 199 | struct drm_device *dev = minor->dev; |
199 | int len = 0; | 200 | int len = 0; |
200 | 201 | ||
@@ -203,13 +204,16 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request, | |||
203 | return 0; | 204 | return 0; |
204 | } | 205 | } |
205 | 206 | ||
207 | if (!master) | ||
208 | return 0; | ||
209 | |||
206 | *start = &buf[offset]; | 210 | *start = &buf[offset]; |
207 | *eof = 0; | 211 | *eof = 0; |
208 | 212 | ||
209 | if (dev->unique) { | 213 | if (master->unique) { |
210 | DRM_PROC_PRINT("%s %s %s\n", | 214 | DRM_PROC_PRINT("%s %s %s\n", |
211 | dev->driver->pci_driver.name, | 215 | dev->driver->pci_driver.name, |
212 | pci_name(dev->pdev), dev->unique); | 216 | pci_name(dev->pdev), master->unique); |
213 | } else { | 217 | } else { |
214 | DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, | 218 | DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, |
215 | pci_name(dev->pdev)); | 219 | pci_name(dev->pdev)); |
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 849c0a9fe7fd..0f24c2dcd517 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -79,6 +79,104 @@ again: | |||
79 | return new_id; | 79 | return new_id; |
80 | } | 80 | } |
81 | 81 | ||
82 | struct drm_master *drm_master_create(struct drm_minor *minor) | ||
83 | { | ||
84 | struct drm_master *master; | ||
85 | |||
86 | master = drm_calloc(1, sizeof(*master), DRM_MEM_DRIVER); | ||
87 | if (!master) | ||
88 | return NULL; | ||
89 | |||
90 | kref_init(&master->refcount); | ||
91 | spin_lock_init(&master->lock.spinlock); | ||
92 | init_waitqueue_head(&master->lock.lock_queue); | ||
93 | drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER); | ||
94 | INIT_LIST_HEAD(&master->magicfree); | ||
95 | master->minor = minor; | ||
96 | |||
97 | list_add_tail(&master->head, &minor->master_list); | ||
98 | |||
99 | return master; | ||
100 | } | ||
101 | |||
102 | struct drm_master *drm_master_get(struct drm_master *master) | ||
103 | { | ||
104 | kref_get(&master->refcount); | ||
105 | return master; | ||
106 | } | ||
107 | |||
108 | static void drm_master_destroy(struct kref *kref) | ||
109 | { | ||
110 | struct drm_master *master = container_of(kref, struct drm_master, refcount); | ||
111 | struct drm_magic_entry *pt, *next; | ||
112 | struct drm_device *dev = master->minor->dev; | ||
113 | |||
114 | list_del(&master->head); | ||
115 | |||
116 | if (dev->driver->master_destroy) | ||
117 | dev->driver->master_destroy(dev, master); | ||
118 | |||
119 | if (master->unique) { | ||
120 | drm_free(master->unique, strlen(master->unique) + 1, DRM_MEM_DRIVER); | ||
121 | master->unique = NULL; | ||
122 | master->unique_len = 0; | ||
123 | } | ||
124 | |||
125 | list_for_each_entry_safe(pt, next, &master->magicfree, head) { | ||
126 | list_del(&pt->head); | ||
127 | drm_ht_remove_item(&master->magiclist, &pt->hash_item); | ||
128 | drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); | ||
129 | } | ||
130 | |||
131 | drm_ht_remove(&master->magiclist); | ||
132 | |||
133 | if (master->lock.hw_lock) { | ||
134 | if (dev->sigdata.lock == master->lock.hw_lock) | ||
135 | dev->sigdata.lock = NULL; | ||
136 | master->lock.hw_lock = NULL; | ||
137 | master->lock.file_priv = NULL; | ||
138 | wake_up_interruptible(&master->lock.lock_queue); | ||
139 | } | ||
140 | |||
141 | drm_free(master, sizeof(*master), DRM_MEM_DRIVER); | ||
142 | } | ||
143 | |||
144 | void drm_master_put(struct drm_master **master) | ||
145 | { | ||
146 | kref_put(&(*master)->refcount, drm_master_destroy); | ||
147 | *master = NULL; | ||
148 | } | ||
149 | |||
150 | int drm_setmaster_ioctl(struct drm_device *dev, void *data, | ||
151 | struct drm_file *file_priv) | ||
152 | { | ||
153 | if (file_priv->minor->master && file_priv->minor->master != file_priv->master) | ||
154 | return -EINVAL; | ||
155 | |||
156 | if (!file_priv->master) | ||
157 | return -EINVAL; | ||
158 | |||
159 | if (!file_priv->minor->master && | ||
160 | file_priv->minor->master != file_priv->master) { | ||
161 | mutex_lock(&dev->struct_mutex); | ||
162 | file_priv->minor->master = drm_master_get(file_priv->master); | ||
163 | mutex_lock(&dev->struct_mutex); | ||
164 | } | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | int drm_dropmaster_ioctl(struct drm_device *dev, void *data, | ||
170 | struct drm_file *file_priv) | ||
171 | { | ||
172 | if (!file_priv->master) | ||
173 | return -EINVAL; | ||
174 | mutex_lock(&dev->struct_mutex); | ||
175 | drm_master_put(&file_priv->minor->master); | ||
176 | mutex_unlock(&dev->struct_mutex); | ||
177 | return 0; | ||
178 | } | ||
179 | |||
82 | static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, | 180 | static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, |
83 | const struct pci_device_id *ent, | 181 | const struct pci_device_id *ent, |
84 | struct drm_driver *driver) | 182 | struct drm_driver *driver) |
@@ -92,7 +190,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, | |||
92 | 190 | ||
93 | spin_lock_init(&dev->count_lock); | 191 | spin_lock_init(&dev->count_lock); |
94 | spin_lock_init(&dev->drw_lock); | 192 | spin_lock_init(&dev->drw_lock); |
95 | spin_lock_init(&dev->lock.spinlock); | ||
96 | init_timer(&dev->timer); | 193 | init_timer(&dev->timer); |
97 | mutex_init(&dev->struct_mutex); | 194 | mutex_init(&dev->struct_mutex); |
98 | mutex_init(&dev->ctxlist_mutex); | 195 | mutex_init(&dev->ctxlist_mutex); |
@@ -200,6 +297,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t | |||
200 | new_minor->device = MKDEV(DRM_MAJOR, minor_id); | 297 | new_minor->device = MKDEV(DRM_MAJOR, minor_id); |
201 | new_minor->dev = dev; | 298 | new_minor->dev = dev; |
202 | new_minor->index = minor_id; | 299 | new_minor->index = minor_id; |
300 | INIT_LIST_HEAD(&new_minor->master_list); | ||
203 | 301 | ||
204 | idr_replace(&drm_minors_idr, new_minor, minor_id); | 302 | idr_replace(&drm_minors_idr, new_minor, minor_id); |
205 | 303 | ||
@@ -299,11 +397,6 @@ int drm_put_dev(struct drm_device * dev) | |||
299 | { | 397 | { |
300 | DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name); | 398 | DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name); |
301 | 399 | ||
302 | if (dev->unique) { | ||
303 | drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); | ||
304 | dev->unique = NULL; | ||
305 | dev->unique_len = 0; | ||
306 | } | ||
307 | if (dev->devname) { | 400 | if (dev->devname) { |
308 | drm_free(dev->devname, strlen(dev->devname) + 1, | 401 | drm_free(dev->devname, strlen(dev->devname) + 1, |
309 | DRM_MEM_DRIVER); | 402 | DRM_MEM_DRIVER); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index afa8a12cd009..dacdf3c577cb 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -39,6 +39,7 @@ | |||
39 | int i915_wait_ring(struct drm_device * dev, int n, const char *caller) | 39 | int i915_wait_ring(struct drm_device * dev, int n, const char *caller) |
40 | { | 40 | { |
41 | drm_i915_private_t *dev_priv = dev->dev_private; | 41 | drm_i915_private_t *dev_priv = dev->dev_private; |
42 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
42 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); | 43 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); |
43 | u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; | 44 | u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; |
44 | u32 last_acthd = I915_READ(acthd_reg); | 45 | u32 last_acthd = I915_READ(acthd_reg); |
@@ -55,8 +56,8 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) | |||
55 | if (ring->space >= n) | 56 | if (ring->space >= n) |
56 | return 0; | 57 | return 0; |
57 | 58 | ||
58 | if (dev_priv->sarea_priv) | 59 | if (master_priv->sarea_priv) |
59 | dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 60 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
60 | 61 | ||
61 | if (ring->head != last_head) | 62 | if (ring->head != last_head) |
62 | i = 0; | 63 | i = 0; |
@@ -121,6 +122,7 @@ static void i915_free_hws(struct drm_device *dev) | |||
121 | void i915_kernel_lost_context(struct drm_device * dev) | 122 | void i915_kernel_lost_context(struct drm_device * dev) |
122 | { | 123 | { |
123 | drm_i915_private_t *dev_priv = dev->dev_private; | 124 | drm_i915_private_t *dev_priv = dev->dev_private; |
125 | struct drm_i915_master_private *master_priv; | ||
124 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); | 126 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); |
125 | 127 | ||
126 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | 128 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; |
@@ -129,8 +131,12 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
129 | if (ring->space < 0) | 131 | if (ring->space < 0) |
130 | ring->space += ring->Size; | 132 | ring->space += ring->Size; |
131 | 133 | ||
132 | if (ring->head == ring->tail && dev_priv->sarea_priv) | 134 | if (!dev->primary->master) |
133 | dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; | 135 | return; |
136 | |||
137 | master_priv = dev->primary->master->driver_priv; | ||
138 | if (ring->head == ring->tail && master_priv->sarea_priv) | ||
139 | master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; | ||
134 | } | 140 | } |
135 | 141 | ||
136 | static int i915_dma_cleanup(struct drm_device * dev) | 142 | static int i915_dma_cleanup(struct drm_device * dev) |
@@ -154,25 +160,13 @@ static int i915_dma_cleanup(struct drm_device * dev) | |||
154 | if (I915_NEED_GFX_HWS(dev)) | 160 | if (I915_NEED_GFX_HWS(dev)) |
155 | i915_free_hws(dev); | 161 | i915_free_hws(dev); |
156 | 162 | ||
157 | dev_priv->sarea = NULL; | ||
158 | dev_priv->sarea_priv = NULL; | ||
159 | |||
160 | return 0; | 163 | return 0; |
161 | } | 164 | } |
162 | 165 | ||
163 | static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | 166 | static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) |
164 | { | 167 | { |
165 | drm_i915_private_t *dev_priv = dev->dev_private; | 168 | drm_i915_private_t *dev_priv = dev->dev_private; |
166 | 169 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | |
167 | dev_priv->sarea = drm_getsarea(dev); | ||
168 | if (!dev_priv->sarea) { | ||
169 | DRM_ERROR("can not find sarea!\n"); | ||
170 | i915_dma_cleanup(dev); | ||
171 | return -EINVAL; | ||
172 | } | ||
173 | |||
174 | dev_priv->sarea_priv = (drm_i915_sarea_t *) | ||
175 | ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); | ||
176 | 170 | ||
177 | if (init->ring_size != 0) { | 171 | if (init->ring_size != 0) { |
178 | if (dev_priv->ring.ring_obj != NULL) { | 172 | if (dev_priv->ring.ring_obj != NULL) { |
@@ -207,7 +201,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
207 | dev_priv->back_offset = init->back_offset; | 201 | dev_priv->back_offset = init->back_offset; |
208 | dev_priv->front_offset = init->front_offset; | 202 | dev_priv->front_offset = init->front_offset; |
209 | dev_priv->current_page = 0; | 203 | dev_priv->current_page = 0; |
210 | dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; | 204 | if (master_priv->sarea_priv) |
205 | master_priv->sarea_priv->pf_current_page = 0; | ||
211 | 206 | ||
212 | /* Allow hardware batchbuffers unless told otherwise. | 207 | /* Allow hardware batchbuffers unless told otherwise. |
213 | */ | 208 | */ |
@@ -222,11 +217,6 @@ static int i915_dma_resume(struct drm_device * dev) | |||
222 | 217 | ||
223 | DRM_DEBUG("%s\n", __func__); | 218 | DRM_DEBUG("%s\n", __func__); |
224 | 219 | ||
225 | if (!dev_priv->sarea) { | ||
226 | DRM_ERROR("can not find sarea!\n"); | ||
227 | return -EINVAL; | ||
228 | } | ||
229 | |||
230 | if (dev_priv->ring.map.handle == NULL) { | 220 | if (dev_priv->ring.map.handle == NULL) { |
231 | DRM_ERROR("can not ioremap virtual address for" | 221 | DRM_ERROR("can not ioremap virtual address for" |
232 | " ring buffer\n"); | 222 | " ring buffer\n"); |
@@ -435,13 +425,14 @@ i915_emit_box(struct drm_device *dev, | |||
435 | static void i915_emit_breadcrumb(struct drm_device *dev) | 425 | static void i915_emit_breadcrumb(struct drm_device *dev) |
436 | { | 426 | { |
437 | drm_i915_private_t *dev_priv = dev->dev_private; | 427 | drm_i915_private_t *dev_priv = dev->dev_private; |
428 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
438 | RING_LOCALS; | 429 | RING_LOCALS; |
439 | 430 | ||
440 | dev_priv->counter++; | 431 | dev_priv->counter++; |
441 | if (dev_priv->counter > 0x7FFFFFFFUL) | 432 | if (dev_priv->counter > 0x7FFFFFFFUL) |
442 | dev_priv->counter = 0; | 433 | dev_priv->counter = 0; |
443 | if (dev_priv->sarea_priv) | 434 | if (master_priv->sarea_priv) |
444 | dev_priv->sarea_priv->last_enqueue = dev_priv->counter; | 435 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; |
445 | 436 | ||
446 | BEGIN_LP_RING(4); | 437 | BEGIN_LP_RING(4); |
447 | OUT_RING(MI_STORE_DWORD_INDEX); | 438 | OUT_RING(MI_STORE_DWORD_INDEX); |
@@ -537,15 +528,17 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
537 | static int i915_dispatch_flip(struct drm_device * dev) | 528 | static int i915_dispatch_flip(struct drm_device * dev) |
538 | { | 529 | { |
539 | drm_i915_private_t *dev_priv = dev->dev_private; | 530 | drm_i915_private_t *dev_priv = dev->dev_private; |
531 | struct drm_i915_master_private *master_priv = | ||
532 | dev->primary->master->driver_priv; | ||
540 | RING_LOCALS; | 533 | RING_LOCALS; |
541 | 534 | ||
542 | if (!dev_priv->sarea_priv) | 535 | if (!master_priv->sarea_priv) |
543 | return -EINVAL; | 536 | return -EINVAL; |
544 | 537 | ||
545 | DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", | 538 | DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", |
546 | __func__, | 539 | __func__, |
547 | dev_priv->current_page, | 540 | dev_priv->current_page, |
548 | dev_priv->sarea_priv->pf_current_page); | 541 | master_priv->sarea_priv->pf_current_page); |
549 | 542 | ||
550 | i915_kernel_lost_context(dev); | 543 | i915_kernel_lost_context(dev); |
551 | 544 | ||
@@ -572,7 +565,7 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
572 | OUT_RING(0); | 565 | OUT_RING(0); |
573 | ADVANCE_LP_RING(); | 566 | ADVANCE_LP_RING(); |
574 | 567 | ||
575 | dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; | 568 | master_priv->sarea_priv->last_enqueue = dev_priv->counter++; |
576 | 569 | ||
577 | BEGIN_LP_RING(4); | 570 | BEGIN_LP_RING(4); |
578 | OUT_RING(MI_STORE_DWORD_INDEX); | 571 | OUT_RING(MI_STORE_DWORD_INDEX); |
@@ -581,7 +574,7 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
581 | OUT_RING(0); | 574 | OUT_RING(0); |
582 | ADVANCE_LP_RING(); | 575 | ADVANCE_LP_RING(); |
583 | 576 | ||
584 | dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; | 577 | master_priv->sarea_priv->pf_current_page = dev_priv->current_page; |
585 | return 0; | 578 | return 0; |
586 | } | 579 | } |
587 | 580 | ||
@@ -611,8 +604,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, | |||
611 | struct drm_file *file_priv) | 604 | struct drm_file *file_priv) |
612 | { | 605 | { |
613 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 606 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
607 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
614 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) | 608 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) |
615 | dev_priv->sarea_priv; | 609 | master_priv->sarea_priv; |
616 | drm_i915_batchbuffer_t *batch = data; | 610 | drm_i915_batchbuffer_t *batch = data; |
617 | int ret; | 611 | int ret; |
618 | 612 | ||
@@ -644,8 +638,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, | |||
644 | struct drm_file *file_priv) | 638 | struct drm_file *file_priv) |
645 | { | 639 | { |
646 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 640 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
641 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
647 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) | 642 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) |
648 | dev_priv->sarea_priv; | 643 | master_priv->sarea_priv; |
649 | drm_i915_cmdbuffer_t *cmdbuf = data; | 644 | drm_i915_cmdbuffer_t *cmdbuf = data; |
650 | int ret; | 645 | int ret; |
651 | 646 | ||
@@ -802,6 +797,30 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
802 | return 0; | 797 | return 0; |
803 | } | 798 | } |
804 | 799 | ||
800 | int i915_master_create(struct drm_device *dev, struct drm_master *master) | ||
801 | { | ||
802 | struct drm_i915_master_private *master_priv; | ||
803 | |||
804 | master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); | ||
805 | if (!master_priv) | ||
806 | return -ENOMEM; | ||
807 | |||
808 | master->driver_priv = master_priv; | ||
809 | return 0; | ||
810 | } | ||
811 | |||
812 | void i915_master_destroy(struct drm_device *dev, struct drm_master *master) | ||
813 | { | ||
814 | struct drm_i915_master_private *master_priv = master->driver_priv; | ||
815 | |||
816 | if (!master_priv) | ||
817 | return; | ||
818 | |||
819 | drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); | ||
820 | |||
821 | master->driver_priv = NULL; | ||
822 | } | ||
823 | |||
805 | int i915_driver_load(struct drm_device *dev, unsigned long flags) | 824 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
806 | { | 825 | { |
807 | struct drm_i915_private *dev_priv = dev->dev_private; | 826 | struct drm_i915_private *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a80ead215282..c91648320a8b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -107,6 +107,8 @@ static struct drm_driver driver = { | |||
107 | .reclaim_buffers = drm_core_reclaim_buffers, | 107 | .reclaim_buffers = drm_core_reclaim_buffers, |
108 | .get_map_ofs = drm_core_get_map_ofs, | 108 | .get_map_ofs = drm_core_get_map_ofs, |
109 | .get_reg_ofs = drm_core_get_reg_ofs, | 109 | .get_reg_ofs = drm_core_get_reg_ofs, |
110 | .master_create = i915_master_create, | ||
111 | .master_destroy = i915_master_destroy, | ||
110 | .proc_init = i915_gem_proc_init, | 112 | .proc_init = i915_gem_proc_init, |
111 | .proc_cleanup = i915_gem_proc_cleanup, | 113 | .proc_cleanup = i915_gem_proc_cleanup, |
112 | .gem_init_object = i915_gem_init_object, | 114 | .gem_init_object = i915_gem_init_object, |
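Hooking the two callbacks into struct drm_driver is all a driver needs to do to get per-master private data; the core presumably calls master_create when a drm_master is set up and master_destroy when its last reference goes away. A generic sketch of the contract the i915 pair follows; "foo" is a placeholder driver name and the allocation helpers are the same drm_calloc()/drm_free() calls used in i915_master_create()/i915_master_destroy():

#include "drmP.h"

/* Per-master private data for a hypothetical "foo" driver. */
struct foo_master_private {
	drm_local_map_t *sarea;		/* example field: per-master SAREA mapping */
};

static int foo_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct foo_master_private *master_priv;

	master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;	/* owned by this master from now on */
	return 0;
}

static void foo_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct foo_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
	master->driver_priv = NULL;
}

Whatever master_create attaches to master->driver_priv is owned by that master; master_destroy frees it and clears the pointer, exactly as the i915 pair does.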
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b3cc4731aa7c..ba096f9a7641 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -103,15 +103,18 @@ struct intel_opregion { | |||
103 | int enabled; | 103 | int enabled; |
104 | }; | 104 | }; |
105 | 105 | ||
106 | struct drm_i915_master_private { | ||
107 | drm_local_map_t *sarea; | ||
108 | struct _drm_i915_sarea *sarea_priv; | ||
109 | }; | ||
110 | |||
106 | typedef struct drm_i915_private { | 111 | typedef struct drm_i915_private { |
107 | struct drm_device *dev; | 112 | struct drm_device *dev; |
108 | 113 | ||
109 | int has_gem; | 114 | int has_gem; |
110 | 115 | ||
111 | void __iomem *regs; | 116 | void __iomem *regs; |
112 | drm_local_map_t *sarea; | ||
113 | 117 | ||
114 | drm_i915_sarea_t *sarea_priv; | ||
115 | drm_i915_ring_buffer_t ring; | 118 | drm_i915_ring_buffer_t ring; |
116 | 119 | ||
117 | drm_dma_handle_t *status_page_dmah; | 120 | drm_dma_handle_t *status_page_dmah; |
@@ -417,6 +420,9 @@ struct drm_i915_file_private { | |||
417 | extern struct drm_ioctl_desc i915_ioctls[]; | 420 | extern struct drm_ioctl_desc i915_ioctls[]; |
418 | extern int i915_max_ioctl; | 421 | extern int i915_max_ioctl; |
419 | 422 | ||
423 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); | ||
424 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); | ||
425 | |||
420 | /* i915_dma.c */ | 426 | /* i915_dma.c */ |
421 | extern void i915_kernel_lost_context(struct drm_device * dev); | 427 | extern void i915_kernel_lost_context(struct drm_device * dev); |
422 | extern int i915_driver_load(struct drm_device *, unsigned long flags); | 428 | extern int i915_driver_load(struct drm_device *, unsigned long flags); |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 69b9a42da95e..9b673d2f912b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -168,6 +168,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
168 | { | 168 | { |
169 | struct drm_device *dev = (struct drm_device *) arg; | 169 | struct drm_device *dev = (struct drm_device *) arg; |
170 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 170 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
171 | struct drm_i915_master_private *master_priv; | ||
171 | u32 iir, new_iir; | 172 | u32 iir, new_iir; |
172 | u32 pipea_stats, pipeb_stats; | 173 | u32 pipea_stats, pipeb_stats; |
173 | u32 vblank_status; | 174 | u32 vblank_status; |
@@ -222,9 +223,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
222 | I915_WRITE(IIR, iir); | 223 | I915_WRITE(IIR, iir); |
223 | new_iir = I915_READ(IIR); /* Flush posted writes */ | 224 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
224 | 225 | ||
225 | if (dev_priv->sarea_priv) | 226 | if (dev->primary->master) { |
226 | dev_priv->sarea_priv->last_dispatch = | 227 | master_priv = dev->primary->master->driver_priv; |
227 | READ_BREADCRUMB(dev_priv); | 228 | if (master_priv->sarea_priv) |
229 | master_priv->sarea_priv->last_dispatch = | ||
230 | READ_BREADCRUMB(dev_priv); | ||
231 | } | ||
228 | 232 | ||
229 | if (iir & I915_USER_INTERRUPT) { | 233 | if (iir & I915_USER_INTERRUPT) { |
230 | dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); | 234 | dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); |
@@ -269,6 +273,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
269 | static int i915_emit_irq(struct drm_device * dev) | 273 | static int i915_emit_irq(struct drm_device * dev) |
270 | { | 274 | { |
271 | drm_i915_private_t *dev_priv = dev->dev_private; | 275 | drm_i915_private_t *dev_priv = dev->dev_private; |
276 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
272 | RING_LOCALS; | 277 | RING_LOCALS; |
273 | 278 | ||
274 | i915_kernel_lost_context(dev); | 279 | i915_kernel_lost_context(dev); |
@@ -278,8 +283,8 @@ static int i915_emit_irq(struct drm_device * dev) | |||
278 | dev_priv->counter++; | 283 | dev_priv->counter++; |
279 | if (dev_priv->counter > 0x7FFFFFFFUL) | 284 | if (dev_priv->counter > 0x7FFFFFFFUL) |
280 | dev_priv->counter = 1; | 285 | dev_priv->counter = 1; |
281 | if (dev_priv->sarea_priv) | 286 | if (master_priv->sarea_priv) |
282 | dev_priv->sarea_priv->last_enqueue = dev_priv->counter; | 287 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; |
283 | 288 | ||
284 | BEGIN_LP_RING(4); | 289 | BEGIN_LP_RING(4); |
285 | OUT_RING(MI_STORE_DWORD_INDEX); | 290 | OUT_RING(MI_STORE_DWORD_INDEX); |
@@ -317,21 +322,20 @@ void i915_user_irq_put(struct drm_device *dev) | |||
317 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) | 322 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) |
318 | { | 323 | { |
319 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 324 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
325 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
320 | int ret = 0; | 326 | int ret = 0; |
321 | 327 | ||
322 | DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, | 328 | DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, |
323 | READ_BREADCRUMB(dev_priv)); | 329 | READ_BREADCRUMB(dev_priv)); |
324 | 330 | ||
325 | if (READ_BREADCRUMB(dev_priv) >= irq_nr) { | 331 | if (READ_BREADCRUMB(dev_priv) >= irq_nr) { |
326 | if (dev_priv->sarea_priv) { | 332 | if (master_priv->sarea_priv) |
327 | dev_priv->sarea_priv->last_dispatch = | 333 | master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); |
328 | READ_BREADCRUMB(dev_priv); | ||
329 | } | ||
330 | return 0; | 334 | return 0; |
331 | } | 335 | } |
332 | 336 | ||
333 | if (dev_priv->sarea_priv) | 337 | if (master_priv->sarea_priv) |
334 | dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 338 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
335 | 339 | ||
336 | i915_user_irq_get(dev); | 340 | i915_user_irq_get(dev); |
337 | DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, | 341 | DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, |
@@ -343,10 +347,6 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
343 | READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); | 347 | READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); |
344 | } | 348 | } |
345 | 349 | ||
346 | if (dev_priv->sarea_priv) | ||
347 | dev_priv->sarea_priv->last_dispatch = | ||
348 | READ_BREADCRUMB(dev_priv); | ||
349 | |||
350 | return ret; | 350 | return ret; |
351 | } | 351 | } |
352 | 352 | ||
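The interrupt-handler hunk applies the same rule in the context where it matters most: the IRQ can fire while no master is bound, so the breadcrumb is mirrored into the SAREA only after the dev->primary->master check. A sketch of that writeback factored into a helper; i915_update_dispatch_age() is a hypothetical name, while READ_BREADCRUMB() and the private structures are the ones used in the hunks above:

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* Hypothetical helper: mirror the latest breadcrumb into the current
 * master's SAREA, tolerating both "no master" and "no SAREA mapped yet".
 * Safe to call from the interrupt handler.
 */
static void i915_update_dispatch_age(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
}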
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c index 6126a60dc9cb..96e271986d2a 100644 --- a/drivers/gpu/drm/i915/i915_mem.c +++ b/drivers/gpu/drm/i915/i915_mem.c | |||
@@ -46,7 +46,8 @@ | |||
46 | static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) | 46 | static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) |
47 | { | 47 | { |
48 | drm_i915_private_t *dev_priv = dev->dev_private; | 48 | drm_i915_private_t *dev_priv = dev->dev_private; |
49 | drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; | 49 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
50 | drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
50 | struct drm_tex_region *list; | 51 | struct drm_tex_region *list; |
51 | unsigned shift, nr; | 52 | unsigned shift, nr; |
52 | unsigned start; | 53 | unsigned start; |
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index 4b27d9abb7bc..cace3964feeb 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c | |||
@@ -860,12 +860,12 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) | |||
860 | * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must | 860 | * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must |
861 | * be careful about how this function is called. | 861 | * be careful about how this function is called. |
862 | */ | 862 | */ |
863 | static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf) | 863 | static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf) |
864 | { | 864 | { |
865 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
866 | drm_radeon_buf_priv_t *buf_priv = buf->dev_private; | 865 | drm_radeon_buf_priv_t *buf_priv = buf->dev_private; |
866 | struct drm_radeon_master_private *master_priv = master->driver_priv; | ||
867 | 867 | ||
868 | buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; | 868 | buf_priv->age = ++master_priv->sarea_priv->last_dispatch; |
869 | buf->pending = 1; | 869 | buf->pending = 1; |
870 | buf->used = 0; | 870 | buf->used = 0; |
871 | } | 871 | } |
@@ -1027,6 +1027,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1027 | drm_radeon_kcmd_buffer_t *cmdbuf) | 1027 | drm_radeon_kcmd_buffer_t *cmdbuf) |
1028 | { | 1028 | { |
1029 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1029 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1030 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; | ||
1030 | struct drm_device_dma *dma = dev->dma; | 1031 | struct drm_device_dma *dma = dev->dma; |
1031 | struct drm_buf *buf = NULL; | 1032 | struct drm_buf *buf = NULL; |
1032 | int emit_dispatch_age = 0; | 1033 | int emit_dispatch_age = 0; |
@@ -1134,7 +1135,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1134 | } | 1135 | } |
1135 | 1136 | ||
1136 | emit_dispatch_age = 1; | 1137 | emit_dispatch_age = 1; |
1137 | r300_discard_buffer(dev, buf); | 1138 | r300_discard_buffer(dev, file_priv->master, buf); |
1138 | break; | 1139 | break; |
1139 | 1140 | ||
1140 | case R300_CMD_WAIT: | 1141 | case R300_CMD_WAIT: |
@@ -1189,7 +1190,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1189 | 1190 | ||
1190 | /* Emit the vertex buffer age */ | 1191 | /* Emit the vertex buffer age */ |
1191 | BEGIN_RING(2); | 1192 | BEGIN_RING(2); |
1192 | RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch); | 1193 | RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch); |
1193 | ADVANCE_RING(); | 1194 | ADVANCE_RING(); |
1194 | } | 1195 | } |
1195 | 1196 | ||
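The r300 changes show the other half of the conversion: dispatch helpers that used to read dev_priv->sarea_priv now take the struct drm_master explicitly, and only the ioctl entry point, which knows which file issued the call, resolves it via file_priv->master. A condensed sketch of that calling convention; foo_discard_buffer() and foo_discard_ioctl() are hypothetical names, while the drm_radeon_master_private layout is the one declared later in radeon_drv.h:

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

/* Helper takes the master explicitly instead of assuming a device-global SAREA. */
static void foo_discard_buffer(struct drm_device *dev, struct drm_master *master,
			       struct drm_buf *buf)
{
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;

	buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
	buf->pending = 1;
	buf->used = 0;
}

/* Hypothetical ioctl: the ioctl layer is the only place that knows which
 * master issued the request, so it passes file_priv->master down.
 */
static int foo_discard_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int idx = *(int *)data;

	if (idx < 0 || idx >= dma->buf_count)
		return -EINVAL;

	foo_discard_buffer(dev, file_priv->master, dma->buflist[idx]);
	return 0;
}

Threading the master through the call chain also keeps the helpers usable from radeon_master_destroy(), which has a master but no file_priv.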
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index dcebb4bee7aa..7b37a4906377 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | #include "drmP.h" | 32 | #include "drmP.h" |
33 | #include "drm.h" | 33 | #include "drm.h" |
34 | #include "drm_sarea.h" | ||
34 | #include "radeon_drm.h" | 35 | #include "radeon_drm.h" |
35 | #include "radeon_drv.h" | 36 | #include "radeon_drv.h" |
36 | #include "r300_reg.h" | 37 | #include "r300_reg.h" |
@@ -667,15 +668,14 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, | |||
667 | RADEON_WRITE(RADEON_BUS_CNTL, tmp); | 668 | RADEON_WRITE(RADEON_BUS_CNTL, tmp); |
668 | } /* PCIe cards appear not to need this */ | 669 | } /* PCIe cards appear not to need this */ |
669 | 670 | ||
670 | dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; | 671 | dev_priv->scratch[0] = 0; |
671 | RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); | 672 | RADEON_WRITE(RADEON_LAST_FRAME_REG, 0); |
672 | 673 | ||
673 | dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0; | 674 | dev_priv->scratch[1] = 0; |
674 | RADEON_WRITE(RADEON_LAST_DISPATCH_REG, | 675 | RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0); |
675 | dev_priv->sarea_priv->last_dispatch); | ||
676 | 676 | ||
677 | dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0; | 677 | dev_priv->scratch[2] = 0; |
678 | RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear); | 678 | RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); |
679 | 679 | ||
680 | radeon_do_wait_for_idle(dev_priv); | 680 | radeon_do_wait_for_idle(dev_priv); |
681 | 681 | ||
@@ -871,9 +871,11 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) | |||
871 | } | 871 | } |
872 | } | 872 | } |
873 | 873 | ||
874 | static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | 874 | static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, |
875 | struct drm_file *file_priv) | ||
875 | { | 876 | { |
876 | drm_radeon_private_t *dev_priv = dev->dev_private; | 877 | drm_radeon_private_t *dev_priv = dev->dev_private; |
878 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; | ||
877 | 879 | ||
878 | DRM_DEBUG("\n"); | 880 | DRM_DEBUG("\n"); |
879 | 881 | ||
@@ -998,8 +1000,8 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
998 | dev_priv->buffers_offset = init->buffers_offset; | 1000 | dev_priv->buffers_offset = init->buffers_offset; |
999 | dev_priv->gart_textures_offset = init->gart_textures_offset; | 1001 | dev_priv->gart_textures_offset = init->gart_textures_offset; |
1000 | 1002 | ||
1001 | dev_priv->sarea = drm_getsarea(dev); | 1003 | master_priv->sarea = drm_getsarea(dev); |
1002 | if (!dev_priv->sarea) { | 1004 | if (!master_priv->sarea) { |
1003 | DRM_ERROR("could not find sarea!\n"); | 1005 | DRM_ERROR("could not find sarea!\n"); |
1004 | radeon_do_cleanup_cp(dev); | 1006 | radeon_do_cleanup_cp(dev); |
1005 | return -EINVAL; | 1007 | return -EINVAL; |
@@ -1035,10 +1037,6 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
1035 | } | 1037 | } |
1036 | } | 1038 | } |
1037 | 1039 | ||
1038 | dev_priv->sarea_priv = | ||
1039 | (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle + | ||
1040 | init->sarea_priv_offset); | ||
1041 | |||
1042 | #if __OS_HAS_AGP | 1040 | #if __OS_HAS_AGP |
1043 | if (dev_priv->flags & RADEON_IS_AGP) { | 1041 | if (dev_priv->flags & RADEON_IS_AGP) { |
1044 | drm_core_ioremap(dev_priv->cp_ring, dev); | 1042 | drm_core_ioremap(dev_priv->cp_ring, dev); |
@@ -1329,7 +1327,7 @@ int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_pri | |||
1329 | case RADEON_INIT_CP: | 1327 | case RADEON_INIT_CP: |
1330 | case RADEON_INIT_R200_CP: | 1328 | case RADEON_INIT_R200_CP: |
1331 | case RADEON_INIT_R300_CP: | 1329 | case RADEON_INIT_R300_CP: |
1332 | return radeon_do_init_cp(dev, init); | 1330 | return radeon_do_init_cp(dev, init, file_priv); |
1333 | case RADEON_CLEANUP_CP: | 1331 | case RADEON_CLEANUP_CP: |
1334 | return radeon_do_cleanup_cp(dev); | 1332 | return radeon_do_cleanup_cp(dev); |
1335 | } | 1333 | } |
@@ -1768,6 +1766,51 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) | |||
1768 | return ret; | 1766 | return ret; |
1769 | } | 1767 | } |
1770 | 1768 | ||
1769 | int radeon_master_create(struct drm_device *dev, struct drm_master *master) | ||
1770 | { | ||
1771 | struct drm_radeon_master_private *master_priv; | ||
1772 | unsigned long sareapage; | ||
1773 | int ret; | ||
1774 | |||
1775 | master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); | ||
1776 | if (!master_priv) | ||
1777 | return -ENOMEM; | ||
1778 | |||
1779 | /* prebuild the SAREA */ | ||
1780 | sareapage = max(SAREA_MAX, PAGE_SIZE); | ||
1781 | ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER, | ||
1782 | &master_priv->sarea); | ||
1783 | if (ret) { | ||
1784 | DRM_ERROR("SAREA setup failed\n"); | ||
1785 | return ret; | ||
1786 | } | ||
1787 | master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); | ||
1788 | master_priv->sarea_priv->pfCurrentPage = 0; | ||
1789 | |||
1790 | master->driver_priv = master_priv; | ||
1791 | return 0; | ||
1792 | } | ||
1793 | |||
1794 | void radeon_master_destroy(struct drm_device *dev, struct drm_master *master) | ||
1795 | { | ||
1796 | struct drm_radeon_master_private *master_priv = master->driver_priv; | ||
1797 | |||
1798 | if (!master_priv) | ||
1799 | return; | ||
1800 | |||
1801 | if (master_priv->sarea_priv && | ||
1802 | master_priv->sarea_priv->pfCurrentPage != 0) | ||
1803 | radeon_cp_dispatch_flip(dev, master); | ||
1804 | |||
1805 | master_priv->sarea_priv = NULL; | ||
1806 | if (master_priv->sarea) | ||
1807 | drm_rmmap(dev, master_priv->sarea); | ||
1808 | |||
1809 | drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); | ||
1810 | |||
1811 | master->driver_priv = NULL; | ||
1812 | } | ||
1813 | |||
1771 | /* Create mappings for registers and framebuffer so userland doesn't necessarily | 1814 | /* Create mappings for registers and framebuffer so userland doesn't necessarily |
1772 | * have to find them. | 1815 | * have to find them. |
1773 | */ | 1816 | */ |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 3bbb871b25d5..490bc7ceef60 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -226,9 +226,13 @@ struct radeon_virt_surface { | |||
226 | #define RADEON_FLUSH_EMITED (1 < 0) | 226 | #define RADEON_FLUSH_EMITED (1 < 0) |
227 | #define RADEON_PURGE_EMITED (1 < 1) | 227 | #define RADEON_PURGE_EMITED (1 < 1) |
228 | 228 | ||
229 | struct drm_radeon_master_private { | ||
230 | drm_local_map_t *sarea; | ||
231 | drm_radeon_sarea_t *sarea_priv; | ||
232 | }; | ||
233 | |||
229 | typedef struct drm_radeon_private { | 234 | typedef struct drm_radeon_private { |
230 | drm_radeon_ring_buffer_t ring; | 235 | drm_radeon_ring_buffer_t ring; |
231 | drm_radeon_sarea_t *sarea_priv; | ||
232 | 236 | ||
233 | u32 fb_location; | 237 | u32 fb_location; |
234 | u32 fb_size; | 238 | u32 fb_size; |
@@ -409,6 +413,9 @@ extern int radeon_driver_open(struct drm_device *dev, | |||
409 | extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, | 413 | extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, |
410 | unsigned long arg); | 414 | unsigned long arg); |
411 | 415 | ||
416 | extern int radeon_master_create(struct drm_device *dev, struct drm_master *master); | ||
417 | extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master); | ||
418 | extern void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master); | ||
412 | /* r300_cmdbuf.c */ | 419 | /* r300_cmdbuf.c */ |
413 | extern void r300_init_reg_flags(struct drm_device *dev); | 420 | extern void r300_init_reg_flags(struct drm_device *dev); |
414 | 421 | ||
@@ -1335,8 +1342,9 @@ do { \ | |||
1335 | } while (0) | 1342 | } while (0) |
1336 | 1343 | ||
1337 | #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ | 1344 | #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ |
1338 | do { \ | 1345 | do { \ |
1339 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \ | 1346 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; \ |
1347 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; \ | ||
1340 | if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ | 1348 | if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ |
1341 | int __ret = radeon_do_cp_idle( dev_priv ); \ | 1349 | int __ret = radeon_do_cp_idle( dev_priv ); \ |
1342 | if ( __ret ) return __ret; \ | 1350 | if ( __ret ) return __ret; \ |
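One subtlety in the VB_AGE_TEST_WITH_RETURN() rewrite: the macro now expands a reference to a local named file_priv, so it can only be used inside ioctl handlers with the standard (dev, data, file_priv) signature, and only after the caller's master is known to be valid, i.e. after LOCK_TEST_WITH_RETURN(). A usage sketch under those assumptions; foo_vertex_ioctl() is a hypothetical caller:

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

/* Hypothetical caller: the dev_priv and file_priv names are exactly what
 * the macros below expect to find in scope.
 */
static int foo_vertex_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);	/* dereferences file_priv->master internally */

	/* ... emit vertices as radeon_cp_vertex() does ... */
	return 0;
}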
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 5d7153fcc7b0..ef940a079dcb 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c | |||
@@ -742,13 +742,14 @@ static struct { | |||
742 | */ | 742 | */ |
743 | 743 | ||
744 | static void radeon_clear_box(drm_radeon_private_t * dev_priv, | 744 | static void radeon_clear_box(drm_radeon_private_t * dev_priv, |
745 | struct drm_radeon_master_private *master_priv, | ||
745 | int x, int y, int w, int h, int r, int g, int b) | 746 | int x, int y, int w, int h, int r, int g, int b) |
746 | { | 747 | { |
747 | u32 color; | 748 | u32 color; |
748 | RING_LOCALS; | 749 | RING_LOCALS; |
749 | 750 | ||
750 | x += dev_priv->sarea_priv->boxes[0].x1; | 751 | x += master_priv->sarea_priv->boxes[0].x1; |
751 | y += dev_priv->sarea_priv->boxes[0].y1; | 752 | y += master_priv->sarea_priv->boxes[0].y1; |
752 | 753 | ||
753 | switch (dev_priv->color_fmt) { | 754 | switch (dev_priv->color_fmt) { |
754 | case RADEON_COLOR_FORMAT_RGB565: | 755 | case RADEON_COLOR_FORMAT_RGB565: |
@@ -776,7 +777,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv, | |||
776 | RADEON_GMC_SRC_DATATYPE_COLOR | | 777 | RADEON_GMC_SRC_DATATYPE_COLOR | |
777 | RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS); | 778 | RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS); |
778 | 779 | ||
779 | if (dev_priv->sarea_priv->pfCurrentPage == 1) { | 780 | if (master_priv->sarea_priv->pfCurrentPage == 1) { |
780 | OUT_RING(dev_priv->front_pitch_offset); | 781 | OUT_RING(dev_priv->front_pitch_offset); |
781 | } else { | 782 | } else { |
782 | OUT_RING(dev_priv->back_pitch_offset); | 783 | OUT_RING(dev_priv->back_pitch_offset); |
@@ -790,7 +791,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv, | |||
790 | ADVANCE_RING(); | 791 | ADVANCE_RING(); |
791 | } | 792 | } |
792 | 793 | ||
793 | static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) | 794 | static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv, struct drm_radeon_master_private *master_priv) |
794 | { | 795 | { |
795 | /* Collapse various things into a wait flag -- trying to | 796 | /* Collapse various things into a wait flag -- trying to |
796 | * guess if userspace slept -- better just to have them tell us. | 797 | * guess if userspace slept -- better just to have them tell us. |
@@ -807,12 +808,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) | |||
807 | /* Purple box for page flipping | 808 | /* Purple box for page flipping |
808 | */ | 809 | */ |
809 | if (dev_priv->stats.boxes & RADEON_BOX_FLIP) | 810 | if (dev_priv->stats.boxes & RADEON_BOX_FLIP) |
810 | radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255); | 811 | radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255); |
811 | 812 | ||
812 | /* Red box if we have to wait for idle at any point | 813 | /* Red box if we have to wait for idle at any point |
813 | */ | 814 | */ |
814 | if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE) | 815 | if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE) |
815 | radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0); | 816 | radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0); |
816 | 817 | ||
817 | /* Blue box: lost context? | 818 | /* Blue box: lost context? |
818 | */ | 819 | */ |
@@ -820,12 +821,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) | |||
820 | /* Yellow box for texture swaps | 821 | /* Yellow box for texture swaps |
821 | */ | 822 | */ |
822 | if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD) | 823 | if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD) |
823 | radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0); | 824 | radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0); |
824 | 825 | ||
825 | /* Green box if hardware never idles (as far as we can tell) | 826 | /* Green box if hardware never idles (as far as we can tell) |
826 | */ | 827 | */ |
827 | if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) | 828 | if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) |
828 | radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); | 829 | radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0); |
829 | 830 | ||
830 | /* Draw bars indicating number of buffers allocated | 831 | /* Draw bars indicating number of buffers allocated |
831 | * (not a great measure, easily confused) | 832 | * (not a great measure, easily confused) |
@@ -834,7 +835,7 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) | |||
834 | if (dev_priv->stats.requested_bufs > 100) | 835 | if (dev_priv->stats.requested_bufs > 100) |
835 | dev_priv->stats.requested_bufs = 100; | 836 | dev_priv->stats.requested_bufs = 100; |
836 | 837 | ||
837 | radeon_clear_box(dev_priv, 4, 16, | 838 | radeon_clear_box(dev_priv, master_priv, 4, 16, |
838 | dev_priv->stats.requested_bufs, 4, | 839 | dev_priv->stats.requested_bufs, 4, |
839 | 196, 128, 128); | 840 | 196, 128, 128); |
840 | } | 841 | } |
@@ -848,11 +849,13 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) | |||
848 | */ | 849 | */ |
849 | 850 | ||
850 | static void radeon_cp_dispatch_clear(struct drm_device * dev, | 851 | static void radeon_cp_dispatch_clear(struct drm_device * dev, |
852 | struct drm_master *master, | ||
851 | drm_radeon_clear_t * clear, | 853 | drm_radeon_clear_t * clear, |
852 | drm_radeon_clear_rect_t * depth_boxes) | 854 | drm_radeon_clear_rect_t * depth_boxes) |
853 | { | 855 | { |
854 | drm_radeon_private_t *dev_priv = dev->dev_private; | 856 | drm_radeon_private_t *dev_priv = dev->dev_private; |
855 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 857 | struct drm_radeon_master_private *master_priv = master->driver_priv; |
858 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
856 | drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; | 859 | drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; |
857 | int nbox = sarea_priv->nbox; | 860 | int nbox = sarea_priv->nbox; |
858 | struct drm_clip_rect *pbox = sarea_priv->boxes; | 861 | struct drm_clip_rect *pbox = sarea_priv->boxes; |
@@ -864,7 +867,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
864 | 867 | ||
865 | dev_priv->stats.clears++; | 868 | dev_priv->stats.clears++; |
866 | 869 | ||
867 | if (dev_priv->sarea_priv->pfCurrentPage == 1) { | 870 | if (sarea_priv->pfCurrentPage == 1) { |
868 | unsigned int tmp = flags; | 871 | unsigned int tmp = flags; |
869 | 872 | ||
870 | flags &= ~(RADEON_FRONT | RADEON_BACK); | 873 | flags &= ~(RADEON_FRONT | RADEON_BACK); |
@@ -890,7 +893,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
890 | 893 | ||
891 | /* Make sure we restore the 3D state next time. | 894 | /* Make sure we restore the 3D state next time. |
892 | */ | 895 | */ |
893 | dev_priv->sarea_priv->ctx_owner = 0; | 896 | sarea_priv->ctx_owner = 0; |
894 | 897 | ||
895 | for (i = 0; i < nbox; i++) { | 898 | for (i = 0; i < nbox; i++) { |
896 | int x = pbox[i].x1; | 899 | int x = pbox[i].x1; |
@@ -967,7 +970,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
967 | /* Make sure we restore the 3D state next time. | 970 | /* Make sure we restore the 3D state next time. |
968 | * we haven't touched any "normal" state - still need this? | 971 | * we haven't touched any "normal" state - still need this? |
969 | */ | 972 | */ |
970 | dev_priv->sarea_priv->ctx_owner = 0; | 973 | sarea_priv->ctx_owner = 0; |
971 | 974 | ||
972 | if ((dev_priv->flags & RADEON_HAS_HIERZ) | 975 | if ((dev_priv->flags & RADEON_HAS_HIERZ) |
973 | && (flags & RADEON_USE_HIERZ)) { | 976 | && (flags & RADEON_USE_HIERZ)) { |
@@ -1214,7 +1217,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
1214 | 1217 | ||
1215 | /* Make sure we restore the 3D state next time. | 1218 | /* Make sure we restore the 3D state next time. |
1216 | */ | 1219 | */ |
1217 | dev_priv->sarea_priv->ctx_owner = 0; | 1220 | sarea_priv->ctx_owner = 0; |
1218 | 1221 | ||
1219 | for (i = 0; i < nbox; i++) { | 1222 | for (i = 0; i < nbox; i++) { |
1220 | 1223 | ||
@@ -1285,7 +1288,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
1285 | 1288 | ||
1286 | /* Make sure we restore the 3D state next time. | 1289 | /* Make sure we restore the 3D state next time. |
1287 | */ | 1290 | */ |
1288 | dev_priv->sarea_priv->ctx_owner = 0; | 1291 | sarea_priv->ctx_owner = 0; |
1289 | 1292 | ||
1290 | for (i = 0; i < nbox; i++) { | 1293 | for (i = 0; i < nbox; i++) { |
1291 | 1294 | ||
@@ -1328,20 +1331,21 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, | |||
1328 | * wait on this value before performing the clear ioctl. We | 1331 | * wait on this value before performing the clear ioctl. We |
1329 | * need this because the card's so damned fast... | 1332 | * need this because the card's so damned fast... |
1330 | */ | 1333 | */ |
1331 | dev_priv->sarea_priv->last_clear++; | 1334 | sarea_priv->last_clear++; |
1332 | 1335 | ||
1333 | BEGIN_RING(4); | 1336 | BEGIN_RING(4); |
1334 | 1337 | ||
1335 | RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear); | 1338 | RADEON_CLEAR_AGE(sarea_priv->last_clear); |
1336 | RADEON_WAIT_UNTIL_IDLE(); | 1339 | RADEON_WAIT_UNTIL_IDLE(); |
1337 | 1340 | ||
1338 | ADVANCE_RING(); | 1341 | ADVANCE_RING(); |
1339 | } | 1342 | } |
1340 | 1343 | ||
1341 | static void radeon_cp_dispatch_swap(struct drm_device * dev) | 1344 | static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master) |
1342 | { | 1345 | { |
1343 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1346 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1344 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 1347 | struct drm_radeon_master_private *master_priv = master->driver_priv; |
1348 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
1345 | int nbox = sarea_priv->nbox; | 1349 | int nbox = sarea_priv->nbox; |
1346 | struct drm_clip_rect *pbox = sarea_priv->boxes; | 1350 | struct drm_clip_rect *pbox = sarea_priv->boxes; |
1347 | int i; | 1351 | int i; |
@@ -1351,7 +1355,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev) | |||
1351 | /* Do some trivial performance monitoring... | 1355 | /* Do some trivial performance monitoring... |
1352 | */ | 1356 | */ |
1353 | if (dev_priv->do_boxes) | 1357 | if (dev_priv->do_boxes) |
1354 | radeon_cp_performance_boxes(dev_priv); | 1358 | radeon_cp_performance_boxes(dev_priv, master_priv); |
1355 | 1359 | ||
1356 | /* Wait for the 3D stream to idle before dispatching the bitblt. | 1360 | /* Wait for the 3D stream to idle before dispatching the bitblt. |
1357 | * This will prevent data corruption between the two streams. | 1361 | * This will prevent data corruption between the two streams. |
@@ -1385,7 +1389,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev) | |||
1385 | /* Make this work even if front & back are flipped: | 1389 | /* Make this work even if front & back are flipped: |
1386 | */ | 1390 | */ |
1387 | OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1)); | 1391 | OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1)); |
1388 | if (dev_priv->sarea_priv->pfCurrentPage == 0) { | 1392 | if (sarea_priv->pfCurrentPage == 0) { |
1389 | OUT_RING(dev_priv->back_pitch_offset); | 1393 | OUT_RING(dev_priv->back_pitch_offset); |
1390 | OUT_RING(dev_priv->front_pitch_offset); | 1394 | OUT_RING(dev_priv->front_pitch_offset); |
1391 | } else { | 1395 | } else { |
@@ -1405,31 +1409,32 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev) | |||
1405 | * throttle the framerate by waiting for this value before | 1409 | * throttle the framerate by waiting for this value before |
1406 | * performing the swapbuffer ioctl. | 1410 | * performing the swapbuffer ioctl. |
1407 | */ | 1411 | */ |
1408 | dev_priv->sarea_priv->last_frame++; | 1412 | sarea_priv->last_frame++; |
1409 | 1413 | ||
1410 | BEGIN_RING(4); | 1414 | BEGIN_RING(4); |
1411 | 1415 | ||
1412 | RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); | 1416 | RADEON_FRAME_AGE(sarea_priv->last_frame); |
1413 | RADEON_WAIT_UNTIL_2D_IDLE(); | 1417 | RADEON_WAIT_UNTIL_2D_IDLE(); |
1414 | 1418 | ||
1415 | ADVANCE_RING(); | 1419 | ADVANCE_RING(); |
1416 | } | 1420 | } |
1417 | 1421 | ||
1418 | static void radeon_cp_dispatch_flip(struct drm_device * dev) | 1422 | void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master) |
1419 | { | 1423 | { |
1420 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1424 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1421 | struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle; | 1425 | struct drm_radeon_master_private *master_priv = master->driver_priv; |
1422 | int offset = (dev_priv->sarea_priv->pfCurrentPage == 1) | 1426 | struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle; |
1427 | int offset = (master_priv->sarea_priv->pfCurrentPage == 1) | ||
1423 | ? dev_priv->front_offset : dev_priv->back_offset; | 1428 | ? dev_priv->front_offset : dev_priv->back_offset; |
1424 | RING_LOCALS; | 1429 | RING_LOCALS; |
1425 | DRM_DEBUG("pfCurrentPage=%d\n", | 1430 | DRM_DEBUG("pfCurrentPage=%d\n", |
1426 | dev_priv->sarea_priv->pfCurrentPage); | 1431 | master_priv->sarea_priv->pfCurrentPage); |
1427 | 1432 | ||
1428 | /* Do some trivial performance monitoring... | 1433 | /* Do some trivial performance monitoring... |
1429 | */ | 1434 | */ |
1430 | if (dev_priv->do_boxes) { | 1435 | if (dev_priv->do_boxes) { |
1431 | dev_priv->stats.boxes |= RADEON_BOX_FLIP; | 1436 | dev_priv->stats.boxes |= RADEON_BOX_FLIP; |
1432 | radeon_cp_performance_boxes(dev_priv); | 1437 | radeon_cp_performance_boxes(dev_priv, master_priv); |
1433 | } | 1438 | } |
1434 | 1439 | ||
1435 | /* Update the frame offsets for both CRTCs | 1440 | /* Update the frame offsets for both CRTCs |
@@ -1441,7 +1446,7 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev) | |||
1441 | ((sarea->frame.y * dev_priv->front_pitch + | 1446 | ((sarea->frame.y * dev_priv->front_pitch + |
1442 | sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7) | 1447 | sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7) |
1443 | + offset); | 1448 | + offset); |
1444 | OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base | 1449 | OUT_RING_REG(RADEON_CRTC2_OFFSET, master_priv->sarea_priv->crtc2_base |
1445 | + offset); | 1450 | + offset); |
1446 | 1451 | ||
1447 | ADVANCE_RING(); | 1452 | ADVANCE_RING(); |
@@ -1450,13 +1455,13 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev) | |||
1450 | * throttle the framerate by waiting for this value before | 1455 | * throttle the framerate by waiting for this value before |
1451 | * performing the swapbuffer ioctl. | 1456 | * performing the swapbuffer ioctl. |
1452 | */ | 1457 | */ |
1453 | dev_priv->sarea_priv->last_frame++; | 1458 | master_priv->sarea_priv->last_frame++; |
1454 | dev_priv->sarea_priv->pfCurrentPage = | 1459 | master_priv->sarea_priv->pfCurrentPage = |
1455 | 1 - dev_priv->sarea_priv->pfCurrentPage; | 1460 | 1 - master_priv->sarea_priv->pfCurrentPage; |
1456 | 1461 | ||
1457 | BEGIN_RING(2); | 1462 | BEGIN_RING(2); |
1458 | 1463 | ||
1459 | RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); | 1464 | RADEON_FRAME_AGE(master_priv->sarea_priv->last_frame); |
1460 | 1465 | ||
1461 | ADVANCE_RING(); | 1466 | ADVANCE_RING(); |
1462 | } | 1467 | } |
@@ -1494,11 +1499,13 @@ typedef struct { | |||
1494 | } drm_radeon_tcl_prim_t; | 1499 | } drm_radeon_tcl_prim_t; |
1495 | 1500 | ||
1496 | static void radeon_cp_dispatch_vertex(struct drm_device * dev, | 1501 | static void radeon_cp_dispatch_vertex(struct drm_device * dev, |
1502 | struct drm_file *file_priv, | ||
1497 | struct drm_buf * buf, | 1503 | struct drm_buf * buf, |
1498 | drm_radeon_tcl_prim_t * prim) | 1504 | drm_radeon_tcl_prim_t * prim) |
1499 | { | 1505 | { |
1500 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1506 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1501 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 1507 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
1508 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
1502 | int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start; | 1509 | int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start; |
1503 | int numverts = (int)prim->numverts; | 1510 | int numverts = (int)prim->numverts; |
1504 | int nbox = sarea_priv->nbox; | 1511 | int nbox = sarea_priv->nbox; |
@@ -1539,13 +1546,14 @@ static void radeon_cp_dispatch_vertex(struct drm_device * dev, | |||
1539 | } while (i < nbox); | 1546 | } while (i < nbox); |
1540 | } | 1547 | } |
1541 | 1548 | ||
1542 | static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf) | 1549 | static void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf) |
1543 | { | 1550 | { |
1544 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1551 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1552 | struct drm_radeon_master_private *master_priv = master->driver_priv; | ||
1545 | drm_radeon_buf_priv_t *buf_priv = buf->dev_private; | 1553 | drm_radeon_buf_priv_t *buf_priv = buf->dev_private; |
1546 | RING_LOCALS; | 1554 | RING_LOCALS; |
1547 | 1555 | ||
1548 | buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; | 1556 | buf_priv->age = ++master_priv->sarea_priv->last_dispatch; |
1549 | 1557 | ||
1550 | /* Emit the vertex buffer age */ | 1558 | /* Emit the vertex buffer age */ |
1551 | BEGIN_RING(2); | 1559 | BEGIN_RING(2); |
@@ -1590,12 +1598,14 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev, | |||
1590 | } | 1598 | } |
1591 | } | 1599 | } |
1592 | 1600 | ||
1593 | static void radeon_cp_dispatch_indices(struct drm_device * dev, | 1601 | static void radeon_cp_dispatch_indices(struct drm_device *dev, |
1602 | struct drm_master *master, | ||
1594 | struct drm_buf * elt_buf, | 1603 | struct drm_buf * elt_buf, |
1595 | drm_radeon_tcl_prim_t * prim) | 1604 | drm_radeon_tcl_prim_t * prim) |
1596 | { | 1605 | { |
1597 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1606 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1598 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 1607 | struct drm_radeon_master_private *master_priv = master->driver_priv; |
1608 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
1599 | int offset = dev_priv->gart_buffers_offset + prim->offset; | 1609 | int offset = dev_priv->gart_buffers_offset + prim->offset; |
1600 | u32 *data; | 1610 | u32 *data; |
1601 | int dwords; | 1611 | int dwords; |
@@ -1870,7 +1880,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev, | |||
1870 | ADVANCE_RING(); | 1880 | ADVANCE_RING(); |
1871 | COMMIT_RING(); | 1881 | COMMIT_RING(); |
1872 | 1882 | ||
1873 | radeon_cp_discard_buffer(dev, buf); | 1883 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
1874 | 1884 | ||
1875 | /* Update the input parameters for next time */ | 1885 | /* Update the input parameters for next time */ |
1876 | image->y += height; | 1886 | image->y += height; |
@@ -2110,7 +2120,8 @@ static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_fi | |||
2110 | static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) | 2120 | static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) |
2111 | { | 2121 | { |
2112 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2122 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2113 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 2123 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
2124 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
2114 | drm_radeon_clear_t *clear = data; | 2125 | drm_radeon_clear_t *clear = data; |
2115 | drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; | 2126 | drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; |
2116 | DRM_DEBUG("\n"); | 2127 | DRM_DEBUG("\n"); |
@@ -2126,7 +2137,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file * | |||
2126 | sarea_priv->nbox * sizeof(depth_boxes[0]))) | 2137 | sarea_priv->nbox * sizeof(depth_boxes[0]))) |
2127 | return -EFAULT; | 2138 | return -EFAULT; |
2128 | 2139 | ||
2129 | radeon_cp_dispatch_clear(dev, clear, depth_boxes); | 2140 | radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes); |
2130 | 2141 | ||
2131 | COMMIT_RING(); | 2142 | COMMIT_RING(); |
2132 | return 0; | 2143 | return 0; |
@@ -2134,9 +2145,10 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file * | |||
2134 | 2145 | ||
2135 | /* Not sure why this isn't set all the time: | 2146 | /* Not sure why this isn't set all the time: |
2136 | */ | 2147 | */ |
2137 | static int radeon_do_init_pageflip(struct drm_device * dev) | 2148 | static int radeon_do_init_pageflip(struct drm_device *dev, struct drm_master *master) |
2138 | { | 2149 | { |
2139 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2150 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2151 | struct drm_radeon_master_private *master_priv = master->driver_priv; | ||
2140 | RING_LOCALS; | 2152 | RING_LOCALS; |
2141 | 2153 | ||
2142 | DRM_DEBUG("\n"); | 2154 | DRM_DEBUG("\n"); |
@@ -2153,8 +2165,8 @@ static int radeon_do_init_pageflip(struct drm_device * dev) | |||
2153 | 2165 | ||
2154 | dev_priv->page_flipping = 1; | 2166 | dev_priv->page_flipping = 1; |
2155 | 2167 | ||
2156 | if (dev_priv->sarea_priv->pfCurrentPage != 1) | 2168 | if (master_priv->sarea_priv->pfCurrentPage != 1) |
2157 | dev_priv->sarea_priv->pfCurrentPage = 0; | 2169 | master_priv->sarea_priv->pfCurrentPage = 0; |
2158 | 2170 | ||
2159 | return 0; | 2171 | return 0; |
2160 | } | 2172 | } |
@@ -2172,9 +2184,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f | |||
2172 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 2184 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
2173 | 2185 | ||
2174 | if (!dev_priv->page_flipping) | 2186 | if (!dev_priv->page_flipping) |
2175 | radeon_do_init_pageflip(dev); | 2187 | radeon_do_init_pageflip(dev, file_priv->master); |
2176 | 2188 | ||
2177 | radeon_cp_dispatch_flip(dev); | 2189 | radeon_cp_dispatch_flip(dev, file_priv->master); |
2178 | 2190 | ||
2179 | COMMIT_RING(); | 2191 | COMMIT_RING(); |
2180 | return 0; | 2192 | return 0; |
@@ -2183,7 +2195,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f | |||
2183 | static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) | 2195 | static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) |
2184 | { | 2196 | { |
2185 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2197 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2186 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 2198 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
2199 | drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
2200 | |||
2187 | DRM_DEBUG("\n"); | 2201 | DRM_DEBUG("\n"); |
2188 | 2202 | ||
2189 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 2203 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
@@ -2193,8 +2207,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f | |||
2193 | if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) | 2207 | if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) |
2194 | sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; | 2208 | sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; |
2195 | 2209 | ||
2196 | radeon_cp_dispatch_swap(dev); | 2210 | radeon_cp_dispatch_swap(dev, file_priv->master); |
2197 | dev_priv->sarea_priv->ctx_owner = 0; | 2211 | sarea_priv->ctx_owner = 0; |
2198 | 2212 | ||
2199 | COMMIT_RING(); | 2213 | COMMIT_RING(); |
2200 | return 0; | 2214 | return 0; |
@@ -2203,7 +2217,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f | |||
2203 | static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) | 2217 | static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) |
2204 | { | 2218 | { |
2205 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2219 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2206 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 2220 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
2221 | drm_radeon_sarea_t *sarea_priv; | ||
2207 | struct drm_device_dma *dma = dev->dma; | 2222 | struct drm_device_dma *dma = dev->dma; |
2208 | struct drm_buf *buf; | 2223 | struct drm_buf *buf; |
2209 | drm_radeon_vertex_t *vertex = data; | 2224 | drm_radeon_vertex_t *vertex = data; |
@@ -2211,6 +2226,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file | |||
2211 | 2226 | ||
2212 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 2227 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
2213 | 2228 | ||
2229 | sarea_priv = master_priv->sarea_priv; | ||
2230 | |||
2214 | DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", | 2231 | DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", |
2215 | DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); | 2232 | DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); |
2216 | 2233 | ||
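From here on every converted radeon ioctl reaches its SAREA the same way: through file_priv->master rather than a device-global pointer, and sarea_priv is only dereferenced once LOCK_TEST_WITH_RETURN() has succeeded (note how the assignment above now happens after the lock test). That access path, written out as a hypothetical helper:

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

/* Hypothetical accessor: the SAREA private of the master that issued an
 * ioctl.  Callers are expected to have passed LOCK_TEST_WITH_RETURN()
 * for this file_priv first.
 */
static drm_radeon_sarea_t *foo_sarea_from_file(struct drm_file *file_priv)
{
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;

	return master_priv->sarea_priv;
}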
@@ -2263,13 +2280,13 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file | |||
2263 | prim.finish = vertex->count; /* unused */ | 2280 | prim.finish = vertex->count; /* unused */ |
2264 | prim.prim = vertex->prim; | 2281 | prim.prim = vertex->prim; |
2265 | prim.numverts = vertex->count; | 2282 | prim.numverts = vertex->count; |
2266 | prim.vc_format = dev_priv->sarea_priv->vc_format; | 2283 | prim.vc_format = sarea_priv->vc_format; |
2267 | 2284 | ||
2268 | radeon_cp_dispatch_vertex(dev, buf, &prim); | 2285 | radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim); |
2269 | } | 2286 | } |
2270 | 2287 | ||
2271 | if (vertex->discard) { | 2288 | if (vertex->discard) { |
2272 | radeon_cp_discard_buffer(dev, buf); | 2289 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
2273 | } | 2290 | } |
2274 | 2291 | ||
2275 | COMMIT_RING(); | 2292 | COMMIT_RING(); |
@@ -2279,7 +2296,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file | |||
2279 | static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) | 2296 | static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) |
2280 | { | 2297 | { |
2281 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2298 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2282 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 2299 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
2300 | drm_radeon_sarea_t *sarea_priv; | ||
2283 | struct drm_device_dma *dma = dev->dma; | 2301 | struct drm_device_dma *dma = dev->dma; |
2284 | struct drm_buf *buf; | 2302 | struct drm_buf *buf; |
2285 | drm_radeon_indices_t *elts = data; | 2303 | drm_radeon_indices_t *elts = data; |
@@ -2288,6 +2306,8 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file | |||
2288 | 2306 | ||
2289 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 2307 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
2290 | 2308 | ||
2309 | sarea_priv = master_priv->sarea_priv; | ||
2310 | |||
2291 | DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", | 2311 | DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", |
2292 | DRM_CURRENTPID, elts->idx, elts->start, elts->end, | 2312 | DRM_CURRENTPID, elts->idx, elts->start, elts->end, |
2293 | elts->discard); | 2313 | elts->discard); |
@@ -2353,11 +2373,11 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file | |||
2353 | prim.prim = elts->prim; | 2373 | prim.prim = elts->prim; |
2354 | prim.offset = 0; /* offset from start of dma buffers */ | 2374 | prim.offset = 0; /* offset from start of dma buffers */ |
2355 | prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ | 2375 | prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ |
2356 | prim.vc_format = dev_priv->sarea_priv->vc_format; | 2376 | prim.vc_format = sarea_priv->vc_format; |
2357 | 2377 | ||
2358 | radeon_cp_dispatch_indices(dev, buf, &prim); | 2378 | radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim); |
2359 | if (elts->discard) { | 2379 | if (elts->discard) { |
2360 | radeon_cp_discard_buffer(dev, buf); | 2380 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
2361 | } | 2381 | } |
2362 | 2382 | ||
2363 | COMMIT_RING(); | 2383 | COMMIT_RING(); |
@@ -2468,7 +2488,7 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil | |||
2468 | */ | 2488 | */ |
2469 | radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end); | 2489 | radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end); |
2470 | if (indirect->discard) { | 2490 | if (indirect->discard) { |
2471 | radeon_cp_discard_buffer(dev, buf); | 2491 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
2472 | } | 2492 | } |
2473 | 2493 | ||
2474 | COMMIT_RING(); | 2494 | COMMIT_RING(); |
@@ -2478,7 +2498,8 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil | |||
2478 | static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv) | 2498 | static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv) |
2479 | { | 2499 | { |
2480 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2500 | drm_radeon_private_t *dev_priv = dev->dev_private; |
2481 | drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; | 2501 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; |
2502 | drm_radeon_sarea_t *sarea_priv; | ||
2482 | struct drm_device_dma *dma = dev->dma; | 2503 | struct drm_device_dma *dma = dev->dma; |
2483 | struct drm_buf *buf; | 2504 | struct drm_buf *buf; |
2484 | drm_radeon_vertex2_t *vertex = data; | 2505 | drm_radeon_vertex2_t *vertex = data; |
@@ -2487,6 +2508,8 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file | |||
2487 | 2508 | ||
2488 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 2509 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
2489 | 2510 | ||
2511 | sarea_priv = master_priv->sarea_priv; | ||
2512 | |||
2490 | DRM_DEBUG("pid=%d index=%d discard=%d\n", | 2513 | DRM_DEBUG("pid=%d index=%d discard=%d\n", |
2491 | DRM_CURRENTPID, vertex->idx, vertex->discard); | 2514 | DRM_CURRENTPID, vertex->idx, vertex->discard); |
2492 | 2515 | ||
@@ -2547,12 +2570,12 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file | |||
2547 | tclprim.offset = prim.numverts * 64; | 2570 | tclprim.offset = prim.numverts * 64; |
2548 | tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */ | 2571 | tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */ |
2549 | 2572 | ||
2550 | radeon_cp_dispatch_indices(dev, buf, &tclprim); | 2573 | radeon_cp_dispatch_indices(dev, file_priv->master, buf, &tclprim); |
2551 | } else { | 2574 | } else { |
2552 | tclprim.numverts = prim.numverts; | 2575 | tclprim.numverts = prim.numverts; |
2553 | tclprim.offset = 0; /* not used */ | 2576 | tclprim.offset = 0; /* not used */ |
2554 | 2577 | ||
2555 | radeon_cp_dispatch_vertex(dev, buf, &tclprim); | 2578 | radeon_cp_dispatch_vertex(dev, file_priv, buf, &tclprim); |
2556 | } | 2579 | } |
2557 | 2580 | ||
2558 | if (sarea_priv->nbox == 1) | 2581 | if (sarea_priv->nbox == 1) |
@@ -2560,7 +2583,7 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file | |||
2560 | } | 2583 | } |
2561 | 2584 | ||
2562 | if (vertex->discard) { | 2585 | if (vertex->discard) { |
2563 | radeon_cp_discard_buffer(dev, buf); | 2586 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
2564 | } | 2587 | } |
2565 | 2588 | ||
2566 | COMMIT_RING(); | 2589 | COMMIT_RING(); |
@@ -2909,7 +2932,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file | |||
2909 | goto err; | 2932 | goto err; |
2910 | } | 2933 | } |
2911 | 2934 | ||
2912 | radeon_cp_discard_buffer(dev, buf); | 2935 | radeon_cp_discard_buffer(dev, file_priv->master, buf); |
2913 | break; | 2936 | break; |
2914 | 2937 | ||
2915 | case RADEON_CMD_PACKET3: | 2938 | case RADEON_CMD_PACKET3: |
@@ -3020,7 +3043,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil | |||
3020 | */ | 3043 | */ |
3021 | case RADEON_PARAM_SAREA_HANDLE: | 3044 | case RADEON_PARAM_SAREA_HANDLE: |
3022 | /* The lock is the first dword in the sarea. */ | 3045 | /* The lock is the first dword in the sarea. */ |
3023 | value = (long)dev->lock.hw_lock; | 3046 | /* no users of this parameter */ |
3024 | break; | 3047 | break; |
3025 | #endif | 3048 | #endif |
3026 | case RADEON_PARAM_GART_TEX_HANDLE: | 3049 | case RADEON_PARAM_GART_TEX_HANDLE: |
@@ -3064,6 +3087,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil | |||
3064 | static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) | 3087 | static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) |
3065 | { | 3088 | { |
3066 | drm_radeon_private_t *dev_priv = dev->dev_private; | 3089 | drm_radeon_private_t *dev_priv = dev->dev_private; |
3090 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; | ||
3067 | drm_radeon_setparam_t *sp = data; | 3091 | drm_radeon_setparam_t *sp = data; |
3068 | struct drm_radeon_driver_file_fields *radeon_priv; | 3092 | struct drm_radeon_driver_file_fields *radeon_priv; |
3069 | 3093 | ||
@@ -3078,12 +3102,14 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil | |||
3078 | DRM_DEBUG("color tiling disabled\n"); | 3102 | DRM_DEBUG("color tiling disabled\n"); |
3079 | dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; | 3103 | dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; |
3080 | dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; | 3104 | dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; |
3081 | dev_priv->sarea_priv->tiling_enabled = 0; | 3105 | if (master_priv->sarea_priv) |
3106 | master_priv->sarea_priv->tiling_enabled = 0; | ||
3082 | } else if (sp->value == 1) { | 3107 | } else if (sp->value == 1) { |
3083 | DRM_DEBUG("color tiling enabled\n"); | 3108 | DRM_DEBUG("color tiling enabled\n"); |
3084 | dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; | 3109 | dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; |
3085 | dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; | 3110 | dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; |
3086 | dev_priv->sarea_priv->tiling_enabled = 1; | 3111 | if (master_priv->sarea_priv) |
3112 | master_priv->sarea_priv->tiling_enabled = 1; | ||
3087 | } | 3113 | } |
3088 | break; | 3114 | break; |
3089 | case RADEON_SETPARAM_PCIGART_LOCATION: | 3115 | case RADEON_SETPARAM_PCIGART_LOCATION: |
@@ -3129,14 +3155,6 @@ void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) | |||
3129 | 3155 | ||
3130 | void radeon_driver_lastclose(struct drm_device *dev) | 3156 | void radeon_driver_lastclose(struct drm_device *dev) |
3131 | { | 3157 | { |
3132 | if (dev->dev_private) { | ||
3133 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
3134 | |||
3135 | if (dev_priv->sarea_priv && | ||
3136 | dev_priv->sarea_priv->pfCurrentPage != 0) | ||
3137 | radeon_cp_dispatch_flip(dev); | ||
3138 | } | ||
3139 | |||
3140 | radeon_do_release(dev); | 3158 | radeon_do_release(dev); |
3141 | } | 3159 | } |
3142 | 3160 | ||
diff --git a/include/drm/drm.h b/include/drm/drm.h index f46ba4b57da4..3fb173c5af3e 100644 --- a/include/drm/drm.h +++ b/include/drm/drm.h | |||
@@ -634,6 +634,9 @@ struct drm_gem_open { | |||
634 | #define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) | 634 | #define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) |
635 | #define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) | 635 | #define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) |
636 | 636 | ||
637 | #define DRM_IOCTL_SET_MASTER DRM_IO(0x1e) | ||
638 | #define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f) | ||
639 | |||
637 | #define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) | 640 | #define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) |
638 | #define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) | 641 | #define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) |
639 | #define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) | 642 | #define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) |
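DRM_IOCTL_SET_MASTER and DRM_IOCTL_DROP_MASTER are declared with DRM_IO(), so they carry no payload; a process simply issues them on an open device node to take or give up the master role, for example around a VT switch. A minimal user-space sketch, assuming the updated drm.h is reachable as <drm/drm.h> and that /dev/dri/card0 is the node of interest; both of those are assumptions, not part of this patch:

/* Sketch: exercising the new master ioctls from user space. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>		/* assumed install path for the updated header */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* become master for this node; no argument is passed */
	if (ioctl(fd, DRM_IOCTL_SET_MASTER, 0) < 0)
		perror("DRM_IOCTL_SET_MASTER");

	/* ... act as master while the VT is active ... */

	/* hand the master role back, e.g. before switching away */
	if (ioctl(fd, DRM_IOCTL_DROP_MASTER, 0) < 0)
		perror("DRM_IOCTL_DROP_MASTER");

	close(fd);
	return 0;
}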
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 08b8539e7b3c..4c6e8298b424 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -238,11 +238,11 @@ struct drm_device; | |||
238 | */ | 238 | */ |
239 | #define LOCK_TEST_WITH_RETURN( dev, file_priv ) \ | 239 | #define LOCK_TEST_WITH_RETURN( dev, file_priv ) \ |
240 | do { \ | 240 | do { \ |
241 | if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \ | 241 | if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock) || \ |
242 | dev->lock.file_priv != file_priv ) { \ | 242 | file_priv->master->lock.file_priv != file_priv) { \ |
243 | DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ | 243 | DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ |
244 | __func__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\ | 244 | __func__, _DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock),\ |
245 | dev->lock.file_priv, file_priv ); \ | 245 | file_priv->master->lock.file_priv, file_priv); \ |
246 | return -EINVAL; \ | 246 | return -EINVAL; \ |
247 | } \ | 247 | } \ |
248 | } while (0) | 248 | } while (0) |
@@ -379,21 +379,25 @@ struct drm_buf_entry { | |||
379 | /** File private data */ | 379 | /** File private data */ |
380 | struct drm_file { | 380 | struct drm_file { |
381 | int authenticated; | 381 | int authenticated; |
382 | int master; | ||
383 | pid_t pid; | 382 | pid_t pid; |
384 | uid_t uid; | 383 | uid_t uid; |
385 | drm_magic_t magic; | 384 | drm_magic_t magic; |
386 | unsigned long ioctl_count; | 385 | unsigned long ioctl_count; |
387 | struct list_head lhead; | 386 | struct list_head lhead; |
388 | struct drm_minor *minor; | 387 | struct drm_minor *minor; |
389 | int remove_auth_on_close; | ||
390 | unsigned long lock_count; | 388 | unsigned long lock_count; |
389 | |||
391 | /** Mapping of mm object handles to object pointers. */ | 390 | /** Mapping of mm object handles to object pointers. */ |
392 | struct idr object_idr; | 391 | struct idr object_idr; |
393 | /** Lock for synchronization of access to object_idr. */ | 392 | /** Lock for synchronization of access to object_idr. */ |
394 | spinlock_t table_lock; | 393 | spinlock_t table_lock; |
394 | |||
395 | struct file *filp; | 395 | struct file *filp; |
396 | void *driver_priv; | 396 | void *driver_priv; |
397 | |||
398 | int is_master; /* this file private is a master for a minor */ | ||
399 | struct drm_master *master; /* master this node is currently associated with | ||
400 | N.B. not always minor->master */ | ||
397 | }; | 401 | }; |
398 | 402 | ||
399 | /** Wait queue */ | 403 | /** Wait queue */ |
@@ -523,6 +527,7 @@ struct drm_map_list { | |||
523 | struct drm_hash_item hash; | 527 | struct drm_hash_item hash; |
524 | struct drm_map *map; /**< mapping */ | 528 | struct drm_map *map; /**< mapping */ |
525 | uint64_t user_token; | 529 | uint64_t user_token; |
530 | struct drm_master *master; | ||
526 | }; | 531 | }; |
527 | 532 | ||
528 | typedef struct drm_map drm_local_map_t; | 533 | typedef struct drm_map drm_local_map_t; |
@@ -612,6 +617,30 @@ struct drm_gem_object { | |||
612 | void *driver_private; | 617 | void *driver_private; |
613 | }; | 618 | }; |
614 | 619 | ||
620 | /* per-master structure */ | ||
621 | struct drm_master { | ||
622 | |||
623 | struct kref refcount; /* refcount for this master */ | ||
624 | |||
625 | struct list_head head; /**< each minor contains a list of masters */ | ||
626 | struct drm_minor *minor; /**< link back to minor we are a master for */ | ||
627 | |||
628 | char *unique; /**< Unique identifier: e.g., busid */ | ||
629 | int unique_len; /**< Length of unique field */ | ||
630 | |||
631 | int blocked; /**< Blocked due to VC switch? */ | ||
632 | |||
633 | /** \name Authentication */ | ||
634 | /*@{ */ | ||
635 | struct drm_open_hash magiclist; | ||
636 | struct list_head magicfree; | ||
637 | /*@} */ | ||
638 | |||
639 | struct drm_lock_data lock; /**< Information on hardware lock */ | ||
640 | |||
641 | void *driver_priv; /**< Private structure for driver to use */ | ||
642 | }; | ||
643 | |||
615 | /** | 644 | /** |
616 | * DRM driver structure. This structure represent the common code for | 645 | * DRM driver structure. This structure represent the common code for |
617 | * a family of cards. There will one drm_device for each card present | 646 | * a family of cards. There will one drm_device for each card present |
@@ -712,6 +741,10 @@ struct drm_driver { | |||
712 | void (*set_version) (struct drm_device *dev, | 741 | void (*set_version) (struct drm_device *dev, |
713 | struct drm_set_version *sv); | 742 | struct drm_set_version *sv); |
714 | 743 | ||
744 | /* Master routines */ | ||
745 | int (*master_create)(struct drm_device *dev, struct drm_master *master); | ||
746 | void (*master_destroy)(struct drm_device *dev, struct drm_master *master); | ||
747 | |||
715 | int (*proc_init)(struct drm_minor *minor); | 748 | int (*proc_init)(struct drm_minor *minor); |
716 | void (*proc_cleanup)(struct drm_minor *minor); | 749 | void (*proc_cleanup)(struct drm_minor *minor); |
717 | 750 | ||
@@ -754,6 +787,8 @@ struct drm_minor { | |||
754 | struct device kdev; /**< Linux device */ | 787 | struct device kdev; /**< Linux device */ |
755 | struct drm_device *dev; | 788 | struct drm_device *dev; |
756 | struct proc_dir_entry *dev_root; /**< proc directory entry */ | 789 | struct proc_dir_entry *dev_root; /**< proc directory entry */ |
790 | struct drm_master *master; /* currently active master for this node */ | ||
791 | struct list_head master_list; | ||
757 | }; | 792 | }; |
758 | 793 | ||
759 | /** | 794 | /** |
@@ -762,13 +797,9 @@ struct drm_minor { | |||
762 | */ | 797 | */ |
763 | struct drm_device { | 798 | struct drm_device { |
764 | struct list_head driver_item; /**< list of devices per driver */ | 799 | struct list_head driver_item; /**< list of devices per driver */ |
765 | char *unique; /**< Unique identifier: e.g., busid */ | ||
766 | int unique_len; /**< Length of unique field */ | ||
767 | char *devname; /**< For /proc/interrupts */ | 800 | char *devname; /**< For /proc/interrupts */ |
768 | int if_version; /**< Highest interface version set */ | 801 | int if_version; /**< Highest interface version set */ |
769 | 802 | ||
770 | int blocked; /**< Blocked due to VC switch? */ | ||
771 | |||
772 | /** \name Locks */ | 803 | /** \name Locks */ |
773 | /*@{ */ | 804 | /*@{ */ |
774 | spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ | 805 | spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ |
@@ -791,12 +822,7 @@ struct drm_device { | |||
791 | atomic_t counts[15]; | 822 | atomic_t counts[15]; |
792 | /*@} */ | 823 | /*@} */ |
793 | 824 | ||
794 | /** \name Authentication */ | ||
795 | /*@{ */ | ||
796 | struct list_head filelist; | 825 | struct list_head filelist; |
797 | struct drm_open_hash magiclist; /**< magic hash table */ | ||
798 | struct list_head magicfree; | ||
799 | /*@} */ | ||
800 | 826 | ||
801 | /** \name Memory management */ | 827 | /** \name Memory management */ |
802 | /*@{ */ | 828 | /*@{ */ |
@@ -813,7 +839,6 @@ struct drm_device { | |||
813 | struct idr ctx_idr; | 839 | struct idr ctx_idr; |
814 | 840 | ||
815 | struct list_head vmalist; /**< List of vmas (for debugging) */ | 841 | struct list_head vmalist; /**< List of vmas (for debugging) */ |
816 | struct drm_lock_data lock; /**< Information on hardware lock */ | ||
817 | /*@} */ | 842 | /*@} */ |
818 | 843 | ||
819 | /** \name DMA queues (contexts) */ | 844 | /** \name DMA queues (contexts) */ |
@@ -1192,6 +1217,13 @@ extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); | |||
1192 | extern void drm_agp_chipset_flush(struct drm_device *dev); | 1217 | extern void drm_agp_chipset_flush(struct drm_device *dev); |
1193 | 1218 | ||
1194 | /* Stub support (drm_stub.h) */ | 1219 | /* Stub support (drm_stub.h) */ |
1220 | extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, | ||
1221 | struct drm_file *file_priv); | ||
1222 | extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data, | ||
1223 | struct drm_file *file_priv); | ||
1224 | struct drm_master *drm_master_create(struct drm_minor *minor); | ||
1225 | extern struct drm_master *drm_master_get(struct drm_master *master); | ||
1226 | extern void drm_master_put(struct drm_master **master); | ||
1195 | extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, | 1227 | extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, |
1196 | struct drm_driver *driver); | 1228 | struct drm_driver *driver); |
1197 | extern int drm_put_dev(struct drm_device *dev); | 1229 | extern int drm_put_dev(struct drm_device *dev); |
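With the new master_create/master_destroy hooks in struct drm_driver, a driver can hang its per-master state (for radeon and i915 in this patch, the SAREA bookkeeping) off master->driver_priv instead of the device. A rough sketch of such hooks; only the hook signatures and the driver_priv field come from the header above, while foo_master_private and the bodies are hypothetical:

/* Hypothetical driver hooks; signatures as declared in drmP.h above. */
#include <linux/slab.h>
#include "drmP.h"

struct foo_master_private {
	void *sarea_priv;	/* per-master view into the shared area */
};

static int foo_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct foo_master_private *mp;

	mp = kzalloc(sizeof(*mp), GFP_KERNEL);
	if (!mp)
		return -ENOMEM;
	master->driver_priv = mp;
	return 0;
}

static void foo_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	kfree(master->driver_priv);
	master->driver_priv = NULL;
}

A driver would then point .master_create and .master_destroy in its struct drm_driver at these functions; the core is expected to call them when a master is created for a node and when its last reference goes away.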
diff --git a/include/drm/drm_sarea.h b/include/drm/drm_sarea.h index 480037331e4e..ee5389d22c64 100644 --- a/include/drm/drm_sarea.h +++ b/include/drm/drm_sarea.h | |||
@@ -36,12 +36,12 @@ | |||
36 | 36 | ||
37 | /* SAREA area needs to be at least a page */ | 37 | /* SAREA area needs to be at least a page */ |
38 | #if defined(__alpha__) | 38 | #if defined(__alpha__) |
39 | #define SAREA_MAX 0x2000 | 39 | #define SAREA_MAX 0x2000U |
40 | #elif defined(__ia64__) | 40 | #elif defined(__ia64__) |
41 | #define SAREA_MAX 0x10000 /* 64kB */ | 41 | #define SAREA_MAX 0x10000U /* 64kB */ |
42 | #else | 42 | #else |
43 | /* Intel 830M driver needs at least 8k SAREA */ | 43 | /* Intel 830M driver needs at least 8k SAREA */ |
44 | #define SAREA_MAX 0x2000 | 44 | #define SAREA_MAX 0x2000U |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | /** Maximum number of drawables in the SAREA */ | 47 | /** Maximum number of drawables in the SAREA */ |
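Finally, the lifetime of the new struct drm_master is reference counted through its kref: drm_master_get() takes an extra reference and drm_master_put() drops one, taking a struct drm_master ** so it can also clear the caller's pointer. A hedged sketch of how a file private might hold such a reference, based only on the prototypes and fields declared in drmP.h above; the attach/detach helpers themselves are illustrative, not the actual drm_fops.c changes:

/* Illustrative only: taking and dropping a master reference for a file. */
static void example_attach_master(struct drm_file *priv)
{
	/* share the minor's currently active master, bumping its refcount */
	if (priv->minor->master)
		priv->master = drm_master_get(priv->minor->master);
}

static void example_detach_master(struct drm_file *priv)
{
	/* drops one reference; priv->master is cleared via the ** argument */
	if (priv->master)
		drm_master_put(&priv->master);
}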