author		Alan Cox <alan@linux.intel.com>	2011-11-03 14:21:09 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-11-16 06:23:38 -0500
commit		8c8f1c958ab5e948e954ebd97e328f23d347293b (patch)
tree		d2539a02b51207edc29d437b6ccf34a881cbd02e /drivers/gpu/drm
parent		e32681d66dd33a7792a3f1a1e3ea0eb0c415f895 (diff)
gma500: introduce the GTT and MMU handling logic
This fits alongside the GEM support to manage our resources on the card itself. It's not actually clear we need to configure the MMU at all. Further research is needed before removing it entirely. For now we suck it in (slightly abused) from the old semi-free driver.

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/gma500/gtt.c	500
-rw-r--r--	drivers/gpu/drm/gma500/gtt.h	61
-rw-r--r--	drivers/gpu/drm/gma500/mmu.c	858
3 files changed, 1419 insertions, 0 deletions
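
For illustration only (not part of the patch): a minimal sketch of how a caller might use the new GTT range allocator added below. The helper is hypothetical; a stolen-backed range is shown because it is permanently resident in the GTT and needs no pin, whereas a GEM-backed range (backed=0) would additionally need its embedded GEM object initialised before psb_gtt_pin() can attach the backing pages.

/* Hypothetical caller: carve a scanout buffer out of stolen memory. */
static struct gtt_range *example_alloc_stolen(struct drm_device *dev, int size)
{
	struct gtt_range *backing;

	/* backed=1: allocate from the stolen part of the GTT; such ranges
	 * are already mapped, so psb_gtt_pin() is effectively a no-op. */
	backing = psb_gtt_alloc_range(dev, size, "fb", 1);
	if (backing == NULL)
		return NULL;

	/* backing->offset is the GTT offset to program into the hardware;
	 * release with psb_gtt_free_range(dev, backing) when finished. */
	return backing;
}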
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
new file mode 100644
index 000000000000..461ead251bbd
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -0,0 +1,500 @@
1/*
2 * Copyright (c) 2007, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
19 * Alan Cox <alan@linux.intel.com>
20 */
21
22#include <drm/drmP.h>
23#include "psb_drv.h"
24
25
26/*
27 * GTT resource allocator - manage page mappings in GTT space
28 */
29
30/**
31 * psb_gtt_mask_pte - generate a GTT PTE
32 * @pfn: page number to encode
33 * @type: type of memory in the GTT
34 *
35 * Build the page table entry value for a page of the given memory type.
36 */
37static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
38{
39 uint32_t mask = PSB_PTE_VALID;
40
41 if (type & PSB_MMU_CACHED_MEMORY)
42 mask |= PSB_PTE_CACHED;
43 if (type & PSB_MMU_RO_MEMORY)
44 mask |= PSB_PTE_RO;
45 if (type & PSB_MMU_WO_MEMORY)
46 mask |= PSB_PTE_WO;
47
48 return (pfn << PAGE_SHIFT) | mask;
49}
50
51/**
52 * psb_gtt_entry - find the GTT entries for a gtt_range
53 * @dev: our DRM device
54 * @r: our GTT range
55 *
56 * Given a gtt_range object, return a pointer to the GTT page table
57 * entries for this gtt_range.
58 */
59u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
60{
61 struct drm_psb_private *dev_priv = dev->dev_private;
62 unsigned long offset;
63
64 offset = r->resource.start - dev_priv->gtt_mem->start;
65
66 return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
67}
68
69/**
70 * psb_gtt_insert - put an object into the GTT
71 * @dev: our DRM device
72 * @r: our GTT range
73 *
74 * Take our preallocated GTT range and insert the GEM object into
75 * the GTT.
76 *
77 * FIXME: gtt lock ?
78 */
79static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
80{
81 u32 *gtt_slot, pte;
82 struct page **pages;
83 int i;
84
85 if (r->pages == NULL) {
86 WARN_ON(1);
87 return -EINVAL;
88 }
89
90 WARN_ON(r->stolen); /* refcount these maybe ? */
91
92 gtt_slot = psb_gtt_entry(dev, r);
93 pages = r->pages;
94
95 /* Make sure changes are visible to the GPU */
96 set_pages_array_uc(pages, r->npage);
97
98 /* Write our page entries into the GTT itself */
99 for (i = 0; i < r->npage; i++) {
100 pte = psb_gtt_mask_pte(page_to_pfn(*pages++), 0/*type*/);
101 iowrite32(pte, gtt_slot++);
102 }
103 /* Make sure all the entries are set before we return */
104 ioread32(gtt_slot - 1);
105 return 0;
106}
107
108/**
109 * psb_gtt_remove - remove an object from the GTT
110 * @dev: our DRM device
111 * @r: our GTT range
112 *
113 * Remove a preallocated GTT range from the GTT. Overwrite all of its
114 * page table entries with entries pointing at the scratch page.
115 */
116
117static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
118{
119 struct drm_psb_private *dev_priv = dev->dev_private;
120 u32 *gtt_slot, pte;
121 int i;
122
123 WARN_ON(r->stolen);
124
125 gtt_slot = psb_gtt_entry(dev, r);
126 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
127
128 for (i = 0; i < r->npage; i++)
129 iowrite32(pte, gtt_slot++);
130 ioread32(gtt_slot - 1);
131 set_pages_array_wb(r->pages, r->npage);
132}
133
134/**
135 * psb_gtt_attach_pages - attach and pin GEM pages
136 * @gt: the gtt range
137 *
138 * Pin and build an in-kernel list of the pages that back our GEM object.
139 * While we hold this pin the pages cannot be swapped out.
140 */
141static int psb_gtt_attach_pages(struct gtt_range *gt)
142{
143 struct inode *inode;
144 struct address_space *mapping;
145 int i;
146 struct page *p;
147 int pages = gt->gem.size / PAGE_SIZE;
148
149 WARN_ON(gt->pages);
150
151 /* This is the shared memory object that backs the GEM resource */
152 inode = gt->gem.filp->f_path.dentry->d_inode;
153 mapping = inode->i_mapping;
154
155 gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
156 if (gt->pages == NULL)
157 return -ENOMEM;
158 gt->npage = pages;
159
160 for (i = 0; i < pages; i++) {
161 /* FIXME: review flags later */
162 p = read_cache_page_gfp(mapping, i,
163 __GFP_COLD | GFP_KERNEL);
164 if (IS_ERR(p))
165 goto err;
166 gt->pages[i] = p;
167 }
168 return 0;
169
170err:
171 while (i--)
172 page_cache_release(gt->pages[i]);
173 kfree(gt->pages);
174 gt->pages = NULL;
175 return PTR_ERR(p);
176}
177
178/**
179 * psb_gtt_detach_pages - detach and unpin GEM pages
180 * @gt: the gtt range
181 *
182 * Undo the effect of psb_gtt_attach_pages. At this point the pages
183 * must have been removed from the GTT, as they can now be paged out
184 * and their bus addresses may change.
185 */
186static void psb_gtt_detach_pages(struct gtt_range *gt)
187{
188 int i;
189 for (i = 0; i < gt->npage; i++) {
190 /* FIXME: do we need to force dirty */
191 set_page_dirty(gt->pages[i]);
192 page_cache_release(gt->pages[i]);
193 }
194 kfree(gt->pages);
195 gt->pages = NULL;
196}
197
198/**
199 * psb_gtt_pin - pin pages into the GTT
200 * @gt: range to pin
201 *
202 * Pin a set of pages into the GTT. The pins are refcounted so that
203 * multiple pins need multiple unpins to undo.
204 *
205 * Non-GEM backed objects treat this as a no-op, as they are always
206 * resident in the GTT.
207 */
208int psb_gtt_pin(struct gtt_range *gt)
209{
210 int ret = 0;
211 struct drm_device *dev = gt->gem.dev;
212 struct drm_psb_private *dev_priv = dev->dev_private;
213
214 mutex_lock(&dev_priv->gtt_mutex);
215
216 if (gt->in_gart == 0 && gt->stolen == 0) {
217 ret = psb_gtt_attach_pages(gt);
218 if (ret < 0)
219 goto out;
220 ret = psb_gtt_insert(dev, gt);
221 if (ret < 0) {
222 psb_gtt_detach_pages(gt);
223 goto out;
224 }
225 }
226 gt->in_gart++;
227out:
228 mutex_unlock(&dev_priv->gtt_mutex);
229 return ret;
230}
231
232/**
233 * psb_gtt_unpin - Drop a GTT pin requirement
234 * @gt: range to pin
235 *
236 * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
237 * is removed from the GTT, which also drops the page references and
238 * allows the VM to clean up or page the memory out.
239 *
240 * Non-GEM backed objects treat this as a no-op, as they are always
241 * resident in the GTT.
242 */
243void psb_gtt_unpin(struct gtt_range *gt)
244{
245 struct drm_device *dev = gt->gem.dev;
246 struct drm_psb_private *dev_priv = dev->dev_private;
247
248 mutex_lock(&dev_priv->gtt_mutex);
249
250 WARN_ON(!gt->in_gart);
251
252 gt->in_gart--;
253 if (gt->in_gart == 0 && gt->stolen == 0) {
254 psb_gtt_remove(dev, gt);
255 psb_gtt_detach_pages(gt);
256 }
257 mutex_unlock(&dev_priv->gtt_mutex);
258}
259
260/*
261 * GTT resource allocator - allocate and manage GTT address space
262 */
263
264/**
265 * psb_gtt_alloc_range - allocate GTT address space
266 * @dev: Our DRM device
267 * @len: length (bytes) of address space required
268 * @name: resource name
269 * @backed: resource should be backed by stolen pages
270 *
271 * Ask the kernel core to find us a suitable range of addresses
272 * to use for a GTT mapping.
273 *
274 * Returns a gtt_range structure describing the object, or NULL on
275 * error. On successful return the resource is both allocated and marked
276 * as in use.
277 */
278struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
279 const char *name, int backed)
280{
281 struct drm_psb_private *dev_priv = dev->dev_private;
282 struct gtt_range *gt;
283 struct resource *r = dev_priv->gtt_mem;
284 int ret;
285 unsigned long start, end;
286
287 if (backed) {
288 /* The start of the GTT is the stolen pages */
289 start = r->start;
290 end = r->start + dev_priv->gtt.stolen_size - 1;
291 } else {
292 /* The rest we will use for GEM backed objects */
293 start = r->start + dev_priv->gtt.stolen_size;
294 end = r->end;
295 }
296
297 gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
298 if (gt == NULL)
299 return NULL;
300 gt->resource.name = name;
301 gt->stolen = backed;
302 gt->in_gart = backed;
303 /* Ensure this is set for non GEM objects */
304 gt->gem.dev = dev;
305 ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
306 len, start, end, PAGE_SIZE, NULL, NULL);
307 if (ret == 0) {
308 gt->offset = gt->resource.start - r->start;
309 return gt;
310 }
311 kfree(gt);
312 return NULL;
313}
314
315/**
316 * psb_gtt_free_range - release GTT address space
317 * @dev: our DRM device
318 * @gt: a mapping created with psb_gtt_alloc_range
319 *
320 * Release a resource that was allocated with psb_gtt_alloc_range. If the
321 * object has been pinned by mmap users, that pin is currently dropped here.
322 */
323void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
324{
325 /* Undo the mmap pin if we are destroying the object */
326 if (gt->mmapping) {
327 psb_gtt_unpin(gt);
328 gt->mmapping = 0;
329 }
330 WARN_ON(gt->in_gart && !gt->stolen);
331 release_resource(&gt->resource);
332 kfree(gt);
333}
334
335void psb_gtt_alloc(struct drm_device *dev)
336{
337 struct drm_psb_private *dev_priv = dev->dev_private;
338 init_rwsem(&dev_priv->gtt.sem);
339}
340
341void psb_gtt_takedown(struct drm_device *dev)
342{
343 struct drm_psb_private *dev_priv = dev->dev_private;
344
345 if (dev_priv->gtt_map) {
346 iounmap(dev_priv->gtt_map);
347 dev_priv->gtt_map = NULL;
348 }
349 if (dev_priv->gtt_initialized) {
350 pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
351 dev_priv->gmch_ctrl);
352 PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
353 (void) PSB_RVDC32(PSB_PGETBL_CTL);
354 }
355 if (dev_priv->vram_addr)
356 iounmap(dev_priv->vram_addr);
357}
358
359int psb_gtt_init(struct drm_device *dev, int resume)
360{
361 struct drm_psb_private *dev_priv = dev->dev_private;
362 unsigned gtt_pages;
363 unsigned long stolen_size, vram_stolen_size;
364 unsigned i, num_pages;
365 unsigned pfn_base;
366 uint32_t vram_pages;
367 uint32_t dvmt_mode = 0;
368 struct psb_gtt *pg;
369
370 int ret = 0;
371 uint32_t pte;
372
373 mutex_init(&dev_priv->gtt_mutex);
374
375 psb_gtt_alloc(dev);
376 pg = &dev_priv->gtt;
377
378 /* Enable the GTT */
379 pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
380 pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
381 dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
382
383 dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
384 PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
385 (void) PSB_RVDC32(PSB_PGETBL_CTL);
386
387 /* The root resource we allocate address space from */
388 dev_priv->gtt_initialized = 1;
389
390 pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
391
392 /*
393 * FIXME: the video MMU has a hardware bug when accessing 0x0D0000000,
394 * so make the GATT start at 0xE0000000
395 */
396 pg->mmu_gatt_start = 0xE0000000;
397
398 pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
399 gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
400 >> PAGE_SHIFT;
401 /* CDV workaround */
402 if (pg->gtt_start == 0 || gtt_pages == 0) {
403 dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
404 gtt_pages = 64;
405 pg->gtt_start = dev_priv->pge_ctl;
406 }
407
408 pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
409 pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
410 >> PAGE_SHIFT;
411 dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
412
413 if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
414 static struct resource fudge; /* Preferably peppermint */
415
416 /* This can occur on CDV SDV systems. Fudge it in this case.
417 We really don't care what imaginary space is being allocated
418 at this point */
419 dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
420 pg->gatt_start = 0x40000000;
421 pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
422 fudge.start = 0x40000000;
423 fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
424 fudge.name = "fudge";
425 fudge.flags = IORESOURCE_MEM;
426 dev_priv->gtt_mem = &fudge;
427 }
428
429 pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
430 vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
431 - PAGE_SIZE;
432
433 stolen_size = vram_stolen_size;
434
435 printk(KERN_INFO "Stolen memory information\n");
436 printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
437 printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
438 vram_stolen_size/1024);
439 dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
440 printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
441 (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
442
443 if (resume && (gtt_pages != pg->gtt_pages) &&
444 (stolen_size != pg->stolen_size)) {
445 dev_err(dev->dev, "GTT resume error.\n");
446 ret = -EINVAL;
447 goto out_err;
448 }
449
450 pg->gtt_pages = gtt_pages;
451 pg->stolen_size = stolen_size;
452 dev_priv->vram_stolen_size = vram_stolen_size;
453
454 /*
455 * Map the GTT and the stolen memory area
456 */
457 dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
458 gtt_pages << PAGE_SHIFT);
459 if (!dev_priv->gtt_map) {
460 dev_err(dev->dev, "Failure to map gtt.\n");
461 ret = -ENOMEM;
462 goto out_err;
463 }
464
465 dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
466 if (!dev_priv->vram_addr) {
467 dev_err(dev->dev, "Failure to map stolen base.\n");
468 ret = -ENOMEM;
469 goto out_err;
470 }
471
472 /*
473 * Insert vram stolen pages into the GTT
474 */
475
476 pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
477 vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
478 printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
479 num_pages, pfn_base << PAGE_SHIFT, 0);
480 for (i = 0; i < num_pages; ++i) {
481 pte = psb_gtt_mask_pte(pfn_base + i, 0);
482 iowrite32(pte, dev_priv->gtt_map + i);
483 }
484
485 /*
486 * Init rest of GTT to the scratch page to avoid accidents or scribbles
487 */
488
489 pfn_base = page_to_pfn(dev_priv->scratch_page);
490 pte = psb_gtt_mask_pte(pfn_base, 0);
491 for (; i < gtt_pages; ++i)
492 iowrite32(pte, dev_priv->gtt_map + i);
493
494 (void) ioread32(dev_priv->gtt_map + i - 1);
495 return 0;
496
497out_err:
498 psb_gtt_takedown(dev);
499 return ret;
500}
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
new file mode 100644
index 000000000000..e0e1cb6f9bd6
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -0,0 +1,61 @@
1/**************************************************************************
2 * Copyright (c) 2007-2008, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#ifndef _PSB_GTT_H_
21#define _PSB_GTT_H_
22
23#include <drm/drmP.h>
24
25/* This wants cleaning up with respect to the psb_dev and unneeded stuff */
26struct psb_gtt {
27 uint32_t gatt_start;
28 uint32_t mmu_gatt_start;
29 uint32_t gtt_start;
30 uint32_t gtt_phys_start;
31 unsigned gtt_pages;
32 unsigned gatt_pages;
33 unsigned long stolen_size;
34 unsigned long vram_stolen_size;
35 struct rw_semaphore sem;
36};
37
38/* Exported functions */
39extern int psb_gtt_init(struct drm_device *dev, int resume);
40extern void psb_gtt_takedown(struct drm_device *dev);
41
42/* Each gtt_range describes an allocation in the GTT area */
43struct gtt_range {
44 struct resource resource; /* Resource for our allocation */
45 u32 offset; /* GTT offset of our object */
46 struct drm_gem_object gem; /* GEM high level stuff */
47 int in_gart; /* Currently in the GART (ref ct) */
48 bool stolen; /* Backed from stolen RAM */
49 bool mmapping; /* Is mmappable */
50 struct page **pages; /* Backing pages if present */
51 int npage; /* Number of backing pages */
52};
53
54extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
55 const char *name, int backed);
56extern void psb_gtt_kref_put(struct gtt_range *gt);
57extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
58extern int psb_gtt_pin(struct gtt_range *gt);
59extern void psb_gtt_unpin(struct gtt_range *gt);
60
61#endif
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
new file mode 100644
index 000000000000..c904d73b1de3
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -0,0 +1,858 @@
1/**************************************************************************
2 * Copyright (c) 2007, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 **************************************************************************/
18#include <drm/drmP.h>
19#include "psb_drv.h"
20#include "psb_reg.h"
21
22/*
23 * Code for the SGX MMU:
24 */
25
26/*
27 * clflush on one processor only:
28 * clflush should apparently flush the cache line on all processors in an
29 * SMP system.
30 */
31
32/*
33 * kmap atomic:
34 * The usage of the slots must be completely encapsulated within a spinlock, and
35 * no other functions that may be using the locks for other purposes may be
36 * called from within the locked region.
37 * Since the slots are per processor, this will guarantee that we are the only
38 * user.
39 */
40
41/*
42 * TODO: Inserting ptes from an interrupt handler:
43 * This may be desirable for some SGX functionality where the GPU can fault in
44 * needed pages. For that, we need to make an atomic insert_pages function, that
45 * may fail.
46 * If it fails, the caller needs to insert the page using a workqueue function,
47 * but on average it should be fast.
48 */
49
50struct psb_mmu_driver {
51 /* protects driver- and pd structures. Always take in read mode
52 * before taking the page table spinlock.
53 */
54 struct rw_semaphore sem;
55
56 /* protects page directory tables, page tables
57 * and pt structures.
58 */
59 spinlock_t lock;
60
61 atomic_t needs_tlbflush;
62
63 uint8_t __iomem *register_map;
64 struct psb_mmu_pd *default_pd;
65 /*uint32_t bif_ctrl;*/
66 int has_clflush;
67 int clflush_add;
68 unsigned long clflush_mask;
69
70 struct drm_psb_private *dev_priv;
71};
72
73struct psb_mmu_pd;
74
75struct psb_mmu_pt {
76 struct psb_mmu_pd *pd;
77 uint32_t index;
78 uint32_t count;
79 struct page *p;
80 uint32_t *v;
81};
82
83struct psb_mmu_pd {
84 struct psb_mmu_driver *driver;
85 int hw_context;
86 struct psb_mmu_pt **tables;
87 struct page *p;
88 struct page *dummy_pt;
89 struct page *dummy_page;
90 uint32_t pd_mask;
91 uint32_t invalid_pde;
92 uint32_t invalid_pte;
93};
94
95static inline uint32_t psb_mmu_pt_index(uint32_t offset)
96{
97 return (offset >> PSB_PTE_SHIFT) & 0x3FF;
98}
99
100static inline uint32_t psb_mmu_pd_index(uint32_t offset)
101{
102 return offset >> PSB_PDE_SHIFT;
103}
104
105static inline void psb_clflush(void *addr)
106{
107 __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
108}
109
110static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
111 void *addr)
112{
113 if (!driver->has_clflush)
114 return;
115
116 mb();
117 psb_clflush(addr);
118 mb();
119}
120
121static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
122{
123 uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
124 uint32_t clflush_count = PAGE_SIZE / clflush_add;
125 int i;
126 uint8_t *clf;
127
128 clf = kmap_atomic(page, KM_USER0);
129 mb();
130 for (i = 0; i < clflush_count; ++i) {
131 psb_clflush(clf);
132 clf += clflush_add;
133 }
134 mb();
135 kunmap_atomic(clf, KM_USER0);
136}
137
138static void psb_pages_clflush(struct psb_mmu_driver *driver,
139 struct page *page[], unsigned long num_pages)
140{
141 int i;
142
143 if (!driver->has_clflush)
144 return ;
145
146 for (i = 0; i < num_pages; i++)
147 psb_page_clflush(driver, *page++);
148}
149
150static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
151 int force)
152{
153 atomic_set(&driver->needs_tlbflush, 0);
154}
155
156static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
157{
158 down_write(&driver->sem);
159 psb_mmu_flush_pd_locked(driver, force);
160 up_write(&driver->sem);
161}
162
163void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
164{
165 if (rc_prot)
166 down_write(&driver->sem);
167 if (rc_prot)
168 up_write(&driver->sem);
169}
170
171void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
172{
173 /*ttm_tt_cache_flush(&pd->p, 1);*/
174 psb_pages_clflush(pd->driver, &pd->p, 1);
175 down_write(&pd->driver->sem);
176 wmb();
177 psb_mmu_flush_pd_locked(pd->driver, 1);
178 pd->hw_context = hw_context;
179 up_write(&pd->driver->sem);
180
181}
182
183static inline unsigned long psb_pd_addr_end(unsigned long addr,
184 unsigned long end)
185{
186
187 addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
188 return (addr < end) ? addr : end;
189}
190
191static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
192{
193 uint32_t mask = PSB_PTE_VALID;
194
195 if (type & PSB_MMU_CACHED_MEMORY)
196 mask |= PSB_PTE_CACHED;
197 if (type & PSB_MMU_RO_MEMORY)
198 mask |= PSB_PTE_RO;
199 if (type & PSB_MMU_WO_MEMORY)
200 mask |= PSB_PTE_WO;
201
202 return (pfn << PAGE_SHIFT) | mask;
203}
204
205struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
206 int trap_pagefaults, int invalid_type)
207{
208 struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
209 uint32_t *v;
210 int i;
211
212 if (!pd)
213 return NULL;
214
215 pd->p = alloc_page(GFP_DMA32);
216 if (!pd->p)
217 goto out_err1;
218 pd->dummy_pt = alloc_page(GFP_DMA32);
219 if (!pd->dummy_pt)
220 goto out_err2;
221 pd->dummy_page = alloc_page(GFP_DMA32);
222 if (!pd->dummy_page)
223 goto out_err3;
224
225 if (!trap_pagefaults) {
226 pd->invalid_pde =
227 psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
228 invalid_type);
229 pd->invalid_pte =
230 psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
231 invalid_type);
232 } else {
233 pd->invalid_pde = 0;
234 pd->invalid_pte = 0;
235 }
236
237 v = kmap(pd->dummy_pt);
238 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
239 v[i] = pd->invalid_pte;
240
241 kunmap(pd->dummy_pt);
242
243 v = kmap(pd->p);
244 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
245 v[i] = pd->invalid_pde;
246
247 kunmap(pd->p);
248
249 clear_page(kmap(pd->dummy_page));
250 kunmap(pd->dummy_page);
251
252 pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
253 if (!pd->tables)
254 goto out_err4;
255
256 pd->hw_context = -1;
257 pd->pd_mask = PSB_PTE_VALID;
258 pd->driver = driver;
259
260 return pd;
261
262out_err4:
263 __free_page(pd->dummy_page);
264out_err3:
265 __free_page(pd->dummy_pt);
266out_err2:
267 __free_page(pd->p);
268out_err1:
269 kfree(pd);
270 return NULL;
271}
272
273void psb_mmu_free_pt(struct psb_mmu_pt *pt)
274{
275 __free_page(pt->p);
276 kfree(pt);
277}
278
279void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
280{
281 struct psb_mmu_driver *driver = pd->driver;
282 struct psb_mmu_pt *pt;
283 int i;
284
285 down_write(&driver->sem);
286 if (pd->hw_context != -1)
287 psb_mmu_flush_pd_locked(driver, 1);
288
289 /* Should take the spinlock here, but we don't need to do that
290 since we have the semaphore in write mode. */
291
292 for (i = 0; i < 1024; ++i) {
293 pt = pd->tables[i];
294 if (pt)
295 psb_mmu_free_pt(pt);
296 }
297
298 vfree(pd->tables);
299 __free_page(pd->dummy_page);
300 __free_page(pd->dummy_pt);
301 __free_page(pd->p);
302 kfree(pd);
303 up_write(&driver->sem);
304}
305
306static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
307{
308 struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
309 void *v;
310 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
311 uint32_t clflush_count = PAGE_SIZE / clflush_add;
312 spinlock_t *lock = &pd->driver->lock;
313 uint8_t *clf;
314 uint32_t *ptes;
315 int i;
316
317 if (!pt)
318 return NULL;
319
320 pt->p = alloc_page(GFP_DMA32);
321 if (!pt->p) {
322 kfree(pt);
323 return NULL;
324 }
325
326 spin_lock(lock);
327
328 v = kmap_atomic(pt->p, KM_USER0);
329 clf = (uint8_t *) v;
330 ptes = (uint32_t *) v;
331 for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
332 *ptes++ = pd->invalid_pte;
333
334
335 if (pd->driver->has_clflush && pd->hw_context != -1) {
336 mb();
337 for (i = 0; i < clflush_count; ++i) {
338 psb_clflush(clf);
339 clf += clflush_add;
340 }
341 mb();
342 }
343
344 kunmap_atomic(v, KM_USER0);
345 spin_unlock(lock);
346
347 pt->count = 0;
348 pt->pd = pd;
349 pt->index = 0;
350
351 return pt;
352}
353
354struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
355 unsigned long addr)
356{
357 uint32_t index = psb_mmu_pd_index(addr);
358 struct psb_mmu_pt *pt;
359 uint32_t *v;
360 spinlock_t *lock = &pd->driver->lock;
361
362 spin_lock(lock);
363 pt = pd->tables[index];
364 while (!pt) {
365 spin_unlock(lock);
366 pt = psb_mmu_alloc_pt(pd);
367 if (!pt)
368 return NULL;
369 spin_lock(lock);
370
371 if (pd->tables[index]) {
372 spin_unlock(lock);
373 psb_mmu_free_pt(pt);
374 spin_lock(lock);
375 pt = pd->tables[index];
376 continue;
377 }
378
379 v = kmap_atomic(pd->p, KM_USER0);
380 pd->tables[index] = pt;
381 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
382 pt->index = index;
383 kunmap_atomic((void *) v, KM_USER0);
384
385 if (pd->hw_context != -1) {
386 psb_mmu_clflush(pd->driver, (void *) &v[index]);
387 atomic_set(&pd->driver->needs_tlbflush, 1);
388 }
389 }
390 pt->v = kmap_atomic(pt->p, KM_USER0);
391 return pt;
392}
393
394static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
395 unsigned long addr)
396{
397 uint32_t index = psb_mmu_pd_index(addr);
398 struct psb_mmu_pt *pt;
399 spinlock_t *lock = &pd->driver->lock;
400
401 spin_lock(lock);
402 pt = pd->tables[index];
403 if (!pt) {
404 spin_unlock(lock);
405 return NULL;
406 }
407 pt->v = kmap_atomic(pt->p, KM_USER0);
408 return pt;
409}
410
411static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
412{
413 struct psb_mmu_pd *pd = pt->pd;
414 uint32_t *v;
415
416 kunmap_atomic(pt->v, KM_USER0);
417 if (pt->count == 0) {
418 v = kmap_atomic(pd->p, KM_USER0);
419 v[pt->index] = pd->invalid_pde;
420 pd->tables[pt->index] = NULL;
421
422 if (pd->hw_context != -1) {
423 psb_mmu_clflush(pd->driver,
424 (void *) &v[pt->index]);
425 atomic_set(&pd->driver->needs_tlbflush, 1);
426 }
427 kunmap_atomic(v, KM_USER0);
428 spin_unlock(&pd->driver->lock);
429 psb_mmu_free_pt(pt);
430 return;
431 }
432 spin_unlock(&pd->driver->lock);
433}
434
435static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
436 unsigned long addr, uint32_t pte)
437{
438 pt->v[psb_mmu_pt_index(addr)] = pte;
439}
440
441static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
442 unsigned long addr)
443{
444 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
445}
446
447
448void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
449 uint32_t mmu_offset, uint32_t gtt_start,
450 uint32_t gtt_pages)
451{
452 uint32_t *v;
453 uint32_t start = psb_mmu_pd_index(mmu_offset);
454 struct psb_mmu_driver *driver = pd->driver;
455 int num_pages = gtt_pages;
456
457 down_read(&driver->sem);
458 spin_lock(&driver->lock);
459
460 v = kmap_atomic(pd->p, KM_USER0);
461 v += start;
462
463 while (gtt_pages--) {
464 *v++ = gtt_start | pd->pd_mask;
465 gtt_start += PAGE_SIZE;
466 }
467
468 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
469 psb_pages_clflush(pd->driver, &pd->p, num_pages);
470 kunmap_atomic(v, KM_USER0);
471 spin_unlock(&driver->lock);
472
473 if (pd->hw_context != -1)
474 atomic_set(&pd->driver->needs_tlbflush, 1);
475
476 up_read(&pd->driver->sem);
477 psb_mmu_flush_pd(pd->driver, 0);
478}
479
480struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
481{
482 struct psb_mmu_pd *pd;
483
484 /* down_read(&driver->sem); */
485 pd = driver->default_pd;
486 /* up_read(&driver->sem); */
487
488 return pd;
489}
490
491/* Returns the physical address of the PD shared by sgx/msvdx */
492uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
493{
494 struct psb_mmu_pd *pd;
495
496 pd = psb_mmu_get_default_pd(driver);
497 return page_to_pfn(pd->p) << PAGE_SHIFT;
498}
499
500void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
501{
502 psb_mmu_free_pagedir(driver->default_pd);
503 kfree(driver);
504}
505
506struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
507 int trap_pagefaults,
508 int invalid_type,
509 struct drm_psb_private *dev_priv)
510{
511 struct psb_mmu_driver *driver;
512
513 driver = kmalloc(sizeof(*driver), GFP_KERNEL);
514
515 if (!driver)
516 return NULL;
517 driver->dev_priv = dev_priv;
518
519 driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
520 invalid_type);
521 if (!driver->default_pd)
522 goto out_err1;
523
524 spin_lock_init(&driver->lock);
525 init_rwsem(&driver->sem);
526 down_write(&driver->sem);
527 driver->register_map = registers;
528 atomic_set(&driver->needs_tlbflush, 1);
529
530 driver->has_clflush = 0;
531
532 if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
533 uint32_t tfms, misc, cap0, cap4, clflush_size;
534
535 /*
536 * clflush size is determined at kernel setup for x86_64
537 * but not for i386. We have to do it here.
538 */
539
540 cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
541 clflush_size = ((misc >> 8) & 0xff) * 8;
542 driver->has_clflush = 1;
543 driver->clflush_add =
544 PAGE_SIZE * clflush_size / sizeof(uint32_t);
545 driver->clflush_mask = driver->clflush_add - 1;
546 driver->clflush_mask = ~driver->clflush_mask;
547 }
548
549 up_write(&driver->sem);
550 return driver;
551
552out_err1:
553 kfree(driver);
554 return NULL;
555}
556
557static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
558 unsigned long address, uint32_t num_pages,
559 uint32_t desired_tile_stride,
560 uint32_t hw_tile_stride)
561{
562 struct psb_mmu_pt *pt;
563 uint32_t rows = 1;
564 uint32_t i;
565 unsigned long addr;
566 unsigned long end;
567 unsigned long next;
568 unsigned long add;
569 unsigned long row_add;
570 unsigned long clflush_add = pd->driver->clflush_add;
571 unsigned long clflush_mask = pd->driver->clflush_mask;
572
573 if (!pd->driver->has_clflush) {
574 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
575 psb_pages_clflush(pd->driver, &pd->p, num_pages);
576 return;
577 }
578
579 if (hw_tile_stride)
580 rows = num_pages / desired_tile_stride;
581 else
582 desired_tile_stride = num_pages;
583
584 add = desired_tile_stride << PAGE_SHIFT;
585 row_add = hw_tile_stride << PAGE_SHIFT;
586 mb();
587 for (i = 0; i < rows; ++i) {
588
589 addr = address;
590 end = addr + add;
591
592 do {
593 next = psb_pd_addr_end(addr, end);
594 pt = psb_mmu_pt_map_lock(pd, addr);
595 if (!pt)
596 continue;
597 do {
598 psb_clflush(&pt->v
599 [psb_mmu_pt_index(addr)]);
600 } while (addr +=
601 clflush_add,
602 (addr & clflush_mask) < next);
603
604 psb_mmu_pt_unmap_unlock(pt);
605 } while (addr = next, next != end);
606 address += row_add;
607 }
608 mb();
609}
610
611void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
612 unsigned long address, uint32_t num_pages)
613{
614 struct psb_mmu_pt *pt;
615 unsigned long addr;
616 unsigned long end;
617 unsigned long next;
618 unsigned long f_address = address;
619
620 down_read(&pd->driver->sem);
621
622 addr = address;
623 end = addr + (num_pages << PAGE_SHIFT);
624
625 do {
626 next = psb_pd_addr_end(addr, end);
627 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
628 if (!pt)
629 goto out;
630 do {
631 psb_mmu_invalidate_pte(pt, addr);
632 --pt->count;
633 } while (addr += PAGE_SIZE, addr < next);
634 psb_mmu_pt_unmap_unlock(pt);
635
636 } while (addr = next, next != end);
637
638out:
639 if (pd->hw_context != -1)
640 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
641
642 up_read(&pd->driver->sem);
643
644 if (pd->hw_context != -1)
645 psb_mmu_flush(pd->driver, 0);
646
647 return;
648}
649
650void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
651 uint32_t num_pages, uint32_t desired_tile_stride,
652 uint32_t hw_tile_stride)
653{
654 struct psb_mmu_pt *pt;
655 uint32_t rows = 1;
656 uint32_t i;
657 unsigned long addr;
658 unsigned long end;
659 unsigned long next;
660 unsigned long add;
661 unsigned long row_add;
662 unsigned long f_address = address;
663
664 if (hw_tile_stride)
665 rows = num_pages / desired_tile_stride;
666 else
667 desired_tile_stride = num_pages;
668
669 add = desired_tile_stride << PAGE_SHIFT;
670 row_add = hw_tile_stride << PAGE_SHIFT;
671
672 /* down_read(&pd->driver->sem); */
673
674 /* Make sure we only need to flush this processor's cache */
675
676 for (i = 0; i < rows; ++i) {
677
678 addr = address;
679 end = addr + add;
680
681 do {
682 next = psb_pd_addr_end(addr, end);
683 pt = psb_mmu_pt_map_lock(pd, addr);
684 if (!pt)
685 continue;
686 do {
687 psb_mmu_invalidate_pte(pt, addr);
688 --pt->count;
689
690 } while (addr += PAGE_SIZE, addr < next);
691 psb_mmu_pt_unmap_unlock(pt);
692
693 } while (addr = next, next != end);
694 address += row_add;
695 }
696 if (pd->hw_context != -1)
697 psb_mmu_flush_ptes(pd, f_address, num_pages,
698 desired_tile_stride, hw_tile_stride);
699
700 /* up_read(&pd->driver->sem); */
701
702 if (pd->hw_context != -1)
703 psb_mmu_flush(pd->driver, 0);
704}
705
706int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
707 unsigned long address, uint32_t num_pages,
708 int type)
709{
710 struct psb_mmu_pt *pt;
711 uint32_t pte;
712 unsigned long addr;
713 unsigned long end;
714 unsigned long next;
715 unsigned long f_address = address;
716 int ret = 0;
717
718 down_read(&pd->driver->sem);
719
720 addr = address;
721 end = addr + (num_pages << PAGE_SHIFT);
722
723 do {
724 next = psb_pd_addr_end(addr, end);
725 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
726 if (!pt) {
727 ret = -ENOMEM;
728 goto out;
729 }
730 do {
731 pte = psb_mmu_mask_pte(start_pfn++, type);
732 psb_mmu_set_pte(pt, addr, pte);
733 pt->count++;
734 } while (addr += PAGE_SIZE, addr < next);
735 psb_mmu_pt_unmap_unlock(pt);
736
737 } while (addr = next, next != end);
738
739out:
740 if (pd->hw_context != -1)
741 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
742
743 up_read(&pd->driver->sem);
744
745 if (pd->hw_context != -1)
746 psb_mmu_flush(pd->driver, 1);
747
748 return ret;
749}
750
751int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
752 unsigned long address, uint32_t num_pages,
753 uint32_t desired_tile_stride,
754 uint32_t hw_tile_stride, int type)
755{
756 struct psb_mmu_pt *pt;
757 uint32_t rows = 1;
758 uint32_t i;
759 uint32_t pte;
760 unsigned long addr;
761 unsigned long end;
762 unsigned long next;
763 unsigned long add;
764 unsigned long row_add;
765 unsigned long f_address = address;
766 int ret = 0;
767
768 if (hw_tile_stride) {
769 if (num_pages % desired_tile_stride != 0)
770 return -EINVAL;
771 rows = num_pages / desired_tile_stride;
772 } else {
773 desired_tile_stride = num_pages;
774 }
775
776 add = desired_tile_stride << PAGE_SHIFT;
777 row_add = hw_tile_stride << PAGE_SHIFT;
778
779 down_read(&pd->driver->sem);
780
781 for (i = 0; i < rows; ++i) {
782
783 addr = address;
784 end = addr + add;
785
786 do {
787 next = psb_pd_addr_end(addr, end);
788 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
789 if (!pt) {
790 ret = -ENOMEM;
791 goto out;
792 }
793 do {
794 pte =
795 psb_mmu_mask_pte(page_to_pfn(*pages++),
796 type);
797 psb_mmu_set_pte(pt, addr, pte);
798 pt->count++;
799 } while (addr += PAGE_SIZE, addr < next);
800 psb_mmu_pt_unmap_unlock(pt);
801
802 } while (addr = next, next != end);
803
804 address += row_add;
805 }
806out:
807 if (pd->hw_context != -1)
808 psb_mmu_flush_ptes(pd, f_address, num_pages,
809 desired_tile_stride, hw_tile_stride);
810
811 up_read(&pd->driver->sem);
812
813 if (pd->hw_context != -1)
814 psb_mmu_flush(pd->driver, 1);
815
816 return ret;
817}
818
819int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
820 unsigned long *pfn)
821{
822 int ret;
823 struct psb_mmu_pt *pt;
824 uint32_t tmp;
825 spinlock_t *lock = &pd->driver->lock;
826
827 down_read(&pd->driver->sem);
828 pt = psb_mmu_pt_map_lock(pd, virtual);
829 if (!pt) {
830 uint32_t *v;
831
832 spin_lock(lock);
833 v = kmap_atomic(pd->p, KM_USER0);
834 tmp = v[psb_mmu_pd_index(virtual)];
835 kunmap_atomic(v, KM_USER0);
836 spin_unlock(lock);
837
838 if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
839 !(pd->invalid_pte & PSB_PTE_VALID)) {
840 ret = -EINVAL;
841 goto out;
842 }
843 ret = 0;
844 *pfn = pd->invalid_pte >> PAGE_SHIFT;
845 goto out;
846 }
847 tmp = pt->v[psb_mmu_pt_index(virtual)];
848 if (!(tmp & PSB_PTE_VALID)) {
849 ret = -EINVAL;
850 } else {
851 ret = 0;
852 *pfn = tmp >> PAGE_SHIFT;
853 }
854 psb_mmu_pt_unmap_unlock(pt);
855out:
856 up_read(&pd->driver->sem);
857 return ret;
858}
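
Again for illustration only (not part of the patch): a rough sketch of the MMU side, mapping a set of pages into the SGX MMU's default page directory and tearing it down afterwards. The helper and its arguments are hypothetical; in the real driver the psb_mmu_driver is created once at device initialisation rather than per mapping.

/* Hypothetical helper: map npages pages at gpu_address in the default PD. */
static int example_map_into_mmu(struct drm_psb_private *dev_priv,
				uint8_t __iomem *registers,
				struct page **pages, int npages,
				unsigned long gpu_address)
{
	struct psb_mmu_driver *driver;
	struct psb_mmu_pd *pd;
	int ret;

	/* trap_pagefaults=0: unmapped accesses hit the dummy page rather
	 * than faulting; invalid_type=0 makes those dummy entries uncached. */
	driver = psb_mmu_driver_init(registers, 0, 0, dev_priv);
	if (driver == NULL)
		return -ENOMEM;

	pd = psb_mmu_get_default_pd(driver);
	psb_mmu_set_pd_context(pd, 0);	/* bind the directory to hw context 0 */

	/* No tiling: both stride arguments are 0; type 0 requests plain
	 * uncached, read/write PTEs. */
	ret = psb_mmu_insert_pages(pd, pages, gpu_address, npages, 0, 0, 0);
	if (ret == 0) {
		/* ... the GPU can now access the pages at gpu_address ... */
		psb_mmu_remove_pages(pd, gpu_address, npages, 0, 0);
	}

	psb_mmu_driver_takedown(driver);
	return ret;
}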