path: root/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
author    Supriya <ssharatkumar@nvidia.com>    2014-06-13 03:14:27 -0400
committer Dan Willemsen <dwillemsen@nvidia.com>    2015-03-18 15:10:14 -0400
commit    b7793a493a1fa292a22d5ce84c43ee342b9824b2 (patch)
tree      963d128e317d319d2f53aff96420aec17b732bf6 /drivers/gpu/nvgpu/gm20b/mm_gm20b.c
parent    c32ac10b0bba400c1e83540a20c5ca210fa48613 (diff)
nvgpu: Host side changes to support HS mode
GM20B changes in PMU boot sequence to support booting in HS mode and LS mode

Bug 1509680

Change-Id: I2832eda0efe17dd5e3a8f11dd06e7d4da267be70
Signed-off-by: Supriya <ssharatkumar@nvidia.com>
Reviewed-on: http://git-master/r/423140
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Tested-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Shridhar Rasal <srasal@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/mm_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/mm_gm20b.c  |  63
1 file changed, 58 insertions(+), 5 deletions(-)
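For context before the diff: the host-side piece added in this file is a VPR (Video Protected Region) info-fetch helper for the GM20B MM code, presumably consumed by the PMU/ACR boot path elsewhere in this series when choosing between HS and LS boot. A caller-side sketch, not part of this patch, might look like the following; example_prepare_pmu_boot() and the surrounding flow are assumptions, and only gm20b_mm_mmu_vpr_info_fetch() and struct gk20a come from the code below.

/*
 * Hypothetical caller; illustrative only.
 */
#include "gk20a/gk20a.h"
#include "gm20b/mm_gm20b.h"     /* assumed to declare gm20b_mm_mmu_vpr_info_fetch() */

static int example_prepare_pmu_boot(struct gk20a *g)
{
        int err;

        /* Have the FB MMU latch the current VPR state before loading ucode. */
        err = gm20b_mm_mmu_vpr_info_fetch(g);
        if (err)
                return err;     /* fetch did not complete in time */

        /* ...continue with the HS/LS boot decision and ucode load... */
        return 0;
}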
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
index 67d61569..2c211a57 100644
--- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
@@ -13,9 +13,11 @@
  * more details.
  */
 
+#include <linux/pm_runtime.h>
 #include "gk20a/gk20a.h"
 #include "mm_gm20b.h"
 #include "hw_gmmu_gm20b.h"
+#include "hw_fb_gm20b.h"
 
 static const u32 gmmu_page_sizes[gmmu_nr_page_sizes] = { SZ_4K, SZ_128K };
 static const u32 gmmu_page_shifts[gmmu_nr_page_sizes] = { 12, 17 };
@@ -24,8 +26,8 @@ static const u64 gmmu_page_offset_masks[gmmu_nr_page_sizes] = { 0xfffLL,
 static const u64 gmmu_page_masks[gmmu_nr_page_sizes] = { ~0xfffLL, ~0x1ffffLL };
 
 static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
-                        enum gmmu_pgsz_gk20a pgsz_idx,
-                        u64 first_vaddr, u64 last_vaddr)
+                                enum gmmu_pgsz_gk20a pgsz_idx,
+                                u64 first_vaddr, u64 last_vaddr)
 {
         int err;
         u32 pte_lo, pte_hi;
@@ -39,10 +41,10 @@ static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
         gk20a_dbg_fn("");
 
         pde_range_from_vaddr_range(vm, first_vaddr, last_vaddr,
-                        &pde_lo, &pde_hi);
+                                &pde_lo, &pde_hi);
 
         gk20a_dbg(gpu_dbg_pte, "size_idx=%d, pde_lo=%d, pde_hi=%d",
-                        pgsz_idx, pde_lo, pde_hi);
+                                pgsz_idx, pde_lo, pde_hi);
 
         /* Expect ptes of the same pde */
         BUG_ON(pde_lo != pde_hi);
@@ -185,7 +187,8 @@ static int gm20b_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr,
                         vaddr_pde_start = (u64)i << pde_shift;
                         allocate_gmmu_ptes_sparse(vm, pgsz_idx,
                                         vaddr_pde_start,
-                                        PDE_ADDR_END(vaddr_pde_start, pde_shift));
+                                        PDE_ADDR_END(vaddr_pde_start,
+                                                        pde_shift));
                 } else {
                         /* Check leading and trailing spaces which doesn't fit
                          * into entire pde. */
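The hunk above only re-wraps the PDE_ADDR_END() call across two lines; the surrounding logic hands allocate_gmmu_ptes_sparse() one PDE-aligned span at a time, from vaddr_pde_start up to PDE_ADDR_END(vaddr_pde_start, pde_shift). A standalone sketch of that address arithmetic follows; EXAMPLE_PDE_ADDR_END() is an assumed equivalent of the real macro from the gk20a MM headers, and the shift value is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Assumed equivalent of PDE_ADDR_END(): last byte covered by addr's PDE. */
#define EXAMPLE_PDE_ADDR_END(addr, shift) ((addr) | ((1ULL << (shift)) - 1ULL))

int main(void)
{
        const unsigned int pde_shift = 26;              /* example value only */
        uint64_t vaddr = 0x123450000ULL;
        uint64_t pde_index = vaddr >> pde_shift;        /* PDE covering vaddr */
        uint64_t pde_start = pde_index << pde_shift;    /* first byte of that PDE */
        uint64_t pde_end = EXAMPLE_PDE_ADDR_END(pde_start, pde_shift);

        printf("pde %llu spans 0x%llx .. 0x%llx\n",
               (unsigned long long)pde_index,
               (unsigned long long)pde_start,
               (unsigned long long)pde_end);
        return 0;
}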
@@ -212,6 +215,56 @@ fail:
         return err;
 }
 
+static int gm20b_mm_mmu_vpr_info_fetch_wait(struct gk20a *g,
+                const unsigned int msec)
+{
+        unsigned long timeout;
+
+        if (tegra_platform_is_silicon())
+                timeout = jiffies + msecs_to_jiffies(msec);
+        else
+                timeout = msecs_to_jiffies(msec);
+
+        while (1) {
+                u32 val;
+                val = gk20a_readl(g, fb_mmu_vpr_info_r());
+                if (fb_mmu_vpr_info_fetch_v(val) ==
+                                fb_mmu_vpr_info_fetch_false_v())
+                        break;
+                if (tegra_platform_is_silicon()) {
+                        if (WARN_ON(time_after(jiffies, timeout)))
+                                return -ETIME;
+                } else if (--timeout == 0)
+                        return -ETIME;
+        }
+        return 0;
+}
+
+int gm20b_mm_mmu_vpr_info_fetch(struct gk20a *g)
+{
+        int ret = 0;
+
+        gk20a_busy_noresume(g->dev);
+#ifdef CONFIG_PM_RUNTIME
+        if (!pm_runtime_active(&g->dev->dev))
+                goto fail;
+#endif
+
+        if (gm20b_mm_mmu_vpr_info_fetch_wait(g, VPR_INFO_FETCH_WAIT)) {
+                ret = -ETIME;
+                goto fail;
+        }
+
+        gk20a_writel(g, fb_mmu_vpr_info_r(),
+                fb_mmu_vpr_info_fetch_true_v());
+
+        ret = gm20b_mm_mmu_vpr_info_fetch_wait(g, VPR_INFO_FETCH_WAIT);
+
+fail:
+        gk20a_idle(g->dev);
+        return ret;
+}
+
 void gm20b_init_mm(struct gpu_ops *gops)
 {
         gops->mm.set_sparse = gm20b_vm_put_sparse;
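On the wait loop: gm20b_mm_mmu_vpr_info_fetch_wait() polls fb_mmu_vpr_info_r() until the FETCH field reads back false, bounding the wait with a jiffies deadline on silicon and with a simple poll count on pre-silicon platforms, where wall-clock time says little about simulator progress. gm20b_mm_mmu_vpr_info_fetch() then waits out any in-flight fetch, writes fb_mmu_vpr_info_fetch_true_v() to start a new one, waits for it to complete, and skips the whole sequence when runtime PM reports the GPU inactive, so the register accesses never hit a powered-down unit. A self-contained userspace sketch of the dual-mode poll pattern follows; everything prefixed example_ is illustrative and the register read is stubbed out.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stub for the hardware read; pretends the fetch completes after 3 polls. */
static bool example_fetch_pending(void)
{
        static int remaining = 3;

        return remaining-- > 0;
}

static long example_elapsed_ms(const struct timespec *start)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - start->tv_sec) * 1000L +
               (now.tv_nsec - start->tv_nsec) / 1000000L;
}

/* Silicon: bound by wall-clock time. Pre-silicon: bound by poll count. */
static int example_poll_fetch_done(bool is_silicon, long msec)
{
        struct timespec start;
        long polls_left = msec;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while (example_fetch_pending()) {
                if (is_silicon) {
                        if (example_elapsed_ms(&start) > msec)
                                return -1;      /* timed out */
                } else if (--polls_left == 0) {
                        return -1;              /* poll budget exhausted */
                }
        }
        return 0;
}

int main(void)
{
        printf("silicon-style wait: %s\n",
               example_poll_fetch_done(true, 5) ? "timed out" : "done");
        return 0;
}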