Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 7a02d68e..ac4625e0 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -3515,7 +3515,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 
 	gk20a_busy_noresume(g->dev);
 	if (!g->power_on) {
-		pm_runtime_put_noidle(&g->dev->dev);
+		pm_runtime_put_noidle(g->dev);
 		return 0;
 	}
 
@@ -3525,7 +3525,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 	   guarantee that writes are to DRAM. This will be a sysmembar internal
 	   to the L2. */
 
-	trace_gk20a_mm_fb_flush(g->dev->name);
+	trace_gk20a_mm_fb_flush(dev_name(g->dev));
 
 	gk20a_writel(g, flush_fb_flush_r(),
 		flush_fb_flush_pending_busy_f());
@@ -3552,11 +3552,11 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 		ret = -EBUSY;
 	}
 
-	trace_gk20a_mm_fb_flush_done(g->dev->name);
+	trace_gk20a_mm_fb_flush_done(dev_name(g->dev));
 
 	mutex_unlock(&mm->l2_op_lock);
 
-	pm_runtime_put_noidle(&g->dev->dev);
+	pm_runtime_put_noidle(g->dev);
 
 	return ret;
 }
@@ -3566,7 +3566,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
 	u32 data;
 	s32 retry = 200;
 
-	trace_gk20a_mm_l2_invalidate(g->dev->name);
+	trace_gk20a_mm_l2_invalidate(dev_name(g->dev));
 
 	/* Invalidate any clean lines from the L2 so subsequent reads go to
 	   DRAM. Dirty lines are not affected by this operation. */
@@ -3592,7 +3592,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
 		gk20a_warn(dev_from_gk20a(g),
 			"l2_system_invalidate too many retries");
 
-	trace_gk20a_mm_l2_invalidate_done(g->dev->name);
+	trace_gk20a_mm_l2_invalidate_done(dev_name(g->dev));
 }
 
 void gk20a_mm_l2_invalidate(struct gk20a *g)
@@ -3604,7 +3604,7 @@ void gk20a_mm_l2_invalidate(struct gk20a *g)
 		gk20a_mm_l2_invalidate_locked(g);
 		mutex_unlock(&mm->l2_op_lock);
 	}
-	pm_runtime_put_noidle(&g->dev->dev);
+	pm_runtime_put_noidle(g->dev);
 }
 
 void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
@@ -3621,7 +3621,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 
 	mutex_lock(&mm->l2_op_lock);
 
-	trace_gk20a_mm_l2_flush(g->dev->name);
+	trace_gk20a_mm_l2_flush(dev_name(g->dev));
 
 	/* Flush all dirty lines from the L2 to DRAM. Lines are left in the L2
 	   as clean, so subsequent reads might hit in the L2. */
@@ -3646,7 +3646,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 		gk20a_warn(dev_from_gk20a(g),
 			"l2_flush_dirty too many retries");
 
-	trace_gk20a_mm_l2_flush_done(g->dev->name);
+	trace_gk20a_mm_l2_flush_done(dev_name(g->dev));
 
 	if (invalidate)
 		gk20a_mm_l2_invalidate_locked(g);
@@ -3654,7 +3654,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 	mutex_unlock(&mm->l2_op_lock);
 
 hw_was_off:
-	pm_runtime_put_noidle(&g->dev->dev);
+	pm_runtime_put_noidle(g->dev);
 }
 
 void gk20a_mm_cbc_clean(struct gk20a *g)
@@ -3696,7 +3696,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
 	mutex_unlock(&mm->l2_op_lock);
 
 hw_was_off:
-	pm_runtime_put_noidle(&g->dev->dev);
+	pm_runtime_put_noidle(g->dev);
 }
 
 int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
@@ -3746,7 +3746,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
 
 	mutex_lock(&tlb_lock);
 
-	trace_gk20a_mm_tlb_invalidate(g->dev->name);
+	trace_gk20a_mm_tlb_invalidate(dev_name(g->dev));
 
 	do {
 		data = gk20a_readl(g, fb_mmu_ctrl_r());
@@ -3783,7 +3783,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
 		gk20a_warn(dev_from_gk20a(g),
 			"mmu invalidate too many retries");
 
-	trace_gk20a_mm_tlb_invalidate_done(g->dev->name);
+	trace_gk20a_mm_tlb_invalidate_done(dev_name(g->dev));
 
 out:
 	mutex_unlock(&tlb_lock);
@@ -3868,11 +3868,11 @@ clean_up:
 	return err;
 }
 
-void gk20a_mm_debugfs_init(struct platform_device *pdev)
+void gk20a_mm_debugfs_init(struct device *dev)
 {
-	struct gk20a_platform *platform = platform_get_drvdata(pdev);
+	struct gk20a_platform *platform = dev_get_drvdata(dev);
 	struct dentry *gpu_root = platform->debugfs;
-	struct gk20a *g = gk20a_get_platform(pdev)->g;
+	struct gk20a *g = gk20a_get_platform(dev)->g;
 
 	debugfs_create_x64("separate_fixed_allocs", 0664, gpu_root,
 		&g->separate_fixed_allocs);
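For context, every hunk above applies the same conversion: the gk20a struct's dev field is now a plain struct device * rather than a struct platform_device *, so the &g->dev->dev dereference passed to runtime-PM helpers collapses to g->dev, the raw ->name field is replaced by dev_name(), and gk20a_mm_debugfs_init() takes a struct device * and reads its drvdata with dev_get_drvdata(). The following is a minimal sketch of the before/after shape only; the struct and function names here are illustrative, not taken from the driver.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/printk.h>

/* Old layout (hypothetical): the driver stored a platform_device pointer,
 * so generic device APIs needed an extra ->dev dereference and the name
 * came from the platform_device's raw name field. */
struct gk20a_old {
	struct platform_device *dev;
};

static void gk20a_put_old(struct gk20a_old *g)
{
	pr_info("gpu: %s\n", g->dev->name);
	pm_runtime_put_noidle(&g->dev->dev);
}

/* New layout (hypothetical): the driver stores the struct device pointer
 * directly, so runtime-PM helpers take it as-is and dev_name() is the
 * canonical way to get the device name. */
struct gk20a_new {
	struct device *dev;
};

static void gk20a_put_new(struct gk20a_new *g)
{
	pr_info("gpu: %s\n", dev_name(g->dev));
	pm_runtime_put_noidle(g->dev);
}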