author     Dave Airlie <airlied@redhat.com>   2018-01-24 20:40:54 -0500
committer  Dave Airlie <airlied@redhat.com>   2018-01-24 20:40:54 -0500
commit     22bc72c8075fa350482cdbbd66597d626aa506c8 (patch)
tree       4223a1282a53a07cd87ff0abbe1da5f8f172788d
parent     92eb5f0c00b7c11d85ae823a814b2a34dda8a3c4 (diff)
parent     87440329b06720e09c27ad1991204f4f0bd76f83 (diff)
Merge branch 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few more fixes for 4.16, nothing major.

A few more fixes for 4.16. This is on top of the pull request from last week.
The most notable change here is a fix to the link order of the GPU scheduler,
which is now separate from amdgpu, to fix crashes when the modules are built
into the kernel rather than built as loadable modules.

* 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux:
  drm: fix gpu scheduler link order
  drm/amd/display: Demote error print to debug print when ATOM impl missing
  drm/amdgpu: Avoid leaking PM domain on driver unbind (v2)
  drm/amd/amdgpu: Add Polaris version check
  drm/amdgpu: Reenable manual GPU reset from sysfs
  drm/amdgpu: disable MMHUB power gating on raven
  drm/ttm: Don't unreserve swapped BOs that were previously reserved
  drm/ttm: Don't add swapped BOs to swap-LRU list
  drm/amdgpu: only check for ECC on Vega10
  drm/amd/powerplay: Fix smu_table_entry.handle type
  drm/ttm: add VADDR_FLAG_UPDATED_COUNT to correctly update dma_page global count
  drm/radeon: fill in rb backend map on evergreen/ni.
  drm/amdgpu/gfx9: fix ngg enablement to clear gds reserved memory (v2)
  drm/ttm: only free pages rather than update global memory count together
  drm/amdgpu: fix CPU based VM updates
  drm/amdgpu: fix typo in amdgpu_vce_validate_bo
  drm/amdgpu: fix amdgpu_vm_pasid_fault_credit
  drm/ttm: check the return value of register_shrinker
  drm/radeon: fix sparse warning: Should it be static?
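For context on the link-order fix: for code built into the kernel (obj-y / =y),
kbuild link order decides the order in which same-level initcalls run, so the
shared GPU scheduler has to be linked, and therefore initialised, before the
drivers that use it. A minimal sketch of the resulting ordering in
drivers/gpu/drm/Makefile follows (not part of the pull itself; the
CONFIG_DRM_AMDGPU line is shown here only for illustration):

# Sketch: built-in objects run their module_init()/initcalls in link order,
# so the scheduler must be listed before the drivers that use it at probe time.
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/      # moved up from the end of the file
obj-y += amd/lib/
obj-$(CONFIG_DRM_AMDGPU) += amd/amdgpu/    # illustration: main user of the scheduler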
-rw-r--r--  drivers/gpu/drm/Makefile                              |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c            |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c               |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c                | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c                 |  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c                 | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c                    |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c                 | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c   | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c  | 16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h      |  2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c                    |  1
-rw-r--r--  drivers/gpu/drm/radeon/ni.c                           |  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c                |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                          |  7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c                  | 27
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c              | 79
17 files changed, 138 insertions(+), 101 deletions(-)
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index dd5ae67f8e2b..50093ff4479b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
 obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
 obj-$(CONFIG_DRM_ARM) += arm/
 obj-$(CONFIG_DRM_TTM) += ttm/
+obj-$(CONFIG_DRM_SCHED) += scheduler/
 obj-$(CONFIG_DRM_TDFX) += tdfx/
 obj-$(CONFIG_DRM_R128) += r128/
 obj-y += amd/lib/
@@ -102,4 +103,3 @@ obj-$(CONFIG_DRM_MXSFB) += mxsfb/
 obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
 obj-$(CONFIG_DRM_PL111) += pl111/
 obj-$(CONFIG_DRM_TVE200) += tve200/
-obj-$(CONFIG_DRM_SCHED) += scheduler/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9baf182d5418..00a50cc5ec9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1874,8 +1874,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	 * ignore it */
 	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
 
-	if (amdgpu_runtime_pm == 1)
-		runtime = true;
 	if (amdgpu_device_is_px(ddev))
 		runtime = true;
 	if (!pci_is_thunderbolt_attached(adev->pdev))
@@ -2619,7 +2617,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	uint64_t reset_flags = 0;
 	int i, r, resched;
 
-	if (!amdgpu_device_ip_check_soft_reset(adev)) {
+	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
 		return 0;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 55a726a322e3..d274ae535530 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -585,8 +585,8 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 
 	for (i = 0; i < bo->placement.num_placement; ++i) {
 		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
-		bo->placements[i].lpfn = bo->placements[i].fpfn ?
-			min(bo->placements[i].fpfn, lpfn) : lpfn;
+		bo->placements[i].lpfn = bo->placements[i].lpfn ?
+			min(bo->placements[i].lpfn, lpfn) : lpfn;
 	}
 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index cd1752b6afa9..6fc16eecf2dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -970,12 +970,16 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
 	amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
 			       &dst, &flags);
 
-	if (parent->base.bo->shadow) {
-		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
-		pde = pd_addr + (entry - parent->entries) * 8;
-		p->func(p, pde, dst, 1, 0, flags);
+	if (p->func == amdgpu_vm_cpu_set_ptes) {
+		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
+	} else {
+		if (parent->base.bo->shadow) {
+			pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
+			pde = pd_addr + (entry - parent->entries) * 8;
+			p->func(p, pde, dst, 1, 0, flags);
+		}
+		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
 	}
-	pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
 	pde = pd_addr + (entry - parent->entries) * 8;
 	p->func(p, pde, dst, 1, 0, flags);
 }
@@ -2478,17 +2482,21 @@ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
 
 	spin_lock(&adev->vm_manager.pasid_lock);
 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
-	spin_unlock(&adev->vm_manager.pasid_lock);
-	if (!vm)
+	if (!vm) {
 		/* VM not found, can't track fault credit */
+		spin_unlock(&adev->vm_manager.pasid_lock);
 		return true;
+	}
 
 	/* No lock needed. only accessed by IRQ handler */
-	if (!vm->fault_credit)
+	if (!vm->fault_credit) {
 		/* Too many faults in this VM */
+		spin_unlock(&adev->vm_manager.pasid_lock);
 		return false;
+	}
 
 	vm->fault_credit--;
+	spin_unlock(&adev->vm_manager.pasid_lock);
 	return true;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index fc270e2ef91a..c06479615e8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1068,8 +1068,8 @@ static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
 	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
 	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
 	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
-	adev->gfx.ngg.gds_reserve_addr = SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE);
-	adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;
+	adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
+	adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
 
 	/* Primitive Buffer */
 	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
@@ -1181,13 +1181,14 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
 	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
+				 PACKET3_DMA_DATA_DST_SEL(1) |
 				 PACKET3_DMA_DATA_SRC_SEL(2)));
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
 	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_size);
-
+	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
+				adev->gfx.ngg.gds_reserve_size);
 
 	gfx_v9_0_write_data_to_reg(ring, 0, false,
 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index eb8b1bb66389..2719937e09d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -634,14 +634,16 @@ static int gmc_v9_0_late_init(void *handle)
 	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
 		BUG_ON(vm_inv_eng[i] > 16);
 
-	r = gmc_v9_0_ecc_available(adev);
-	if (r == 1) {
-		DRM_INFO("ECC is active.\n");
-	} else if (r == 0) {
-		DRM_INFO("ECC is not present.\n");
-	} else {
-		DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
-		return r;
+	if (adev->asic_type == CHIP_VEGA10) {
+		r = gmc_v9_0_ecc_available(adev);
+		if (r == 1) {
+			DRM_INFO("ECC is active.\n");
+		} else if (r == 0) {
+			DRM_INFO("ECC is not present.\n");
+		} else {
+			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
+			return r;
+		}
 	}
 
 	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8f2cff7b7e0c..a04a033f57de 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -666,8 +666,8 @@ static int soc15_common_early_init(void *handle)
 			AMD_CG_SUPPORT_MC_LS |
 			AMD_CG_SUPPORT_SDMA_MGCG |
 			AMD_CG_SUPPORT_SDMA_LS;
-		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
-				 AMD_PG_SUPPORT_MMHUB;
+		adev->pg_flags = AMD_PG_SUPPORT_SDMA;
+
 		adev->external_rev_id = 0x1;
 		break;
 	default:
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 59271055a30e..b2bfedaf57f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -37,6 +37,9 @@
 #include "gmc/gmc_8_1_d.h"
 #include "vi.h"
 
+/* Polaris10/11/12 firmware version */
+#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
+
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
 
@@ -58,7 +61,9 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
 */
 static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
 {
-	return ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_POLARIS12));
+	return ((adev->asic_type >= CHIP_POLARIS10) &&
+		(adev->asic_type <= CHIP_POLARIS12) &&
+		(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
 }
 
 /**
@@ -411,7 +416,15 @@ static int uvd_v6_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	if (uvd_v6_0_enc_support(adev)) {
+	if (!uvd_v6_0_enc_support(adev)) {
+		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+			adev->uvd.ring_enc[i].funcs = NULL;
+
+		adev->uvd.irq.num_types = 1;
+		adev->uvd.num_enc_rings = 0;
+
+		DRM_INFO("UVD ENC is disabled\n");
+	} else {
 		struct drm_sched_rq *rq;
 		ring = &adev->uvd.ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 1aefed8cf98b..4b5fdd577848 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -387,7 +387,7 @@ static void init_transmitter_control(struct bios_parser *bp)
 		bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
 		break;
 	default:
-		dm_error("Don't have transmitter_control for v%d\n", crev);
+		dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
 		bp->cmd_tbl.transmitter_control = NULL;
 		break;
 	}
@@ -911,7 +911,7 @@ static void init_set_pixel_clock(struct bios_parser *bp)
 		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
 		break;
 	default:
-		dm_error("Don't have set_pixel_clock for v%d\n",
+		dm_output_to_console("Don't have set_pixel_clock for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
 		bp->cmd_tbl.set_pixel_clock = NULL;
 		break;
@@ -1230,7 +1230,7 @@ static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp)
 			enable_spread_spectrum_on_ppll_v3;
 		break;
 	default:
-		dm_error("Don't have enable_spread_spectrum_on_ppll for v%d\n",
+		dm_output_to_console("Don't have enable_spread_spectrum_on_ppll for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL));
 		bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
 		break;
@@ -1427,7 +1427,7 @@ static void init_adjust_display_pll(struct bios_parser *bp)
 		bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
 		break;
 	default:
-		dm_error("Don't have adjust_display_pll for v%d\n",
+		dm_output_to_console("Don't have adjust_display_pll for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll));
 		bp->cmd_tbl.adjust_display_pll = NULL;
 		break;
@@ -1702,7 +1702,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
 			set_crtc_using_dtd_timing_v3;
 		break;
 	default:
-		dm_error("Don't have set_crtc_timing for dtd v%d\n",
+		dm_output_to_console("Don't have set_crtc_timing for dtd v%d\n",
 			dtd_version);
 		bp->cmd_tbl.set_crtc_timing = NULL;
 		break;
@@ -1713,7 +1713,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
 		bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
 		break;
 	default:
-		dm_error("Don't have set_crtc_timing for v%d\n",
+		dm_output_to_console("Don't have set_crtc_timing for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing));
 		bp->cmd_tbl.set_crtc_timing = NULL;
 		break;
@@ -1901,7 +1901,7 @@ static void init_select_crtc_source(struct bios_parser *bp)
 		bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
 		break;
 	default:
-		dm_error("Don't select_crtc_source enable_crtc for v%d\n",
+		dm_output_to_console("Don't select_crtc_source enable_crtc for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source));
 		bp->cmd_tbl.select_crtc_source = NULL;
 		break;
@@ -2010,7 +2010,7 @@ static void init_enable_crtc(struct bios_parser *bp)
 		bp->cmd_tbl.enable_crtc = enable_crtc_v1;
 		break;
 	default:
-		dm_error("Don't have enable_crtc for v%d\n",
+		dm_output_to_console("Don't have enable_crtc for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC));
 		bp->cmd_tbl.enable_crtc = NULL;
 		break;
@@ -2118,7 +2118,7 @@ static void init_program_clock(struct bios_parser *bp)
 		bp->cmd_tbl.program_clock = program_clock_v6;
 		break;
 	default:
-		dm_error("Don't have program_clock for v%d\n",
+		dm_output_to_console("Don't have program_clock for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
 		bp->cmd_tbl.program_clock = NULL;
 		break;
@@ -2341,7 +2341,7 @@ static void init_enable_disp_power_gating(
 			enable_disp_power_gating_v2_1;
 		break;
 	default:
-		dm_error("Don't enable_disp_power_gating enable_crtc for v%d\n",
+		dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating));
 		bp->cmd_tbl.enable_disp_power_gating = NULL;
 		break;
@@ -2390,7 +2390,7 @@ static void init_set_dce_clock(struct bios_parser *bp)
 		bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
 		break;
 	default:
-		dm_error("Don't have set_dce_clock for v%d\n",
+		dm_output_to_console("Don't have set_dce_clock for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock));
 		bp->cmd_tbl.set_dce_clock = NULL;
 		break;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 946db12388d6..fea5e83736fd 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -118,7 +118,7 @@ static void init_dig_encoder_control(struct bios_parser *bp)
 		bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
 		break;
 	default:
-		dm_error("Don't have dig_encoder_control for v%d\n", version);
+		dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
 		bp->cmd_tbl.dig_encoder_control = NULL;
 		break;
 	}
@@ -206,7 +206,7 @@ static void init_transmitter_control(struct bios_parser *bp)
 		bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
 		break;
 	default:
-		dm_error("Don't have transmitter_control for v%d\n", crev);
+		dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
 		bp->cmd_tbl.transmitter_control = NULL;
 		break;
 	}
@@ -270,7 +270,7 @@ static void init_set_pixel_clock(struct bios_parser *bp)
 		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
 		break;
 	default:
-		dm_error("Don't have set_pixel_clock for v%d\n",
+		dm_output_to_console("Don't have set_pixel_clock for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
 		bp->cmd_tbl.set_pixel_clock = NULL;
 		break;
@@ -383,7 +383,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
 			set_crtc_using_dtd_timing_v3;
 		break;
 	default:
-		dm_error("Don't have set_crtc_timing for v%d\n", dtd_version);
+		dm_output_to_console("Don't have set_crtc_timing for v%d\n", dtd_version);
 		bp->cmd_tbl.set_crtc_timing = NULL;
 		break;
 	}
@@ -503,7 +503,7 @@ static void init_select_crtc_source(struct bios_parser *bp)
 		bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
 		break;
 	default:
-		dm_error("Don't select_crtc_source enable_crtc for v%d\n",
+		dm_output_to_console("Don't select_crtc_source enable_crtc for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source));
 		bp->cmd_tbl.select_crtc_source = NULL;
 		break;
@@ -572,7 +572,7 @@ static void init_enable_crtc(struct bios_parser *bp)
 		bp->cmd_tbl.enable_crtc = enable_crtc_v1;
 		break;
 	default:
-		dm_error("Don't have enable_crtc for v%d\n",
+		dm_output_to_console("Don't have enable_crtc for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(enablecrtc));
 		bp->cmd_tbl.enable_crtc = NULL;
 		break;
@@ -670,7 +670,7 @@ static void init_enable_disp_power_gating(
 			enable_disp_power_gating_v2_1;
 		break;
 	default:
-		dm_error("Don't enable_disp_power_gating enable_crtc for v%d\n",
+		dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating));
 		bp->cmd_tbl.enable_disp_power_gating = NULL;
 		break;
@@ -721,7 +721,7 @@ static void init_set_dce_clock(struct bios_parser *bp)
 		bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
 		break;
 	default:
-		dm_error("Don't have set_dce_clock for v%d\n",
+		dm_output_to_console("Don't have set_dce_clock for v%d\n",
 			BIOS_CMD_TABLE_PARA_REVISION(setdceclock));
 		bp->cmd_tbl.set_dce_clock = NULL;
 		break;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
index 58888400f1b8..caebdbebdcd8 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
@@ -40,7 +40,7 @@ struct smu_table_entry {
 	uint32_t table_addr_high;
 	uint32_t table_addr_low;
 	uint8_t *table;
-	uint32_t handle;
+	unsigned long handle;
 };
 
 struct smu_table_array {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 24fe66c89dfb..5712d63dca20 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3513,6 +3513,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
 						EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
 	}
+	rdev->config.evergreen.backend_map = tmp;
 	WREG32(GB_BACKEND_MAP, tmp);
 
 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 9eccd0c81d88..381b0255ff02 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1148,6 +1148,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 						rdev->config.cayman.max_shader_engines,
 						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
 	}
+	rdev->config.cayman.backend_map = tmp;
 	WREG32(GB_BACKEND_MAP, tmp);
 
 	cgts_tcc_disable = 0xffff0000;
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 183b4b482138..238e6eb842ea 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -328,7 +328,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 	drm_kms_helper_hotplug_event(dev);
 }
 
-const struct drm_dp_mst_topology_cbs mst_cbs = {
+static const struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = radeon_dp_add_mst_connector,
 	.register_connector = radeon_dp_register_mst_connector,
 	.destroy_connector = radeon_dp_destroy_mst_connector,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2eb71ffe95a6..893003fc76a1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -170,7 +170,8 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 		list_add_tail(&bo->lru, &man->lru[bo->priority]);
 		kref_get(&bo->list_kref);
 
-		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+		if (bo->ttm && !(bo->ttm->page_flags &
+				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
 			list_add_tail(&bo->swap,
 				      &bo->glob->swap_lru[bo->priority]);
 			kref_get(&bo->list_kref);
@@ -1779,8 +1780,8 @@ out:
 	 * Unreserve without putting on LRU to avoid swapping out an
 	 * already swapped buffer.
 	 */
-
-	reservation_object_unlock(bo->resv);
+	if (locked)
+		reservation_object_unlock(bo->resv);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 79854ab3bc47..2b12c55a3bff 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -477,12 +477,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 	return count;
 }
 
-static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
 	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
 	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
-	register_shrinker(&manager->mm_shrink);
+	return register_shrinker(&manager->mm_shrink);
 }
 
 static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
@@ -1034,15 +1034,18 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
 				   &glob->kobj, "pool");
-	if (unlikely(ret != 0)) {
-		kobject_put(&_manager->kobj);
-		_manager = NULL;
-		return ret;
-	}
-
-	ttm_pool_mm_shrink_init(_manager);
+	if (unlikely(ret != 0))
+		goto error;
 
+	ret = ttm_pool_mm_shrink_init(_manager);
+	if (unlikely(ret != 0))
+		goto error;
 	return 0;
+
+error:
+	kobject_put(&_manager->kobj);
+	_manager = NULL;
+	return ret;
 }
 
 void ttm_page_alloc_fini(void)
@@ -1072,7 +1075,8 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
 			    ttm->caching_state);
 	if (unlikely(ret != 0)) {
-		ttm_pool_unpopulate(ttm);
+		ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+			      ttm->caching_state);
 		return ret;
 	}
 
@@ -1080,7 +1084,8 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						PAGE_SIZE, ctx);
 		if (unlikely(ret != 0)) {
-			ttm_pool_unpopulate(ttm);
+			ttm_put_pages(ttm->pages, ttm->num_pages,
+				      ttm->page_flags, ttm->caching_state);
 			return -ENOMEM;
 		}
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 4c659405a008..a88051552ace 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -61,6 +61,7 @@
 #define SMALL_ALLOCATION	4
 #define FREE_ALL_PAGES		(~0U)
 #define VADDR_FLAG_HUGE_POOL	1UL
+#define VADDR_FLAG_UPDATED_COUNT	2UL
 
 enum pool_type {
 	IS_UNDEFINED = 0,
@@ -874,18 +875,18 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
 }
 
 /*
- * @return count of pages still required to fulfill the request.
  * The populate list is actually a stack (not that is matters as TTM
  * allocates one page at a time.
+ * return dma_page pointer if success, otherwise NULL.
  */
-static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
 				  struct ttm_dma_tt *ttm_dma,
 				  unsigned index)
 {
-	struct dma_page *d_page;
+	struct dma_page *d_page = NULL;
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 	unsigned long irq_flags;
-	int count, r = -ENOMEM;
+	int count;
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
 	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
@@ -894,12 +895,11 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
 		ttm->pages[index] = d_page->p;
 		ttm_dma->dma_address[index] = d_page->dma;
 		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
-		r = 0;
 		pool->npages_in_use += 1;
 		pool->npages_free -= 1;
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
-	return r;
+	return d_page;
 }
 
 static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
@@ -934,6 +934,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	unsigned long num_pages = ttm->num_pages;
 	struct dma_pool *pool;
+	struct dma_page *d_page;
 	enum pool_type type;
 	unsigned i;
 	int ret;
@@ -962,8 +963,8 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	while (num_pages >= HPAGE_PMD_NR) {
 		unsigned j;
 
-		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-		if (ret != 0)
+		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+		if (!d_page)
 			break;
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
@@ -973,6 +974,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 			return -ENOMEM;
 		}
 
+		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
 			ttm->pages[j] = ttm->pages[j - 1] + 1;
 			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
@@ -996,8 +998,8 @@ skip_huge:
 	}
 
 	while (num_pages) {
-		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-		if (ret != 0) {
+		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+		if (!d_page) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
 		}
@@ -1009,6 +1011,7 @@ skip_huge:
 			return -ENOMEM;
 		}
 
+		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 		++i;
 		--num_pages;
 	}
@@ -1049,8 +1052,11 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 			continue;
 
 		count++;
-		ttm_mem_global_free_page(ttm->glob->mem_glob,
-					 d_page->p, pool->size);
+		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 d_page->p, pool->size);
+			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
+		}
 		ttm_dma_page_put(pool, d_page);
 	}
 
@@ -1070,9 +1076,19 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 
 	/* make sure pages array match list and count number of pages */
 	count = 0;
-	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+				 page_list) {
 		ttm->pages[count] = d_page->p;
 		count++;
+
+		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 d_page->p, pool->size);
+			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
+		}
+
+		if (is_cached)
+			ttm_dma_page_put(pool, d_page);
 	}
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
@@ -1092,19 +1108,6 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 
-	if (is_cached) {
-		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
-			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 d_page->p, pool->size);
-			ttm_dma_page_put(pool, d_page);
-		}
-	} else {
-		for (i = 0; i < count; i++) {
-			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 ttm->pages[i], pool->size);
-		}
-	}
-
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	for (i = 0; i < ttm->num_pages; i++) {
 		ttm->pages[i] = NULL;
@@ -1182,12 +1185,12 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 	return count;
 }
 
-static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
 	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
 	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
-	register_shrinker(&manager->mm_shrink);
+	return register_shrinker(&manager->mm_shrink);
 }
 
 static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
@@ -1197,7 +1200,7 @@ static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
 
 int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
-	int ret = -ENOMEM;
+	int ret;
 
 	WARN_ON(_manager);
 
@@ -1205,7 +1208,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 	if (!_manager)
-		goto err;
+		return -ENOMEM;
 
 	mutex_init(&_manager->lock);
 	INIT_LIST_HEAD(&_manager->pools);
@@ -1217,13 +1220,17 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 	/* This takes care of auto-freeing the _manager */
 	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
 				   &glob->kobj, "dma_pool");
-	if (unlikely(ret != 0)) {
-		kobject_put(&_manager->kobj);
-		goto err;
-	}
-	ttm_dma_pool_mm_shrink_init(_manager);
+	if (unlikely(ret != 0))
+		goto error;
+
+	ret = ttm_dma_pool_mm_shrink_init(_manager);
+	if (unlikely(ret != 0))
+		goto error;
 	return 0;
-err:
+
+error:
+	kobject_put(&_manager->kobj);
+	_manager = NULL;
 	return ret;
 }
 