 drivers/gpu/drm/i915/i915_drv.c         | 96
 drivers/gpu/drm/i915/i915_drv.h         |  4
 drivers/gpu/drm/i915/i915_gem_gtt.c     |  6
 drivers/gpu/drm/i915/i915_sysfs.c       | 65
 drivers/gpu/drm/i915/intel_audio.c      | 46
 drivers/gpu/drm/i915/intel_runtime_pm.c | 37
 6 files changed, 128 insertions(+), 126 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 13ae340ef1f3..53f9535fb81e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -77,7 +77,7 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 		   const char *fmt, ...)
 {
 	static bool shown_bug_once;
-	struct device *dev = dev_priv->drm.dev;
+	struct device *kdev = dev_priv->drm.dev;
 	bool is_error = level[1] <= KERN_ERR[1];
 	bool is_debug = level[1] == KERN_DEBUG[1];
 	struct va_format vaf;
@@ -91,11 +91,11 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+	dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
 		   __builtin_return_address(0), &vaf);
 
 	if (is_error && !shown_bug_once) {
-		dev_notice(dev, "%s", FDO_BUG_MSG);
+		dev_notice(kdev, "%s", FDO_BUG_MSG);
 		shown_bug_once = true;
 	}
 
@@ -1462,9 +1462,9 @@ out:
 	return error;
 }
 
-static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 {
-	struct drm_i915_private *dev_priv = to_i915(drm_dev);
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool fw_csr;
 	int ret;
 
@@ -1498,7 +1498,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 		goto out;
 	}
 
-	pci_disable_device(drm_dev->pdev);
+	pci_disable_device(dev->pdev);
 	/*
 	 * During hibernation on some platforms the BIOS may try to access
 	 * the device even though it's already in D3 and hang the machine. So
@@ -1512,7 +1512,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 	 * Acer Aspire 1830T
 	 */
 	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
-		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+		pci_set_power_state(dev->pdev, PCI_D3hot);
 
 	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
 
@@ -1810,25 +1810,25 @@ error:
 	return ret;
 }
 
-static int i915_pm_suspend(struct device *dev)
+static int i915_pm_suspend(struct device *kdev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
 
-	if (!drm_dev) {
-		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+	if (!dev) {
+		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
 		return -ENODEV;
 	}
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_suspend(drm_dev);
+	return i915_drm_suspend(dev);
 }
 
-static int i915_pm_suspend_late(struct device *dev)
+static int i915_pm_suspend_late(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
 	/*
 	 * We have a suspend ordering issue with the snd-hda driver also
@@ -1839,57 +1839,57 @@ static int i915_pm_suspend_late(struct device *dev)
 	 * FIXME: This should be solved with a special hdmi sink device or
 	 * similar so that power domains can be employed.
 	 */
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_suspend_late(drm_dev, false);
+	return i915_drm_suspend_late(dev, false);
 }
 
-static int i915_pm_poweroff_late(struct device *dev)
+static int i915_pm_poweroff_late(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_suspend_late(drm_dev, true);
+	return i915_drm_suspend_late(dev, true);
 }
 
-static int i915_pm_resume_early(struct device *dev)
+static int i915_pm_resume_early(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_resume_early(drm_dev);
+	return i915_drm_resume_early(dev);
 }
 
-static int i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_resume(drm_dev);
+	return i915_drm_resume(dev);
 }
 
 /* freeze: before creating the hibernation_image */
-static int i915_pm_freeze(struct device *dev)
+static int i915_pm_freeze(struct device *kdev)
 {
-	return i915_pm_suspend(dev);
+	return i915_pm_suspend(kdev);
 }
 
-static int i915_pm_freeze_late(struct device *dev)
+static int i915_pm_freeze_late(struct device *kdev)
 {
 	int ret;
 
-	ret = i915_pm_suspend_late(dev);
+	ret = i915_pm_suspend_late(kdev);
 	if (ret)
 		return ret;
 
-	ret = i915_gem_freeze_late(dev_to_i915(dev));
+	ret = i915_gem_freeze_late(kdev_to_i915(kdev));
 	if (ret)
 		return ret;
 
@@ -1897,25 +1897,25 @@ static int i915_pm_freeze_late(struct device *dev)
 }
 
 /* thaw: called after creating the hibernation image, but before turning off. */
-static int i915_pm_thaw_early(struct device *dev)
+static int i915_pm_thaw_early(struct device *kdev)
 {
-	return i915_pm_resume_early(dev);
+	return i915_pm_resume_early(kdev);
 }
 
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_thaw(struct device *kdev)
 {
-	return i915_pm_resume(dev);
+	return i915_pm_resume(kdev);
 }
 
 /* restore: called after loading the hibernation image. */
-static int i915_pm_restore_early(struct device *dev)
+static int i915_pm_restore_early(struct device *kdev)
 {
-	return i915_pm_resume_early(dev);
+	return i915_pm_resume_early(kdev);
 }
 
-static int i915_pm_restore(struct device *dev)
+static int i915_pm_restore(struct device *kdev)
 {
-	return i915_pm_resume(dev);
+	return i915_pm_resume(kdev);
 }
 
 /*
@@ -2277,9 +2277,9 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
 	return ret;
 }
 
-static int intel_runtime_suspend(struct device *device)
+static int intel_runtime_suspend(struct device *kdev)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
+	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
@@ -2305,7 +2305,7 @@ static int intel_runtime_suspend(struct device *device)
 		 * Bump the expiration timestamp, otherwise the suspend won't
 		 * be rescheduled.
 		 */
-		pm_runtime_mark_last_busy(device);
+		pm_runtime_mark_last_busy(kdev);
 
 		return -EAGAIN;
 	}
@@ -2384,9 +2384,9 @@ static int intel_runtime_suspend(struct device *device)
 	return 0;
 }
 
-static int intel_runtime_resume(struct device *device)
+static int intel_runtime_resume(struct device *kdev)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
+	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret = 0;
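
Note: the rename above gives the two device abstractions distinct, consistent names — "kdev" for the base struct device handed to dev_pm_ops callbacks, "dev" for the struct drm_device. A minimal sketch of the resulting pattern (example_pm_callback is a hypothetical name; kdev_to_i915() and the fields used are the ones shown in the hunks above):

/* Hypothetical callback illustrating the kdev/dev naming convention. */
static int example_pm_callback(struct device *kdev)
{
	/* kdev is the PCI device's struct device; its drvdata is the drm_device */
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	/* skip the work while vga_switcheroo has powered the GPU off */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(dev);
}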
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2089066b5b87..79e67960ffc8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2064,9 +2064,9 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
 	return container_of(dev, struct drm_i915_private, drm);
 }
 
-static inline struct drm_i915_private *dev_to_i915(struct device *dev)
+static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
 {
-	return to_i915(dev_get_drvdata(dev));
+	return to_i915(dev_get_drvdata(kdev));
 }
 
 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b3cd9e21c8da..d31ae3d0f83f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -326,16 +326,16 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 static int __setup_page_dma(struct drm_device *dev,
 			    struct i915_page_dma *p, gfp_t flags)
 {
-	struct device *device = &dev->pdev->dev;
+	struct device *kdev = &dev->pdev->dev;
 
 	p->page = alloc_page(flags);
 	if (!p->page)
 		return -ENOMEM;
 
-	p->daddr = dma_map_page(device,
+	p->daddr = dma_map_page(kdev,
 				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
 
-	if (dma_mapping_error(device, p->daddr)) {
+	if (dma_mapping_error(kdev, p->daddr)) {
 		__free_page(p->page);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index f1ffde7f7c0b..05cb95bf2f4b 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,7 +32,10 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-#define dev_to_drm_minor(d) dev_get_drvdata((d))
+static inline struct drm_minor *kdev_to_drm_minor(struct device *kdev)
+{
+	return dev_get_drvdata(kdev);
+}
 
 #ifdef CONFIG_PM
 static u32 calc_residency(struct drm_device *dev,
@@ -84,7 +87,7 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 static ssize_t
 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_to_drm_minor(kdev);
+	struct drm_minor *dminor = kdev_to_drm_minor(kdev);
 	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
 }
@@ -92,7 +95,7 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 static ssize_t
 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_to_drm_minor(kdev);
+	struct drm_minor *dminor = kdev_to_drm_minor(kdev);
 	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
@@ -163,22 +166,22 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
 	     struct bin_attribute *attr, char *buf,
 	     loff_t offset, size_t count)
 {
-	struct device *dev = kobj_to_dev(kobj);
-	struct drm_minor *dminor = dev_to_drm_minor(dev);
-	struct drm_device *drm_dev = dminor->dev;
-	struct drm_i915_private *dev_priv = to_i915(drm_dev);
+	struct device *kdev = kobj_to_dev(kobj);
+	struct drm_minor *dminor = kdev_to_drm_minor(kdev);
+	struct drm_device *dev = dminor->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
 	count = round_down(count, 4);
 
-	ret = l3_access_valid(drm_dev, offset);
+	ret = l3_access_valid(dev, offset);
 	if (ret)
 		return ret;
 
 	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
 
-	ret = i915_mutex_lock_interruptible(drm_dev);
+	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
@@ -189,7 +192,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
 	else
 		memset(buf, 0, count);
 
-	mutex_unlock(&drm_dev->struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	return count;
 }
@@ -199,30 +202,30 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	      struct bin_attribute *attr, char *buf,
 	      loff_t offset, size_t count)
 {
-	struct device *dev = kobj_to_dev(kobj);
-	struct drm_minor *dminor = dev_to_drm_minor(dev);
-	struct drm_device *drm_dev = dminor->dev;
-	struct drm_i915_private *dev_priv = to_i915(drm_dev);
+	struct device *kdev = kobj_to_dev(kobj);
+	struct drm_minor *dminor = kdev_to_drm_minor(kdev);
+	struct drm_device *dev = dminor->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_gem_context *ctx;
 	u32 *temp = NULL; /* Just here to make handling failures easy */
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
-	if (!HAS_HW_CONTEXTS(drm_dev))
+	if (!HAS_HW_CONTEXTS(dev))
 		return -ENXIO;
 
-	ret = l3_access_valid(drm_dev, offset);
+	ret = l3_access_valid(dev, offset);
 	if (ret)
 		return ret;
 
-	ret = i915_mutex_lock_interruptible(drm_dev);
+	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
 	if (!dev_priv->l3_parity.remap_info[slice]) {
 		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
 		if (!temp) {
-			mutex_unlock(&drm_dev->struct_mutex);
+			mutex_unlock(&dev->struct_mutex);
 			return -ENOMEM;
 		}
 	}
@@ -240,7 +243,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	list_for_each_entry(ctx, &dev_priv->context_list, link)
 		ctx->remap_slice |= (1<<slice);
 
-	mutex_unlock(&drm_dev->struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	return count;
 }
@@ -266,7 +269,7 @@ static struct bin_attribute dpf_attrs_1 = {
 static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
@@ -298,7 +301,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
@@ -309,7 +312,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_i915_private *dev_priv = to_i915(minor->dev);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
@@ -321,7 +324,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 				       struct device_attribute *attr,
 				       const char *buf, size_t count)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val;
@@ -346,7 +349,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 				     struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
@@ -357,7 +360,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
@@ -370,7 +373,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val;
@@ -418,7 +421,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
@@ -431,7 +434,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 				    struct device_attribute *attr,
 				    const char *buf, size_t count)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val;
@@ -490,7 +493,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 /* For now we have a static number of RP states */
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val;
@@ -538,7 +541,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 {
 
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct i915_error_state_file_priv error_priv;
 	struct drm_i915_error_state_buf error_str;
@@ -573,7 +576,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
 				 loff_t off, size_t count)
 {
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
+	struct drm_minor *minor = kdev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	int ret;
 
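
Note: replacing the dev_to_drm_minor() macro with the static inline kdev_to_drm_minor() keeps the behaviour (it still just returns the drvdata) while letting the compiler type-check that callers pass a struct device *. A minimal sketch of a caller, assuming a hypothetical attribute callback named example_show; the helpers are the ones shown in the hunks above:

/* Hypothetical sysfs show callback using the typed helper. */
static ssize_t example_show(struct device *kdev,
			    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = kdev_to_drm_minor(kdev);

	/* minor->dev is the drm_device; to_i915(minor->dev) would yield dev_priv */
	return snprintf(buf, PAGE_SIZE, "%d\n", minor->index);
}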
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index d32f586f9c05..762572aeca23 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -581,26 +581,26 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void i915_audio_component_get_power(struct device *dev)
+static void i915_audio_component_get_power(struct device *kdev)
 {
-	intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+	intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
 }
 
-static void i915_audio_component_put_power(struct device *dev)
+static void i915_audio_component_put_power(struct device *kdev)
 {
-	intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+	intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
 }
 
-static void i915_audio_component_codec_wake_override(struct device *dev,
+static void i915_audio_component_codec_wake_override(struct device *kdev,
 						      bool enable)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	u32 tmp;
 
 	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
 		return;
 
-	i915_audio_component_get_power(dev);
+	i915_audio_component_get_power(kdev);
 
 	/*
 	 * Enable/disable generating the codec wake signal, overriding the
@@ -618,13 +618,13 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 		usleep_range(1000, 1500);
 	}
 
-	i915_audio_component_put_power(dev);
+	i915_audio_component_put_power(kdev);
 }
 
 /* Get CDCLK in kHz */
-static int i915_audio_component_get_cdclk_freq(struct device *dev)
+static int i915_audio_component_get_cdclk_freq(struct device *kdev)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 
 	if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
 		return -ENODEV;
@@ -632,10 +632,10 @@ static int i915_audio_component_get_cdclk_freq(struct device *dev)
 	return dev_priv->cdclk_freq;
 }
 
-static int i915_audio_component_sync_audio_rate(struct device *dev,
+static int i915_audio_component_sync_audio_rate(struct device *kdev,
 						int port, int rate)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	struct intel_encoder *intel_encoder;
 	struct intel_crtc *crtc;
 	struct drm_display_mode *mode;
@@ -652,7 +652,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 	    !IS_HASWELL(dev_priv))
 		return 0;
 
-	i915_audio_component_get_power(dev);
+	i915_audio_component_get_power(kdev);
 	mutex_lock(&dev_priv->av_mutex);
 	/* 1. get the pipe */
 	intel_encoder = dev_priv->dig_port_map[port];
@@ -703,15 +703,15 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 
 unlock:
 	mutex_unlock(&dev_priv->av_mutex);
-	i915_audio_component_put_power(dev);
+	i915_audio_component_put_power(kdev);
 	return err;
 }
 
-static int i915_audio_component_get_eld(struct device *dev, int port,
+static int i915_audio_component_get_eld(struct device *kdev, int port,
 					bool *enabled,
 					unsigned char *buf, int max_bytes)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	struct intel_encoder *intel_encoder;
 	struct intel_digital_port *intel_dig_port;
 	const u8 *eld;
@@ -745,11 +745,11 @@ static const struct i915_audio_component_ops i915_audio_component_ops = {
 	.get_eld = i915_audio_component_get_eld,
 };
 
-static int i915_audio_component_bind(struct device *i915_dev,
-				     struct device *hda_dev, void *data)
+static int i915_audio_component_bind(struct device *i915_kdev,
+				     struct device *hda_kdev, void *data)
 {
 	struct i915_audio_component *acomp = data;
-	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
 	int i;
 
 	if (WARN_ON(acomp->ops || acomp->dev))
@@ -757,7 +757,7 @@ static int i915_audio_component_bind(struct device *i915_dev,
 
 	drm_modeset_lock_all(&dev_priv->drm);
 	acomp->ops = &i915_audio_component_ops;
-	acomp->dev = i915_dev;
+	acomp->dev = i915_kdev;
 	BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
 	for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
 		acomp->aud_sample_rate[i] = 0;
@@ -767,11 +767,11 @@ static int i915_audio_component_bind(struct device *i915_dev,
 	return 0;
 }
 
-static void i915_audio_component_unbind(struct device *i915_dev,
-					struct device *hda_dev, void *data)
+static void i915_audio_component_unbind(struct device *i915_kdev,
+					struct device *hda_kdev, void *data)
 {
 	struct i915_audio_component *acomp = data;
-	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
 
 	drm_modeset_lock_all(&dev_priv->drm);
 	acomp->ops = NULL;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index a1d73c2de332..7686b7f1d599 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2288,7 +2288,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 {
-	struct device *device = &dev_priv->drm.pdev->dev;
+	struct device *kdev = &dev_priv->drm.pdev->dev;
 
 	/*
 	 * The i915.ko module is still not prepared to be loaded when
@@ -2310,7 +2310,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 	 * the platform doesn't support runtime PM.
 	 */
 	if (!HAS_RUNTIME_PM(dev_priv))
-		pm_runtime_put(device);
+		pm_runtime_put(kdev);
 }
 
 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
@@ -2652,9 +2652,9 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct device *kdev = &dev->pdev->dev;
 
-	pm_runtime_get_sync(device);
+	pm_runtime_get_sync(kdev);
 
 	atomic_inc(&dev_priv->pm.wakeref_count);
 	assert_rpm_wakelock_held(dev_priv);
@@ -2673,10 +2673,10 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct device *kdev = &dev->pdev->dev;
 
 	if (IS_ENABLED(CONFIG_PM)) {
-		int ret = pm_runtime_get_if_in_use(device);
+		int ret = pm_runtime_get_if_in_use(kdev);
 
 		/*
 		 * In cases runtime PM is disabled by the RPM core and we get
@@ -2715,10 +2715,10 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct device *kdev = &dev->pdev->dev;
 
 	assert_rpm_wakelock_held(dev_priv);
-	pm_runtime_get_noresume(device);
+	pm_runtime_get_noresume(kdev);
 
 	atomic_inc(&dev_priv->pm.wakeref_count);
 }
@@ -2734,14 +2734,14 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct device *kdev = &dev->pdev->dev;
 
 	assert_rpm_wakelock_held(dev_priv);
 	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
 		atomic_inc(&dev_priv->pm.atomic_seq);
 
-	pm_runtime_mark_last_busy(device);
-	pm_runtime_put_autosuspend(device);
+	pm_runtime_mark_last_busy(kdev);
+	pm_runtime_put_autosuspend(kdev);
 }
 
 /**
@@ -2757,10 +2757,10 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct device *kdev = &dev->pdev->dev;
 
-	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
-	pm_runtime_mark_last_busy(device);
+	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
+	pm_runtime_mark_last_busy(kdev);
 
 	/*
 	 * Take a permanent reference to disable the RPM functionality and drop
@@ -2769,10 +2769,10 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 	 * platforms without RPM support.
 	 */
 	if (!HAS_RUNTIME_PM(dev)) {
-		pm_runtime_dont_use_autosuspend(device);
-		pm_runtime_get_sync(device);
+		pm_runtime_dont_use_autosuspend(kdev);
+		pm_runtime_get_sync(kdev);
 	} else {
-		pm_runtime_use_autosuspend(device);
+		pm_runtime_use_autosuspend(kdev);
 	}
 
 	/*
@@ -2780,6 +2780,5 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 	 * We drop that here and will reacquire it during unloading in
 	 * intel_power_domains_fini().
 	 */
-	pm_runtime_put_autosuspend(device);
+	pm_runtime_put_autosuspend(kdev);
 }
-
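
Note: after the rename, every pm_runtime_*() call in these wrappers operates on a variable whose name makes clear it is the base struct device; the wrappers' calling convention is unchanged. A minimal usage sketch (example_hw_access is hypothetical; the wrappers and their behaviour are those shown in the hunks above):

/* Hold a runtime-PM wakeref around hardware access. */
static void example_hw_access(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);	/* pm_runtime_get_sync() on the PCI kdev */

	/* ... touch registers while the wakeref guarantees the device is awake ... */

	intel_runtime_pm_put(dev_priv);	/* mark last busy + autosuspend put */
}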