author     Linus Torvalds <torvalds@linux-foundation.org>    2012-12-17 11:26:17 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-12-17 11:26:17 -0500
commit     3c2e81ef344a90bb0a39d84af6878b4aeff568a2 (patch)
tree       bd8c8b23466174899d2fe4d35af6e1e838edb068 /include
parent     221392c3ad0432e39fd74a349364f66cb0ed78f6 (diff)
parent     55bde6b1442fed8af67b92d21acce67db454c9f9 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull DRM updates from Dave Airlie:
 "This is the one and only next pull for 3.8, we had a regression we
  found last week, so I was waiting for that to resolve itself, and I
  ended up with some Intel fixes on top as well.

  Highlights:
  - new driver: nvidia tegra 20/30/hdmi support
  - radeon: add support for previously unused DMA engines, more HDMI
    regs, eviction speeds ups and fixes
  - i915: HSW support enable, agp removal on GEN6, seqno wrapping
  - exynos: IPP subsystem support (image post proc), HDMI
  - nouveau: display class reworking, nv20->40 z compression
  - ttm: start of locking fixes, rcu usage for lookups,
  - core: documentation updates, docbook integration, monotonic clock
    usage, move from connector to object properties"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (590 commits)
  drm/exynos: add gsc ipp driver
  drm/exynos: add rotator ipp driver
  drm/exynos: add fimc ipp driver
  drm/exynos: add iommu support for ipp
  drm/exynos: add ipp subsystem
  drm/exynos: support device tree for fimd
  radeon: fix regression with eviction since evict caching changes
  drm/radeon: add more pedantic checks in the CP DMA checker
  drm/radeon: bump version for CS ioctl support for async DMA
  drm/radeon: enable the async DMA rings in the CS ioctl
  drm/radeon: add VM CS parser support for async DMA on cayman/TN/SI
  drm/radeon/kms: add evergreen/cayman CS parser for async DMA (v2)
  drm/radeon/kms: add 6xx/7xx CS parser for async DMA (v2)
  drm/radeon: fix htile buffer size computation for command stream checker
  drm/radeon: fix fence locking in the pageflip callback
  drm/radeon: make indirect register access concurrency-safe
  drm/radeon: add W|RREG32_IDX for MM_INDEX|DATA based mmio accesss
  drm/exynos: support extended screen coordinate of fimd
  drm/exynos: fix x, y coordinates for right bottom pixel
  drm/exynos: fix fb offset calculation for plane
  ...
Diffstat (limited to 'include')
-rw-r--r--   include/drm/drmP.h                    3
-rw-r--r--   include/drm/drm_crtc.h               19
-rw-r--r--   include/drm/drm_crtc_helper.h         3
-rw-r--r--   include/drm/drm_dp_helper.h          39
-rw-r--r--   include/drm/drm_hashtab.h            14
-rw-r--r--   include/drm/exynos_drm.h             26
-rw-r--r--   include/drm/intel-gtt.h               7
-rw-r--r--   include/drm/ttm/ttm_bo_api.h         33
-rw-r--r--   include/drm/ttm/ttm_bo_driver.h      45
-rw-r--r--   include/drm/ttm/ttm_execbuf_util.h    3
-rw-r--r--   include/drm/ttm/ttm_memory.h          2
-rw-r--r--   include/drm/ttm/ttm_object.h          4
-rw-r--r--   include/linux/dma-attrs.h             1
-rw-r--r--   include/linux/kref.h                 21
-rw-r--r--   include/uapi/drm/drm.h                1
-rw-r--r--   include/uapi/drm/exynos_drm.h       203
-rw-r--r--   include/uapi/drm/i915_drm.h           6
-rw-r--r--   include/uapi/drm/radeon_drm.h         6
18 files changed, 362 insertions, 74 deletions
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 3fd82809b2d4..fad21c927a38 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1431,6 +1431,8 @@ extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
				     struct timeval *vblanktime);
+extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
+				  struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
@@ -1503,6 +1505,7 @@ extern unsigned int drm_debug;
 
 extern unsigned int drm_vblank_offdelay;
 extern unsigned int drm_timestamp_precision;
+extern unsigned int drm_timestamp_monotonic;
 
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3fa18b7e9497..00d78b5161c0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -792,6 +792,7 @@ struct drm_mode_config {
 
	/* output poll support */
	bool poll_enabled;
+	bool poll_running;
	struct delayed_work output_poll_work;
 
	/* pointers to standard properties */
@@ -887,14 +888,14 @@ extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_
 extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
 extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
						   const struct drm_display_mode *mode);
-extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
 extern void drm_mode_config_init(struct drm_device *dev);
 extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
-extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
-extern int drm_mode_width(struct drm_display_mode *mode);
-extern int drm_mode_height(struct drm_display_mode *mode);
+extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern int drm_mode_width(const struct drm_display_mode *mode);
+extern int drm_mode_height(const struct drm_display_mode *mode);
 
 /* for us by fb module */
 extern int drm_mode_attachmode_crtc(struct drm_device *dev,
@@ -919,12 +920,6 @@ extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
 extern void drm_mode_connector_list_update(struct drm_connector *connector);
 extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
						    struct edid *edid);
-extern int drm_connector_property_set_value(struct drm_connector *connector,
-					    struct drm_property *property,
-					    uint64_t value);
-extern int drm_connector_property_get_value(struct drm_connector *connector,
-					    struct drm_property *property,
-					    uint64_t *value);
 extern int drm_object_property_set_value(struct drm_mode_object *obj,
					 struct drm_property *property,
					 uint64_t val);
@@ -946,8 +941,6 @@ extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
 extern bool drm_crtc_in_use(struct drm_crtc *crtc);
 
-extern void drm_connector_attach_property(struct drm_connector *connector,
-					  struct drm_property *property, uint64_t init_val);
 extern void drm_object_attach_property(struct drm_mode_object *obj,
				       struct drm_property *property,
				       uint64_t init_val);
@@ -1037,6 +1030,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
				    void *data, struct drm_file *file_priv);
 extern u8 *drm_find_cea_extension(struct edid *edid);
+extern u8 drm_match_cea_mode(struct drm_display_mode *to_match);
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
 extern bool drm_detect_monitor_audio(struct edid *edid);
 extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
@@ -1053,6 +1047,7 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
				int GTF_2C, int GTF_K, int GTF_2J);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
				int hdisplay, int vdisplay);
+extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
 
 extern int drm_edid_header_is_valid(const u8 *raw_edid);
 extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index e01cc80c9c30..f43d556bf40b 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -137,6 +137,8 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
 extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
+extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
 extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
					  struct drm_mode_fb_cmd2 *mode_cmd);
 
@@ -162,6 +164,7 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev);
 extern void drm_kms_helper_poll_init(struct drm_device *dev);
 extern void drm_kms_helper_poll_fini(struct drm_device *dev);
 extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index fe061489f91f..e8e1417af3d9 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -25,6 +25,7 @@
 
 #include <linux/types.h>
 #include <linux/i2c.h>
+#include <linux/delay.h>
 
 /*
  * Unless otherwise noted, all values are from the DP 1.1a spec. Note that
@@ -311,6 +312,14 @@
 #define MODE_I2C_READ	4
 #define MODE_I2C_STOP	8
 
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ *				 aux algorithm
+ * @running: set by the algo indicating whether an i2c is ongoing or whether
+ *	     the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
 struct i2c_algo_dp_aux_data {
	bool running;
	u16 address;
@@ -322,4 +331,34 @@ struct i2c_algo_dp_aux_data {
 int
 i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
 
+
+#define DP_LINK_STATUS_SIZE	6
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			  int lane_count);
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			      int lane_count);
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+				     int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+					  int lane);
+
+#define DP_RECEIVER_CAP_SIZE	0xf
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+static inline int
+drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
+
+static inline u8
+drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
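The helpers added above let a driver decode a sink's DPCD capability block without open-coding the bit masks. A minimal, hypothetical driver-side sketch (not part of this patch): it assumes the driver has already fetched the first DP_RECEIVER_CAP_SIZE bytes of the DPCD over its own AUX-channel read path, and the function name is illustrative only.

static void example_print_dp_caps(u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	/* decoded from the DP_MAX_LINK_RATE bw code via drm_dp_bw_code_to_link_rate() */
	int max_rate = drm_dp_max_link_rate(dpcd);
	/* DP_MAX_LANE_COUNT field, masked with DP_MAX_LANE_COUNT_MASK */
	u8 max_lanes = drm_dp_max_lane_count(dpcd);

	DRM_DEBUG_KMS("DP sink: max link rate %d, max lane count %u\n",
		      max_rate, max_lanes);
}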
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index 3650d5d011ee..fce2ef3fdfff 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -61,5 +61,19 @@ extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
 extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
 extern void drm_ht_remove(struct drm_open_hash *ht);
 
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define drm_ht_insert_item_rcu drm_ht_insert_item
+#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
+#define drm_ht_remove_key_rcu drm_ht_remove_key
+#define drm_ht_remove_item_rcu drm_ht_remove_item
+#define drm_ht_find_item_rcu drm_ht_find_item
 
 #endif
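The aliases above only promise that drm_ht_find_item_rcu() is safe against concurrent modification when called under rcu_read_lock(); writers must still serialize among themselves. An illustrative lookup sketch (not part of this patch, assuming the existing drm_ht_find_item() calling convention):

static struct drm_hash_item *example_ht_lookup(struct drm_open_hash *ht,
					       unsigned long key)
{
	struct drm_hash_item *item = NULL;

	rcu_read_lock();
	if (drm_ht_find_item_rcu(ht, key, &item) != 0)
		item = NULL;	/* not found */
	/*
	 * A real caller would take a reference on the containing object here
	 * (see the kref_get_unless_zero() addition later in this diff)
	 * before dropping the RCU read lock.
	 */
	rcu_read_unlock();

	return item;
}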
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 3c13a3a4b158..808dad29607a 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -85,4 +85,30 @@ struct exynos_drm_hdmi_pdata {
	int (*get_hpd)(void);
 };
 
+/**
+ * Platform Specific Structure for DRM based IPP.
+ *
+ * @inv_pclk: if set 1. invert pixel clock
+ * @inv_vsync: if set 1. invert vsync signal for wb
+ * @inv_href: if set 1. invert href signal
+ * @inv_hsync: if set 1. invert hsync signal for wb
+ */
+struct exynos_drm_ipp_pol {
+	unsigned int inv_pclk;
+	unsigned int inv_vsync;
+	unsigned int inv_href;
+	unsigned int inv_hsync;
+};
+
+/**
+ * Platform Specific Structure for DRM based FIMC.
+ *
+ * @pol: current hardware block polarity settings.
+ * @clk_rate: current hardware clock rate.
+ */
+struct exynos_drm_fimc_pdata {
+	struct exynos_drm_ipp_pol pol;
+	int clk_rate;
+};
+
 #endif /* _EXYNOS_DRM_H_ */
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 2e37e9f02e71..6eb76a1f11ab 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,7 +3,7 @@
 #ifndef _DRM_INTEL_GTT_H
 #define _DRM_INTEL_GTT_H
 
-const struct intel_gtt {
+struct intel_gtt {
	/* Size of memory reserved for graphics by the BIOS */
	unsigned int stolen_size;
	/* Total number of gtt entries. */
@@ -17,6 +17,7 @@ const struct intel_gtt {
	unsigned int do_idle_maps : 1;
	/* Share the scratch page dma with ppgtts. */
	dma_addr_t scratch_page_dma;
+	struct page *scratch_page;
	/* for ppgtt PDE access */
	u32 __iomem *gtt;
	/* needed for ioremap in drm/i915 */
@@ -39,10 +40,6 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
 #define AGP_DCACHE_MEMORY	1
 #define AGP_PHYS_MEMORY	2
 
-/* New caching attributes for gen6/sandybridge */
-#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
-#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
-
 /* flag for GFDT type */
 #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index e8028ade567f..3cb5d848fb66 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -141,8 +141,6 @@ struct ttm_tt;
  * struct ttm_buffer_object
  *
  * @bdev: Pointer to the buffer object device structure.
- * @buffer_start: The virtual user-space start address of ttm_bo_type_user
- * buffers.
  * @type: The bo type.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
@@ -172,7 +170,6 @@ struct ttm_tt;
  * @seq_valid: The value of @val_seq is valid. This value is protected by
  * the bo_device::lru_lock.
  * @reserved: Deadlock-free lock used for synchronization state transitions.
- * @sync_obj_arg: Opaque argument to synchronization object function.
  * @sync_obj: Pointer to a synchronization object.
  * @priv_flags: Flags describing buffer object internal state.
  * @vm_rb: Rb node for the vm rb tree.
@@ -200,7 +197,6 @@ struct ttm_buffer_object {
 
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
-	unsigned long buffer_start;
	enum ttm_bo_type type;
	void (*destroy) (struct ttm_buffer_object *);
	unsigned long num_pages;
@@ -255,7 +251,6 @@ struct ttm_buffer_object {
	 * checking NULL while reserved but not holding the mentioned lock.
	 */
 
-	void *sync_obj_arg;
	void *sync_obj;
	unsigned long priv_flags;
 
@@ -342,7 +337,6 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
  * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Changes placement and caching policy of the buffer object
@@ -355,7 +349,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  */
 extern int ttm_bo_validate(struct ttm_buffer_object *bo,
				struct ttm_placement *placement,
-				bool interruptible, bool no_wait_reserve,
+				bool interruptible,
				bool no_wait_gpu);
 
 /**
@@ -429,8 +423,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
  * @no_wait: Return immediately if buffer is busy.
  *
  * Synchronizes a buffer object for CPU RW access. This means
- * blocking command submission that affects the buffer and
- * waiting for buffer idle. This lock is recursive.
+ * command submission that affects the buffer will return -EBUSY
+ * until ttm_bo_synccpu_write_release is called.
+ *
  * Returns
  * -EBUSY if the buffer is busy and no_wait is true.
  * -ERESTARTSYS if interrupted by a signal.
@@ -472,8 +467,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
  * @type: Requested type of buffer object.
  * @flags: Initial placement flags.
  * @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
  * @interruptible: If needing to sleep to wait for GPU resources,
  * sleep interruptible.
  * @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -505,7 +498,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
-			unsigned long buffer_start,
			bool interrubtible,
			struct file *persistent_swap_storage,
			size_t acc_size,
@@ -521,8 +513,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
  * @type: Requested type of buffer object.
  * @flags: Initial placement flags.
  * @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
  * @interruptible: If needing to sleep while waiting for GPU resources,
  * sleep interruptible.
  * @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -545,7 +535,6 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
				enum ttm_bo_type type,
				struct ttm_placement *placement,
				uint32_t page_alignment,
-				unsigned long buffer_start,
				bool interruptible,
				struct file *persistent_swap_storage,
				struct ttm_buffer_object **p_bo);
@@ -736,4 +725,18 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 
 extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
 
+/**
+ * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
+ *
+ * @bo: The buffer object to check.
+ *
+ * This function returns an indication if a bo is reserved or not, and should
+ * only be used to print an error when it is not from incorrect api usage, since
+ * there's no guarantee that it is the caller that is holding the reservation.
+ */
+static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
+{
+	return atomic_read(&bo->reserved);
+}
+
 #endif
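Since ttm_bo_is_reserved() is documented as a debugging aid only, a hypothetical use (not part of this patch, function name illustrative) would be limited to a sanity check in a helper that requires the reservation to be held by its caller:

static void example_helper_needs_reservation(struct ttm_buffer_object *bo)
{
	/* only complain; do not rely on this for correctness */
	WARN_ON(!ttm_bo_is_reserved(bo));
	/* ... operate on bo, trusting that the caller holds the reservation ... */
}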
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d803b92b0324..e3a43a47d78c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -394,7 +394,7 @@ struct ttm_bo_driver {
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
-		     bool no_wait_reserve, bool no_wait_gpu,
+		     bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem);
 
	/**
@@ -422,10 +422,10 @@ struct ttm_bo_driver {
	 * documentation.
	 */
 
-	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
-	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
+	bool (*sync_obj_signaled) (void *sync_obj);
+	int (*sync_obj_wait) (void *sync_obj,
			      bool lazy, bool interruptible);
-	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
+	int (*sync_obj_flush) (void *sync_obj);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);
 
@@ -521,8 +521,6 @@ struct ttm_bo_global {
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
  * @val_seq: Current validation sequence.
- * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
- * If a GPU lockup has been detected, this is forced to 0.
  * @dev_mapping: A pointer to the struct address_space representing the
  * device address space.
  * @wq: Work queue structure for the delayed delete workqueue.
@@ -556,7 +554,6 @@ struct ttm_bo_device {
	 * Protected by load / firstopen / lastclose /unload sync.
	 */
 
-	bool nice_mode;
	struct address_space *dev_mapping;
 
	/*
@@ -706,7 +703,6 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
  * @proposed_placement: Proposed new placement for the buffer object.
  * @mem: A struct ttm_mem_reg.
  * @interruptible: Sleep interruptible when sliping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Allocate memory space for the buffer object pointed to by @bo, using
@@ -722,27 +718,13 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
				struct ttm_placement *placement,
				struct ttm_mem_reg *mem,
				bool interruptible,
-				bool no_wait_reserve, bool no_wait_gpu);
+				bool no_wait_gpu);
 
 extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
 extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
 
-/**
- * ttm_bo_wait_for_cpu
- *
- * @bo: Pointer to a struct ttm_buffer_object.
- * @no_wait: Don't sleep while waiting.
- *
- * Wait until a buffer object is no longer sync'ed for CPU access.
- * Returns:
- * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-
-extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
-
 extern void ttm_bo_global_release(struct drm_global_reference *ref);
 extern int ttm_bo_global_init(struct drm_global_reference *ref);
 
@@ -918,7 +900,6 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -933,15 +914,14 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-			   bool evict, bool no_wait_reserve,
-			   bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+			   bool evict, bool no_wait_gpu,
+			   struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -956,8 +936,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict, bool no_wait_reserve,
-			      bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+			      bool evict, bool no_wait_gpu,
+			      struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_free_old_node
@@ -973,10 +953,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @sync_obj: A sync object that signals when moving is complete.
- * @sync_obj_arg: An argument to pass to the sync object idle / wait
- * functions.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -990,9 +967,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
-				     void *sync_obj_arg,
-				     bool evict, bool no_wait_reserve,
-				     bool no_wait_gpu,
+				     bool evict, bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
 /**
  * ttm_io_prot
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 1926cae373ba..547e19f06e57 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -39,8 +39,6 @@
  *
  * @head: list head for thread-private list.
  * @bo: refcounted buffer object pointer.
- * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
- * adding a new sync object.
  * @reserved: Indicates whether @bo has been reserved for validation.
  * @removed: Indicates whether @bo has been removed from lru lists.
  * @put_count: Number of outstanding references on bo::list_kref.
@@ -50,7 +48,6 @@
 struct ttm_validate_buffer {
	struct list_head head;
	struct ttm_buffer_object *bo;
-	void *new_sync_obj_arg;
	bool reserved;
	bool removed;
	int put_count;
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index d6d1da468c97..72dcbe81dd07 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -60,7 +60,6 @@ struct ttm_mem_shrink {
  * for the GPU, and this will otherwise block other workqueue tasks(?)
  * At this point we use only a single-threaded workqueue.
  * @work: The workqueue callback for the shrink queue.
- * @queue: Wait queue for processes suspended waiting for memory.
  * @lock: Lock to protect the @shrink - and the memory accounting members,
  * that is, essentially the whole structure with some exceptions.
  * @zones: Array of pointers to accounting zones.
@@ -80,7 +79,6 @@ struct ttm_mem_global {
	struct ttm_mem_shrink *shrink;
	struct workqueue_struct *swap_queue;
	struct work_struct work;
-	wait_queue_head_t queue;
	spinlock_t lock;
	struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
	unsigned int num_zones;
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index b01c563b2751..fc0cf0649901 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -40,6 +40,7 @@
 #include <linux/list.h>
 #include <drm/drm_hashtab.h>
 #include <linux/kref.h>
+#include <linux/rcupdate.h>
 #include <ttm/ttm_memory.h>
 
 /**
@@ -120,6 +121,7 @@ struct ttm_object_device;
  */
 
 struct ttm_base_object {
+	struct rcu_head rhead;
	struct drm_hash_item hash;
	enum ttm_object_type object_type;
	bool shareable;
@@ -268,4 +270,6 @@ extern struct ttm_object_device *ttm_object_device_init
 
 extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
 
+#define ttm_base_object_kfree(__object, __base)\
+	kfree_rcu(__object, __base.rhead)
 #endif
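The new rhead field and ttm_base_object_kfree() defer the actual kfree() past an RCU grace period, which is what makes the RCU hash-table lookups earlier in this diff safe to run without the removal lock. A hypothetical driver object and release path (not part of this patch; names are illustrative):

struct example_user_object {
	struct ttm_base_object base;	/* embeds the refcounted base object */
	u32 payload;
};

static void example_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct example_user_object *obj =
		container_of(base, struct example_user_object, base);

	*p_base = NULL;
	/* expands to kfree_rcu(obj, base.rhead) */
	ttm_base_object_kfree(obj, base);
}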
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index f83f793223ff..c8e1831d7572 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -17,6 +17,7 @@ enum dma_attr {
	DMA_ATTR_NON_CONSISTENT,
	DMA_ATTR_NO_KERNEL_MAPPING,
	DMA_ATTR_SKIP_CPU_SYNC,
+	DMA_ATTR_FORCE_CONTIGUOUS,
	DMA_ATTR_MAX,
 };
 
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 65af6887872f..4972e6e9ca93 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -111,4 +111,25 @@ static inline int kref_put_mutex(struct kref *kref,
	}
	return 0;
 }
+
+/**
+ * kref_get_unless_zero - Increment refcount for object unless it is zero.
+ * @kref: object.
+ *
+ * Return non-zero if the increment succeeded. Otherwise return 0.
+ *
+ * This function is intended to simplify locking around refcounting for
+ * objects that can be looked up from a lookup structure, and which are
+ * removed from that lookup structure in the object destructor.
+ * Operations on such objects require at least a read lock around
+ * lookup + kref_get, and a write lock around kref_put + remove from lookup
+ * structure. Furthermore, RCU implementations become extremely tricky.
+ * With a lookup followed by a kref_get_unless_zero *with return value check*
+ * locking in the kref_put path can be deferred to the actual removal from
+ * the lookup structure and RCU lookups become trivial.
+ */
+static inline int __must_check kref_get_unless_zero(struct kref *kref)
+{
+	return atomic_add_unless(&kref->refcount, 1, 0);
+}
 #endif /* _KREF_H_ */
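The pattern the comment describes — an RCU-protected lookup followed by kref_get_unless_zero() with its return value checked — looks roughly like the sketch below. This is not part of the patch; example_rcu_lookup() is a hypothetical lookup into whatever structure the object's destructor removes it from.

struct example_obj {
	struct kref refcount;
	/* ... linkage into an RCU-protected lookup structure ... */
};

static struct example_obj *example_lookup_get(unsigned long key)
{
	struct example_obj *obj;

	rcu_read_lock();
	obj = example_rcu_lookup(key);			/* hypothetical lookup */
	if (obj && !kref_get_unless_zero(&obj->refcount))
		obj = NULL;	/* object is already being torn down; treat as a miss */
	rcu_read_unlock();

	return obj;	/* caller owns a reference on success */
}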
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 1e3481edf062..8d1e2bbee83a 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -778,6 +778,7 @@ struct drm_event_vblank {
 #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
 #define DRM_CAP_DUMB_PREFER_SHADOW 0x4
 #define DRM_CAP_PRIME 0x5
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
 
 #define DRM_PRIME_CAP_IMPORT 0x1
 #define DRM_PRIME_CAP_EXPORT 0x2
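Userspace can probe the new capability with the existing DRM_IOCTL_GET_CAP before interpreting vblank timestamps: a value of 1 means they come from CLOCK_MONOTONIC rather than CLOCK_REALTIME. A small sketch (not part of this patch; assumes an already-open DRM fd, and the header include path varies between kernel and libdrm installs):

#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_get_cap, DRM_IOCTL_GET_CAP */

static int vblank_timestamps_are_monotonic(int drm_fd)
{
	struct drm_get_cap cap = { .capability = DRM_CAP_TIMESTAMP_MONOTONIC };

	if (ioctl(drm_fd, DRM_IOCTL_GET_CAP, &cap) != 0)
		return 0;	/* older kernel: assume CLOCK_REALTIME */
	return cap.value == 1;
}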
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index c0494d586e23..e7f52c334005 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -133,17 +133,26 @@ struct drm_exynos_g2d_cmd {
	__u32	data;
 };
 
+enum drm_exynos_g2d_buf_type {
+	G2D_BUF_USERPTR = 1 << 31,
+};
+
 enum drm_exynos_g2d_event_type {
	G2D_EVENT_NOT,
	G2D_EVENT_NONSTOP,
	G2D_EVENT_STOP,		/* not yet */
 };
 
+struct drm_exynos_g2d_userptr {
+	unsigned long userptr;
+	unsigned long size;
+};
+
 struct drm_exynos_g2d_set_cmdlist {
	__u64	cmd;
-	__u64	cmd_gem;
+	__u64	cmd_buf;
	__u32	cmd_nr;
-	__u32	cmd_gem_nr;
+	__u32	cmd_buf_nr;
 
	/* for g2d event */
	__u64	event_type;
@@ -154,6 +163,170 @@ struct drm_exynos_g2d_exec {
	__u64	async;
 };
 
+enum drm_exynos_ops_id {
+	EXYNOS_DRM_OPS_SRC,
+	EXYNOS_DRM_OPS_DST,
+	EXYNOS_DRM_OPS_MAX,
+};
+
+struct drm_exynos_sz {
+	__u32	hsize;
+	__u32	vsize;
+};
+
+struct drm_exynos_pos {
+	__u32	x;
+	__u32	y;
+	__u32	w;
+	__u32	h;
+};
+
+enum drm_exynos_flip {
+	EXYNOS_DRM_FLIP_NONE = (0 << 0),
+	EXYNOS_DRM_FLIP_VERTICAL = (1 << 0),
+	EXYNOS_DRM_FLIP_HORIZONTAL = (1 << 1),
+};
+
+enum drm_exynos_degree {
+	EXYNOS_DRM_DEGREE_0,
+	EXYNOS_DRM_DEGREE_90,
+	EXYNOS_DRM_DEGREE_180,
+	EXYNOS_DRM_DEGREE_270,
+};
+
+enum drm_exynos_planer {
+	EXYNOS_DRM_PLANAR_Y,
+	EXYNOS_DRM_PLANAR_CB,
+	EXYNOS_DRM_PLANAR_CR,
+	EXYNOS_DRM_PLANAR_MAX,
+};
+
+/**
+ * A structure for ipp supported property list.
+ *
+ * @version: version of this structure.
+ * @ipp_id: id of ipp driver.
+ * @count: count of ipp driver.
+ * @writeback: flag of writeback supporting.
+ * @flip: flag of flip supporting.
+ * @degree: flag of degree information.
+ * @csc: flag of csc supporting.
+ * @crop: flag of crop supporting.
+ * @scale: flag of scale supporting.
+ * @refresh_min: min hz of refresh.
+ * @refresh_max: max hz of refresh.
+ * @crop_min: crop min resolution.
+ * @crop_max: crop max resolution.
+ * @scale_min: scale min resolution.
+ * @scale_max: scale max resolution.
+ */
+struct drm_exynos_ipp_prop_list {
+	__u32	version;
+	__u32	ipp_id;
+	__u32	count;
+	__u32	writeback;
+	__u32	flip;
+	__u32	degree;
+	__u32	csc;
+	__u32	crop;
+	__u32	scale;
+	__u32	refresh_min;
+	__u32	refresh_max;
+	__u32	reserved;
+	struct drm_exynos_sz	crop_min;
+	struct drm_exynos_sz	crop_max;
+	struct drm_exynos_sz	scale_min;
+	struct drm_exynos_sz	scale_max;
+};
+
+/**
+ * A structure for ipp config.
+ *
+ * @ops_id: property of operation directions.
+ * @flip: property of mirror, flip.
+ * @degree: property of rotation degree.
+ * @fmt: property of image format.
+ * @sz: property of image size.
+ * @pos: property of image position(src-cropped,dst-scaler).
+ */
+struct drm_exynos_ipp_config {
+	enum drm_exynos_ops_id	ops_id;
+	enum drm_exynos_flip	flip;
+	enum drm_exynos_degree	degree;
+	__u32	fmt;
+	struct drm_exynos_sz	sz;
+	struct drm_exynos_pos	pos;
+};
+
+enum drm_exynos_ipp_cmd {
+	IPP_CMD_NONE,
+	IPP_CMD_M2M,
+	IPP_CMD_WB,
+	IPP_CMD_OUTPUT,
+	IPP_CMD_MAX,
+};
+
+/**
+ * A structure for ipp property.
+ *
+ * @config: source, destination config.
+ * @cmd: definition of command.
+ * @ipp_id: id of ipp driver.
+ * @prop_id: id of property.
+ * @refresh_rate: refresh rate.
+ */
+struct drm_exynos_ipp_property {
+	struct drm_exynos_ipp_config	config[EXYNOS_DRM_OPS_MAX];
+	enum drm_exynos_ipp_cmd	cmd;
+	__u32	ipp_id;
+	__u32	prop_id;
+	__u32	refresh_rate;
+};
+
+enum drm_exynos_ipp_buf_type {
+	IPP_BUF_ENQUEUE,
+	IPP_BUF_DEQUEUE,
+};
+
+/**
+ * A structure for ipp buffer operations.
+ *
+ * @ops_id: operation directions.
+ * @buf_type: definition of buffer.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @handle: Y, Cb, Cr each planar handle.
+ * @user_data: user data.
+ */
+struct drm_exynos_ipp_queue_buf {
+	enum drm_exynos_ops_id	ops_id;
+	enum drm_exynos_ipp_buf_type	buf_type;
+	__u32	prop_id;
+	__u32	buf_id;
+	__u32	handle[EXYNOS_DRM_PLANAR_MAX];
+	__u32	reserved;
+	__u64	user_data;
+};
+
+enum drm_exynos_ipp_ctrl {
+	IPP_CTRL_PLAY,
+	IPP_CTRL_STOP,
+	IPP_CTRL_PAUSE,
+	IPP_CTRL_RESUME,
+	IPP_CTRL_MAX,
+};
+
+/**
+ * A structure for ipp start/stop operations.
+ *
+ * @prop_id: id of property.
+ * @ctrl: definition of control.
+ */
+struct drm_exynos_ipp_cmd_ctrl {
+	__u32	prop_id;
+	enum drm_exynos_ipp_ctrl	ctrl;
+};
+
 #define DRM_EXYNOS_GEM_CREATE		0x00
 #define DRM_EXYNOS_GEM_MAP_OFFSET	0x01
 #define DRM_EXYNOS_GEM_MMAP		0x02
@@ -166,6 +339,12 @@ struct drm_exynos_g2d_exec {
 #define DRM_EXYNOS_G2D_SET_CMDLIST	0x21
 #define DRM_EXYNOS_G2D_EXEC		0x22
 
+/* IPP - Image Post Processing */
+#define DRM_EXYNOS_IPP_GET_PROPERTY	0x30
+#define DRM_EXYNOS_IPP_SET_PROPERTY	0x31
+#define DRM_EXYNOS_IPP_QUEUE_BUF	0x32
+#define DRM_EXYNOS_IPP_CMD_CTRL	0x33
+
 #define DRM_IOCTL_EXYNOS_GEM_CREATE		DRM_IOWR(DRM_COMMAND_BASE + \
		DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
 
@@ -188,8 +367,18 @@ struct drm_exynos_g2d_exec {
 #define DRM_IOCTL_EXYNOS_G2D_EXEC		DRM_IOWR(DRM_COMMAND_BASE + \
		DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
 
+#define DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_GET_PROPERTY, struct drm_exynos_ipp_prop_list)
+#define DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_SET_PROPERTY, struct drm_exynos_ipp_property)
+#define DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_QUEUE_BUF, struct drm_exynos_ipp_queue_buf)
+#define DRM_IOCTL_EXYNOS_IPP_CMD_CTRL		DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_CMD_CTRL, struct drm_exynos_ipp_cmd_ctrl)
+
 /* EXYNOS specific events */
 #define DRM_EXYNOS_G2D_EVENT		0x80000000
+#define DRM_EXYNOS_IPP_EVENT		0x80000001
 
 struct drm_exynos_g2d_event {
	struct drm_event	base;
@@ -200,4 +389,14 @@ struct drm_exynos_g2d_event {
	__u32	reserved;
 };
 
+struct drm_exynos_ipp_event {
+	struct drm_event	base;
+	__u64	user_data;
+	__u32	tv_sec;
+	__u32	tv_usec;
+	__u32	prop_id;
+	__u32	reserved;
+	__u32	buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
 #endif /* _UAPI_EXYNOS_DRM_H_ */
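For reference, driving the new IPP interface from userspace follows the usual DRM ioctl pattern: configure a property with DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, queue buffers with DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, then start processing with DRM_IOCTL_EXYNOS_IPP_CMD_CTRL. A minimal sketch of that last step (not part of this patch; it assumes an open Exynos DRM fd and a prop_id returned by the SET_PROPERTY call, and the uapi header include path may differ on your system):

#include <sys/ioctl.h>
#include <drm/exynos_drm.h>

static int example_ipp_start(int drm_fd, __u32 prop_id)
{
	struct drm_exynos_ipp_cmd_ctrl ctrl = {
		.prop_id = prop_id,
		.ctrl = IPP_CTRL_PLAY,	/* start processing */
	};

	return ioctl(drm_fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
}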
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 4322b1e7d2ed..b746a3cf5fa9 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -306,6 +306,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_SEMAPHORES	 20
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
 #define I915_PARAM_RSVD_FOR_FUTURE_USE	 22
+#define I915_PARAM_HAS_SECURE_BATCHES	 23
 
 typedef struct drm_i915_getparam {
	int param;
@@ -671,6 +672,11 @@ struct drm_i915_gem_execbuffer2 {
 /** Resets the SO write offset registers for transform feedback on gen7. */
 #define I915_EXEC_GEN7_SOL_RESET	(1<<8)
 
+/** Request a privileged ("secure") batch buffer. Note only available for
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE		(1<<9)
+
 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 4766c0f6a838..eeda91774c8a 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -913,9 +913,11 @@ struct drm_radeon_gem_va {
 /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
 #define RADEON_CS_KEEP_TILING_FLAGS 0x01
 #define RADEON_CS_USE_VM            0x02
+#define RADEON_CS_END_OF_FRAME      0x04 /* a hint from userspace which CS is the last one */
 /* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
 #define RADEON_CS_RING_GFX          0
 #define RADEON_CS_RING_COMPUTE      1
+#define RADEON_CS_RING_DMA          2
 /* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
 /* 0 = normal, + = higher priority, - = lower priority */
 
@@ -966,6 +968,10 @@ struct drm_radeon_cs {
 #define RADEON_INFO_MAX_PIPES		0x10
 /* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
 #define RADEON_INFO_TIMESTAMP		0x11
+/* max shader engines (SE) - needed for geometry shaders, etc. */
+#define RADEON_INFO_MAX_SE		0x12
+/* max SH per SE */
+#define RADEON_INFO_MAX_SH_PER_SE	0x13
 
 struct drm_radeon_info {
	uint32_t		request;