Diffstat (limited to 'include/drm')
 include/drm/drmP.h              | 178
 include/drm/drm_edid.h          |  92
 include/drm/drm_hashtab.h       |   2
 include/drm/drm_memory_debug.h  | 309
 include/drm/drm_mm.h            |  99
 include/drm/drm_pciids.h        |   9
 include/drm/radeon_drm.h        | 130
 include/drm/ttm/ttm_bo_api.h    | 618
 include/drm/ttm/ttm_bo_driver.h | 867
 include/drm/ttm/ttm_memory.h    | 153
 include/drm/ttm/ttm_module.h    |  58
 include/drm/ttm/ttm_placement.h |  92
12 files changed, 2140 insertions(+), 467 deletions(-)
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index b84d8ae35e6f..45b67d9c39c1 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -34,9 +34,6 @@
 #ifndef _DRM_P_H_
 #define _DRM_P_H_
 
-/* If you want the memory alloc debug functionality, change define below */
-/* #define DEBUG_MEMORY */
-
 #ifdef __KERNEL__
 #ifdef __alpha__
 /* add include of current.h so that "current" is defined
@@ -86,7 +83,17 @@ struct drm_device;
 
 #include "drm_os_linux.h"
 #include "drm_hashtab.h"
+#include "drm_mm.h"
+
+#define DRM_UT_CORE		0x01
+#define DRM_UT_DRIVER		0x02
+#define DRM_UT_KMS		0x04
+#define DRM_UT_MODE		0x08
 
+extern void drm_ut_debug_printk(unsigned int request_level,
+				const char *prefix,
+				const char *function_name,
+				const char *format, ...);
 /***********************************************************************/
 /** \name DRM template customization defaults */
 /*@{*/
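
(Editor's note, not part of the patch: drm_ut_debug_printk() is the single
backend behind the new category macros further down. Judging from the
DRM_UT_* bits, it presumably tests the requested category against a global
debug bitmask before printing; a minimal sketch of that gating, assuming a
module-wide "drm_debug" mask, could look like this:)

	/* Sketch only; assumes a global 'drm_debug' bitmask parameter. */
	void drm_ut_debug_printk(unsigned int request_level, const char *prefix,
				 const char *function_name, const char *format, ...)
	{
		va_list args;

		if (drm_debug & request_level) {	/* category enabled? */
			if (function_name)
				printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
			va_start(args, format);
			vprintk(format, args);
			va_end(args);
		}
	}
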
@@ -123,31 +130,6 @@ struct drm_device;
 
 #define DRM_FLAG_DEBUG	0x01
 
-#define DRM_MEM_DMA       0
-#define DRM_MEM_SAREA     1
-#define DRM_MEM_DRIVER    2
-#define DRM_MEM_MAGIC     3
-#define DRM_MEM_IOCTLS    4
-#define DRM_MEM_MAPS      5
-#define DRM_MEM_VMAS      6
-#define DRM_MEM_BUFS      7
-#define DRM_MEM_SEGS      8
-#define DRM_MEM_PAGES     9
-#define DRM_MEM_FILES    10
-#define DRM_MEM_QUEUES   11
-#define DRM_MEM_CMDS     12
-#define DRM_MEM_MAPPINGS 13
-#define DRM_MEM_BUFLISTS 14
-#define DRM_MEM_AGPLISTS 15
-#define DRM_MEM_TOTALAGP 16
-#define DRM_MEM_BOUNDAGP 17
-#define DRM_MEM_CTXBITMAP 18
-#define DRM_MEM_STUB     19
-#define DRM_MEM_SGLISTS  20
-#define DRM_MEM_CTXLIST  21
-#define DRM_MEM_MM       22
-#define DRM_MEM_HASHTAB  23
-
 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
 #define DRM_MAP_HASH_OFFSET 0x10000000
 
@@ -186,15 +168,57 @@ struct drm_device;
  * \param arg arguments
  */
 #if DRM_DEBUG_CODE
-#define DRM_DEBUG(fmt, arg...)					\
+#define DRM_DEBUG(fmt, args...)					\
+	do {							\
+		drm_ut_debug_printk(DRM_UT_CORE, DRM_NAME,	\
+					__func__, fmt, ##args);	\
+	} while (0)
+
+#define DRM_DEBUG_DRIVER(prefix, fmt, args...)			\
+	do {							\
+		drm_ut_debug_printk(DRM_UT_DRIVER, prefix,	\
+					__func__, fmt, ##args);	\
+	} while (0)
+#define DRM_DEBUG_KMS(prefix, fmt, args...)			\
+	do {							\
+		drm_ut_debug_printk(DRM_UT_KMS, prefix,		\
+					__func__, fmt, ##args);	\
+	} while (0)
+#define DRM_DEBUG_MODE(prefix, fmt, args...)			\
+	do {							\
+		drm_ut_debug_printk(DRM_UT_MODE, prefix,	\
+					__func__, fmt, ##args);	\
+	} while (0)
+#define DRM_LOG(fmt, args...)					\
+	do {							\
+		drm_ut_debug_printk(DRM_UT_CORE, NULL,		\
+					NULL, fmt, ##args);	\
+	} while (0)
+#define DRM_LOG_KMS(fmt, args...)				\
 	do {							\
-		if ( drm_debug )				\
-			printk(KERN_DEBUG			\
-			       "[" DRM_NAME ":%s] " fmt ,	\
-			       __func__ , ##arg);		\
+		drm_ut_debug_printk(DRM_UT_KMS, NULL,		\
+					NULL, fmt, ##args);	\
+	} while (0)
+#define DRM_LOG_MODE(fmt, args...)				\
+	do {							\
+		drm_ut_debug_printk(DRM_UT_MODE, NULL,		\
+					NULL, fmt, ##args);	\
+	} while (0)
+#define DRM_LOG_DRIVER(fmt, args...)				\
+	do {							\
+		drm_ut_debug_printk(DRM_UT_DRIVER, NULL,	\
+					NULL, fmt, ##args);	\
 	} while (0)
 #else
+#define DRM_DEBUG_DRIVER(prefix, fmt, args...) do { } while (0)
+#define DRM_DEBUG_KMS(prefix, fmt, args...)	do { } while (0)
+#define DRM_DEBUG_MODE(prefix, fmt, args...)	do { } while (0)
 #define DRM_DEBUG(fmt, arg...)		 do { } while (0)
+#define DRM_LOG(fmt, arg...)		do { } while (0)
+#define DRM_LOG_KMS(fmt, args...) do { } while (0)
+#define DRM_LOG_MODE(fmt, arg...) do { } while (0)
+#define DRM_LOG_DRIVER(fmt, arg...) do { } while (0)
+
 #endif
 
 #define DRM_PROC_LIMIT (PAGE_SIZE-80)
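
(Editor's note: call sites pick the macro matching the message category, so
users can enable one class of output at a time. The prefix string and the
values below are hypothetical:)

	DRM_DEBUG("core: vblank on crtc %d\n", crtc);		/* DRM_UT_CORE */
	DRM_DEBUG_KMS("radeon", "mode %dx%d\n", w, h);		/* DRM_UT_KMS */
	DRM_LOG_DRIVER("ring init done\n");	/* no prefix/function name */
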
@@ -237,15 +261,15 @@ struct drm_device;
  * \param dev DRM device.
  * \param filp file pointer of the caller.
  */
-#define LOCK_TEST_WITH_RETURN( dev, file_priv )			\
+#define LOCK_TEST_WITH_RETURN( dev, _file_priv )			\
 do {									\
-	if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock) ||\
-	    file_priv->master->lock.file_priv != file_priv) {		\
+	if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) ||\
+	    _file_priv->master->lock.file_priv != _file_priv) {		\
 		DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
-			   __func__, _DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock),\
-			   file_priv->master->lock.file_priv, file_priv);\
+			   __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
+			   _file_priv->master->lock.file_priv, _file_priv);\
 		return -EINVAL;						\
 	}								\
 } while (0)
 
 /**
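
(Editor's note: the underscore-prefixed parameter is a macro-hygiene rename,
preventing collisions with same-named identifiers at the call site. Typical
use is unchanged; the handler below is invented for illustration:)

	static int foo_dma_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
	{
		LOCK_TEST_WITH_RETURN(dev, file_priv);	/* -EINVAL if lock not held */
		/* ... hardware submission ... */
		return 0;
	}
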
@@ -502,26 +526,6 @@ struct drm_sigdata {
 };
 
 
-/*
- * Generic memory manager structs
- */
-
-struct drm_mm_node {
-	struct list_head fl_entry;
-	struct list_head ml_entry;
-	int free;
-	unsigned long start;
-	unsigned long size;
-	struct drm_mm *mm;
-	void *private;
-};
-
-struct drm_mm {
-	struct list_head fl_entry;
-	struct list_head ml_entry;
-};
-
-
 /**
  * Kernel side of a mapping
  */
@@ -1385,22 +1389,6 @@ extern char *drm_get_connector_status_name(enum drm_connector_status status);
 extern int drm_sysfs_connector_add(struct drm_connector *connector);
 extern void drm_sysfs_connector_remove(struct drm_connector *connector);
 
-/*
- * Basic memory manager support (drm_mm.c)
- */
-extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
-					    unsigned long size,
-					    unsigned alignment);
-extern void drm_mm_put_block(struct drm_mm_node * cur);
-extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
-					      unsigned alignment, int best_match);
-extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
-extern void drm_mm_takedown(struct drm_mm *mm);
-extern int drm_mm_clean(struct drm_mm *mm);
-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
-extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
-
 /* Graphics Execution Manager library functions (drm_gem.c) */
 int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
@@ -1501,39 +1489,17 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
 {
 }
 
-#ifndef DEBUG_MEMORY
-/** Wrapper around kmalloc() */
-static __inline__ void *drm_alloc(size_t size, int area)
-{
-	return kmalloc(size, GFP_KERNEL);
-}
-
-/** Wrapper around kfree() */
-static __inline__ void drm_free(void *pt, size_t size, int area)
-{
-	kfree(pt);
-}
-
-/** Wrapper around kcalloc() */
-static __inline__ void *drm_calloc(size_t nmemb, size_t size, int area)
-{
-	return kcalloc(nmemb, size, GFP_KERNEL);
-}
 
 static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
 {
-	u8 *addr;
-
-	if (size <= PAGE_SIZE)
+	if (size * nmemb <= PAGE_SIZE)
 	    return kcalloc(nmemb, size, GFP_KERNEL);
 
-	addr = vmalloc(nmemb * size);
-	if (!addr)
+	if (size != 0 && nmemb > ULONG_MAX / size)
 		return NULL;
 
-	memset(addr, 0, nmemb * size);
-
-	return addr;
+	return __vmalloc(size * nmemb,
+			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
 }
 
 static __inline void drm_free_large(void *ptr)
@@ -1543,12 +1509,6 @@ static __inline void drm_free_large(void *ptr)
 
 	vfree(ptr);
 }
-#else
-extern void *drm_alloc(size_t size, int area);
-extern void drm_free(void *pt, size_t size, int area);
-extern void *drm_calloc(size_t nmemb, size_t size, int area);
-#endif
-
 /*@}*/
 
 #endif				/* __KERNEL__ */
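
(Editor's note on the new guard: kcalloc() performs its own multiplication
overflow check, so the small-allocation path is safe even though the product
size * nmemb is computed first; the hand-rolled division test protects the
vmalloc path. A user-space illustration of the wraparound it catches:)

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long nmemb = ULONG_MAX / 4 + 2, size = 4;

		/* The product wraps to 4, which a naive "<= PAGE_SIZE"
		 * comparison would happily accept. */
		printf("wrapped product: %lu\n", nmemb * size);
		printf("guard trips: %d\n",
		       size != 0 && nmemb > ULONG_MAX / size);
		return 0;
	}
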
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index a11cc9d32591..c263e4d71754 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -28,53 +28,49 @@
 #define EDID_LENGTH 128
 #define DDC_ADDR 0x50
 
-#ifdef BIG_ENDIAN
-#error "EDID structure is little endian, need big endian versions"
-#else
-
 struct est_timings {
 	u8 t1;
 	u8 t2;
 	u8 mfg_rsvd;
 } __attribute__((packed));
 
+/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
+#define EDID_TIMING_ASPECT_SHIFT 0
+#define EDID_TIMING_ASPECT_MASK  (0x3 << EDID_TIMING_ASPECT_SHIFT)
+
+/* need to add 60 */
+#define EDID_TIMING_VFREQ_SHIFT  2
+#define EDID_TIMING_VFREQ_MASK   (0x3f << EDID_TIMING_VFREQ_SHIFT)
+
 struct std_timing {
 	u8 hsize; /* need to multiply by 8 then add 248 */
-	u8 vfreq:6; /* need to add 60 */
-	u8 aspect_ratio:2; /* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
+	u8 vfreq_aspect;
 } __attribute__((packed));
 
+#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 6)
+#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 5)
+#define DRM_EDID_PT_SEPARATE_SYNC  (3 << 3)
+#define DRM_EDID_PT_STEREO         (1 << 2)
+#define DRM_EDID_PT_INTERLACED     (1 << 1)
+
 /* If detailed data is pixel timing */
 struct detailed_pixel_timing {
 	u8 hactive_lo;
 	u8 hblank_lo;
-	u8 hblank_hi:4;
-	u8 hactive_hi:4;
+	u8 hactive_hblank_hi;
 	u8 vactive_lo;
 	u8 vblank_lo;
-	u8 vblank_hi:4;
-	u8 vactive_hi:4;
+	u8 vactive_vblank_hi;
 	u8 hsync_offset_lo;
 	u8 hsync_pulse_width_lo;
-	u8 vsync_pulse_width_lo:4;
-	u8 vsync_offset_lo:4;
-	u8 vsync_pulse_width_hi:2;
-	u8 vsync_offset_hi:2;
-	u8 hsync_pulse_width_hi:2;
-	u8 hsync_offset_hi:2;
+	u8 vsync_offset_pulse_width_lo;
+	u8 hsync_vsync_offset_pulse_width_hi;
 	u8 width_mm_lo;
 	u8 height_mm_lo;
-	u8 height_mm_hi:4;
-	u8 width_mm_hi:4;
+	u8 width_height_mm_hi;
 	u8 hborder;
 	u8 vborder;
-	u8 unknown0:1;
-	u8 hsync_positive:1;
-	u8 vsync_positive:1;
-	u8 separate_sync:2;
-	u8 stereo:1;
-	u8 unknown6:1;
-	u8 interlaced:1;
+	u8 misc;
 } __attribute__((packed));
 
 /* If it's not pixel timing, it'll be one of the below */
@@ -88,18 +84,16 @@ struct detailed_data_monitor_range {
 	u8 min_hfreq_khz;
 	u8 max_hfreq_khz;
 	u8 pixel_clock_mhz; /* need to multiply by 10 */
-	u16 sec_gtf_toggle; /* A000=use above, 20=use below */ /* FIXME: byte order */
+	__le16 sec_gtf_toggle; /* A000=use above, 20=use below */
 	u8 hfreq_start_khz; /* need to multiply by 2 */
 	u8 c; /* need to divide by 2 */
-	u16 m; /* FIXME: byte order */
+	__le16 m;
 	u8 k;
 	u8 j; /* need to divide by 2 */
 } __attribute__((packed));
 
 struct detailed_data_wpindex {
-	u8 white_y_lo:2;
-	u8 white_x_lo:2;
-	u8 pad:4;
+	u8 white_xy_lo; /* Upper 2 bits each */
 	u8 white_x_hi;
 	u8 white_y_hi;
 	u8 gamma; /* need to divide by 100 then add 1 */
@@ -134,13 +128,29 @@ struct detailed_non_pixel {
 #define EDID_DETAIL_MONITOR_SERIAL 0xff
 
 struct detailed_timing {
-	u16 pixel_clock; /* need to multiply by 10 KHz */ /* FIXME: byte order */
+	__le16 pixel_clock; /* need to multiply by 10 KHz */
 	union {
 		struct detailed_pixel_timing pixel_data;
 		struct detailed_non_pixel other_data;
 	} data;
 } __attribute__((packed));
 
+#define DRM_EDID_INPUT_SERRATION_VSYNC  (1 << 7)
+#define DRM_EDID_INPUT_SYNC_ON_GREEN    (1 << 5)
+#define DRM_EDID_INPUT_COMPOSITE_SYNC   (1 << 4)
+#define DRM_EDID_INPUT_SEPARATE_SYNCS   (1 << 3)
+#define DRM_EDID_INPUT_BLANK_TO_BLACK   (1 << 2)
+#define DRM_EDID_INPUT_VIDEO_LEVEL      (3 << 1)
+#define DRM_EDID_INPUT_DIGITAL          (1 << 0) /* bits above must be zero if set */
+
+#define DRM_EDID_FEATURE_DEFAULT_GTF      (1 << 7)
+#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 6)
+#define DRM_EDID_FEATURE_STANDARD_COLOR   (1 << 5)
+#define DRM_EDID_FEATURE_DISPLAY_TYPE     (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
+#define DRM_EDID_FEATURE_PM_ACTIVE_OFF    (1 << 2)
+#define DRM_EDID_FEATURE_PM_SUSPEND       (1 << 1)
+#define DRM_EDID_FEATURE_PM_STANDBY       (1 << 0)
+
 struct edid {
 	u8 header[8];
 	/* Vendor & product info */
@@ -153,25 +163,11 @@ struct edid {
 	u8 version;
 	u8 revision;
 	/* Display info: */
-	/* input definition */
-	u8 serration_vsync:1;
-	u8 sync_on_green:1;
-	u8 composite_sync:1;
-	u8 separate_syncs:1;
-	u8 blank_to_black:1;
-	u8 video_level:2;
-	u8 digital:1; /* bits below must be zero if set */
+	u8 input;
 	u8 width_cm;
 	u8 height_cm;
 	u8 gamma;
-	/* feature support */
-	u8 default_gtf:1;
-	u8 preferred_timing:1;
-	u8 standard_color:1;
-	u8 display_type:2; /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
-	u8 pm_active_off:1;
-	u8 pm_suspend:1;
-	u8 pm_standby:1;
+	u8 features;
 	/* Color characteristics */
 	u8 red_green_lo;
 	u8 black_white_lo;
@@ -195,8 +191,6 @@ struct edid {
 	u8 checksum;
 } __attribute__((packed));
 
-#endif /* little endian structs */
-
 #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
 
 #endif /* __DRM_EDID_H__ */
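
(Editor's note: with the bitfields gone, consumers extract subfields by
masking. For instance, the horizontal active count of a detailed pixel
timing splits across hactive_lo and the top nibble of hactive_hblank_hi;
a decode sketch, helper name invented:)

	static unsigned int edid_hactive(const struct detailed_pixel_timing *pt)
	{
		return ((pt->hactive_hblank_hi & 0xf0) << 4) | pt->hactive_lo;
	}
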
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index cd2b189e1be6..0af087a4d3b3 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -35,6 +35,8 @@
 #ifndef DRM_HASHTAB_H
 #define DRM_HASHTAB_H
 
+#include <linux/list.h>
+
 #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
 
 struct drm_hash_item {
diff --git a/include/drm/drm_memory_debug.h b/include/drm/drm_memory_debug.h
deleted file mode 100644
index 6463271deea8..000000000000
--- a/include/drm/drm_memory_debug.h
+++ /dev/null
@@ -1,309 +0,0 @@
-/**
- * \file drm_memory_debug.h
- * Memory management wrappers for DRM.
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-typedef struct drm_mem_stats {
-	const char *name;
-	int succeed_count;
-	int free_count;
-	int fail_count;
-	unsigned long bytes_allocated;
-	unsigned long bytes_freed;
-} drm_mem_stats_t;
-
-static DEFINE_SPINLOCK(drm_mem_lock);
-static unsigned long drm_ram_available = 0;	/* In pages */
-static unsigned long drm_ram_used = 0;
-static drm_mem_stats_t drm_mem_stats[] =
-{
-	[DRM_MEM_DMA] = {"dmabufs"},
-	[DRM_MEM_SAREA] = {"sareas"},
-	[DRM_MEM_DRIVER] = {"driver"},
-	[DRM_MEM_MAGIC] = {"magic"},
-	[DRM_MEM_IOCTLS] = {"ioctltab"},
-	[DRM_MEM_MAPS] = {"maplist"},
-	[DRM_MEM_VMAS] = {"vmalist"},
-	[DRM_MEM_BUFS] = {"buflist"},
-	[DRM_MEM_SEGS] = {"seglist"},
-	[DRM_MEM_PAGES] = {"pagelist"},
-	[DRM_MEM_FILES] = {"files"},
-	[DRM_MEM_QUEUES] = {"queues"},
-	[DRM_MEM_CMDS] = {"commands"},
-	[DRM_MEM_MAPPINGS] = {"mappings"},
-	[DRM_MEM_BUFLISTS] = {"buflists"},
-	[DRM_MEM_AGPLISTS] = {"agplist"},
-	[DRM_MEM_SGLISTS] = {"sglist"},
-	[DRM_MEM_TOTALAGP] = {"totalagp"},
-	[DRM_MEM_BOUNDAGP] = {"boundagp"},
-	[DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
-	[DRM_MEM_CTXLIST] = {"ctxlist"},
-	[DRM_MEM_STUB] = {"stub"},
-	{NULL, 0,}		/* Last entry must be null */
-};
-
-void drm_mem_init (void) {
-	drm_mem_stats_t *mem;
-	struct sysinfo si;
-
-	for (mem = drm_mem_stats; mem->name; ++mem) {
-		mem->succeed_count = 0;
-		mem->free_count = 0;
-		mem->fail_count = 0;
-		mem->bytes_allocated = 0;
-		mem->bytes_freed = 0;
-	}
-
-	si_meminfo(&si);
-	drm_ram_available = si.totalram;
-	drm_ram_used = 0;
-}
-
-/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
-
-static int drm__mem_info (char *buf, char **start, off_t offset,
-			  int request, int *eof, void *data) {
-	drm_mem_stats_t *pt;
-	int len = 0;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*eof = 0;
-	*start = &buf[offset];
-
-	DRM_PROC_PRINT("		  total counts			"
-		       " |    outstanding  \n");
-	DRM_PROC_PRINT("type	   alloc freed fail	bytes	   freed"
-		       " | allocs      bytes\n\n");
-	DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
-		       "system", 0, 0, 0,
-		       drm_ram_available << (PAGE_SHIFT - 10));
-	DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
-		       "locked", 0, 0, 0, drm_ram_used >> 10);
-	DRM_PROC_PRINT("\n");
-	for (pt = drm_mem_stats; pt->name; pt++) {
-		DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
-			       pt->name,
-			       pt->succeed_count,
-			       pt->free_count,
-			       pt->fail_count,
-			       pt->bytes_allocated,
-			       pt->bytes_freed,
-			       pt->succeed_count - pt->free_count,
-			       (long)pt->bytes_allocated
-			       - (long)pt->bytes_freed);
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-int drm_mem_info (char *buf, char **start, off_t offset,
-		  int len, int *eof, void *data) {
-	int ret;
-
-	spin_lock(&drm_mem_lock);
-	ret = drm__mem_info (buf, start, offset, len, eof, data);
-	spin_unlock(&drm_mem_lock);
-	return ret;
-}
-
-void *drm_alloc (size_t size, int area) {
-	void *pt;
-
-	if (!size) {
-		DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
-		return NULL;
-	}
-
-	if (!(pt = kmalloc(size, GFP_KERNEL))) {
-		spin_lock(&drm_mem_lock);
-		++drm_mem_stats[area].fail_count;
-		spin_unlock(&drm_mem_lock);
-		return NULL;
-	}
-	spin_lock(&drm_mem_lock);
-	++drm_mem_stats[area].succeed_count;
-	drm_mem_stats[area].bytes_allocated += size;
-	spin_unlock(&drm_mem_lock);
-	return pt;
-}
-
-void *drm_calloc (size_t nmemb, size_t size, int area) {
-	void *addr;
-
-	addr = drm_alloc (nmemb * size, area);
-	if (addr != NULL)
-		memset((void *)addr, 0, size * nmemb);
-
-	return addr;
-}
-
-void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
-	void *pt;
-
-	if (!(pt = drm_alloc (size, area)))
-		return NULL;
-	if (oldpt && oldsize) {
-		memcpy(pt, oldpt, oldsize);
-		drm_free (oldpt, oldsize, area);
-	}
-	return pt;
-}
-
-void drm_free (void *pt, size_t size, int area) {
-	int alloc_count;
-	int free_count;
-
-	if (!pt)
-		DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
-	else
-		kfree(pt);
-	spin_lock(&drm_mem_lock);
-	drm_mem_stats[area].bytes_freed += size;
-	free_count = ++drm_mem_stats[area].free_count;
-	alloc_count = drm_mem_stats[area].succeed_count;
-	spin_unlock(&drm_mem_lock);
-	if (free_count > alloc_count) {
-		DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
-			      free_count, alloc_count);
-	}
-}
-
-#if __OS_HAS_AGP
-
-DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
-	DRM_AGP_MEM *handle;
-
-	if (!pages) {
-		DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
-		return NULL;
-	}
-
-	if ((handle = drm_agp_allocate_memory (pages, type))) {
-		spin_lock(&drm_mem_lock);
-		++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
-		drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
-		    += pages << PAGE_SHIFT;
-		spin_unlock(&drm_mem_lock);
-		return handle;
-	}
-	spin_lock(&drm_mem_lock);
-	++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
-	spin_unlock(&drm_mem_lock);
-	return NULL;
-}
-
-int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
-	int alloc_count;
-	int free_count;
-	int retval = -EINVAL;
-
-	if (!handle) {
-		DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
-			      "Attempt to free NULL AGP handle\n");
-		return retval;
-	}
-
-	if (drm_agp_free_memory (handle)) {
-		spin_lock(&drm_mem_lock);
-		free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
-		alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
-		drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
-		    += pages << PAGE_SHIFT;
-		spin_unlock(&drm_mem_lock);
-		if (free_count > alloc_count) {
-			DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
-				      "Excess frees: %d frees, %d allocs\n",
-				      free_count, alloc_count);
-		}
-		return 0;
-	}
-	return retval;
-}
-
-int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
-	int retcode = -EINVAL;
-
-	if (!handle) {
-		DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
-			      "Attempt to bind NULL AGP handle\n");
-		return retcode;
-	}
-
-	if (!(retcode = drm_agp_bind_memory (handle, start))) {
-		spin_lock(&drm_mem_lock);
-		++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
-		drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
-		    += handle->page_count << PAGE_SHIFT;
-		spin_unlock(&drm_mem_lock);
-		return retcode;
-	}
-	spin_lock(&drm_mem_lock);
-	++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
-	spin_unlock(&drm_mem_lock);
-	return retcode;
-}
-
-int drm_unbind_agp (DRM_AGP_MEM * handle) {
-	int alloc_count;
-	int free_count;
-	int retcode = -EINVAL;
-
-	if (!handle) {
-		DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
-			      "Attempt to unbind NULL AGP handle\n");
-		return retcode;
-	}
-
-	if ((retcode = drm_agp_unbind_memory (handle)))
-		return retcode;
-	spin_lock(&drm_mem_lock);
-	free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
-	alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
-	drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
-	    += handle->page_count << PAGE_SHIFT;
-	spin_unlock(&drm_mem_lock);
-	if (free_count > alloc_count) {
-		DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
-			      "Excess frees: %d frees, %d allocs\n",
-			      free_count, alloc_count);
-	}
-	return retcode;
-}
-#endif
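
(Editor's note: with this DEBUG_MEMORY implementation deleted and the inline
wrappers removed from drmP.h, call sites are expected to move to the plain
allocators the wrappers had been forwarding to all along, e.g.:)

	/* before: the 'area' tag only mattered to the debug build */
	buf = drm_alloc(size, DRM_MEM_DRIVER);
	drm_free(buf, size, DRM_MEM_DRIVER);

	/* after: equivalent to what the non-debug wrappers did */
	buf = kmalloc(size, GFP_KERNEL);
	kfree(buf);
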
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
new file mode 100644
index 000000000000..f8332073d277
--- /dev/null
+++ b/include/drm/drm_mm.h
@@ -0,0 +1,99 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_MM_H_
+#define _DRM_MM_H_
+
+/*
+ * Generic range manager structs
+ */
+#include <linux/list.h>
+
+struct drm_mm_node {
+	struct list_head fl_entry;
+	struct list_head ml_entry;
+	int free;
+	unsigned long start;
+	unsigned long size;
+	struct drm_mm *mm;
+	void *private;
+};
+
+struct drm_mm {
+	struct list_head fl_entry;
+	struct list_head ml_entry;
+	struct list_head unused_nodes;
+	int num_unused;
+	spinlock_t unused_lock;
+};
+
+/*
+ * Basic range manager support (drm_mm.c)
+ */
+extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+						    unsigned long size,
+						    unsigned alignment,
+						    int atomic);
+static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+						   unsigned long size,
+						   unsigned alignment)
+{
+	return drm_mm_get_block_generic(parent, size, alignment, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+							  unsigned long size,
+							  unsigned alignment)
+{
+	return drm_mm_get_block_generic(parent, size, alignment, 1);
+}
+extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+					      unsigned long size,
+					      unsigned alignment,
+					      int best_match);
+extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
+		       unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
+					 unsigned long size);
+extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
+				    unsigned long size, int atomic);
+extern int drm_mm_pre_get(struct drm_mm *mm);
+
+static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+{
+	return block->mm;
+}
+
+#endif
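
(Editor's note: a usage sketch of the range manager API above; the sizes are
invented. drm_mm_search_free() only locates a suitable hole, and
drm_mm_get_block() then carves the allocation out of it:)

	struct drm_mm mm;
	struct drm_mm_node *node;

	drm_mm_init(&mm, 0, 16 * 1024 * 1024);	/* manage a 16 MiB range */

	node = drm_mm_search_free(&mm, 4096, 4096, 0);
	if (node)
		node = drm_mm_get_block(node, 4096, 4096);
	/* ... node->start is the allocated offset within the range ... */

	drm_mm_put_block(node);
	drm_mm_takedown(&mm);
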
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index fc55db780199..45c18672b093 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -254,6 +254,11 @@
 	{0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
@@ -268,6 +273,8 @@
 	{0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -536,4 +543,6 @@
 	{0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+	{0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+	{0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0, 0, 0}
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index fe3e3a4b4aed..41862e9a4c20 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -496,6 +496,16 @@ typedef struct {
 #define DRM_RADEON_SETPARAM   0x19
 #define DRM_RADEON_SURF_ALLOC 0x1a
 #define DRM_RADEON_SURF_FREE  0x1b
+/* KMS ioctl */
+#define DRM_RADEON_GEM_INFO		0x1c
+#define DRM_RADEON_GEM_CREATE		0x1d
+#define DRM_RADEON_GEM_MMAP		0x1e
+#define DRM_RADEON_GEM_PREAD		0x21
+#define DRM_RADEON_GEM_PWRITE		0x22
+#define DRM_RADEON_GEM_SET_DOMAIN	0x23
+#define DRM_RADEON_GEM_WAIT_IDLE	0x24
+#define DRM_RADEON_CS			0x26
+#define DRM_RADEON_INFO			0x27
 
 #define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
 #define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -524,6 +534,17 @@ typedef struct {
 #define DRM_IOCTL_RADEON_SETPARAM   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
 #define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
 #define DRM_IOCTL_RADEON_SURF_FREE  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
+/* KMS */
+#define DRM_IOCTL_RADEON_GEM_INFO	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
+#define DRM_IOCTL_RADEON_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
+#define DRM_IOCTL_RADEON_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
+#define DRM_IOCTL_RADEON_GEM_PREAD	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
+#define DRM_IOCTL_RADEON_GEM_PWRITE	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
+#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
+#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE	DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
+#define DRM_IOCTL_RADEON_CS		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
+#define DRM_IOCTL_RADEON_INFO		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
+
 
 typedef struct drm_radeon_init {
 	enum {
@@ -682,6 +703,7 @@ typedef struct drm_radeon_indirect {
 #define RADEON_PARAM_VBLANK_CRTC           13   /* VBLANK CRTC */
 #define RADEON_PARAM_FB_LOCATION           14   /* FB location */
 #define RADEON_PARAM_NUM_GB_PIPES          15   /* num GB pipes */
+#define RADEON_PARAM_DEVICE_ID             16
 
 typedef struct drm_radeon_getparam {
 	int param;
@@ -751,4 +773,112 @@ typedef struct drm_radeon_surface_free {
 #define DRM_RADEON_VBLANK_CRTC1		1
 #define DRM_RADEON_VBLANK_CRTC2		2
 
+/*
+ * Kernel modesetting world below.
+ */
+#define RADEON_GEM_DOMAIN_CPU		0x1
+#define RADEON_GEM_DOMAIN_GTT		0x2
+#define RADEON_GEM_DOMAIN_VRAM		0x4
+
+struct drm_radeon_gem_info {
+	uint64_t	gart_size;
+	uint64_t	vram_size;
+	uint64_t	vram_visible;
+};
+
+#define RADEON_GEM_NO_BACKING_STORE 1
+
+struct drm_radeon_gem_create {
+	uint64_t	size;
+	uint64_t	alignment;
+	uint32_t	handle;
+	uint32_t	initial_domain;
+	uint32_t	flags;
+};
+
+struct drm_radeon_gem_mmap {
+	uint32_t	handle;
+	uint32_t	pad;
+	uint64_t	offset;
+	uint64_t	size;
+	uint64_t	addr_ptr;
+};
+
+struct drm_radeon_gem_set_domain {
+	uint32_t	handle;
+	uint32_t	read_domains;
+	uint32_t	write_domain;
+};
+
+struct drm_radeon_gem_wait_idle {
+	uint32_t	handle;
+	uint32_t	pad;
+};
+
+struct drm_radeon_gem_busy {
+	uint32_t	handle;
+	uint32_t	busy;
+};
+
+struct drm_radeon_gem_pread {
+	/** Handle for the object being read. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset into the object to read from */
+	uint64_t offset;
+	/** Length of data to read */
+	uint64_t size;
+	/** Pointer to write the data into. */
+	/* void *, but pointers are not 32/64 compatible */
+	uint64_t data_ptr;
+};
+
+struct drm_radeon_gem_pwrite {
+	/** Handle for the object being written to. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset into the object to write to */
+	uint64_t offset;
+	/** Length of data to write */
+	uint64_t size;
+	/** Pointer to read the data from. */
+	/* void *, but pointers are not 32/64 compatible */
+	uint64_t data_ptr;
+};
+
+#define RADEON_CHUNK_ID_RELOCS	0x01
+#define RADEON_CHUNK_ID_IB	0x02
+
+struct drm_radeon_cs_chunk {
+	uint32_t		chunk_id;
+	uint32_t		length_dw;
+	uint64_t		chunk_data;
+};
+
+struct drm_radeon_cs_reloc {
+	uint32_t		handle;
+	uint32_t		read_domains;
+	uint32_t		write_domain;
+	uint32_t		flags;
+};
+
+struct drm_radeon_cs {
+	uint32_t		num_chunks;
+	uint32_t		cs_id;
+	/* this points to uint64_t * which point to cs chunks */
+	uint64_t		chunks;
+	/* updates to the limits after this CS ioctl */
+	uint64_t		gart_limit;
+	uint64_t		vram_limit;
+};
+
+#define RADEON_INFO_DEVICE_ID		0x00
+#define RADEON_INFO_NUM_GB_PIPES	0x01
+
+struct drm_radeon_info {
+	uint32_t		request;
+	uint32_t		pad;
+	uint64_t		value;
+};
+
 #endif
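
(Editor's note: a minimal user-space sketch of the new GEM interface; error
handling is elided, and the header path and "fd opened from /dev/dri/card0"
convention are assumptions of this example, not something the patch
guarantees:)

	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/radeon_drm.h>

	static int radeon_gem_create_example(int fd)
	{
		struct drm_radeon_gem_create req;

		memset(&req, 0, sizeof(req));
		req.size = 1024 * 1024;			/* 1 MiB object */
		req.alignment = 4096;
		req.initial_domain = RADEON_GEM_DOMAIN_GTT;

		if (ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &req))
			return -1;
		return req.handle;	/* GEM handle filled in by the kernel */
	}
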
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
new file mode 100644
index 000000000000..cd22ab4b495c
--- /dev/null
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -0,0 +1,618 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_BO_API_H_
32#define _TTM_BO_API_H_
33
34#include "drm_hashtab.h"
35#include <linux/kref.h>
36#include <linux/list.h>
37#include <linux/wait.h>
38#include <linux/mutex.h>
39#include <linux/mm.h>
40#include <linux/rbtree.h>
41#include <linux/bitmap.h>
42
43struct ttm_bo_device;
44
45struct drm_mm_node;
46
47/**
48 * struct ttm_mem_reg
49 *
50 * @mm_node: Memory manager node.
51 * @size: Requested size of memory region.
52 * @num_pages: Actual size of memory region in pages.
53 * @page_alignment: Page alignment.
54 * @placement: Placement flags.
55 *
56 * Structure indicating the placement and space resources used by a
57 * buffer object.
58 */
59
60struct ttm_mem_reg {
61 struct drm_mm_node *mm_node;
62 unsigned long size;
63 unsigned long num_pages;
64 uint32_t page_alignment;
65 uint32_t mem_type;
66 uint32_t placement;
67};
68
69/**
70 * enum ttm_bo_type
71 *
72 * @ttm_bo_type_device: These are 'normal' buffers that can
73 * be mmapped by user space. Each of these bos occupy a slot in the
74 * device address space, that can be used for normal vm operations.
75 *
76 * @ttm_bo_type_user: These are user-space memory areas that are made
77 * available to the GPU by mapping the buffer pages into the GPU aperture
78 * space. These buffers cannot be mmaped from the device address space.
79 *
80 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
81 * but they cannot be accessed from user-space. For kernel-only use.
82 */
83
84enum ttm_bo_type {
85 ttm_bo_type_device,
86 ttm_bo_type_user,
87 ttm_bo_type_kernel
88};
89
90struct ttm_tt;
91
92/**
93 * struct ttm_buffer_object
94 *
95 * @bdev: Pointer to the buffer object device structure.
96 * @buffer_start: The virtual user-space start address of ttm_bo_type_user
97 * buffers.
98 * @type: The bo type.
99 * @destroy: Destruction function. If NULL, kfree is used.
100 * @num_pages: Actual number of pages.
101 * @addr_space_offset: Address space offset.
102 * @acc_size: Accounted size for this object.
103 * @kref: Reference count of this buffer object. When this refcount reaches
104 * zero, the object is put on the delayed delete list.
105 * @list_kref: List reference count of this buffer object. This member is
106 * used to avoid destruction while the buffer object is still on a list.
107 * Lru lists may keep one refcount, the delayed delete list, and kref != 0
108 * keeps one refcount. When this refcount reaches zero,
109 * the object is destroyed.
110 * @event_queue: Queue for processes waiting on buffer object status change.
111 * @lock: spinlock protecting mostly synchronization members.
112 * @proposed_placement: Proposed placement for the buffer. Changed only by the
113 * creator prior to validation as opposed to bo->mem.proposed_flags which is
114 * changed by the implementation prior to a buffer move if it wants to outsmart
115 * the buffer creator / user. This latter happens, for example, at eviction.
116 * @mem: structure describing current placement.
117 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
118 * pinned in physical memory. If this behaviour is not desired, this member
119 * holds a pointer to a persistant shmem object.
120 * @ttm: TTM structure holding system pages.
121 * @evicted: Whether the object was evicted without user-space knowing.
122 * @cpu_writes: For synchronization. Number of cpu writers.
123 * @lru: List head for the lru list.
124 * @ddestroy: List head for the delayed destroy list.
125 * @swap: List head for swap LRU list.
126 * @val_seq: Sequence of the validation holding the @reserved lock.
127 * Used to avoid starvation when many processes compete to validate the
128 * buffer. This member is protected by the bo_device::lru_lock.
129 * @seq_valid: The value of @val_seq is valid. This value is protected by
130 * the bo_device::lru_lock.
131 * @reserved: Deadlock-free lock used for synchronization state transitions.
132 * @sync_obj_arg: Opaque argument to synchronization object function.
133 * @sync_obj: Pointer to a synchronization object.
134 * @priv_flags: Flags describing buffer object internal state.
135 * @vm_rb: Rb node for the vm rb tree.
136 * @vm_node: Address space manager node.
137 * @offset: The current GPU offset, which can have different meanings
138 * depending on the memory type. For SYSTEM type memory, it should be 0.
139 * @cur_placement: Hint of current placement.
140 *
141 * Base class for TTM buffer object, that deals with data placement and CPU
142 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
143 * the driver can usually use the placement offset @offset directly as the
144 * GPU virtual address. For drivers implementing multiple
145 * GPU memory manager contexts, the driver should manage the address space
146 * in these contexts separately and use these objects to get the correct
147 * placement and caching for these GPU maps. This makes it possible to use
148 * these objects for even quite elaborate memory management schemes.
149 * The destroy member, the API visibility of this object makes it possible
150 * to derive driver specific types.
151 */
152
153struct ttm_buffer_object {
154 /**
155 * Members constant at init.
156 */
157
158 struct ttm_bo_device *bdev;
159 unsigned long buffer_start;
160 enum ttm_bo_type type;
161 void (*destroy) (struct ttm_buffer_object *);
162 unsigned long num_pages;
163 uint64_t addr_space_offset;
164 size_t acc_size;
165
166 /**
167 * Members not needing protection.
168 */
169
170 struct kref kref;
171 struct kref list_kref;
172 wait_queue_head_t event_queue;
173 spinlock_t lock;
174
175 /**
176 * Members protected by the bo::reserved lock.
177 */
178
179 uint32_t proposed_placement;
180 struct ttm_mem_reg mem;
181 struct file *persistant_swap_storage;
182 struct ttm_tt *ttm;
183 bool evicted;
184
185 /**
186 * Members protected by the bo::reserved lock only when written to.
187 */
188
189 atomic_t cpu_writers;
190
191 /**
192 * Members protected by the bdev::lru_lock.
193 */
194
195 struct list_head lru;
196 struct list_head ddestroy;
197 struct list_head swap;
198 uint32_t val_seq;
199 bool seq_valid;
200
201 /**
202 * Members protected by the bdev::lru_lock
203 * only when written to.
204 */
205
206 atomic_t reserved;
207
208
209 /**
210 * Members protected by the bo::lock
211 */
212
213 void *sync_obj_arg;
214 void *sync_obj;
215 unsigned long priv_flags;
216
217 /**
218 * Members protected by the bdev::vm_lock
219 */
220
221 struct rb_node vm_rb;
222 struct drm_mm_node *vm_node;
223
224
225 /**
226 * Special members that are protected by the reserve lock
227 * and the bo::lock when written to. Can be read with
228 * either of these locks held.
229 */
230
231 unsigned long offset;
232 uint32_t cur_placement;
233};
234
235/**
236 * struct ttm_bo_kmap_obj
237 *
238 * @virtual: The current kernel virtual address.
239 * @page: The page when kmap'ing a single page.
240 * @bo_kmap_type: Type of bo_kmap.
241 *
242 * Object describing a kernel mapping. Since a TTM bo may be located
243 * in various memory types with various caching policies, the
244 * mapping can either be an ioremap, a vmap, a kmap or part of a
245 * premapped region.
246 */
247
248struct ttm_bo_kmap_obj {
249 void *virtual;
250 struct page *page;
251 enum {
252 ttm_bo_map_iomap,
253 ttm_bo_map_vmap,
254 ttm_bo_map_kmap,
255 ttm_bo_map_premapped,
256 } bo_kmap_type;
257};
258
259/**
260 * ttm_bo_reference - reference a struct ttm_buffer_object
261 *
262 * @bo: The buffer object.
263 *
264 * Returns a refcounted pointer to a buffer object.
265 */
266
267static inline struct ttm_buffer_object *
268ttm_bo_reference(struct ttm_buffer_object *bo)
269{
270 kref_get(&bo->kref);
271 return bo;
272}
273
274/**
275 * ttm_bo_wait - wait for buffer idle.
276 *
277 * @bo: The buffer object.
278 * @interruptible: Use interruptible wait.
279 * @no_wait: Return immediately if buffer is busy.
280 *
281 * This function must be called with the bo::mutex held, and makes
282 * sure any previous rendering to the buffer is completed.
283 * Note: It might be necessary to block validations before the
284 * wait by reserving the buffer.
285 * Returns -EBUSY if no_wait is true and the buffer is busy.
286 * Returns -ERESTART if interrupted by a signal.
287 */
288extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
289 bool interruptible, bool no_wait);
290/**
291 * ttm_buffer_object_validate
292 *
293 * @bo: The buffer object.
294 * @proposed_placement: Proposed_placement for the buffer object.
295 * @interruptible: Sleep interruptible if sleeping.
296 * @no_wait: Return immediately if the buffer is busy.
297 *
298 * Changes placement and caching policy of the buffer object
299 * according to bo::proposed_flags.
300 * Returns
301 * -EINVAL on invalid proposed_flags.
302 * -ENOMEM on out-of-memory condition.
303 * -EBUSY if no_wait is true and buffer busy.
304 * -ERESTART if interrupted by a signal.
305 */
306extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
307 uint32_t proposed_placement,
308 bool interruptible, bool no_wait);
309/**
310 * ttm_bo_unref
311 *
312 * @bo: The buffer object.
313 *
314 * Unreference and clear a pointer to a buffer object.
315 */
316extern void ttm_bo_unref(struct ttm_buffer_object **bo);
317
318/**
319 * ttm_bo_synccpu_write_grab
320 *
321 * @bo: The buffer object:
322 * @no_wait: Return immediately if buffer is busy.
323 *
324 * Synchronizes a buffer object for CPU RW access. This means
325 * blocking command submission that affects the buffer and
326 * waiting for buffer idle. This lock is recursive.
327 * Returns
328 * -EBUSY if the buffer is busy and no_wait is true.
329 * -ERESTART if interrupted by a signal.
330 */
331
332extern int
333ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
334/**
335 * ttm_bo_synccpu_write_release:
336 *
337 * @bo : The buffer object.
338 *
339 * Releases a synccpu lock.
340 */
341extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
342
343/**
344 * ttm_buffer_object_init
345 *
346 * @bdev: Pointer to a ttm_bo_device struct.
347 * @bo: Pointer to a ttm_buffer_object to be initialized.
348 * @size: Requested size of buffer object.
349 * @type: Requested type of buffer object.
350 * @flags: Initial placement flags.
351 * @page_alignment: Data alignment in pages.
352 * @buffer_start: Virtual address of user space data backing a
353 * user buffer object.
354 * @interruptible: If needing to sleep to wait for GPU resources,
355 * sleep interruptible.
356 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
357 * pinned in physical memory. If this behaviour is not desired, this member
358 * holds a pointer to a persistant shmem object. Typically, this would
359 * point to the shmem object backing a GEM object if TTM is used to back a
360 * GEM user interface.
361 * @acc_size: Accounted size for this object.
362 * @destroy: Destroy function. Use NULL for kfree().
363 *
364 * This function initializes a pre-allocated struct ttm_buffer_object.
365 * As this object may be part of a larger structure, this function,
366 * together with the @destroy function,
367 * enables driver-specific objects derived from a ttm_buffer_object.
368 * On successful return, the object kref and list_kref are set to 1.
369 * Returns
370 * -ENOMEM: Out of memory.
371 * -EINVAL: Invalid placement flags.
372 * -ERESTART: Interrupted by signal while sleeping waiting for resources.
373 */
374
375extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
376 struct ttm_buffer_object *bo,
377 unsigned long size,
378 enum ttm_bo_type type,
379 uint32_t flags,
380 uint32_t page_alignment,
381 unsigned long buffer_start,
382 bool interrubtible,
383 struct file *persistant_swap_storage,
384 size_t acc_size,
385 void (*destroy) (struct ttm_buffer_object *));
386/**
387 * ttm_bo_synccpu_object_init
388 *
389 * @bdev: Pointer to a ttm_bo_device struct.
390 * @bo: Pointer to a ttm_buffer_object to be initialized.
391 * @size: Requested size of buffer object.
392 * @type: Requested type of buffer object.
393 * @flags: Initial placement flags.
394 * @page_alignment: Data alignment in pages.
395 * @buffer_start: Virtual address of user space data backing a
396 * user buffer object.
397 * @interruptible: If needing to sleep while waiting for GPU resources,
398 * sleep interruptible.
399 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
400 * pinned in physical memory. If this behaviour is not desired, this member
401 * holds a pointer to a persistant shmem object. Typically, this would
402 * point to the shmem object backing a GEM object if TTM is used to back a
403 * GEM user interface.
404 * @p_bo: On successful completion *p_bo points to the created object.
405 *
406 * This function allocates a ttm_buffer_object, and then calls
407 * ttm_buffer_object_init on that object.
408 * The destroy function is set to kfree().
409 * Returns
410 * -ENOMEM: Out of memory.
411 * -EINVAL: Invalid placement flags.
412 * -ERESTART: Interrupted by signal while waiting for resources.
413 */
414
415extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
416 unsigned long size,
417 enum ttm_bo_type type,
418 uint32_t flags,
419 uint32_t page_alignment,
420 unsigned long buffer_start,
421 bool interruptible,
422 struct file *persistant_swap_storage,
423 struct ttm_buffer_object **p_bo);
424
425/**
426 * ttm_bo_check_placement
427 *
428 * @bo: the buffer object.
429 * @set_flags: placement flags to set.
430 * @clr_flags: placement flags to clear.
431 *
432 * Performs minimal validity checking on an intended change of
433 * placement flags.
434 * Returns
435 * -EINVAL: Intended change is invalid or not allowed.
436 */
437
438extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
439 uint32_t set_flags, uint32_t clr_flags);
440
441/**
442 * ttm_bo_init_mm
443 *
444 * @bdev: Pointer to a ttm_bo_device struct.
445 * @mem_type: The memory type.
446 * @p_offset: offset for managed area in pages.
447 * @p_size: size managed area in pages.
448 *
449 * Initialize a manager for a given memory type.
450 * Note: if part of driver firstopen, it must be protected from a
451 * potentially racing lastclose.
452 * Returns:
453 * -EINVAL: invalid size or memory type.
454 * -ENOMEM: Not enough memory.
455 * May also return driver-specified errors.
456 */
457
458extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
459 unsigned long p_offset, unsigned long p_size);
460/**
461 * ttm_bo_clean_mm
462 *
463 * @bdev: Pointer to a ttm_bo_device struct.
464 * @mem_type: The memory type.
465 *
466 * Take down a manager for a given memory type after first walking
467 * the LRU list to evict any buffers left alive.
468 *
469 * Normally, this function is part of lastclose() or unload(), and at that
470 * point there shouldn't be any buffers left created by user-space, since
471 * there should've been removed by the file descriptor release() method.
472 * However, before this function is run, make sure to signal all sync objects,
473 * and verify that the delayed delete queue is empty. The driver must also
474 * make sure that there are no NO_EVICT buffers present in this memory type
475 * when the call is made.
476 *
477 * If this function is part of a VT switch, the caller must make sure that
478 * there are no applications currently validating buffers before this
479 * function is called. The caller can do that by first taking the
480 * struct ttm_bo_device::ttm_lock in write mode.
481 *
482 * Returns:
483 * -EINVAL: invalid or uninitialized memory type.
484 * -EBUSY: There are still buffers left in this memory type.
485 */
486
487extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
488
489/**
490 * ttm_bo_evict_mm
491 *
492 * @bdev: Pointer to a ttm_bo_device struct.
493 * @mem_type: The memory type.
494 *
495 * Evicts all buffers on the lru list of the memory type.
496 * This is normally part of a VT switch or an
497 * out-of-memory-space-due-to-fragmentation handler.
498 * The caller must make sure that there are no other processes
499 * currently validating buffers, and can do that by taking the
500 * struct ttm_bo_device::ttm_lock in write mode.
501 *
502 * Returns:
503 * -EINVAL: Invalid or uninitialized memory type.
504 * -ERESTART: The call was interrupted by a signal while waiting to
505 * evict a buffer.
506 */
507
508extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
509
510/**
511 * ttm_kmap_obj_virtual
512 *
513 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
514 * @is_iomem: Pointer to a boolean that on return indicates true if the
515 * virtual map is io memory, false if normal memory.
516 *
517 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
518 * If *is_iomem is true on return, the virtual address points to an io memory
519 * area that should only be accessed using the iowriteXX() and similar functions.
520 */
521
522static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
523 bool *is_iomem)
524{
525 *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
526 map->bo_kmap_type == ttm_bo_map_premapped);
527 return map->virtual;
528}
529
530/**
531 * ttm_bo_kmap
532 *
533 * @bo: The buffer object.
534 * @start_page: The first page to map.
535 * @num_pages: Number of pages to map.
536 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
537 *
538 * Sets up a kernel virtual mapping of the data in the buffer object,
539 * using ioremap, vmap or kmap. The ttm_kmap_obj_virtual function can then be
540 * used to obtain a virtual address to the data.
541 *
542 * Returns
543 * -ENOMEM: Out of memory.
544 * -EINVAL: Invalid range.
545 */
546
547extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
548 unsigned long num_pages, struct ttm_bo_kmap_obj *map);
549
550/**
551 * ttm_bo_kunmap
552 *
553 * @map: Object describing the map to unmap.
554 *
555 * Unmaps a kernel map set up by ttm_bo_kmap.
556 */
557
558extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
559
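Taken together, ttm_bo_kmap(), ttm_kmap_obj_virtual() and ttm_bo_kunmap() form a map / access / unmap pattern. A minimal sketch, assuming the caller already holds whatever synchronization the driver requires:

static int example_write_first_page(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);	/* map the first page only */
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		iowrite32(0, (void __iomem *)virtual);
	else
		*(u32 *)virtual = 0;

	ttm_bo_kunmap(&map);
	return 0;
}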
562
563/**
564 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
565 *
566 * @vma: vma as input from the fbdev mmap method.
567 * @bo: The bo backing the address space. The address space will
568 * have the same size as the bo, and start at offset 0.
569 *
570 * This function is intended to be called by the fbdev mmap method
571 * if the fbdev address space is to be backed by a bo.
572 */
573
574extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
575 struct ttm_buffer_object *bo);
576
577/**
578 * ttm_bo_mmap - mmap out of the ttm device address space.
579 *
580 * @filp: filp as input from the mmap method.
581 * @vma: vma as input from the mmap method.
582 * @bdev: Pointer to the ttm_bo_device with the address space manager.
583 *
584 * This function is intended to be called by the device mmap method
585 * if the device address space is to be backed by the bo manager.
586 */
587
588extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
589 struct ttm_bo_device *bdev);
590
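A minimal sketch of a driver fops::mmap hook built on ttm_bo_mmap(); example_filp_to_bdev() is a hypothetical stand-in for however the driver reaches its struct ttm_bo_device from the file:

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* Hypothetical helper: resolve the device from the file. */
	struct ttm_bo_device *bdev = example_filp_to_bdev(filp);

	return ttm_bo_mmap(filp, vma, bdev);
}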
591/**
592 * ttm_bo_io
593 *
594 * @bdev: Pointer to the struct ttm_bo_device.
595 * @filp: Pointer to the struct file attempting to read / write.
596 * @wbuf: User-space pointer to the buffer to write from. NULL on read.
597 * @rbuf: User-space pointer to the buffer to read into.
598 * NULL on write.
599 * @count: Number of bytes to read / write.
600 * @f_pos: Pointer to current file position.
601 * @write: 1 for write, 0 for read.
602 *
603 * This function implements read / write into ttm buffer objects, and is
604 * intended to be called from the fops::read and fops::write methods.
605 *
606 * Returns:
607 * See man (2) write, man (2) read. In particular, the function may
608 * return -EINTR if interrupted by a signal.
609 *
610 */
611
612extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
613 const char __user *wbuf, char __user *rbuf,
614 size_t count, loff_t *f_pos, bool write);
615
616extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
617
618#endif
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
new file mode 100644
index 000000000000..62ed733c52a2
--- /dev/null
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -0,0 +1,867 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30#ifndef _TTM_BO_DRIVER_H_
31#define _TTM_BO_DRIVER_H_
32
33#include "ttm/ttm_bo_api.h"
34#include "ttm/ttm_memory.h"
35#include "drm_mm.h"
36#include "linux/workqueue.h"
37#include "linux/fs.h"
38#include "linux/spinlock.h"
39
40struct ttm_backend;
41
42struct ttm_backend_func {
43 /**
44 * struct ttm_backend_func member populate
45 *
46 * @backend: Pointer to a struct ttm_backend.
47 * @num_pages: Number of pages to populate.
48 * @pages: Array of pointers to ttm pages.
49 * @dummy_read_page: Page to be used instead of NULL pages in the
50 * array @pages.
51 *
52 * Populate the backend with ttm pages. Depending on the backend,
53 * it may or may not copy the @pages array.
54 */
55 int (*populate) (struct ttm_backend *backend,
56 unsigned long num_pages, struct page **pages,
57 struct page *dummy_read_page);
58 /**
59 * struct ttm_backend_func member clear
60 *
61 * @backend: Pointer to a struct ttm_backend.
62 *
63 * This is an "unpopulate" function. Release all resources
64 * allocated with populate.
65 */
66 void (*clear) (struct ttm_backend *backend);
67
68 /**
69 * struct ttm_backend_func member bind
70 *
71 * @backend: Pointer to a struct ttm_backend.
72 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
73 * memory type and location for binding.
74 *
75 * Bind the backend pages into the aperture in the location
76 * indicated by @bo_mem. This function should be able to handle
77 * differences between aperture- and system page sizes.
78 */
79 int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
80
81 /**
82 * struct ttm_backend_func member unbind
83 *
84 * @backend: Pointer to a struct ttm_backend.
85 *
86 * Unbind previously bound backend pages. This function should be
87 * able to handle differences between aperture- and system page sizes.
88 */
89 int (*unbind) (struct ttm_backend *backend);
90
91 /**
92 * struct ttm_backend_func member destroy
93 *
94 * @backend: Pointer to a struct ttm_backend.
95 *
96 * Destroy the backend.
97 */
98 void (*destroy) (struct ttm_backend *backend);
99};
100
101/**
102 * struct ttm_backend
103 *
104 * @bdev: Pointer to a struct ttm_bo_device.
105 * @flags: For driver use.
106 * @func: Pointer to a struct ttm_backend_func that describes
107 * the backend methods.
108 *
109 */
110
111struct ttm_backend {
112 struct ttm_bo_device *bdev;
113 uint32_t flags;
114 struct ttm_backend_func *func;
115};
116
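A skeletal sketch of a driver backend built on these two structs. The example_backend wrapper is hypothetical, and the bind / unbind bodies are left as comments since they are entirely hardware-specific:

/* Assumes <linux/slab.h> for kfree(). */
struct example_backend {
	struct ttm_backend base;
	struct page **pages;
	unsigned long num_pages;
};

static int example_populate(struct ttm_backend *backend,
			    unsigned long num_pages, struct page **pages,
			    struct page *dummy_read_page)
{
	struct example_backend *eb =
	    container_of(backend, struct example_backend, base);

	/* This backend keeps the caller's array rather than copying it. */
	eb->pages = pages;
	eb->num_pages = num_pages;
	return 0;
}

static void example_clear(struct ttm_backend *backend)
{
	struct example_backend *eb =
	    container_of(backend, struct example_backend, base);

	eb->pages = NULL;
	eb->num_pages = 0;
}

static int example_bind(struct ttm_backend *backend,
			struct ttm_mem_reg *bo_mem)
{
	/* Program the GART / aperture page table here. */
	return 0;
}

static int example_unbind(struct ttm_backend *backend)
{
	/* Tear down the hardware mapping here. */
	return 0;
}

static void example_destroy(struct ttm_backend *backend)
{
	kfree(container_of(backend, struct example_backend, base));
}

static struct ttm_backend_func example_backend_func = {
	.populate = example_populate,
	.clear = example_clear,
	.bind = example_bind,
	.unbind = example_unbind,
	.destroy = example_destroy,
};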
117#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
118#define TTM_PAGE_FLAG_USER (1 << 1)
119#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
120#define TTM_PAGE_FLAG_WRITE (1 << 3)
121#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
122#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
123#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
124
125enum ttm_caching_state {
126 tt_uncached,
127 tt_wc,
128 tt_cached
129};
130
131/**
132 * struct ttm_tt
133 *
134 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
135 * pointer.
136 * @pages: Array of pages backing the data.
137 * @first_himem_page: Himem pages are put last in the page array, which
138 * enables us to run caching attribute changes on only the first part
139 * of the page array containing lomem pages. This is the index of the
140 * first himem page.
141 * @last_lomem_page: Index of the last lomem page in the page array.
142 * @num_pages: Number of pages in the page array.
143 * @bdev: Pointer to the current struct ttm_bo_device.
144 * @be: Pointer to the ttm backend.
145 * @tsk: The task for user ttm.
146 * @start: virtual address for user ttm.
147 * @swap_storage: Pointer to shmem struct file for swap storage.
148 * @caching_state: The current caching state of the pages.
149 * @state: The current binding state of the pages.
150 *
151 * This is a structure holding the pages, caching- and aperture binding
152 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
153 * memory.
154 */
155
156struct ttm_tt {
157 struct page *dummy_read_page;
158 struct page **pages;
159 long first_himem_page;
160 long last_lomem_page;
161 uint32_t page_flags;
162 unsigned long num_pages;
163 struct ttm_bo_device *bdev;
164 struct ttm_backend *be;
165 struct task_struct *tsk;
166 unsigned long start;
167 struct file *swap_storage;
168 enum ttm_caching_state caching_state;
169 enum {
170 tt_bound,
171 tt_unbound,
172 tt_unpopulated,
173 } state;
174};
175
176#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
177#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
178#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
179 before kernel access. */
180#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
181
182/**
183 * struct ttm_mem_type_manager
184 *
185 * @has_type: The memory type has been initialized.
186 * @use_type: The memory type is enabled.
187 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
188 * managed by this memory type.
189 * @gpu_offset: If used, the GPU offset of the first managed page of
190 * fixed memory or the first managed location in an aperture.
191 * @io_offset: The io_offset of the first managed page of IO memory or
192 * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
193 * memory, this should be set to 0.
194 * @io_size: The size of a managed IO region (fixed memory or aperture).
195 * @io_addr: Virtual kernel address if the io region is pre-mapped. For
196 * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
197 * @io_addr should be set to NULL.
198 * @size: Size of the managed region.
199 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
200 * as defined in ttm_placement.h.
201 * @default_caching: The default caching policy used for a buffer object
202 * placed in this memory type if the user doesn't provide one.
203 * @manager: The range manager used for this memory type. FIXME: If the aperture
204 * has a page size different from the underlying system, the granularity
205 * of this manager should take care of this. But the range allocating code
206 * in ttm_bo.c needs to be modified for this.
207 * @lru: The lru list for this memory type.
208 *
209 * This structure is used to identify and manage memory types for a device.
210 * It's set up by the ttm_bo_driver::init_mem_type method.
211 */
212
213struct ttm_mem_type_manager {
214
215 /*
216 * No protection. Constant from start.
217 */
218
219 bool has_type;
220 bool use_type;
221 uint32_t flags;
222 unsigned long gpu_offset;
223 unsigned long io_offset;
224 unsigned long io_size;
225 void *io_addr;
226 uint64_t size;
227 uint32_t available_caching;
228 uint32_t default_caching;
229
230 /*
231 * Protected by the bdev->lru_lock.
232 * TODO: Consider one lru_lock per ttm_mem_type_manager.
233 * Plays ill with list removal, though.
234 */
235
236 struct drm_mm manager;
237 struct list_head lru;
238};
239
240/**
241 * struct ttm_bo_driver
242 *
243 * @mem_type_prio: Priority array of memory types to place a buffer object in
244 * if it fits without evicting buffers from any of these memory types.
245 * @mem_busy_prio: Priority array of memory types to place a buffer object in
246 * if it needs to evict buffers to make room.
247 * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
248 * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.
249 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
250 * @invalidate_caches: Callback to invalidate read caches when a buffer object
251 * has been evicted.
252 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
253 * structure.
254 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
255 * @move: Callback for a driver to hook in accelerated functions to
256 * move a buffer.
257 * If set to NULL, a potentially slow memcpy() move is used.
258 * @sync_obj_signaled: See ttm_fence_api.h
259 * @sync_obj_wait: See ttm_fence_api.h
260 * @sync_obj_flush: See ttm_fence_api.h
261 * @sync_obj_unref: See ttm_fence_api.h
262 * @sync_obj_ref: See ttm_fence_api.h
263 */
264
265struct ttm_bo_driver {
266 const uint32_t *mem_type_prio;
267 const uint32_t *mem_busy_prio;
268 uint32_t num_mem_type_prio;
269 uint32_t num_mem_busy_prio;
270
271 /**
272 * struct ttm_bo_driver member create_ttm_backend_entry
273 *
274 * @bdev: The buffer object device.
275 *
276 * Create a driver specific struct ttm_backend.
277 */
278
279 struct ttm_backend *(*create_ttm_backend_entry)
280 (struct ttm_bo_device *bdev);
281
282 /**
283 * struct ttm_bo_driver member invalidate_caches
284 *
285 * @bdev: the buffer object device.
286 * @flags: new placement of the rebound buffer object.
287 *
288 * A previously evicted buffer has been rebound in a
289 * potentially new location. Tell the driver that it might
290 * consider invalidating read (texture) caches on the next command
291 * submission as a consequence.
292 */
293
294 int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
295 int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
296 struct ttm_mem_type_manager *man);
297 /**
298 * struct ttm_bo_driver member evict_flags:
299 *
300 * @bo: the buffer object to be evicted
301 *
302 * Return the bo flags for a buffer which is not mapped to the hardware.
303 * These will be placed in proposed_flags so that when the move is
304 * finished, they'll end up in bo->mem.flags
305 */
306
307 uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
308 /**
309 * struct ttm_bo_driver member move:
310 *
311 * @bo: the buffer to move
312 * @evict: whether this motion is evicting the buffer from
313 * the graphics address space
314 * @interruptible: Use interruptible sleeps if possible when sleeping.
315 * @no_wait: whether this should give up and return -EBUSY
316 * if this move would require sleeping
317 * @new_mem: the new memory region receiving the buffer
318 *
319 * Move a buffer between two memory regions.
320 */
321 int (*move) (struct ttm_buffer_object *bo,
322 bool evict, bool interruptible,
323 bool no_wait, struct ttm_mem_reg *new_mem);
324
325 /**
326 * struct ttm_bo_driver_member verify_access
327 *
328 * @bo: Pointer to a buffer object.
329 * @filp: Pointer to a struct file trying to access the object.
330 *
331 * Called from the map / write / read methods to verify that the
332 * caller is permitted to access the buffer object.
333 * This member may be set to NULL, which will refuse this kind of
334 * access for all buffer objects.
335 * This function should return 0 if access is granted, -EPERM otherwise.
336 */
337 int (*verify_access) (struct ttm_buffer_object *bo,
338 struct file *filp);
339
340 /**
341 * In case a driver writer dislikes the TTM fence objects,
342 * the driver writer can replace those with sync objects of
343 * his / her own. If it turns out that no driver writer is
344 * using these, I suggest we remove these hooks and plug in
345 * fences directly. The bo driver needs the following functionality:
346 * See the corresponding functions in the fence object API
347 * documentation.
348 */
349
350 bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
351 int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
352 bool lazy, bool interruptible);
353 int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
354 void (*sync_obj_unref) (void **sync_obj);
355 void *(*sync_obj_ref) (void *sync_obj);
356};
357
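A hedged sketch of filling in struct ttm_bo_driver. The example_* functions are stand-ins, .move is deliberately left NULL so TTM falls back to a memcpy move, and the sync_obj hooks are omitted on the assumption that the driver has no fences:

static const uint32_t example_prios[] = {
	TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM,
};

static struct ttm_backend *example_create_backend(struct ttm_bo_device *bdev)
{
	return NULL;	/* a real driver allocates its backend here */
}

static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				 struct ttm_mem_type_manager *man)
{
	return 0;	/* fill in man->flags, man->available_caching, ... */
}

static uint32_t example_evict_flags(struct ttm_buffer_object *bo)
{
	/* Evict everything to cached system memory. */
	return TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
}

static struct ttm_bo_driver example_bo_driver = {
	.mem_type_prio = example_prios,
	.mem_busy_prio = example_prios,
	.num_mem_type_prio = ARRAY_SIZE(example_prios),
	.num_mem_busy_prio = ARRAY_SIZE(example_prios),
	.create_ttm_backend_entry = example_create_backend,
	.init_mem_type = example_init_mem_type,
	.evict_flags = example_evict_flags,
	/* .move == NULL: TTM falls back to ttm_bo_move_memcpy() */
};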
358#define TTM_NUM_MEM_TYPES 8
359
360#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
361 idling before CPU mapping */
362#define TTM_BO_PRIV_FLAG_MAX 1
363/**
364 * struct ttm_bo_device - Buffer object driver device-specific data.
365 *
366 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
367 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
368 * @count: Current number of buffer objects.
369 * @pages: Current number of pinned pages.
370 * @dummy_read_page: Pointer to a dummy page used for mapping requests
371 * of unpopulated pages.
372 * @shrink: A shrink callback object used for buffer object swap.
373 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
374 * used by a buffer object. This is excluding page arrays and backing pages.
375 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
376 * @man: An array of mem_type_managers.
377 * @addr_space_mm: Range manager for the device address space.
378 * @lru_lock: Spinlock that protects the buffer+device lru lists and
379 * ddestroy lists.
380 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
381 * If a GPU lockup has been detected, this is forced to 0.
382 * @dev_mapping: A pointer to the struct address_space representing the
383 * device address space.
384 * @wq: Work queue structure for the delayed delete workqueue.
385 *
386 */
387
388struct ttm_bo_device {
389
390 /*
391 * Constant after bo device init / atomic.
392 */
393
394 struct ttm_mem_global *mem_glob;
395 struct ttm_bo_driver *driver;
396 struct page *dummy_read_page;
397 struct ttm_mem_shrink shrink;
398
399 size_t ttm_bo_extra_size;
400 size_t ttm_bo_size;
401
402 rwlock_t vm_lock;
403 /*
404 * Protected by the vm lock.
405 */
406 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
407 struct rb_root addr_space_rb;
408 struct drm_mm addr_space_mm;
409
410 /*
411 * Might want to change this to one lock per manager.
412 */
413 spinlock_t lru_lock;
414 /*
415 * Protected by the lru lock.
416 */
417 struct list_head ddestroy;
418 struct list_head swap_lru;
419
420 /*
421 * Protected by load / firstopen / lastclose / unload sync.
422 */
423
424 bool nice_mode;
425 struct address_space *dev_mapping;
426
427 /*
428 * Internal protection.
429 */
430
431 struct delayed_work wq;
432};
433
434/**
435 * ttm_flag_masked
436 *
437 * @old: Pointer to the result and original value.
438 * @new: New value of bits.
439 * @mask: Mask of bits to change.
440 *
441 * Convenience function to change a number of bits identified by a mask.
442 */
443
444static inline uint32_t
445ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
446{
447 *old ^= (*old ^ new) & mask;
448 return *old;
449}
450
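A short worked example of ttm_flag_masked(), replacing only the caching bits of a placement word (flags from ttm_placement.h):

static void example_flag_masked(void)
{
	uint32_t flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;

	/* Swap CACHED for WC; bits outside TTM_PL_MASK_CACHING survive. */
	ttm_flag_masked(&flags, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);

	/* flags == TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_WC */
}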
451/**
452 * ttm_tt_create
453 *
454 * @bdev: Pointer to a struct ttm_bo_device.
455 * @size: Size of the data needing backing.
456 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
457 * @dummy_read_page: See struct ttm_bo_device.
458 *
459 * Create a struct ttm_tt to back data with system memory pages.
460 * No pages are actually allocated.
461 * Returns:
462 * NULL: Out of memory.
463 */
464extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
465 unsigned long size,
466 uint32_t page_flags,
467 struct page *dummy_read_page);
468
469/**
470 * ttm_tt_set_user:
471 *
472 * @ttm: The struct ttm_tt to populate.
473 * @tsk: A struct task_struct for which @start is a valid user-space address.
474 * @start: A valid user-space address.
475 * @num_pages: Size in pages of the user memory area.
476 *
477 * Populate a struct ttm_tt with a user-space memory area after first pinning
478 * the pages backing it.
479 * Returns:
480 * !0: Error.
481 */
482
483extern int ttm_tt_set_user(struct ttm_tt *ttm,
484 struct task_struct *tsk,
485 unsigned long start, unsigned long num_pages);
486
487/**
488 * ttm_tt_bind:
489 *
490 * @ttm: The struct ttm_tt containing backing pages.
491 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
492 *
493 * Bind the pages of @ttm to an aperture location identified by @bo_mem
494 */
495extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
496
497/**
498 * ttm_tt_destroy:
499 *
500 * @ttm: The struct ttm_tt.
501 *
502 * Unbind, unpopulate and destroy a struct ttm_tt.
503 */
504extern void ttm_tt_destroy(struct ttm_tt *ttm);
505
506/**
507 * ttm_tt_unbind:
508 *
509 * @ttm: The struct ttm_tt.
510 *
511 * Unbind a struct ttm_tt.
512 */
513extern void ttm_tt_unbind(struct ttm_tt *ttm);
514
515/**
516 * ttm_tt_get_page:
517 *
518 * @ttm: The struct ttm_tt.
519 * @index: Index of the desired page.
520 *
521 * Return a pointer to the struct page backing @ttm at page
522 * index @index. If the page is unpopulated, one will be allocated to
523 * populate that index.
524 *
525 * Returns:
526 * NULL on OOM.
527 */
528extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
529
530/**
531 * ttm_tt_cache_flush:
532 *
533 * @pages: An array of pointers to struct page to flush.
534 * @num_pages: Number of pages to flush.
535 *
536 * Flush the data of the indicated pages from the cpu caches.
537 * This is used when changing caching attributes of the pages from
538 * cache-coherent.
539 */
540extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
541
542/**
543 * ttm_tt_set_placement_caching:
544 *
545 * @ttm A struct ttm_tt the backing pages of which will change caching policy.
546 * @placement: Flag indicating the desired caching policy.
547 *
548 * This function will change caching policy of any default kernel mappings of
549 * the pages backing @ttm. If changing from cached to uncached or
550 * write-combined, all CPU caches will first be flushed to make sure the
551 * data of the pages hit RAM. This function may be very costly as it
552 * involves global TLB and cache flushes and potential page
553 * splitting / combining.
554 */
555extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
556extern int ttm_tt_swapout(struct ttm_tt *ttm,
557 struct file *persistant_swap_storage);
558
559/*
560 * ttm_bo.c
561 */
562
563/**
564 * ttm_mem_reg_is_pci
565 *
566 * @bdev: Pointer to a struct ttm_bo_device.
567 * @mem: A valid struct ttm_mem_reg.
568 *
569 * Returns true if the memory described by @mem is PCI memory,
570 * false otherwise.
571 */
572extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
573 struct ttm_mem_reg *mem);
574
575/**
576 * ttm_bo_mem_space
577 *
578 * @bo: Pointer to a struct ttm_buffer_object, the data of which
579 * we want to allocate space for.
580 * @proposed_placement: Proposed new placement for the buffer object.
581 * @mem: A struct ttm_mem_reg.
582 * @interruptible: Sleep interruptibly when sleeping.
583 * @no_wait: Don't sleep waiting for space to become available.
584 *
585 * Allocate memory space for the buffer object pointed to by @bo, using
586 * the flags in @proposed_placement, potentially evicting other idle buffer objects.
587 * This function may sleep while waiting for space to become available.
588 * Returns:
589 * -EBUSY: No space available (only if no_wait == 1).
590 * -ENOMEM: Could not allocate memory for the buffer object, either due to
591 * fragmentation or concurrent allocators.
592 * -ERESTART: An interruptible sleep was interrupted by a signal.
593 */
594extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
595 uint32_t proposed_placement,
596 struct ttm_mem_reg *mem,
597 bool interruptible, bool no_wait);
598/**
599 * ttm_bo_wait_for_cpu
600 *
601 * @bo: Pointer to a struct ttm_buffer_object.
602 * @no_wait: Don't sleep while waiting.
603 *
604 * Wait until a buffer object is no longer sync'ed for CPU access.
605 * Returns:
606 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
607 * -ERESTART: An interruptible sleep was interrupted by a signal.
608 */
609
610extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
611
612/**
613 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
614 *
615 * @bdev: Pointer to a struct ttm_bo_device.
616 * @mem: A valid struct ttm_mem_reg describing the buffer object memory.
617 * @bus_base: On return the base of the PCI region.
618 * @bus_offset: On return the byte offset into the PCI region.
619 * @bus_size: On return the byte size of the buffer object, or zero if the buffer object memory is not accessible through a PCI region.
620 *
621 * Returns:
622 * -EINVAL if the buffer object is currently not mappable.
623 * 0 otherwise.
624 */
625
626extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
627 struct ttm_mem_reg *mem,
628 unsigned long *bus_base,
629 unsigned long *bus_offset,
630 unsigned long *bus_size);
631
632extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
633
634/**
635 * ttm_bo_device_init
636 *
637 * @bdev: A pointer to a struct ttm_bo_device to initialize.
638 * @mem_global: A pointer to an initialized struct ttm_mem_global.
639 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
640 * @file_page_offset: Offset into the device address space that is available
641 * for buffer data. This ensures compatibility with other users of the
642 * address space.
643 *
644 * Initializes a struct ttm_bo_device:
645 * Returns:
646 * !0: Failure.
647 */
648extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
649 struct ttm_mem_global *mem_glob,
650 struct ttm_bo_driver *driver,
651 uint64_t file_page_offset);
652
653/**
654 * ttm_bo_reserve:
655 *
656 * @bo: A pointer to a struct ttm_buffer_object.
657 * @interruptible: Sleep interruptible if waiting.
658 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
659 * @use_sequence: If @bo is already reserved, only sleep waiting for
660 * it to become unreserved if @sequence < (@bo)->sequence.
661 *
662 * Locks a buffer object for validation (or prevents other processes
663 * from locking it for validation), and removes it from lru lists, while
664 * taking a number of measures to prevent deadlocks.
665 *
666 * Deadlocks may occur when two processes try to reserve multiple buffers in
667 * different order, either by will or as a result of a buffer being evicted
668 * to make room for a buffer already reserved. (Buffers are reserved before
669 * they are evicted). The following algorithm prevents such deadlocks from
670 * occurring:
671 * 1) Buffers are reserved with the lru spinlock held. Upon successful
672 * reservation they are removed from the lru list. This stops a reserved buffer
673 * from being evicted. However the lru spinlock is released between the time
674 * a buffer is selected for eviction and the time it is reserved.
675 * Therefore a check is made when a buffer is reserved for eviction, that it
676 * is still the first buffer in the lru list, before it is removed from the
677 * list. @check_lru == 1 forces this check. If it fails, the function returns
678 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
679 * the procedure.
680 * 2) Processes attempting to reserve multiple buffers other than for eviction
681 * (typically execbuf) should first obtain a unique 32-bit
682 * validation sequence number,
683 * and call this function with @use_sequence == 1 and @sequence == the unique
684 * sequence number. If upon call of this function, the buffer object is already
685 * reserved, the validation sequence is checked against the validation
686 * sequence of the process currently reserving the buffer,
687 * and if the current validation sequence is greater than that of the process
688 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
689 * waiting for the buffer to become unreserved, after which it retries
690 * reserving.
691 * The caller should, when receiving an -EAGAIN error
692 * release all its buffer reservations, wait for @bo to become unreserved, and
693 * then rerun the validation with the same validation sequence. This procedure
694 * will always guarantee that the process with the lowest validation sequence
695 * will eventually succeed, preventing both deadlocks and starvation.
696 *
697 * Returns:
698 * -EAGAIN: The reservation may cause a deadlock.
699 * Release all buffer reservations, wait for @bo to become unreserved and
700 * try again. (only if use_sequence == 1).
701 * -ERESTART: A wait for the buffer to become unreserved was interrupted by
702 * a signal. Release all buffer reservations and return to user-space.
703 */
704extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
705 bool interruptible,
706 bool no_wait, bool use_sequence, uint32_t sequence);
707
708/**
709 * ttm_bo_unreserve
710 *
711 * @bo: A pointer to a struct ttm_buffer_object.
712 *
713 * Unreserve a previous reservation of @bo.
714 */
715extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
716
717/**
718 * ttm_bo_wait_unreserved
719 *
720 * @bo: A pointer to a struct ttm_buffer_object.
721 *
722 * Wait for a struct ttm_buffer_object to become unreserved.
723 * This is typically used in the execbuf code to relax cpu-usage while
724 * backing off from a potential deadlock condition.
725 */
726extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
727 bool interruptible);
728
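The backoff protocol documented for ttm_bo_reserve() condenses into a short loop. A sketch, assuming val_seq was obtained from a driver-global sequence counter:

static int example_reserve_list(struct ttm_buffer_object **bos, int n,
				uint32_t val_seq)
{
	struct ttm_buffer_object *busy;
	int i, ret;

retry:
	for (i = 0; i < n; ++i) {
		ret = ttm_bo_reserve(bos[i], true, false, true, val_seq);
		if (ret == 0)
			continue;

		/* Back off: release everything reserved so far. */
		busy = bos[i];
		while (i--)
			ttm_bo_unreserve(bos[i]);

		if (ret != -EAGAIN)
			return ret;	/* e.g. -ERESTART on a signal */

		/* Wait for the contended buffer, then retry with the
		 * same validation sequence. */
		ret = ttm_bo_wait_unreserved(busy, true);
		if (ret)
			return ret;
		goto retry;
	}
	return 0;
}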
729/**
730 * ttm_bo_block_reservation
731 *
732 * @bo: A pointer to a struct ttm_buffer_object.
733 * @interruptible: Use interruptible sleep when waiting.
734 * @no_wait: Don't sleep, but rather return -EBUSY.
735 *
736 * Block reservation for validation by simply reserving the buffer.
737 * This is intended for single buffer use only without eviction,
738 * and thus needs no deadlock protection.
739 *
740 * Returns:
741 * -EBUSY: If no_wait == 1 and the buffer is already reserved.
742 * -ERESTART: If interruptible == 1 and the process received a signal
743 * while sleeping.
744 */
745extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
746 bool interruptible, bool no_wait);
747
748/**
749 * ttm_bo_unblock_reservation
750 *
751 * @bo: A pointer to a struct ttm_buffer_object.
752 *
753 * Unblocks reservation leaving lru lists untouched.
754 */
755extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
756
757/*
758 * ttm_bo_util.c
759 */
760
761/**
762 * ttm_bo_move_ttm
763 *
764 * @bo: A pointer to a struct ttm_buffer_object.
765 * @evict: 1: This is an eviction. Don't try to pipeline.
766 * @no_wait: Never sleep, but rather return with -EBUSY.
767 * @new_mem: struct ttm_mem_reg indicating where to move.
768 *
769 * Optimized move function for a buffer object with both old and
770 * new placement backed by a TTM. The function will, if successful,
771 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
772 * and update the (@bo)->mem placement flags. If unsuccessful, the old
773 * data remains untouched, and it's up to the caller to free the
774 * memory space indicated by @new_mem.
775 * Returns:
776 * !0: Failure.
777 */
778
779extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
780 bool evict, bool no_wait,
781 struct ttm_mem_reg *new_mem);
782
783/**
784 * ttm_bo_move_memcpy
785 *
786 * @bo: A pointer to a struct ttm_buffer_object.
787 * @evict: 1: This is an eviction. Don't try to pipeline.
788 * @no_wait: Never sleep, but rather return with -EBUSY.
789 * @new_mem: struct ttm_mem_reg indicating where to move.
790 *
791 * Fallback move function for a mappable buffer object in mappable memory.
792 * The function will, if successful,
793 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
794 * and update the (@bo)->mem placement flags. If unsuccessful, the old
795 * data remains untouched, and it's up to the caller to free the
796 * memory space indicated by @new_mem.
797 * Returns:
798 * !0: Failure.
799 */
800
801extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
802 bool evict,
803 bool no_wait, struct ttm_mem_reg *new_mem);
804
805/**
806 * ttm_bo_free_old_node
807 *
808 * @bo: A pointer to a struct ttm_buffer_object.
809 *
810 * Utility function to free an old placement after a successful move.
811 */
812extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
813
814/**
815 * ttm_bo_move_accel_cleanup.
816 *
817 * @bo: A pointer to a struct ttm_buffer_object.
818 * @sync_obj: A sync object that signals when moving is complete.
819 * @sync_obj_arg: An argument to pass to the sync object idle / wait
820 * functions.
821 * @evict: This is an evict move. Don't return until the buffer is idle.
822 * @no_wait: Never sleep, but rather return with -EBUSY.
823 * @new_mem: struct ttm_mem_reg indicating where to move.
824 *
825 * Accelerated move function to be called when an accelerated move
826 * has been scheduled. The function will create a new temporary buffer object
827 * representing the old placement, and put the sync object on both buffer
828 * objects. After that the newly created buffer object is unref'd to be
829 * destroyed when the move is complete. This will help pipeline
830 * buffer moves.
831 */
832
833extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
834 void *sync_obj,
835 void *sync_obj_arg,
836 bool evict, bool no_wait,
837 struct ttm_mem_reg *new_mem);
838/**
839 * ttm_io_prot
840 *
841 * @c_state: Caching state.
842 * @tmp: Page protection flag for a normal, cached mapping.
843 *
844 * Utility function that returns the pgprot_t that should be used for
845 * setting up a PTE with the caching model indicated by @c_state.
846 */
847extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
848
849#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
850#define TTM_HAS_AGP
851#include <linux/agp_backend.h>
852
853/**
854 * ttm_agp_backend_init
855 *
856 * @bdev: Pointer to a struct ttm_bo_device.
857 * @bridge: The agp bridge this device is sitting on.
858 *
859 * Create a TTM backend that uses the indicated AGP bridge as an aperture
860 * for TT memory. This function uses the linux agpgart interface to
861 * bind and unbind memory backing a ttm_tt.
862 */
863extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
864 struct agp_bridge_data *bridge);
865#endif
866
867#endif
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
new file mode 100644
index 000000000000..d8b8f042c4f1
--- /dev/null
+++ b/include/drm/ttm/ttm_memory.h
@@ -0,0 +1,153 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef TTM_MEMORY_H
29#define TTM_MEMORY_H
30
31#include <linux/workqueue.h>
32#include <linux/spinlock.h>
33#include <linux/wait.h>
34#include <linux/errno.h>
35
36/**
37 * struct ttm_mem_shrink - callback to shrink TTM memory usage.
38 *
39 * @do_shrink: The callback function.
40 *
41 * Arguments to the do_shrink functions are intended to be passed using
42 * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
43 * and can be accessed using container_of().
44 */
45
46struct ttm_mem_shrink {
47 int (*do_shrink) (struct ttm_mem_shrink *);
48};
49
50/**
51 * struct ttm_mem_global - Global memory accounting structure.
52 *
53 * @shrink: A single callback to shrink TTM memory usage. Extend this
54 * to a linked list to be able to handle multiple callbacks when needed.
55 * @swap_queue: A workqueue to handle shrinking in low memory situations. We
56 * need a separate workqueue since it will spend a lot of time waiting
57 * for the GPU, and this will otherwise block other workqueue tasks(?).
58 * At this point we use only a single-threaded workqueue.
59 * @work: The workqueue callback for the shrink queue.
60 * @queue: Wait queue for processes suspended waiting for memory.
61 * @lock: Lock to protect the @shrink callback and the memory accounting members,
62 * that is, essentially the whole structure with some exceptions.
63 * @emer_memory: Lowmem memory limit available for root.
64 * @max_memory: Lowmem memory limit available for non-root.
65 * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
66 * @used_memory: Currently used lowmem memory.
67 * @used_total_memory: Currently used total (lowmem + highmem) memory.
68 * @total_memory_swap_limit: Total memory limit where the shrink workqueue
69 * kicks in.
70 * @max_total_memory: Total memory available to non-root processes.
71 * @emer_total_memory: Total memory available to root processes.
72 *
73 * Note that this structure is not per device. It should be global for all
74 * graphics devices.
75 */
76
77struct ttm_mem_global {
78 struct ttm_mem_shrink *shrink;
79 struct workqueue_struct *swap_queue;
80 struct work_struct work;
81 wait_queue_head_t queue;
82 spinlock_t lock;
83 uint64_t emer_memory;
84 uint64_t max_memory;
85 uint64_t swap_limit;
86 uint64_t used_memory;
87 uint64_t used_total_memory;
88 uint64_t total_memory_swap_limit;
89 uint64_t max_total_memory;
90 uint64_t emer_total_memory;
91};
92
93/**
94 * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
95 *
96 * @shrink: The object to initialize.
97 * @func: The callback function.
98 */
99
100static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
101 int (*func) (struct ttm_mem_shrink *))
102{
103 shrink->do_shrink = func;
104}
105
106/**
107 * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
108 *
109 * @glob: The struct ttm_mem_global object to register with.
110 * @shrink: An initialized struct ttm_mem_shrink object to register.
111 *
112 * Returns:
113 * -EBUSY: There's already a callback registered. (May change).
114 */
115
116static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
117 struct ttm_mem_shrink *shrink)
118{
119 spin_lock(&glob->lock);
120 if (glob->shrink != NULL) {
121 spin_unlock(&glob->lock);
122 return -EBUSY;
123 }
124 glob->shrink = shrink;
125 spin_unlock(&glob->lock);
126 return 0;
127}
128
129/**
130 * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
131 *
132 * @glob: The struct ttm_mem_global object to unregister from.
133 * @shrink: A previously registered struct ttm_mem_shrink object.
134 *
135 */
136
137static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
138 struct ttm_mem_shrink *shrink)
139{
140 spin_lock(&glob->lock);
141 BUG_ON(glob->shrink != shrink);
142 glob->shrink = NULL;
143 spin_unlock(&glob->lock);
144}
145
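The inheritance pattern described for struct ttm_mem_shrink might look like the following sketch; struct example_shrink and its use of ttm_bo_swapout_all() as the eviction body are assumptions for illustration:

struct example_shrink {
	struct ttm_mem_shrink base;
	struct ttm_bo_device *bdev;
};

static int example_do_shrink(struct ttm_mem_shrink *shrink)
{
	struct example_shrink *es =
	    container_of(shrink, struct example_shrink, base);

	/* Swap out buffer objects to release memory. */
	ttm_bo_swapout_all(es->bdev);
	return 0;
}

static int example_setup_shrink(struct ttm_mem_global *glob,
				struct example_shrink *es)
{
	ttm_mem_init_shrink(&es->base, example_do_shrink);
	return ttm_mem_register_shrink(glob, &es->base);
}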
146extern int ttm_mem_global_init(struct ttm_mem_global *glob);
147extern void ttm_mem_global_release(struct ttm_mem_global *glob);
148extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
149 bool no_wait, bool interruptible, bool himem);
150extern void ttm_mem_global_free(struct ttm_mem_global *glob,
151 uint64_t amount, bool himem);
152extern size_t ttm_round_pot(size_t size);
153#endif
diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h
new file mode 100644
index 000000000000..889a4c7958ae
--- /dev/null
+++ b/include/drm/ttm/ttm_module.h
@@ -0,0 +1,58 @@
1/**************************************************************************
2 *
3 * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_MODULE_H_
32#define _TTM_MODULE_H_
33
34#include <linux/kernel.h>
35
36#define TTM_PFX "[TTM]"
37
38enum ttm_global_types {
39 TTM_GLOBAL_TTM_MEM = 0,
40 TTM_GLOBAL_TTM_BO,
41 TTM_GLOBAL_TTM_OBJECT,
42 TTM_GLOBAL_NUM
43};
44
45struct ttm_global_reference {
46 enum ttm_global_types global_type;
47 size_t size;
48 void *object;
49 int (*init) (struct ttm_global_reference *);
50 void (*release) (struct ttm_global_reference *);
51};
52
53extern void ttm_global_init(void);
54extern void ttm_global_release(void);
55extern int ttm_global_item_ref(struct ttm_global_reference *ref);
56extern void ttm_global_item_unref(struct ttm_global_reference *ref);
57
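A sketch of how a driver might take a reference on the global memory accounting object through this interface; the example_* wrapper names are assumptions:

static int example_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void example_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int example_get_mem_global(struct ttm_global_reference *ref)
{
	ref->global_type = TTM_GLOBAL_TTM_MEM;
	ref->size = sizeof(struct ttm_mem_global);
	ref->init = &example_mem_global_init;
	ref->release = &example_mem_global_release;
	return ttm_global_item_ref(ref);
}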
58#endif /* _TTM_MODULE_H_ */
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
new file mode 100644
index 000000000000..c84ff153a564
--- /dev/null
+++ b/include/drm/ttm/ttm_placement.h
@@ -0,0 +1,92 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_PLACEMENT_H_
32#define _TTM_PLACEMENT_H_
33/*
34 * Memory regions for data placement.
35 */
36
37#define TTM_PL_SYSTEM 0
38#define TTM_PL_TT 1
39#define TTM_PL_VRAM 2
40#define TTM_PL_PRIV0 3
41#define TTM_PL_PRIV1 4
42#define TTM_PL_PRIV2 5
43#define TTM_PL_PRIV3 6
44#define TTM_PL_PRIV4 7
45#define TTM_PL_PRIV5 8
46#define TTM_PL_SWAPPED 15
47
48#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
49#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
50#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
51#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
52#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
53#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
54#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
55#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
56#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
57#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
58#define TTM_PL_MASK_MEM 0x0000FFFF
59
60/*
61 * Other flags that affect data placement.
62 * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
63 * if available.
64 * TTM_PL_FLAG_SHARED means that another application may
65 * reference the buffer.
66 * TTM_PL_FLAG_NO_EVICT means that the buffer may never
67 * be evicted to make room for other buffers.
68 */
69
70#define TTM_PL_FLAG_CACHED (1 << 16)
71#define TTM_PL_FLAG_UNCACHED (1 << 17)
72#define TTM_PL_FLAG_WC (1 << 18)
73#define TTM_PL_FLAG_SHARED (1 << 20)
74#define TTM_PL_FLAG_NO_EVICT (1 << 21)
75
76#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
77 TTM_PL_FLAG_UNCACHED | \
78 TTM_PL_FLAG_WC)
79
80#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
81
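As an illustration, a placement is composed by or-ing one memory-type flag with the caching and eviction modifiers above, and split apart again with the masks; a small sketch with example values:

/* Write-combined VRAM that may never be evicted: a typical
 * scanout-style placement. */
static const uint32_t example_scanout_placement =
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_NO_EVICT;

static void example_split_placement(uint32_t placement,
				    uint32_t *mem_flags, uint32_t *caching)
{
	*mem_flags = placement & TTM_PL_MASK_MEM;
	*caching = placement & TTM_PL_MASK_CACHING;
}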
82/*
83 * Access flags to be used for CPU- and GPU- mappings.
84 * The idea is that the TTM synchronization mechanism will
85 * allow concurrent READ access and exclusive write access.
86 * Currently GPU- and CPU accesses are exclusive.
87 */
88
89#define TTM_ACCESS_READ (1 << 0)
90#define TTM_ACCESS_WRITE (1 << 1)
91
92#endif