Diffstat (limited to 'include/xen')
-rw-r--r--  include/xen/gntalloc.h              82
-rw-r--r--  include/xen/gntdev.h               150
-rw-r--r--  include/xen/grant_table.h            8
-rw-r--r--  include/xen/interface/io/blkif.h    34
-rw-r--r--  include/xen/interface/xencomm.h     41
-rw-r--r--  include/xen/xencomm.h               77
6 files changed, 16 insertions, 376 deletions
diff --git a/include/xen/gntalloc.h b/include/xen/gntalloc.h
deleted file mode 100644
index 76bd58065f4f..000000000000
--- a/include/xen/gntalloc.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/******************************************************************************
- * gntalloc.h
- *
- * Interface to /dev/xen/gntalloc.
- *
- * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
- *
- * This file is in the public domain.
- */
-
-#ifndef __LINUX_PUBLIC_GNTALLOC_H__
-#define __LINUX_PUBLIC_GNTALLOC_H__
-
-/*
- * Allocates a new page and creates a new grant reference.
- */
-#define IOCTL_GNTALLOC_ALLOC_GREF \
-_IOC(_IOC_NONE, 'G', 5, sizeof(struct ioctl_gntalloc_alloc_gref))
-struct ioctl_gntalloc_alloc_gref {
-	/* IN parameters */
-	/* The ID of the domain to be given access to the grants. */
-	uint16_t domid;
-	/* Flags for this mapping */
-	uint16_t flags;
-	/* Number of pages to map */
-	uint32_t count;
-	/* OUT parameters */
-	/* The offset to be used on a subsequent call to mmap(). */
-	uint64_t index;
-	/* The grant references of the newly created grant, one per page */
-	/* Variable size, depending on count */
-	uint32_t gref_ids[1];
-};
-
-#define GNTALLOC_FLAG_WRITABLE 1
-
-/*
- * Deallocates the grant reference, allowing the associated page to be freed if
- * no other domains are using it.
- */
-#define IOCTL_GNTALLOC_DEALLOC_GREF \
-_IOC(_IOC_NONE, 'G', 6, sizeof(struct ioctl_gntalloc_dealloc_gref))
-struct ioctl_gntalloc_dealloc_gref {
-	/* IN parameters */
-	/* The offset returned in the map operation */
-	uint64_t index;
-	/* Number of references to unmap */
-	uint32_t count;
-};
-
-/*
- * Sets up an unmap notification within the page, so that the other side can do
- * cleanup if this side crashes. Required to implement cross-domain robust
- * mutexes or close notification on communication channels.
- *
- * Each mapped page only supports one notification; multiple calls referring to
- * the same page overwrite the previous notification. You must clear the
- * notification prior to the IOCTL_GNTALLOC_DEALLOC_GREF if you do not want it
- * to occur.
- */
-#define IOCTL_GNTALLOC_SET_UNMAP_NOTIFY \
-_IOC(_IOC_NONE, 'G', 7, sizeof(struct ioctl_gntalloc_unmap_notify))
-struct ioctl_gntalloc_unmap_notify {
-	/* IN parameters */
-	/* Offset in the file descriptor for a byte within the page (same as
-	 * used in mmap). If using UNMAP_NOTIFY_CLEAR_BYTE, this is the byte to
-	 * be cleared. Otherwise, it can be any byte in the page whose
-	 * notification we are adjusting.
-	 */
-	uint64_t index;
-	/* Action(s) to take on unmap */
-	uint32_t action;
-	/* Event channel to notify */
-	uint32_t event_channel_port;
-};
-
-/* Clear (set to zero) the byte specified by index */
-#define UNMAP_NOTIFY_CLEAR_BYTE 0x1
-/* Send an interrupt on the indicated event channel */
-#define UNMAP_NOTIFY_SEND_EVENT 0x2
-
-#endif /* __LINUX_PUBLIC_GNTALLOC_H__ */
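
A rough userspace sketch of the allocate/notify/deallocate flow this header described, for readers who have not used the device. Illustrative only: the /dev/xen/gntalloc node comes from the header comment itself, but the <xen/gntalloc.h> include path, the peer domain ID of 1 and the sysconf() page-size handling are assumptions, not part of this patch.

/* Illustrative userspace sketch of the gntalloc ioctls documented above. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntalloc.h>          /* assumption: UAPI copy of this header */

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/xen/gntalloc", O_RDWR);
	if (fd < 0) { perror("open"); return 1; }

	/* Allocate one shareable, writable page for the peer domain. */
	struct ioctl_gntalloc_alloc_gref alloc;
	memset(&alloc, 0, sizeof(alloc));
	alloc.domid = 1;                       /* hypothetical peer domid */
	alloc.flags = GNTALLOC_FLAG_WRITABLE;
	alloc.count = 1;
	if (ioctl(fd, IOCTL_GNTALLOC_ALLOC_GREF, &alloc)) { perror("alloc"); return 1; }
	printf("grant ref %u at offset %llu\n",
	       alloc.gref_ids[0], (unsigned long long)alloc.index);

	/* Map the freshly granted page at the offset the ioctl returned. */
	void *page = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED,
			  fd, alloc.index);
	if (page == MAP_FAILED) { perror("mmap"); return 1; }

	/* Ask the kernel to clear byte 0 of the page if we unmap or crash,
	 * so the peer can detect our disappearance. */
	struct ioctl_gntalloc_unmap_notify notify;
	memset(&notify, 0, sizeof(notify));
	notify.index = alloc.index;            /* byte 0 of the page */
	notify.action = UNMAP_NOTIFY_CLEAR_BYTE;
	ioctl(fd, IOCTL_GNTALLOC_SET_UNMAP_NOTIFY, &notify);

	/* ... hand alloc.gref_ids[0] to the peer and use the shared page ... */

	munmap(page, psz);
	struct ioctl_gntalloc_dealloc_gref dealloc = { .index = alloc.index,
						       .count = 1 };
	ioctl(fd, IOCTL_GNTALLOC_DEALLOC_GREF, &dealloc);
	close(fd);
	return 0;
}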
diff --git a/include/xen/gntdev.h b/include/xen/gntdev.h
deleted file mode 100644
index 5304bd3c84c5..000000000000
--- a/include/xen/gntdev.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/******************************************************************************
- * gntdev.h
- *
- * Interface to /dev/xen/gntdev.
- *
- * Copyright (c) 2007, D G Murray
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __LINUX_PUBLIC_GNTDEV_H__
-#define __LINUX_PUBLIC_GNTDEV_H__
-
-struct ioctl_gntdev_grant_ref {
-	/* The domain ID of the grant to be mapped. */
-	uint32_t domid;
-	/* The grant reference of the grant to be mapped. */
-	uint32_t ref;
-};
-
-/*
- * Inserts the grant references into the mapping table of an instance
- * of gntdev. N.B. This does not perform the mapping, which is deferred
- * until mmap() is called with @index as the offset.
- */
-#define IOCTL_GNTDEV_MAP_GRANT_REF \
-_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
-struct ioctl_gntdev_map_grant_ref {
-	/* IN parameters */
-	/* The number of grants to be mapped. */
-	uint32_t count;
-	uint32_t pad;
-	/* OUT parameters */
-	/* The offset to be used on a subsequent call to mmap(). */
-	uint64_t index;
-	/* Variable IN parameter. */
-	/* Array of grant references, of size @count. */
-	struct ioctl_gntdev_grant_ref refs[1];
-};
-
-/*
- * Removes the grant references from the mapping table of an instance of
- * of gntdev. N.B. munmap() must be called on the relevant virtual address(es)
- * before this ioctl is called, or an error will result.
- */
-#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
-_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
-struct ioctl_gntdev_unmap_grant_ref {
-	/* IN parameters */
-	/* The offset was returned by the corresponding map operation. */
-	uint64_t index;
-	/* The number of pages to be unmapped. */
-	uint32_t count;
-	uint32_t pad;
-};
-
-/*
- * Returns the offset in the driver's address space that corresponds
- * to @vaddr. This can be used to perform a munmap(), followed by an
- * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by
- * the caller. The number of pages that were allocated at the same time as
- * @vaddr is returned in @count.
- *
- * N.B. Where more than one page has been mapped into a contiguous range, the
- * supplied @vaddr must correspond to the start of the range; otherwise
- * an error will result. It is only possible to munmap() the entire
- * contiguously-allocated range at once, and not any subrange thereof.
- */
-#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \
-_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr))
-struct ioctl_gntdev_get_offset_for_vaddr {
-	/* IN parameters */
-	/* The virtual address of the first mapped page in a range. */
-	uint64_t vaddr;
-	/* OUT parameters */
-	/* The offset that was used in the initial mmap() operation. */
-	uint64_t offset;
-	/* The number of pages mapped in the VM area that begins at @vaddr. */
-	uint32_t count;
-	uint32_t pad;
-};
-
-/*
- * Sets the maximum number of grants that may mapped at once by this gntdev
- * instance.
- *
- * N.B. This must be called before any other ioctl is performed on the device.
- */
-#define IOCTL_GNTDEV_SET_MAX_GRANTS \
-_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants))
-struct ioctl_gntdev_set_max_grants {
-	/* IN parameter */
-	/* The maximum number of grants that may be mapped at once. */
-	uint32_t count;
-};
-
-/*
- * Sets up an unmap notification within the page, so that the other side can do
- * cleanup if this side crashes. Required to implement cross-domain robust
- * mutexes or close notification on communication channels.
- *
- * Each mapped page only supports one notification; multiple calls referring to
- * the same page overwrite the previous notification. You must clear the
- * notification prior to the IOCTL_GNTALLOC_DEALLOC_GREF if you do not want it
- * to occur.
- */
-#define IOCTL_GNTDEV_SET_UNMAP_NOTIFY \
-_IOC(_IOC_NONE, 'G', 7, sizeof(struct ioctl_gntdev_unmap_notify))
-struct ioctl_gntdev_unmap_notify {
-	/* IN parameters */
-	/* Offset in the file descriptor for a byte within the page (same as
-	 * used in mmap). If using UNMAP_NOTIFY_CLEAR_BYTE, this is the byte to
-	 * be cleared. Otherwise, it can be any byte in the page whose
-	 * notification we are adjusting.
-	 */
-	uint64_t index;
-	/* Action(s) to take on unmap */
-	uint32_t action;
-	/* Event channel to notify */
-	uint32_t event_channel_port;
-};
-
-/* Clear (set to zero) the byte specified by index */
-#define UNMAP_NOTIFY_CLEAR_BYTE 0x1
-/* Send an interrupt on the indicated event channel */
-#define UNMAP_NOTIFY_SEND_EVENT 0x2
-
-#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
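
The gntdev side is the mirror image of gntalloc: a foreign grant is registered with the device, then actually mapped by mmap() at the returned index. A hedged sketch under the same assumptions (the <xen/gntdev.h> include path is the UAPI copy of this header; domid 1 and grant ref 42 are invented values for a page the peer would actually have granted):

/* Illustrative userspace sketch of the gntdev ioctls documented above. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>            /* assumption: UAPI copy of this header */

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/xen/gntdev", O_RDWR);
	if (fd < 0) { perror("open"); return 1; }

	/* Register the grant; the mapping itself is deferred until mmap()
	 * is called with the returned index, as the comment above says. */
	struct ioctl_gntdev_map_grant_ref map;
	memset(&map, 0, sizeof(map));
	map.count = 1;
	map.refs[0].domid = 1;                 /* hypothetical peer domid */
	map.refs[0].ref   = 42;                /* hypothetical grant ref  */
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map)) { perror("map"); return 1; }

	void *page = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED,
			  fd, map.index);
	if (page == MAP_FAILED) { perror("mmap"); return 1; }

	/* ... read from / write to the peer's page ... */

	/* munmap() must precede the unmap ioctl, per the N.B. above. */
	munmap(page, psz);
	struct ioctl_gntdev_unmap_grant_ref unmap;
	memset(&unmap, 0, sizeof(unmap));
	unmap.index = map.index;
	unmap.count = 1;
	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);
	close(fd);
	return 0;
}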
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 7ad033dbc845..a5af2a26d94f 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -191,15 +191,11 @@ void gnttab_free_auto_xlat_frames(void);
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+		    struct gnttab_map_grant_ref *kmap_ops,
 		    struct page **pages, unsigned int count);
-int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
-			      struct gnttab_map_grant_ref *kmap_ops,
-			      struct page **pages, unsigned int count);
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+		      struct gnttab_map_grant_ref *kunmap_ops,
 		      struct page **pages, unsigned int count);
-int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *unmap_ops,
-				struct gnttab_map_grant_ref *kunmap_ops,
-				struct page **pages, unsigned int count);
 
 /* Perform a batch of grant map/copy operations. Retry every batch slot
  * for which the hypervisor returns GNTST_eagain. This is typically due
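
After this hunk the *_userspace variants are gone and the kmap_ops/kunmap_ops arguments are passed directly to gnttab_map_refs()/gnttab_unmap_refs(). A minimal in-kernel sketch of a caller that tracks no separate kernel mapping; the helpers used (gnttab_set_map_op, pfn_to_kaddr, GNTMAP_host_map, GNTST_okay) exist in the kernel headers, while the surrounding function and variables are invented for illustration:

/* Sketch of a post-change caller of the declaration above. */
static int example_map_one(grant_ref_t ref, domid_t domid, struct page *page)
{
	struct gnttab_map_grant_ref map_op;
	unsigned long addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));

	gnttab_set_map_op(&map_op, addr, GNTMAP_host_map, ref, domid);

	/* Second argument is the new kmap_ops parameter: NULL here,
	 * because no extra kernel-side mapping is being tracked. */
	if (gnttab_map_refs(&map_op, NULL, &page, 1) ||
	    map_op.status != GNTST_okay)
		return -EINVAL;

	return 0;
}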
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ae665ac59c36..32ec05a6572f 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -113,13 +113,13 @@ typedef uint64_t blkif_sector_t;
  * it's less than the number provided by the backend. The indirect_grefs field
  * in blkif_request_indirect should be filled by the frontend with the
  * grant references of the pages that are holding the indirect segments.
- * This pages are filled with an array of blkif_request_segment_aligned
- * that hold the information about the segments. The number of indirect
- * pages to use is determined by the maximum number of segments
- * a indirect request contains. Every indirect page can contain a maximum
- * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
- * so to calculate the number of indirect pages to use we have to do
- * ceil(indirect_segments/512).
+ * These pages are filled with an array of blkif_request_segment that hold the
+ * information about the segments. The number of indirect pages to use is
+ * determined by the number of segments an indirect request contains. Every
+ * indirect page can contain a maximum of
+ * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
+ * calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
  *
  * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
  * create the "feature-max-indirect-segments" node!
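
As a worked instance of the updated formula (the 4 KiB page size is an assumption for the example): sizeof(struct blkif_request_segment) is 8 bytes once padded (a 4-byte gref plus two 1-byte sector fields), so each indirect page holds 4096 / 8 = 512 segments, and e.g. 1024 indirect segments need ceil(1024/512) = 2 indirect pages.

/* Worked example of the formula above, assuming 4 KiB pages and an
 * 8-byte struct blkif_request_segment. */
#define EXAMPLE_SEGS_PER_INDIRECT_PAGE   (4096 / 8)      /* = 512 */
#define EXAMPLE_INDIRECT_PAGES(segs) \
	(((segs) + EXAMPLE_SEGS_PER_INDIRECT_PAGE - 1) / EXAMPLE_SEGS_PER_INDIRECT_PAGE)
/* EXAMPLE_INDIRECT_PAGES(512)  == 1
 * EXAMPLE_INDIRECT_PAGES(513)  == 2
 * EXAMPLE_INDIRECT_PAGES(1024) == 2 */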
@@ -135,13 +135,12 @@ typedef uint64_t blkif_sector_t;
 
 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
 
-struct blkif_request_segment_aligned {
+struct blkif_request_segment {
 	grant_ref_t gref;        /* reference to I/O buffer frame        */
 	/* @first_sect: first sector in frame to transfer (inclusive).   */
 	/* @last_sect: last sector in frame to transfer (inclusive).     */
 	uint8_t     first_sect, last_sect;
-	uint16_t    _pad; /* padding to make it 8 bytes, so it's cache-aligned */
-} __attribute__((__packed__));
+};
 
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
@@ -151,12 +150,7 @@ struct blkif_request_rw {
 #endif
 	uint64_t id;           /* private guest value, echoed in resp  */
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-	struct blkif_request_segment {
-		grant_ref_t gref;        /* reference to I/O buffer frame        */
-		/* @first_sect: first sector in frame to transfer (inclusive).   */
-		/* @last_sect: last sector in frame to transfer (inclusive).     */
-		uint8_t     first_sect, last_sect;
-	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 } __attribute__((__packed__));
 
 struct blkif_request_discard {
diff --git a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h
deleted file mode 100644
index ac45e0712afa..000000000000
--- a/include/xen/interface/xencomm.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Copyright (C) IBM Corp. 2006
- */
-
-#ifndef _XEN_XENCOMM_H_
-#define _XEN_XENCOMM_H_
-
-/* A xencomm descriptor is a scatter/gather list containing physical
- * addresses corresponding to a virtually contiguous memory area. The
- * hypervisor translates these physical addresses to machine addresses to copy
- * to and from the virtually contiguous area.
- */
-
-#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
-#define XENCOMM_INVALID (~0UL)
-
-struct xencomm_desc {
-	uint32_t magic;
-	uint32_t nr_addrs; /* the number of entries in address[] */
-	uint64_t address[0];
-};
-
-#endif /* _XEN_XENCOMM_H_ */
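
The descriptor being removed is a simple scatter/gather list. A hedged sketch of how such a descriptor would be populated for a virtually contiguous buffer; the helper below is illustrative and not part of the removed code, and PAGE_SIZE/PAGE_MASK plus the virt-to-phys callback are assumed from the surrounding kernel environment:

/* Illustrative: fill a xencomm_desc with one physical address per
 * page-sized chunk of a virtually contiguous buffer. */
static void example_fill_xencomm_desc(struct xencomm_desc *desc, void *buf,
				      unsigned long bytes,
				      unsigned long (*vtop)(unsigned long))
{
	unsigned long vaddr = (unsigned long)buf;
	uint32_t i = 0;

	desc->magic = XENCOMM_MAGIC;
	while (bytes) {
		/* Each entry covers at most the remainder of one page. */
		unsigned long chunk = PAGE_SIZE - (vaddr & ~PAGE_MASK);

		if (chunk > bytes)
			chunk = bytes;
		desc->address[i++] = vtop(vaddr);   /* physical address */
		vaddr += chunk;
		bytes -= chunk;
	}
	desc->nr_addrs = i;
}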
diff --git a/include/xen/xencomm.h b/include/xen/xencomm.h
deleted file mode 100644
index e43b039be112..000000000000
--- a/include/xen/xencomm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- *          Jerone Young <jyoung5@us.ibm.com>
- */
-
-#ifndef _LINUX_XENCOMM_H_
-#define _LINUX_XENCOMM_H_
-
-#include <xen/interface/xencomm.h>
-
-#define XENCOMM_MINI_ADDRS 3
-struct xencomm_mini {
-	struct xencomm_desc _desc;
-	uint64_t address[XENCOMM_MINI_ADDRS];
-};
-
-/* To avoid additionnal virt to phys conversion, an opaque structure is
-   presented. */
-struct xencomm_handle;
-
-extern void xencomm_free(struct xencomm_handle *desc);
-extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
-extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
-			unsigned long bytes, struct xencomm_mini *xc_area);
-
-#if 0
-#define XENCOMM_MINI_ALIGNED(xc_desc, n)				\
-	struct xencomm_mini xc_desc ## _base[(n)]			\
-	__attribute__((__aligned__(sizeof(struct xencomm_mini))));	\
-	struct xencomm_mini *xc_desc = &xc_desc ## _base[0];
-#else
-/*
- * gcc bug workaround:
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
- * gcc doesn't handle properly stack variable with
- * __attribute__((__align__(sizeof(struct xencomm_mini))))
- */
-#define XENCOMM_MINI_ALIGNED(xc_desc, n)				\
-	unsigned char xc_desc ## _base[((n) + 1 ) *			\
-				       sizeof(struct xencomm_mini)];	\
-	struct xencomm_mini *xc_desc = (struct xencomm_mini *)		\
-		((unsigned long)xc_desc ## _base +			\
-		 (sizeof(struct xencomm_mini) -				\
-		  ((unsigned long)xc_desc ## _base) %			\
-		  sizeof(struct xencomm_mini)));
-#endif
-#define xencomm_map_no_alloc(ptr, bytes)			\
-	({ XENCOMM_MINI_ALIGNED(xc_desc, 1);			\
-		__xencomm_map_no_alloc(ptr, bytes, xc_desc); })
-
-/* provided by architecture code: */
-extern unsigned long xencomm_vtop(unsigned long vaddr);
-
-static inline void *xencomm_pa(void *ptr)
-{
-	return (void *)xencomm_vtop((unsigned long)ptr);
-}
-
-#define xen_guest_handle(hnd) ((hnd).p)
-
-#endif /* _LINUX_XENCOMM_H_ */