diff options
author | Andy King <acking@vmware.com> | 2013-01-10 18:41:39 -0500 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-01-17 15:07:38 -0500 |
commit | 42281d20cdf94a9d2aae67ee019f8bcc390ebed6 (patch) | |
tree | 110c09fd3be68927b4404475ca53b07f5b859a0e /drivers/misc/vmw_vmci | |
parent | bad7d9df274b03a0761913b6628fc7663ad3bfa6 (diff) |
VMCI: Remove dependency on BLOCK I/O
No need to bring in device-mapper.h and along with it a dependency on BLOCK I/O
just to use dm_div_up(). Just use the existing DIV_ROUND_UP().
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Andy King <acking@vmware.com>
Signed-off-by: Dmitry Torokhov <dtor@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/misc/vmw_vmci')
-rw-r--r-- | drivers/misc/vmw_vmci/vmci_queue_pair.c | 28 |
1 file changed, 16 insertions, 12 deletions
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 1123111ba1bf..da47e457e158 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c | |||
@@ -13,12 +13,16 @@ | |||
13 | * for more details. | 13 | * for more details. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/device-mapper.h> | ||
17 | #include <linux/vmw_vmci_defs.h> | 16 | #include <linux/vmw_vmci_defs.h> |
18 | #include <linux/vmw_vmci_api.h> | 17 | #include <linux/vmw_vmci_api.h> |
18 | #include <linux/highmem.h> | ||
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/mm.h> | ||
20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
21 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
23 | #include <linux/pagemap.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/slab.h> | ||
22 | #include <linux/socket.h> | 26 | #include <linux/socket.h> |
23 | #include <linux/wait.h> | 27 | #include <linux/wait.h> |
24 | 28 | ||
@@ -246,9 +250,9 @@ static struct qp_list qp_guest_endpoints = { | |||
246 | }; | 250 | }; |
247 | 251 | ||
248 | #define INVALID_VMCI_GUEST_MEM_ID 0 | 252 | #define INVALID_VMCI_GUEST_MEM_ID 0 |
249 | #define QPE_NUM_PAGES(_QPE) ((u32) \ | 253 | #define QPE_NUM_PAGES(_QPE) ((u32) \ |
250 | (dm_div_up(_QPE.produce_size, PAGE_SIZE) + \ | 254 | (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \ |
251 | dm_div_up(_QPE.consume_size, PAGE_SIZE) + 2)) | 255 | DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2)) |
252 | 256 | ||
253 | 257 | ||
254 | /* | 258 | /* |
@@ -260,7 +264,7 @@ static void qp_free_queue(void *q, u64 size) | |||
260 | struct vmci_queue *queue = q; | 264 | struct vmci_queue *queue = q; |
261 | 265 | ||
262 | if (queue) { | 266 | if (queue) { |
263 | u64 i = dm_div_up(size, PAGE_SIZE); | 267 | u64 i = DIV_ROUND_UP(size, PAGE_SIZE); |
264 | 268 | ||
265 | if (queue->kernel_if->mapped) { | 269 | if (queue->kernel_if->mapped) { |
266 | vunmap(queue->kernel_if->va); | 270 | vunmap(queue->kernel_if->va); |
@@ -289,7 +293,7 @@ static void *qp_alloc_queue(u64 size, u32 flags) | |||
289 | u64 i; | 293 | u64 i; |
290 | struct vmci_queue *queue; | 294 | struct vmci_queue *queue; |
291 | struct vmci_queue_header *q_header; | 295 | struct vmci_queue_header *q_header; |
292 | const u64 num_data_pages = dm_div_up(size, PAGE_SIZE); | 296 | const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE); |
293 | const uint queue_size = | 297 | const uint queue_size = |
294 | PAGE_SIZE + | 298 | PAGE_SIZE + |
295 | sizeof(*queue) + sizeof(*(queue->kernel_if)) + | 299 | sizeof(*queue) + sizeof(*(queue->kernel_if)) + |
@@ -611,7 +615,7 @@ static int qp_memcpy_from_queue_iov(void *dest, | |||
611 | static struct vmci_queue *qp_host_alloc_queue(u64 size) | 615 | static struct vmci_queue *qp_host_alloc_queue(u64 size) |
612 | { | 616 | { |
613 | struct vmci_queue *queue; | 617 | struct vmci_queue *queue; |
614 | const size_t num_pages = dm_div_up(size, PAGE_SIZE) + 1; | 618 | const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; |
615 | const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); | 619 | const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); |
616 | const size_t queue_page_size = | 620 | const size_t queue_page_size = |
617 | num_pages * sizeof(*queue->kernel_if->page); | 621 | num_pages * sizeof(*queue->kernel_if->page); |
@@ -963,8 +967,8 @@ qp_guest_endpoint_create(struct vmci_handle handle, | |||
963 | int result; | 967 | int result; |
964 | struct qp_guest_endpoint *entry; | 968 | struct qp_guest_endpoint *entry; |
965 | /* One page each for the queue headers. */ | 969 | /* One page each for the queue headers. */ |
966 | const u64 num_ppns = dm_div_up(produce_size, PAGE_SIZE) + | 970 | const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) + |
967 | dm_div_up(consume_size, PAGE_SIZE) + 2; | 971 | DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2; |
968 | 972 | ||
969 | if (vmci_handle_is_invalid(handle)) { | 973 | if (vmci_handle_is_invalid(handle)) { |
970 | u32 context_id = vmci_get_context_id(); | 974 | u32 context_id = vmci_get_context_id(); |
@@ -1175,9 +1179,9 @@ static int qp_alloc_guest_work(struct vmci_handle *handle, | |||
1175 | u32 priv_flags) | 1179 | u32 priv_flags) |
1176 | { | 1180 | { |
1177 | const u64 num_produce_pages = | 1181 | const u64 num_produce_pages = |
1178 | dm_div_up(produce_size, PAGE_SIZE) + 1; | 1182 | DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1; |
1179 | const u64 num_consume_pages = | 1183 | const u64 num_consume_pages = |
1180 | dm_div_up(consume_size, PAGE_SIZE) + 1; | 1184 | DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1; |
1181 | void *my_produce_q = NULL; | 1185 | void *my_produce_q = NULL; |
1182 | void *my_consume_q = NULL; | 1186 | void *my_consume_q = NULL; |
1183 | int result; | 1187 | int result; |
@@ -1456,7 +1460,7 @@ static int qp_broker_create(struct vmci_handle handle, | |||
1456 | entry->state = VMCIQPB_CREATED_MEM; | 1460 | entry->state = VMCIQPB_CREATED_MEM; |
1457 | entry->produce_q->q_header = entry->local_mem; | 1461 | entry->produce_q->q_header = entry->local_mem; |
1458 | tmp = (u8 *)entry->local_mem + PAGE_SIZE * | 1462 | tmp = (u8 *)entry->local_mem + PAGE_SIZE * |
1459 | (dm_div_up(entry->qp.produce_size, PAGE_SIZE) + 1); | 1463 | (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); |
1460 | entry->consume_q->q_header = (struct vmci_queue_header *)tmp; | 1464 | entry->consume_q->q_header = (struct vmci_queue_header *)tmp; |
1461 | } else if (page_store) { | 1465 | } else if (page_store) { |
1462 | /* | 1466 | /* |