author		Kristian Høgsberg <krh@redhat.com>	2007-02-16 17:34:38 -0500
committer	Stefan Richter <stefanr@s5r6.in-berlin.de>	2007-03-09 16:02:57 -0500
commit		9aad8125389a7a2990dee72d7892e22330a945eb (patch)
tree		2566a8985837b000990db7e16b17547d3747141b /drivers
parent		6e2e8424d310507fa044649435114217826ed78a (diff)
firewire: Split the iso buffer out from fw_iso_context and avoid vmalloc.
This patch splits out the iso buffer so that it can be initialized at mmap
time with the size provided in the mmap call.  Furthermore, the backing pages
are now allocated with alloc_page() to avoid setting up kernel-side virtual
memory mappings for them.

Signed-off-by: Kristian Høgsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
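The flow this sets up is: the client mmap()s the character device, the mmap
handler sizes an fw_iso_buffer from the length of the vma, backs it with
individually allocated pages that have no kernel virtual mapping, and inserts
those pages into the caller's address space.  A minimal sketch of that call
sequence, mirroring fw_device_op_mmap() in the diff below (the my_iso_mmap()
wrapper is only illustrative; locking and most error handling are omitted):

/* Illustrative sketch only -- the real handler is fw_device_op_mmap(). */
static int my_iso_mmap(struct fw_iso_buffer *buffer, struct fw_card *card,
		       struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	enum dma_data_direction direction =
		(vma->vm_flags & VM_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int retval;

	if (size & ~PAGE_MASK)		/* buffer must be whole pages */
		return -EINVAL;

	/* alloc_page() + dma_map_page() per page; the DMA address is
	 * stashed in page_private(), so no vmalloc mapping is created. */
	retval = fw_iso_buffer_init(buffer, card, size >> PAGE_SHIFT, direction);
	if (retval < 0)
		return retval;

	/* vm_insert_page() each backing page into the user's vma. */
	retval = fw_iso_buffer_map(buffer, vma);
	if (retval < 0)
		fw_iso_buffer_destroy(buffer, card);

	return retval;
}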
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/firewire/fw-device-cdev.c	 65
-rw-r--r--	drivers/firewire/fw-device-cdev.h	  2
-rw-r--r--	drivers/firewire/fw-iso.c		118
-rw-r--r--	drivers/firewire/fw-ohci.c		 12
-rw-r--r--	drivers/firewire/fw-transaction.h	 38
5 files changed, 151 insertions, 84 deletions
diff --git a/drivers/firewire/fw-device-cdev.c b/drivers/firewire/fw-device-cdev.c
index 1b9e5f7c0129..6284375c6390 100644
--- a/drivers/firewire/fw-device-cdev.c
+++ b/drivers/firewire/fw-device-cdev.c
@@ -71,8 +71,10 @@ struct client {
 	struct list_head event_list;
 	struct semaphore event_list_sem;
 	wait_queue_head_t wait;
-	unsigned long vm_start;
+
 	struct fw_iso_context *iso_context;
+	struct fw_iso_buffer buffer;
+	unsigned long vm_start;
 };
 
 static inline void __user *
@@ -406,7 +408,6 @@ static int ioctl_create_iso_context(struct client *client, void __user *arg)
 
 	client->iso_context = fw_iso_context_create(client->device->card,
 						    FW_ISO_CONTEXT_TRANSMIT,
-						    request.buffer_size,
 						    iso_callback, client);
 	if (IS_ERR(client->iso_context))
 		return PTR_ERR(client->iso_context);
@@ -418,8 +419,7 @@ static int ioctl_queue_iso(struct client *client, void __user *arg)
 {
 	struct fw_cdev_queue_iso request;
 	struct fw_cdev_iso_packet __user *p, *end, *next;
-	void *payload, *payload_end;
-	unsigned long index;
+	unsigned long payload, payload_end;
 	int count;
 	struct {
 		struct fw_iso_packet packet;
@@ -434,20 +434,17 @@ static int ioctl_queue_iso(struct client *client, void __user *arg)
 	/* If the user passes a non-NULL data pointer, has mmap()'ed
 	 * the iso buffer, and the pointer points inside the buffer,
 	 * we setup the payload pointers accordingly. Otherwise we
-	 * set them both to NULL, which will still let packets with
+	 * set them both to 0, which will still let packets with
 	 * payload_length == 0 through. In other words, if no packets
 	 * use the indirect payload, the iso buffer need not be mapped
 	 * and the request.data pointer is ignored.*/
 
-	index = (unsigned long)request.data - client->vm_start;
-	if (request.data != 0 && client->vm_start != 0 &&
-	    index <= client->iso_context->buffer_size) {
-		payload = client->iso_context->buffer + index;
-		payload_end = client->iso_context->buffer +
-			client->iso_context->buffer_size;
-	} else {
-		payload = NULL;
-		payload_end = NULL;
+	payload = (unsigned long)request.data - client->vm_start;
+	payload_end = payload + (client->buffer.page_count << PAGE_SHIFT);
+	if (request.data == 0 || client->buffer.pages == NULL ||
+	    payload >= payload_end) {
+		payload = 0;
+		payload_end = 0;
 	}
 
 	if (!access_ok(VERIFY_READ, request.packets, request.size))
@@ -473,7 +470,7 @@ static int ioctl_queue_iso(struct client *client, void __user *arg)
 			return -EINVAL;
 
 		if (fw_iso_context_queue(client->iso_context,
-					 &u.packet, payload))
+					 &u.packet, &client->buffer, payload))
 			break;
 
 		p = next;
@@ -483,8 +480,7 @@ static int ioctl_queue_iso(struct client *client, void __user *arg)
 
 	request.size -= uptr_to_u64(p) - request.packets;
 	request.packets = uptr_to_u64(p);
-	request.data =
-		client->vm_start + (payload - client->iso_context->buffer);
+	request.data = client->vm_start + payload;
 
 	if (copy_to_user(arg, &request, sizeof request))
 		return -EFAULT;
@@ -549,13 +545,41 @@ fw_device_op_compat_ioctl(struct file *file,
 static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct client *client = file->private_data;
+	enum dma_data_direction direction;
+	unsigned long size;
+	int page_count, retval;
+
+	/* FIXME: We could support multiple buffers, but we don't. */
+	if (client->buffer.pages != NULL)
+		return -EBUSY;
+
+	if (!(vma->vm_flags & VM_SHARED))
+		return -EINVAL;
 
-	if (client->iso_context->buffer == NULL)
+	if (vma->vm_start & ~PAGE_MASK)
 		return -EINVAL;
 
 	client->vm_start = vma->vm_start;
+	size = vma->vm_end - vma->vm_start;
+	page_count = size >> PAGE_SHIFT;
+	if (size & ~PAGE_MASK)
+		return -EINVAL;
+
+	if (vma->vm_flags & VM_WRITE)
+		direction = DMA_TO_DEVICE;
+	else
+		direction = DMA_FROM_DEVICE;
+
+	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
+				    page_count, direction);
+	if (retval < 0)
+		return retval;
 
-	return remap_vmalloc_range(vma, client->iso_context->buffer, 0);
+	retval = fw_iso_buffer_map(&client->buffer, vma);
+	if (retval < 0)
+		fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+	return retval;
 }
 
 static int fw_device_op_release(struct inode *inode, struct file *file)
@@ -564,6 +588,9 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
 	struct address_handler *h, *next;
 	struct request *r, *next_r;
 
+	if (client->buffer.pages)
+		fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
 	if (client->iso_context)
 		fw_iso_context_destroy(client->iso_context);
 
diff --git a/drivers/firewire/fw-device-cdev.h b/drivers/firewire/fw-device-cdev.h
index ddbae43ca1a4..003cc669551b 100644
--- a/drivers/firewire/fw-device-cdev.h
+++ b/drivers/firewire/fw-device-cdev.h
@@ -126,7 +126,7 @@ struct fw_cdev_allocate {
 };
 
 struct fw_cdev_create_iso_context {
-	__u32 buffer_size;
+	__u32 handle;
 };
 
 struct fw_cdev_iso_packet {
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index 6481e3df2c93..4e7ba8672929 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -28,68 +28,88 @@
 #include "fw-topology.h"
 #include "fw-device.h"
 
-static int
-setup_iso_buffer(struct fw_iso_context *ctx, size_t size,
-		 enum dma_data_direction direction)
+int
+fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+		   int page_count, enum dma_data_direction direction)
 {
-	struct page *page;
-	int i, j;
-	void *p;
-
-	ctx->buffer_size = PAGE_ALIGN(size);
-	if (size == 0)
-		return 0;
-
-	ctx->buffer = vmalloc_32_user(ctx->buffer_size);
-	if (ctx->buffer == NULL)
-		goto fail_buffer_alloc;
-
-	ctx->page_count = ctx->buffer_size >> PAGE_SHIFT;
-	ctx->pages =
-		kzalloc(ctx->page_count * sizeof(ctx->pages[0]), GFP_KERNEL);
-	if (ctx->pages == NULL)
-		goto fail_pages_alloc;
-
-	p = ctx->buffer;
-	for (i = 0; i < ctx->page_count; i++, p += PAGE_SIZE) {
-		page = vmalloc_to_page(p);
-		ctx->pages[i] = dma_map_page(ctx->card->device,
-					     page, 0, PAGE_SIZE, direction);
-		if (dma_mapping_error(ctx->pages[i]))
-			goto fail_mapping;
+	int i, j, retval = -ENOMEM;
+	dma_addr_t address;
+
+	buffer->page_count = page_count;
+	buffer->direction = direction;
+
+	buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
+				GFP_KERNEL);
+	if (buffer->pages == NULL)
+		goto out;
+
+	for (i = 0; i < buffer->page_count; i++) {
+		buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
+		if (buffer->pages[i] == NULL)
+			goto out_pages;
+
+		address = dma_map_page(card->device, buffer->pages[i],
+				       0, PAGE_SIZE, direction);
+		if (dma_mapping_error(address)) {
+			__free_page(buffer->pages[i]);
+			goto out_pages;
+		}
+		set_page_private(buffer->pages[i], address);
 	}
 
 	return 0;
 
- fail_mapping:
-	for (j = 0; j < i; j++)
-		dma_unmap_page(ctx->card->device, ctx->pages[j],
+ out_pages:
+	for (j = 0; j < i; j++) {
+		address = page_private(buffer->pages[j]);
+		dma_unmap_page(card->device, address,
 			       PAGE_SIZE, DMA_TO_DEVICE);
- fail_pages_alloc:
-	vfree(ctx->buffer);
- fail_buffer_alloc:
-	return -ENOMEM;
+		__free_page(buffer->pages[j]);
+	}
+	kfree(buffer->pages);
+ out:
+	buffer->pages = NULL;
+	return retval;
+}
+
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
+{
+	unsigned long uaddr;
+	int i, retval;
+
+	uaddr = vma->vm_start;
+	for (i = 0; i < buffer->page_count; i++) {
+		retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
+		if (retval)
+			return retval;
+		uaddr += PAGE_SIZE;
+	}
+
+	return 0;
 }
 
-static void destroy_iso_buffer(struct fw_iso_context *ctx)
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
+			   struct fw_card *card)
 {
 	int i;
+	dma_addr_t address;
 
-	for (i = 0; i < ctx->page_count; i++)
-		dma_unmap_page(ctx->card->device, ctx->pages[i],
+	for (i = 0; i < buffer->page_count; i++) {
+		address = page_private(buffer->pages[i]);
+		dma_unmap_page(card->device, address,
 			       PAGE_SIZE, DMA_TO_DEVICE);
+		__free_page(buffer->pages[i]);
+	}
 
-	kfree(ctx->pages);
-	vfree(ctx->buffer);
+	kfree(buffer->pages);
+	buffer->pages = NULL;
 }
 
 struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type,
-					     size_t buffer_size,
 					     fw_iso_callback_t callback,
 					     void *callback_data)
 {
 	struct fw_iso_context *ctx;
-	int retval;
 
 	ctx = card->driver->allocate_iso_context(card, type);
 	if (IS_ERR(ctx))
@@ -100,12 +120,6 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type,
 	ctx->callback = callback;
 	ctx->callback_data = callback_data;
 
-	retval = setup_iso_buffer(ctx, buffer_size, DMA_TO_DEVICE);
-	if (retval < 0) {
-		card->driver->free_iso_context(ctx);
-		return ERR_PTR(retval);
-	}
-
 	return ctx;
 }
 EXPORT_SYMBOL(fw_iso_context_create);
@@ -114,8 +128,6 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
 {
 	struct fw_card *card = ctx->card;
 
-	destroy_iso_buffer(ctx);
-
 	card->driver->free_iso_context(ctx);
 }
 EXPORT_SYMBOL(fw_iso_context_destroy);
@@ -133,10 +145,12 @@ EXPORT_SYMBOL(fw_iso_context_send);
 
 int
 fw_iso_context_queue(struct fw_iso_context *ctx,
-		     struct fw_iso_packet *packet, void *payload)
+		     struct fw_iso_packet *packet,
+		     struct fw_iso_buffer *buffer,
+		     unsigned long payload)
 {
 	struct fw_card *card = ctx->card;
 
-	return card->driver->queue_iso(ctx, packet, payload);
+	return card->driver->queue_iso(ctx, packet, buffer, payload);
 }
 EXPORT_SYMBOL(fw_iso_context_queue);
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 72f80361fc43..8499d70bf9ee 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -1251,14 +1251,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
 
 static int
 ohci_queue_iso(struct fw_iso_context *base,
-	       struct fw_iso_packet *packet, void *payload)
+	       struct fw_iso_packet *packet,
+	       struct fw_iso_buffer *buffer,
+	       unsigned long payload)
 {
 	struct iso_context *ctx = (struct iso_context *)base;
 	struct fw_ohci *ohci = fw_ohci(ctx->base.card);
 	struct descriptor *d, *end, *last, *tail, *pd;
 	struct fw_iso_packet *p;
 	__le32 *header;
-	dma_addr_t d_bus;
+	dma_addr_t d_bus, page_bus;
 	u32 z, header_z, payload_z, irq;
 	u32 payload_index, payload_end_index, next_page_index;
 	int index, page, end_page, i, length, offset;
@@ -1267,7 +1269,7 @@ ohci_queue_iso(struct fw_iso_context *base,
 	 * packet, retransmit or terminate.. */
 
 	p = packet;
-	payload_index = payload - ctx->base.buffer;
+	payload_index = payload;
 	d = ctx->head_descriptor;
 	tail = ctx->tail_descriptor;
 	end = ctx->buffer + ISO_BUFFER_SIZE / sizeof(struct descriptor);
@@ -1337,7 +1339,9 @@ ohci_queue_iso(struct fw_iso_context *base,
 		length =
 			min(next_page_index, payload_end_index) - payload_index;
 		pd[i].req_count = cpu_to_le16(length);
-		pd[i].data_address = cpu_to_le32(ctx->base.pages[page] + offset);
+
+		page_bus = page_private(buffer->pages[page]);
+		pd[i].data_address = cpu_to_le32(page_bus + offset);
 
 		payload_index += length;
 	}
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 8f0283cf1a7a..89c6dda27936 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -27,6 +27,7 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/fs.h>
+#include <linux/dma-mapping.h>
 
 #define TCODE_WRITE_QUADLET_REQUEST	0
 #define TCODE_WRITE_BLOCK_REQUEST	1
@@ -336,6 +337,18 @@ struct fw_iso_context;
 typedef void (*fw_iso_callback_t) (struct fw_iso_context *context,
 				   int status, u32 cycle, void *data);
 
+/* An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction.  Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space.  We store the
+ * DMA address in the page private.  The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma. */
+
+struct fw_iso_buffer {
+	enum dma_data_direction direction;
+	struct page **pages;
+	int page_count;
+};
+
 struct fw_iso_context {
 	struct fw_card *card;
 	int type;
@@ -343,19 +356,24 @@ struct fw_iso_context {
 	int speed;
 	fw_iso_callback_t callback;
 	void *callback_data;
-
-	void *buffer;
-	size_t buffer_size;
-	dma_addr_t *pages;
-	int page_count;
 };
 
+int
+fw_iso_buffer_init(struct fw_iso_buffer *buffer,
+		   struct fw_card *card,
+		   int page_count,
+		   enum dma_data_direction direction);
+int
+fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+void
+fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+
 struct fw_iso_context *
 fw_iso_context_create(struct fw_card *card, int type,
-		      size_t buffer_size,
 		      fw_iso_callback_t callback,
 		      void *callback_data);
 
+
 void
 fw_iso_context_destroy(struct fw_iso_context *ctx);
 
@@ -365,7 +383,9 @@ fw_iso_context_start(struct fw_iso_context *ctx,
 
 int
 fw_iso_context_queue(struct fw_iso_context *ctx,
-		     struct fw_iso_packet *packet, void *payload);
+		     struct fw_iso_packet *packet,
+		     struct fw_iso_buffer *buffer,
+		     unsigned long payload);
 
 int
 fw_iso_context_send(struct fw_iso_context *ctx,
@@ -410,7 +430,9 @@ struct fw_card_driver {
 	int (*send_iso)(struct fw_iso_context *ctx, s32 cycle);
 
 	int (*queue_iso)(struct fw_iso_context *ctx,
-			 struct fw_iso_packet *packet, void *payload);
+			 struct fw_iso_packet *packet,
+			 struct fw_iso_buffer *buffer,
+			 unsigned long payload);
 };
 
 int