-rw-r--r--	drivers/firewire/fw-iso.c  | 21
-rw-r--r--	drivers/firewire/fw-ohci.c | 23
-rw-r--r--	drivers/firewire/fw-sbp2.c | 28
3 files changed, 47 insertions(+), 25 deletions(-)
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index 024fab4ef998..6481e3df2c93 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -33,7 +33,7 @@ setup_iso_buffer(struct fw_iso_context *ctx, size_t size,
 		 enum dma_data_direction direction)
 {
 	struct page *page;
-	int i;
+	int i, j;
 	void *p;
 
 	ctx->buffer_size = PAGE_ALIGN(size);
@@ -42,24 +42,33 @@ setup_iso_buffer(struct fw_iso_context *ctx, size_t size,
 
 	ctx->buffer = vmalloc_32_user(ctx->buffer_size);
 	if (ctx->buffer == NULL)
-		return -ENOMEM;
+		goto fail_buffer_alloc;
 
 	ctx->page_count = ctx->buffer_size >> PAGE_SHIFT;
 	ctx->pages =
 		kzalloc(ctx->page_count * sizeof(ctx->pages[0]), GFP_KERNEL);
-	if (ctx->pages == NULL) {
-		vfree(ctx->buffer);
-		return -ENOMEM;
-	}
+	if (ctx->pages == NULL)
+		goto fail_pages_alloc;
 
 	p = ctx->buffer;
 	for (i = 0; i < ctx->page_count; i++, p += PAGE_SIZE) {
 		page = vmalloc_to_page(p);
 		ctx->pages[i] = dma_map_page(ctx->card->device,
 					     page, 0, PAGE_SIZE, direction);
+		if (dma_mapping_error(ctx->pages[i]))
+			goto fail_mapping;
 	}
 
 	return 0;
+
+ fail_mapping:
+	for (j = 0; j < i; j++)
+		dma_unmap_page(ctx->card->device, ctx->pages[j],
+			       PAGE_SIZE, DMA_TO_DEVICE);
+ fail_pages_alloc:
+	vfree(ctx->buffer);
+ fail_buffer_alloc:
+	return -ENOMEM;
 }
 
 static void destroy_iso_buffer(struct fw_iso_context *ctx)
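
The fw-iso.c hunks give setup_iso_buffer() a single unwinding error path and add the previously missing dma_map_page() failure check: if the mapping fails on iteration i, only the i mappings that already succeeded are torn down before the buffer is released. Below is a minimal sketch of that unwind idiom, with hypothetical names and the single-argument dma_mapping_error() form this patch uses; it illustrates the pattern and is not the driver's own function.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Map each PAGE_SIZE chunk of a vmalloc'ed buffer for DMA.  On a mapping
 * failure, unmap only the pages that were successfully mapped so far.
 */
static int map_buffer_pages(struct device *dev, void *buf, int page_count,
			    dma_addr_t *bus_addrs, enum dma_data_direction dir)
{
	int i, j;

	for (i = 0; i < page_count; i++) {
		struct page *page = vmalloc_to_page(buf + i * PAGE_SIZE);

		bus_addrs[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(bus_addrs[i]))
			goto fail_mapping;
	}

	return 0;

 fail_mapping:
	/* Indices 0..i-1 were mapped; index i failed and must not be unmapped. */
	for (j = 0; j < i; j++)
		dma_unmap_page(dev, bus_addrs[j], PAGE_SIZE, dir);

	return -ENOMEM;
}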
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index e6fa3496183e..4512edba6cb0 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -431,7 +431,7 @@ at_context_setup_packet(struct at_context *ctx, struct list_head *list)
 					       packet->payload,
 					       packet->payload_length,
 					       DMA_TO_DEVICE);
-	if (packet->payload_bus == 0) {
+	if (dma_mapping_error(packet->payload_bus)) {
 		complete_transmission(packet, RCODE_SEND_ERROR, list);
 		return;
 	}
@@ -590,7 +590,7 @@ at_context_init(struct at_context *ctx, struct fw_ohci *ohci, u32 regs)
 	ctx->descriptor_bus =
 		dma_map_single(ohci->card.device, &ctx->d,
 			       sizeof ctx->d, DMA_TO_DEVICE);
-	if (ctx->descriptor_bus == 0)
+	if (dma_mapping_error(ctx->descriptor_bus))
 		return -ENOMEM;
 
 	ctx->regs = regs;
@@ -1159,16 +1159,14 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 	tasklet_init(&ctx->tasklet, tasklet, (unsigned long)ctx);
 
 	ctx->buffer = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
-	if (ctx->buffer == NULL) {
-		spin_lock_irqsave(&ohci->lock, flags);
-		*mask |= 1 << index;
-		spin_unlock_irqrestore(&ohci->lock, flags);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (ctx->buffer == NULL)
+		goto buffer_alloc_failed;
 
 	ctx->buffer_bus =
 		dma_map_single(card->device, ctx->buffer,
 			       ISO_BUFFER_SIZE, DMA_TO_DEVICE);
+	if (dma_mapping_error(ctx->buffer_bus))
+		goto buffer_map_failed;
 
 	ctx->head_descriptor = ctx->buffer;
 	ctx->prev_descriptor = ctx->buffer;
@@ -1187,6 +1185,15 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 	ctx->head_descriptor++;
 
 	return &ctx->base;
+
+ buffer_map_failed:
+	kfree(ctx->buffer);
+ buffer_alloc_failed:
+	spin_lock_irqsave(&ohci->lock, flags);
+	*mask |= 1 << index;
+	spin_unlock_irqrestore(&ohci->lock, flags);
+
+	return ERR_PTR(-ENOMEM);
 }
 
 static int ohci_send_iso(struct fw_iso_context *base, s32 cycle)
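
The fw-ohci.c changes replace the "== 0" tests after dma_map_single() with dma_mapping_error(): a bus address of 0 can be a valid mapping, and the DMA API reports failure through a dedicated error value that only dma_mapping_error() knows how to recognize. The ohci_allocate_iso_context() hunks also rearrange the exits so each label releases exactly what was acquired before the jump. A short sketch of that shape follows, assuming a hypothetical helper rather than the driver's code.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/*
 * Allocate a buffer and map it for DMA; on failure, release resources in
 * reverse order of acquisition.  The failure test uses dma_mapping_error()
 * rather than comparing the returned address against 0.
 */
static void *alloc_and_map(struct device *dev, size_t size, dma_addr_t *bus)
{
	void *buf;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		goto fail_alloc;

	*bus = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(*bus))
		goto fail_map;

	return buf;

 fail_map:
	kfree(buf);
 fail_alloc:
	return NULL;
}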
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index fa59e59766e1..2259e2225866 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -411,13 +411,13 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 	orb->base.request_bus =
 		dma_map_single(device->card->device, &orb->request,
 			       sizeof orb->request, DMA_TO_DEVICE);
-	if (orb->base.request_bus == 0)
+	if (dma_mapping_error(orb->base.request_bus))
 		goto out;
 
 	orb->response_bus =
 		dma_map_single(device->card->device, &orb->response,
 			       sizeof orb->response, DMA_FROM_DEVICE);
-	if (orb->response_bus == 0)
+	if (dma_mapping_error(orb->response_bus))
 		goto out;
 
 	orb->request.response.high = 0;
@@ -963,22 +963,20 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	 * transfer direction not handled. */
 	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
 		fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
-		cmd->result = DID_ERROR << 16;
-		done(cmd);
-		return 0;
+		goto fail_alloc;
 	}
 
 	orb = kzalloc(sizeof *orb, GFP_ATOMIC);
 	if (orb == NULL) {
 		fw_notify("failed to alloc orb\n");
-		cmd->result = DID_NO_CONNECT << 16;
-		done(cmd);
-		return 0;
+		goto fail_alloc;
 	}
 
 	orb->base.request_bus =
 		dma_map_single(device->card->device, &orb->request,
 			       sizeof orb->request, DMA_TO_DEVICE);
+	if (dma_mapping_error(orb->base.request_bus))
+		goto fail_mapping;
 
 	orb->unit = unit;
 	orb->done = done;
@@ -1009,9 +1007,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 		 * could we get the scsi or blk layer to do that by
 		 * reporting our max supported block size? */
 		fw_error("command > 64k\n");
-		cmd->result = DID_ERROR << 16;
-		done(cmd);
-		return 0;
+		goto fail_bufflen;
 	} else if (cmd->request_bufflen > 0) {
 		sbp2_command_orb_map_buffer(orb);
 	}
@@ -1028,6 +1024,16 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 		  sd->command_block_agent_address + SBP2_ORB_POINTER);
 
 	return 0;
+
+ fail_bufflen:
+	dma_unmap_single(device->card->device, orb->base.request_bus,
+			 sizeof orb->request, DMA_TO_DEVICE);
+ fail_mapping:
+	kfree(orb);
+ fail_alloc:
+	cmd->result = DID_ERROR << 16;
+	done(cmd);
+	return 0;
 }
 
 static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
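
The fw-sbp2.c queuecommand hunks funnel every failure into one tail: cmd->result is set, done(cmd) is called, and the function still returns 0, since the error is reported through the completed command rather than the return value (the unified path uses DID_ERROR, where the old allocation-failure branch used DID_NO_CONNECT). A compressed sketch of that convention follows, with a placeholder ORB type and an explicit done() callback standing in for the scsi_done_fn_t parameter used above; it is an illustration, not the driver's function.

#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

struct sketch_orb { int placeholder; };	/* hypothetical stand-in for the ORB */

static int queuecommand_sketch(struct scsi_cmnd *cmd,
			       void (*done)(struct scsi_cmnd *))
{
	struct sketch_orb *orb;

	orb = kzalloc(sizeof *orb, GFP_ATOMIC);
	if (orb == NULL)
		goto fail_alloc;

	/* ... map the request, build the ORB, hand it to the hardware ... */

	return 0;

 fail_alloc:
	/*
	 * Complete the command with an error status and return 0: the
	 * failure travels back via done(), not via the return value.
	 */
	cmd->result = DID_ERROR << 16;
	done(cmd);
	return 0;
}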