Diffstat (limited to 'drivers/ieee1394/ohci1394.c')
-rw-r--r--  drivers/ieee1394/ohci1394.c  272
1 files changed, 18 insertions, 254 deletions
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 5dadfd296f79..5667c8102efc 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -138,19 +138,6 @@ printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->
 #define DBGMSG(fmt, args...) do {} while (0)
 #endif
 
-#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
-#define OHCI_DMA_ALLOC(fmt, args...) \
-	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
-		++global_outstanding_dmas, ## args)
-#define OHCI_DMA_FREE(fmt, args...) \
-	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
-		--global_outstanding_dmas, ## args)
-static int global_outstanding_dmas = 0;
-#else
-#define OHCI_DMA_ALLOC(fmt, args...) do {} while (0)
-#define OHCI_DMA_FREE(fmt, args...) do {} while (0)
-#endif
-
 /* print general (card independent) information */
 #define PRINT_G(level, fmt, args...) \
 	printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
@@ -170,7 +157,6 @@ static void dma_trm_reset(struct dma_trm_ctx *d);
 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
-static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
 
 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
@@ -533,9 +519,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
 	initialize_dma_trm_ctx(&ohci->at_req_context);
 	initialize_dma_trm_ctx(&ohci->at_resp_context);
 
-	/* Initialize IR Legacy DMA channel mask */
-	ohci->ir_legacy_channels = 0;
-
 	/* Accept AR requests from all nodes */
 	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
 
@@ -733,7 +716,6 @@ static void insert_packet(struct ti_ohci *ohci,
 			pci_map_single(ohci->dev, packet->data,
 				       packet->data_size,
 				       PCI_DMA_TODEVICE));
-		OHCI_DMA_ALLOC("single, block transmit packet");
 
 		d->prg_cpu[idx]->end.branchAddress = 0;
 		d->prg_cpu[idx]->end.status = 0;
@@ -783,7 +765,6 @@ static void insert_packet(struct ti_ohci *ohci,
 		d->prg_cpu[idx]->end.address = cpu_to_le32(
 			pci_map_single(ohci->dev, packet->data,
 				       packet->data_size, PCI_DMA_TODEVICE));
-		OHCI_DMA_ALLOC("single, iso transmit packet");
 
 		d->prg_cpu[idx]->end.branchAddress = 0;
 		d->prg_cpu[idx]->end.status = 0;
@@ -884,36 +865,9 @@ static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
 		return -EOVERFLOW;
 	}
 
-	/* Decide whether we have an iso, a request, or a response packet */
 	if (packet->type == hpsb_raw)
 		d = &ohci->at_req_context;
-	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
-		/* The legacy IT DMA context is initialized on first
-		 * use. However, the alloc cannot be run from
-		 * interrupt context, so we bail out if that is the
-		 * case. I don't see anyone sending ISO packets from
-		 * interrupt context anyway... */
-
-		if (ohci->it_legacy_context.ohci == NULL) {
-			if (in_interrupt()) {
-				PRINT(KERN_ERR,
-				      "legacy IT context cannot be initialized during interrupt");
-				return -EINVAL;
-			}
-
-			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
-					      DMA_CTX_ISO, 0, IT_NUM_DESC,
-					      OHCI1394_IsoXmitContextBase) < 0) {
-				PRINT(KERN_ERR,
-				      "error initializing legacy IT context");
-				return -ENOMEM;
-			}
-
-			initialize_dma_trm_ctx(&ohci->it_legacy_context);
-		}
-
-		d = &ohci->it_legacy_context;
-	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
+	else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
 		d = &ohci->at_resp_context;
 	else
 		d = &ohci->at_req_context;
@@ -932,9 +886,7 @@ static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
 static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
 {
 	struct ti_ohci *ohci = host->hostdata;
-	int retval = 0;
-	unsigned long flags;
-	int phy_reg;
+	int retval = 0, phy_reg;
 
 	switch (cmd) {
 	case RESET_BUS:
@@ -1027,117 +979,6 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
 		dma_trm_reset(&ohci->at_resp_context);
 		break;
 
-	case ISO_LISTEN_CHANNEL:
-	{
-		u64 mask;
-		struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
-		int ir_legacy_active;
-
-		if (arg<0 || arg>63) {
-			PRINT(KERN_ERR,
-			      "%s: IS0 listen channel %d is out of range",
-			      __FUNCTION__, arg);
-			return -EFAULT;
-		}
-
-		mask = (u64)0x1<<arg;
-
-		spin_lock_irqsave(&ohci->IR_channel_lock, flags);
-
-		if (ohci->ISO_channel_usage & mask) {
-			PRINT(KERN_ERR,
-			      "%s: IS0 listen channel %d is already used",
-			      __FUNCTION__, arg);
-			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
-			return -EFAULT;
-		}
-
-		ir_legacy_active = ohci->ir_legacy_channels;
-
-		ohci->ISO_channel_usage |= mask;
-		ohci->ir_legacy_channels |= mask;
-
-		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
-
-		if (!ir_legacy_active) {
-			if (ohci1394_register_iso_tasklet(ohci,
-					&ohci->ir_legacy_tasklet) < 0) {
-				PRINT(KERN_ERR, "No IR DMA context available");
-				return -EBUSY;
-			}
-
-			/* the IR context can be assigned to any DMA context
-			 * by ohci1394_register_iso_tasklet */
-			d->ctx = ohci->ir_legacy_tasklet.context;
-			d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
-				32*d->ctx;
-			d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
-				32*d->ctx;
-			d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
-			d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
-
-			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
-
-			if (printk_ratelimit())
-				DBGMSG("IR legacy activated");
-		}
-
-		spin_lock_irqsave(&ohci->IR_channel_lock, flags);
-
-		if (arg>31)
-			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
-				  1<<(arg-32));
-		else
-			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
-				  1<<arg);
-
-		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
-		DBGMSG("Listening enabled on channel %d", arg);
-		break;
-	}
-	case ISO_UNLISTEN_CHANNEL:
-	{
-		u64 mask;
-
-		if (arg<0 || arg>63) {
-			PRINT(KERN_ERR,
-			      "%s: IS0 unlisten channel %d is out of range",
-			      __FUNCTION__, arg);
-			return -EFAULT;
-		}
-
-		mask = (u64)0x1<<arg;
-
-		spin_lock_irqsave(&ohci->IR_channel_lock, flags);
-
-		if (!(ohci->ISO_channel_usage & mask)) {
-			PRINT(KERN_ERR,
-			      "%s: IS0 unlisten channel %d is not used",
-			      __FUNCTION__, arg);
-			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
-			return -EFAULT;
-		}
-
-		ohci->ISO_channel_usage &= ~mask;
-		ohci->ir_legacy_channels &= ~mask;
-
-		if (arg>31)
-			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
-				  1<<(arg-32));
-		else
-			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
-				  1<<arg);
-
-		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
-		DBGMSG("Listening disabled on channel %d", arg);
-
-		if (ohci->ir_legacy_channels == 0) {
-			stop_dma_rcv_ctx(&ohci->ir_legacy_context);
-			DBGMSG("ISO legacy receive context stopped");
-		}
-
-		break;
-	}
 	default:
 		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
 			cmd);
@@ -2869,12 +2710,10 @@ static void dma_trm_tasklet (unsigned long data)
 		list_del_init(&packet->driver_list);
 		hpsb_packet_sent(ohci->host, packet, ack);
 
-		if (datasize) {
+		if (datasize)
 			pci_unmap_single(ohci->dev,
 				cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
 				datasize, PCI_DMA_TODEVICE);
-			OHCI_DMA_FREE("single Xmit data packet");
-		}
 
 		d->sent_ind = (d->sent_ind+1)%d->num_desc;
 		d->free_prgs++;
@@ -2885,22 +2724,6 @@ static void dma_trm_tasklet (unsigned long data)
 	spin_unlock_irqrestore(&d->lock, flags);
 }
 
-static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
-{
-	if (d->ctrlClear) {
-		ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
-
-		if (d->type == DMA_CTX_ISO) {
-			/* disable interrupts */
-			reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
-			ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
-		} else {
-			tasklet_kill(&d->task);
-		}
-	}
-}
-
-
 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
 {
 	int i;
@@ -2913,23 +2736,19 @@ static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
 
 	if (d->buf_cpu) {
 		for (i=0; i<d->num_desc; i++)
-			if (d->buf_cpu[i] && d->buf_bus[i]) {
+			if (d->buf_cpu[i] && d->buf_bus[i])
 				pci_free_consistent(
 					ohci->dev, d->buf_size,
 					d->buf_cpu[i], d->buf_bus[i]);
-				OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
-			}
 		kfree(d->buf_cpu);
 		kfree(d->buf_bus);
 	}
 	if (d->prg_cpu) {
 		for (i=0; i<d->num_desc; i++)
-			if (d->prg_cpu[i] && d->prg_bus[i]) {
-				pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
-				OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
-			}
+			if (d->prg_cpu[i] && d->prg_bus[i])
+				pci_pool_free(d->prg_pool, d->prg_cpu[i],
+					      d->prg_bus[i]);
 		pci_pool_destroy(d->prg_pool);
-		OHCI_DMA_FREE("dma_rcv prg pool");
 		kfree(d->prg_cpu);
 		kfree(d->prg_bus);
 	}
@@ -2998,13 +2817,10 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
 	}
 	num_allocs++;
 
-	OHCI_DMA_ALLOC("dma_rcv prg pool");
-
 	for (i=0; i<d->num_desc; i++) {
 		d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
 						     d->buf_size,
 						     d->buf_bus+i);
-		OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
 
 		if (d->buf_cpu[i] != NULL) {
 			memset(d->buf_cpu[i], 0, d->buf_size);
@@ -3016,7 +2832,6 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
 		}
 
 		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
-		OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
 
 		if (d->prg_cpu[i] != NULL) {
 			memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
@@ -3030,18 +2845,11 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
 
 	spin_lock_init(&d->lock);
 
-	if (type == DMA_CTX_ISO) {
-		ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
-					  OHCI_ISO_MULTICHANNEL_RECEIVE,
-					  dma_rcv_tasklet, (unsigned long) d);
-	} else {
-		d->ctrlSet = context_base + OHCI1394_ContextControlSet;
-		d->ctrlClear = context_base + OHCI1394_ContextControlClear;
-		d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
-
-		tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
-	}
+	d->ctrlSet = context_base + OHCI1394_ContextControlSet;
+	d->ctrlClear = context_base + OHCI1394_ContextControlClear;
+	d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
 
+	tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d);
 	return 0;
 }
 
@@ -3057,12 +2865,10 @@ static void free_dma_trm_ctx(struct dma_trm_ctx *d)
 
 	if (d->prg_cpu) {
 		for (i=0; i<d->num_desc; i++)
-			if (d->prg_cpu[i] && d->prg_bus[i]) {
-				pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
-				OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
-			}
+			if (d->prg_cpu[i] && d->prg_bus[i])
+				pci_pool_free(d->prg_pool, d->prg_cpu[i],
+					      d->prg_bus[i]);
 		pci_pool_destroy(d->prg_pool);
-		OHCI_DMA_FREE("dma_trm prg pool");
 		kfree(d->prg_cpu);
 		kfree(d->prg_bus);
 	}
@@ -3108,11 +2914,8 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
 	}
 	num_allocs++;
 
-	OHCI_DMA_ALLOC("dma_rcv prg pool");
-
 	for (i = 0; i < d->num_desc; i++) {
 		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
-		OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
 
 		if (d->prg_cpu[i] != NULL) {
 			memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
@@ -3127,28 +2930,10 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
 	spin_lock_init(&d->lock);
 
 	/* initialize tasklet */
-	if (type == DMA_CTX_ISO) {
-		ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
-					  dma_trm_tasklet, (unsigned long) d);
-		if (ohci1394_register_iso_tasklet(ohci,
-						  &ohci->it_legacy_tasklet) < 0) {
-			PRINT(KERN_ERR, "No IT DMA context available");
-			free_dma_trm_ctx(d);
-			return -EBUSY;
-		}
-
-		/* IT can be assigned to any context by register_iso_tasklet */
-		d->ctx = ohci->it_legacy_tasklet.context;
-		d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
-		d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
-		d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
-	} else {
-		d->ctrlSet = context_base + OHCI1394_ContextControlSet;
-		d->ctrlClear = context_base + OHCI1394_ContextControlClear;
-		d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
-		tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
-	}
-
+	d->ctrlSet = context_base + OHCI1394_ContextControlSet;
+	d->ctrlClear = context_base + OHCI1394_ContextControlClear;
+	d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
+	tasklet_init(&d->task, dma_trm_tasklet, (unsigned long)d);
 	return 0;
 }
 
@@ -3294,7 +3079,6 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
 	ohci->csr_config_rom_cpu =
 		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
 				     &ohci->csr_config_rom_bus);
-	OHCI_DMA_ALLOC("consistent csr_config_rom");
 	if (ohci->csr_config_rom_cpu == NULL)
 		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
 	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
@@ -3303,8 +3087,6 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
 	ohci->selfid_buf_cpu =
 		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
 				     &ohci->selfid_buf_bus);
-	OHCI_DMA_ALLOC("consistent selfid_buf");
-
 	if (ohci->selfid_buf_cpu == NULL)
 		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
 	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
@@ -3377,20 +3159,6 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
 	ohci->ISO_channel_usage = 0;
 	spin_lock_init(&ohci->IR_channel_lock);
 
-	/* Allocate the IR DMA context right here so we don't have
-	 * to do it in interrupt path - note that this doesn't
-	 * waste much memory and avoids the jugglery required to
-	 * allocate it in IRQ path. */
-	if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
-			      DMA_CTX_ISO, 0, IR_NUM_DESC,
-			      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
-			      OHCI1394_IsoRcvContextBase) < 0) {
-		FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
-	}
-
-	/* We hopefully don't have to pre-allocate IT DMA like we did
-	 * for IR DMA above. Allocate it on-demand and mark inactive. */
-	ohci->it_legacy_context.ohci = NULL;
 	spin_lock_init(&ohci->event_lock);
 
 	/*
@@ -3483,20 +3251,16 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
 		free_dma_rcv_ctx(&ohci->ar_resp_context);
 		free_dma_trm_ctx(&ohci->at_req_context);
 		free_dma_trm_ctx(&ohci->at_resp_context);
-		free_dma_rcv_ctx(&ohci->ir_legacy_context);
-		free_dma_trm_ctx(&ohci->it_legacy_context);
 
 	case OHCI_INIT_HAVE_SELFID_BUFFER:
 		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
 				    ohci->selfid_buf_cpu,
 				    ohci->selfid_buf_bus);
-		OHCI_DMA_FREE("consistent selfid_buf");
 
 	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
 		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
 				    ohci->csr_config_rom_cpu,
 				    ohci->csr_config_rom_bus);
-		OHCI_DMA_FREE("consistent csr_config_rom");
 
 	case OHCI_INIT_HAVE_IOMAPPING:
 		iounmap(ohci->registers);