Diffstat (limited to 'drivers/net')
 -rw-r--r--  drivers/net/b44.c                      |  52
 -rw-r--r--  drivers/net/fec_mpc52xx.c              |   1
 -rw-r--r--  drivers/net/fec_mpc52xx_phy.c          |   1
 -rw-r--r--  drivers/net/netconsole.c               |   5
 -rw-r--r--  drivers/net/ps3_gelic_wireless.c       |  11
 -rw-r--r--  drivers/net/smc91x.h                   |   8
 -rw-r--r--  drivers/net/virtio_net.c               |   1
 -rw-r--r--  drivers/net/wireless/b43/dma.c         |  27
 -rw-r--r--  drivers/net/wireless/b43legacy/dma.c   |  55
 -rw-r--r--  drivers/net/wireless/b43legacy/main.c  |   2
 -rw-r--r--  drivers/net/wireless/rtl8187_dev.c     |   3
11 files changed, 94 insertions(+), 72 deletions(-)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 25f1337cd02c..59dce6aa0865 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -148,7 +148,7 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
                                                 unsigned long offset,
                                                 enum dma_data_direction dir)
 {
-        dma_sync_single_range_for_device(sdev->dev, dma_base,
+        dma_sync_single_range_for_device(sdev->dma_dev, dma_base,
                                          offset & dma_desc_align_mask,
                                          dma_desc_sync_size, dir);
 }
@@ -158,7 +158,7 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
                                              unsigned long offset,
                                              enum dma_data_direction dir)
 {
-        dma_sync_single_range_for_cpu(sdev->dev, dma_base,
+        dma_sync_single_range_for_cpu(sdev->dma_dev, dma_base,
                                       offset & dma_desc_align_mask,
                                       dma_desc_sync_size, dir);
 }
@@ -613,7 +613,7 @@ static void b44_tx(struct b44 *bp)
 
                 BUG_ON(skb == NULL);
 
-                dma_unmap_single(bp->sdev->dev,
+                dma_unmap_single(bp->sdev->dma_dev,
                                  rp->mapping,
                                  skb->len,
                                  DMA_TO_DEVICE);
@@ -653,7 +653,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
         if (skb == NULL)
                 return -ENOMEM;
 
-        mapping = dma_map_single(bp->sdev->dev, skb->data,
+        mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
                                  RX_PKT_BUF_SZ,
                                  DMA_FROM_DEVICE);
 
@@ -663,19 +663,19 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
             mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                 /* Sigh... */
                 if (!dma_mapping_error(mapping))
-                        dma_unmap_single(bp->sdev->dev, mapping,
+                        dma_unmap_single(bp->sdev->dma_dev, mapping,
                                          RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                 dev_kfree_skb_any(skb);
                 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
                 if (skb == NULL)
                         return -ENOMEM;
-                mapping = dma_map_single(bp->sdev->dev, skb->data,
+                mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
                                          RX_PKT_BUF_SZ,
                                          DMA_FROM_DEVICE);
                 if (dma_mapping_error(mapping) ||
                     mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                         if (!dma_mapping_error(mapping))
-                                dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+                                dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
                         dev_kfree_skb_any(skb);
                         return -ENOMEM;
                 }
@@ -750,7 +750,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
                                              dest_idx * sizeof(dest_desc),
                                              DMA_BIDIRECTIONAL);
 
-        dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
+        dma_sync_single_for_device(bp->sdev->dma_dev, le32_to_cpu(src_desc->addr),
                                    RX_PKT_BUF_SZ,
                                    DMA_FROM_DEVICE);
 }
@@ -772,7 +772,7 @@ static int b44_rx(struct b44 *bp, int budget)
                 struct rx_header *rh;
                 u16 len;
 
-                dma_sync_single_for_cpu(bp->sdev->dev, map,
+                dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
                                         RX_PKT_BUF_SZ,
                                         DMA_FROM_DEVICE);
                 rh = (struct rx_header *) skb->data;
@@ -806,7 +806,7 @@ static int b44_rx(struct b44 *bp, int budget)
                         skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                         if (skb_size < 0)
                                 goto drop_it;
-                        dma_unmap_single(bp->sdev->dev, map,
+                        dma_unmap_single(bp->sdev->dma_dev, map,
                                          skb_size, DMA_FROM_DEVICE);
                         /* Leave out rx_header */
                         skb_put(skb, len + RX_PKT_OFFSET);
@@ -966,24 +966,24 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 goto err_out;
         }
 
-        mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
+        mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
         if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                 struct sk_buff *bounce_skb;
 
                 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                 if (!dma_mapping_error(mapping))
-                        dma_unmap_single(bp->sdev->dev, mapping, len,
+                        dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                                          DMA_TO_DEVICE);
 
                 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                 if (!bounce_skb)
                         goto err_out;
 
-                mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
+                mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
                                          len, DMA_TO_DEVICE);
                 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                         if (!dma_mapping_error(mapping))
-                                dma_unmap_single(bp->sdev->dev, mapping,
+                                dma_unmap_single(bp->sdev->dma_dev, mapping,
                                                  len, DMA_TO_DEVICE);
                         dev_kfree_skb_any(bounce_skb);
                         goto err_out;
@@ -1082,7 +1082,7 @@ static void b44_free_rings(struct b44 *bp)
 
                 if (rp->skb == NULL)
                         continue;
-                dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ,
+                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
                                  DMA_FROM_DEVICE);
                 dev_kfree_skb_any(rp->skb);
                 rp->skb = NULL;
@@ -1094,7 +1094,7 @@ static void b44_free_rings(struct b44 *bp)
 
                 if (rp->skb == NULL)
                         continue;
-                dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len,
+                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
                                  DMA_TO_DEVICE);
                 dev_kfree_skb_any(rp->skb);
                 rp->skb = NULL;
@@ -1117,12 +1117,12 @@ static void b44_init_rings(struct b44 *bp)
         memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
         if (bp->flags & B44_FLAG_RX_RING_HACK)
-                dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
+                dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
                                            DMA_TABLE_BYTES,
                                            DMA_BIDIRECTIONAL);
 
         if (bp->flags & B44_FLAG_TX_RING_HACK)
-                dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
+                dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
                                            DMA_TABLE_BYTES,
                                            DMA_TO_DEVICE);
 
@@ -1144,24 +1144,24 @@ static void b44_free_consistent(struct b44 *bp)
         bp->tx_buffers = NULL;
         if (bp->rx_ring) {
                 if (bp->flags & B44_FLAG_RX_RING_HACK) {
-                        dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
+                        dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
                                          DMA_TABLE_BYTES,
                                          DMA_BIDIRECTIONAL);
                         kfree(bp->rx_ring);
                 } else
-                        dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+                        dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
                                           bp->rx_ring, bp->rx_ring_dma);
                 bp->rx_ring = NULL;
                 bp->flags &= ~B44_FLAG_RX_RING_HACK;
         }
         if (bp->tx_ring) {
                 if (bp->flags & B44_FLAG_TX_RING_HACK) {
-                        dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
+                        dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
                                          DMA_TABLE_BYTES,
                                          DMA_TO_DEVICE);
                         kfree(bp->tx_ring);
                 } else
-                        dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+                        dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
                                           bp->tx_ring, bp->tx_ring_dma);
                 bp->tx_ring = NULL;
                 bp->flags &= ~B44_FLAG_TX_RING_HACK;
@@ -1187,7 +1187,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
                 goto out_err;
 
         size = DMA_TABLE_BYTES;
-        bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, gfp);
+        bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->rx_ring_dma, gfp);
         if (!bp->rx_ring) {
                 /* Allocation may have failed due to pci_alloc_consistent
                    insisting on use of GFP_DMA, which is more restrictive
@@ -1199,7 +1199,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
                 if (!rx_ring)
                         goto out_err;
 
-                rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
+                rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
                                              DMA_TABLE_BYTES,
                                              DMA_BIDIRECTIONAL);
 
@@ -1214,7 +1214,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
                 bp->flags |= B44_FLAG_RX_RING_HACK;
         }
 
-        bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, gfp);
+        bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->tx_ring_dma, gfp);
         if (!bp->tx_ring) {
                 /* Allocation may have failed due to dma_alloc_coherent
                    insisting on use of GFP_DMA, which is more restrictive
@@ -1226,7 +1226,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
                 if (!tx_ring)
                         goto out_err;
 
-                tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
+                tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
                                              DMA_TABLE_BYTES,
                                              DMA_TO_DEVICE);
 
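
Note: every b44 hunk above makes the same substitution, passing the SSB core's dma_dev to the DMA API instead of the SSB device itself; the surrounding 30-bit bounce-buffer logic is unchanged. As a rough illustration of that pattern only (a hedged sketch, not the driver's exact code; the helper name b44_map_tx_sketch is hypothetical, and it uses the single-argument dma_mapping_error() of this kernel generation):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

/* Sketch: map a TX buffer against the SSB core's DMA device and fall back
 * to a ZONE_DMA bounce buffer when the mapping fails or lands above the
 * chip's 30-bit (1GB) window. */
static int b44_map_tx_sketch(struct b44 *bp, struct sk_buff **pskb,
                             dma_addr_t *out)
{
        struct sk_buff *skb = *pskb;
        u32 len = skb->len;
        dma_addr_t mapping;

        mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len,
                                 DMA_TO_DEVICE);
        if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                struct sk_buff *bounce_skb;

                /* A mapping above 1GB is unusable; undo it before retrying. */
                if (!dma_mapping_error(mapping))
                        dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                                         DMA_TO_DEVICE);

                /* GFP_DMA memory is guaranteed to sit low enough. */
                bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        return -ENOMEM;

                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
                dev_kfree_skb_any(skb);
                *pskb = skb = bounce_skb;

                mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len,
                                         DMA_TO_DEVICE);
                if (dma_mapping_error(mapping) ||
                    mapping + len > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
                                dma_unmap_single(bp->sdev->dma_dev, mapping,
                                                 len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(skb);
                        return -EIO;
                }
        }
        *out = mapping;
        return 0;
}
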
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 43b5f30743c2..e5e6352556fa 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -1057,6 +1057,7 @@ static int mpc52xx_fec_of_resume(struct of_device *op)
 #endif
 
 static struct of_device_id mpc52xx_fec_match[] = {
+        { .type = "network", .compatible = "fsl,mpc5200b-fec", },
         { .type = "network", .compatible = "fsl,mpc5200-fec", },
         { .type = "network", .compatible = "mpc5200-fec", },
         { }
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 956836fc5ec0..f5634447276d 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -179,6 +179,7 @@ static int mpc52xx_fec_mdio_remove(struct of_device *of)
 
 static struct of_device_id mpc52xx_fec_mdio_match[] = {
         { .compatible = "fsl,mpc5200b-mdio", },
+        { .compatible = "fsl,mpc5200-mdio", },
         { .compatible = "mpc5200b-fec-phy", },
         {}
 };
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 501e451be911..665341e43055 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -730,7 +730,7 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
 
 static struct console netconsole = {
         .name   = "netcon",
-        .flags  = CON_ENABLED | CON_PRINTBUFFER,
+        .flags  = CON_ENABLED,
         .write  = write_msg,
 };
 
@@ -749,6 +749,9 @@ static int __init init_netconsole(void)
                                 err = PTR_ERR(nt);
                                 goto fail;
                         }
+                        /* Dump existing printks when we register */
+                        netconsole.flags |= CON_PRINTBUFFER;
+
                         spin_lock_irqsave(&target_list_lock, flags);
                         list_add(&nt->list, &target_list);
                         spin_unlock_irqrestore(&target_list_lock, flags);
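
Note: the netconsole change defers CON_PRINTBUFFER, so the kernel log buffer is replayed only when a boot-time target actually exists, rather than every time the console registers with nowhere to send it. A minimal sketch of the same idea with a hypothetical console (not the netconsole code):

#include <linux/console.h>
#include <linux/init.h>
#include <linux/module.h>

static void example_write(struct console *con, const char *msg, unsigned int len)
{
        /* deliver msg to the configured target */
}

static struct console example_console = {
        .name   = "excon",
        .flags  = CON_ENABLED,          /* no CON_PRINTBUFFER yet */
        .write  = example_write,
};

static int __init example_console_init(void)
{
        bool have_target = false;       /* e.g. parsed from a module option */

        /* Replay the existing log buffer only if there is somewhere to send it. */
        if (have_target)
                example_console.flags |= CON_PRINTBUFFER;

        register_console(&example_console);
        return 0;
}
module_init(example_console_init);
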
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index f9719cfa046c..0d32123085e9 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -512,13 +512,18 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
                  data, len);
         memset(ie_info, 0, sizeof(struct ie_info));
 
-        while (0 < data_left) {
+        while (2 <= data_left) {
                 item_id = *pos++;
                 item_len = *pos++;
+                data_left -= 2;
+
+                if (data_left < item_len)
+                        break;
 
                 switch (item_id) {
                 case MFIE_TYPE_GENERIC:
-                        if (!memcmp(pos, wpa_oui, OUI_LEN) &&
+                        if ((OUI_LEN + 1 <= item_len) &&
+                            !memcmp(pos, wpa_oui, OUI_LEN) &&
                             pos[OUI_LEN] == 0x01) {
                                 ie_info->wpa.data = pos - 2;
                                 ie_info->wpa.len = item_len + 2;
@@ -535,7 +540,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
                         break;
                 }
                 pos += item_len;
-                data_left -= item_len + 2;
+                data_left -= item_len;
         }
         pr_debug("%s: wpa=%p,%d wpa2=%p,%d\n", __func__,
                  ie_info->wpa.data, ie_info->wpa.len,
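
Note: the gelic fix is a bounds check on the information-element walk: require two bytes for the id/length header before reading it, verify the claimed element length fits in the remaining buffer, and make sure the WPA OUI comparison never reads past a short element. The same defensive TLV loop, written generically (hedged sketch, hypothetical helper name):

#include <linux/types.h>

/* Walk a buffer of { id, len, payload[len] } elements without ever reading
 * past the end of the buffer. */
static void parse_tlv_sketch(const u8 *data, size_t len)
{
        const u8 *pos = data;
        size_t data_left = len;

        while (data_left >= 2) {                /* need id + length bytes */
                u8 item_id = *pos++;
                u8 item_len = *pos++;

                data_left -= 2;
                if (data_left < item_len)       /* truncated element: stop */
                        break;

                /* ...inspect item_id and the payload at pos here, checking
                 * item_len before touching any fixed-offset field... */
                (void)item_id;

                pos += item_len;
                data_left -= item_len;
        }
}
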
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 8606818653f8..69e97a1cb1c4 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -93,14 +93,14 @@
 #define SMC_insw(a, r, p, l)    insw ((unsigned long *)((a) + (r)), p, l)
 # endif
 /* check if the mac in reg is valid */
-#define SMC_GET_MAC_ADDR(lp, addr)                              \
+#define SMC_GET_MAC_ADDR(addr)                                  \
         do {                                                    \
                 unsigned int __v;                               \
-                __v = SMC_inw(ioaddr, ADDR0_REG(lp));           \
+                __v = SMC_inw(ioaddr, ADDR0_REG);               \
                 addr[0] = __v; addr[1] = __v >> 8;              \
-                __v = SMC_inw(ioaddr, ADDR1_REG(lp));           \
+                __v = SMC_inw(ioaddr, ADDR1_REG);               \
                 addr[2] = __v; addr[3] = __v >> 8;              \
-                __v = SMC_inw(ioaddr, ADDR2_REG(lp));           \
+                __v = SMC_inw(ioaddr, ADDR2_REG);               \
                 addr[4] = __v; addr[5] = __v >> 8;              \
                 if (*(u32 *)(&addr[0]) == 0xFFFFFFFF) {         \
                         random_ether_addr(addr);                \
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d02d9d75fe14..555b70c8b863 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -285,7 +285,6 @@ again:
         /* Activate callback for using skbs: if this returns false it
          * means some were used in the meantime. */
         if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-                printk("Unlikely: restart svq race\n");
                 vi->svq->vq_ops->disable_cb(vi->svq);
                 netif_start_queue(dev);
                 goto again;
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index f1b983cb9c1f..21c886a9a1d9 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -328,10 +328,10 @@ static inline
         dma_addr_t dmaaddr;
 
         if (tx) {
-                dmaaddr = dma_map_single(ring->dev->dev->dev,
+                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
                                          buf, len, DMA_TO_DEVICE);
         } else {
-                dmaaddr = dma_map_single(ring->dev->dev->dev,
+                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
                                          buf, len, DMA_FROM_DEVICE);
         }
 
@@ -343,9 +343,10 @@ static inline
                           dma_addr_t addr, size_t len, int tx)
 {
         if (tx) {
-                dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
+                dma_unmap_single(ring->dev->dev->dma_dev,
+                                 addr, len, DMA_TO_DEVICE);
         } else {
-                dma_unmap_single(ring->dev->dev->dev,
+                dma_unmap_single(ring->dev->dev->dma_dev,
                                  addr, len, DMA_FROM_DEVICE);
         }
 }
@@ -355,7 +356,7 @@ static inline
                         dma_addr_t addr, size_t len)
 {
         B43_WARN_ON(ring->tx);
-        dma_sync_single_for_cpu(ring->dev->dev->dev,
+        dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
                                 addr, len, DMA_FROM_DEVICE);
 }
 
@@ -364,7 +365,7 @@ static inline
                         dma_addr_t addr, size_t len)
 {
         B43_WARN_ON(ring->tx);
-        dma_sync_single_for_device(ring->dev->dev->dev,
+        dma_sync_single_for_device(ring->dev->dev->dma_dev,
                                    addr, len, DMA_FROM_DEVICE);
 }
 
@@ -380,7 +381,7 @@ static inline
 
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-        struct device *dev = ring->dev->dev->dev;
+        struct device *dma_dev = ring->dev->dev->dma_dev;
         gfp_t flags = GFP_KERNEL;
 
         /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
@@ -394,7 +395,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
          */
         if (ring->type == B43_DMA_64BIT)
                 flags |= GFP_DMA;
-        ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
+        ring->descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
                                             &(ring->dmabase), flags);
         if (!ring->descbase) {
                 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
@@ -407,9 +408,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-        struct device *dev = ring->dev->dev->dev;
+        struct device *dma_dev = ring->dev->dev->dma_dev;
 
-        dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
+        dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
                           ring->descbase, ring->dmabase);
 }
 
@@ -818,7 +819,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         goto err_kfree_meta;
 
                 /* test for ability to dma to txhdr_cache */
-                dma_test = dma_map_single(dev->dev->dev,
+                dma_test = dma_map_single(dev->dev->dma_dev,
                                           ring->txhdr_cache,
                                           b43_txhdr_size(dev),
                                           DMA_TO_DEVICE);
@@ -833,7 +834,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         if (!ring->txhdr_cache)
                                 goto err_kfree_meta;
 
-                        dma_test = dma_map_single(dev->dev->dev,
+                        dma_test = dma_map_single(dev->dev->dma_dev,
                                                   ring->txhdr_cache,
                                                   b43_txhdr_size(dev),
                                                   DMA_TO_DEVICE);
@@ -847,7 +848,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         }
                 }
 
-                dma_unmap_single(dev->dev->dev,
+                dma_unmap_single(dev->dev->dma_dev,
                                  dma_test, b43_txhdr_size(dev),
                                  DMA_TO_DEVICE);
         }
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index e87b427d5e43..c990f87b107a 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -393,11 +393,11 @@ dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
         dma_addr_t dmaaddr;
 
         if (tx)
-                dmaaddr = dma_map_single(ring->dev->dev->dev,
+                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
                                          buf, len,
                                          DMA_TO_DEVICE);
         else
-                dmaaddr = dma_map_single(ring->dev->dev->dev,
+                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
                                          buf, len,
                                          DMA_FROM_DEVICE);
 
@@ -411,11 +411,11 @@ void unmap_descbuffer(struct b43legacy_dmaring *ring,
                       int tx)
 {
         if (tx)
-                dma_unmap_single(ring->dev->dev->dev,
+                dma_unmap_single(ring->dev->dev->dma_dev,
                                  addr, len,
                                  DMA_TO_DEVICE);
         else
-                dma_unmap_single(ring->dev->dev->dev,
+                dma_unmap_single(ring->dev->dev->dma_dev,
                                  addr, len,
                                  DMA_FROM_DEVICE);
 }
@@ -427,7 +427,7 @@ void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
 {
         B43legacy_WARN_ON(ring->tx);
 
-        dma_sync_single_for_cpu(ring->dev->dev->dev,
+        dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
                                 addr, len, DMA_FROM_DEVICE);
 }
 
@@ -438,7 +438,7 @@ void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
 {
         B43legacy_WARN_ON(ring->tx);
 
-        dma_sync_single_for_device(ring->dev->dev->dev,
+        dma_sync_single_for_device(ring->dev->dev->dma_dev,
                                    addr, len, DMA_FROM_DEVICE);
 }
 
@@ -458,9 +458,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
 
 static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 {
-        struct device *dev = ring->dev->dev->dev;
+        struct device *dma_dev = ring->dev->dev->dma_dev;
 
-        ring->descbase = dma_alloc_coherent(dev, B43legacy_DMA_RINGMEMSIZE,
+        ring->descbase = dma_alloc_coherent(dma_dev, B43legacy_DMA_RINGMEMSIZE,
                                             &(ring->dmabase), GFP_KERNEL);
         if (!ring->descbase) {
                 b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
@@ -474,9 +474,9 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 
 static void free_ringmemory(struct b43legacy_dmaring *ring)
 {
-        struct device *dev = ring->dev->dev->dev;
+        struct device *dma_dev = ring->dev->dev->dma_dev;
 
-        dma_free_coherent(dev, B43legacy_DMA_RINGMEMSIZE,
+        dma_free_coherent(dma_dev, B43legacy_DMA_RINGMEMSIZE,
                           ring->descbase, ring->dmabase);
 }
 
@@ -585,8 +585,9 @@ static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
 
 /* Check if a DMA mapping address is invalid. */
 static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
                                         dma_addr_t addr,
-                                        size_t buffersize)
+                                        size_t buffersize,
+                                        bool dma_to_device)
 {
         if (unlikely(dma_mapping_error(addr)))
                 return 1;
@@ -594,11 +595,11 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
         switch (ring->type) {
         case B43legacy_DMA_30BIT:
                 if ((u64)addr + buffersize > (1ULL << 30))
-                        return 1;
+                        goto address_error;
                 break;
         case B43legacy_DMA_32BIT:
                 if ((u64)addr + buffersize > (1ULL << 32))
-                        return 1;
+                        goto address_error;
                 break;
         case B43legacy_DMA_64BIT:
                 /* Currently we can't have addresses beyond 64 bits in the kernel. */
@@ -607,6 +608,12 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
 
         /* The address is OK. */
         return 0;
+
+address_error:
+        /* We can't support this address. Unmap it again. */
+        unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+
+        return 1;
 }
 
 static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
@@ -626,7 +633,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
                 return -ENOMEM;
         dmaaddr = map_descbuffer(ring, skb->data,
                                  ring->rx_buffersize, 0);
-        if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
+        if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
                 /* ugh. try to realloc in zone_dma */
                 gfp_flags |= GFP_DMA;
 
@@ -639,7 +646,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
                                          ring->rx_buffersize, 0);
         }
 
-        if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
+        if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
                 dev_kfree_skb_any(skb);
                 return -EIO;
         }
@@ -886,12 +893,12 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
                 goto err_kfree_meta;
 
         /* test for ability to dma to txhdr_cache */
-        dma_test = dma_map_single(dev->dev->dev, ring->txhdr_cache,
+        dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
                                   sizeof(struct b43legacy_txhdr_fw3),
                                   DMA_TO_DEVICE);
 
         if (b43legacy_dma_mapping_error(ring, dma_test,
-                                        sizeof(struct b43legacy_txhdr_fw3))) {
+                                        sizeof(struct b43legacy_txhdr_fw3), 1)) {
                 /* ugh realloc */
                 kfree(ring->txhdr_cache);
                 ring->txhdr_cache = kcalloc(nr_slots,
@@ -900,17 +907,17 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
                 if (!ring->txhdr_cache)
                         goto err_kfree_meta;
 
-                dma_test = dma_map_single(dev->dev->dev,
+                dma_test = dma_map_single(dev->dev->dma_dev,
                                           ring->txhdr_cache,
                                           sizeof(struct b43legacy_txhdr_fw3),
                                           DMA_TO_DEVICE);
 
                 if (b43legacy_dma_mapping_error(ring, dma_test,
-                                                sizeof(struct b43legacy_txhdr_fw3)))
+                                                sizeof(struct b43legacy_txhdr_fw3), 1))
                         goto err_kfree_txhdr_cache;
         }
 
-        dma_unmap_single(dev->dev->dev,
+        dma_unmap_single(dev->dev->dma_dev,
                          dma_test, sizeof(struct b43legacy_txhdr_fw3),
                          DMA_TO_DEVICE);
 }
@@ -1235,7 +1242,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
         meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                            sizeof(struct b43legacy_txhdr_fw3), 1);
         if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
-                                        sizeof(struct b43legacy_txhdr_fw3))) {
+                                        sizeof(struct b43legacy_txhdr_fw3), 1)) {
                 ring->current_slot = old_top_slot;
                 ring->used_slots = old_used_slots;
                 return -EIO;
@@ -1254,7 +1261,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 
         meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
         /* create a bounce buffer in zone_dma on mapping failure. */
-        if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
+        if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
                 bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                 if (!bounce_skb) {
                         ring->current_slot = old_top_slot;
@@ -1268,7 +1275,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
                 skb = bounce_skb;
                 meta->skb = skb;
                 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-                if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
+                if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
                         ring->current_slot = old_top_slot;
                         ring->used_slots = old_used_slots;
                         err = -EIO;
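
Note: the b43legacy change threads a dma_to_device flag into the mapping-error helper so that a mapping which succeeded but falls outside the ring's addressable window is unmapped with the correct direction before being reported as an error, instead of being leaked. In outline (a hedged sketch mirroring the hunks above, with a hypothetical helper name and assuming the driver's internal declarations such as unmap_descbuffer() and the ring type constants are in scope):

/* Sketch: validate a streaming mapping against the ring's address window
 * and clean it up on failure, using the direction the caller mapped with. */
static bool dma_addr_unusable(struct b43legacy_dmaring *ring,
                              dma_addr_t addr, size_t buffersize,
                              bool dma_to_device)
{
        u64 limit;

        if (unlikely(dma_mapping_error(addr)))  /* single-arg form of this era */
                return true;

        switch (ring->type) {
        case B43legacy_DMA_30BIT:
                limit = 1ULL << 30;
                break;
        case B43legacy_DMA_32BIT:
                limit = 1ULL << 32;
                break;
        default:                                /* 64-bit DMA: any address fits */
                return false;
        }

        if ((u64)addr + buffersize > limit) {
                /* The mapping itself worked, so it must be undone here. */
                unmap_descbuffer(ring, addr, buffersize, dma_to_device);
                return true;
        }
        return false;
}
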
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 2422a5dab1d2..ef829ee8ffd4 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1516,6 +1516,7 @@ static int b43legacy_request_firmware(struct b43legacy_wldev *dev)
         }
         if (!fw->initvals) {
                 switch (dev->phy.type) {
+                case B43legacy_PHYTYPE_B:
                 case B43legacy_PHYTYPE_G:
                         if ((rev >= 5) && (rev <= 10))
                                 filename = "b0g0initvals5";
@@ -1533,6 +1534,7 @@ static int b43legacy_request_firmware(struct b43legacy_wldev *dev)
         }
         if (!fw->initvals_band) {
                 switch (dev->phy.type) {
+                case B43legacy_PHYTYPE_B:
                 case B43legacy_PHYTYPE_G:
                         if ((rev >= 5) && (rev <= 10))
                                 filename = "b0g0bsinitvals5";
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index c03834d5cb0b..d5787b37e1fb 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -546,6 +546,8 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
                 return -EOPNOTSUPP;
         }
 
+        priv->vif = conf->vif;
+
         rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
         for (i = 0; i < ETH_ALEN; i++)
                 rtl818x_iowrite8(priv, &priv->map->MAC[i],
@@ -560,6 +562,7 @@ static void rtl8187_remove_interface(struct ieee80211_hw *dev,
 {
         struct rtl8187_priv *priv = dev->priv;
         priv->mode = IEEE80211_IF_TYPE_MNTR;
+        priv->vif = NULL;
 }
 
 static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)