-rw-r--r--  Documentation/networking/can.txt        |  8
-rw-r--r--  MAINTAINERS                             | 14
-rw-r--r--  block/Kconfig                           |  2
-rw-r--r--  drivers/net/b44.c                       | 52
-rw-r--r--  drivers/net/netconsole.c                |  5
-rw-r--r--  drivers/net/ps3_gelic_wireless.c        | 11
-rw-r--r--  drivers/net/wireless/b43/dma.c          | 27
-rw-r--r--  drivers/net/wireless/b43legacy/dma.c    | 55
-rw-r--r--  drivers/net/wireless/b43legacy/main.c   |  2
-rw-r--r--  drivers/net/wireless/rtl8187_dev.c      |  3
-rw-r--r--  drivers/ssb/main.c                      | 14
-rw-r--r--  drivers/usb/serial/option.c             | 56
-rw-r--r--  drivers/usb/serial/usb-serial.c         |  2
-rw-r--r--  drivers/usb/storage/unusual_devs.h      | 96
-rw-r--r--  fs/afs/main.c                           |  2
-rw-r--r--  include/linux/iocontext.h               |  4
-rw-r--r--  include/linux/ssb/ssb.h                 |  4
-rw-r--r--  net/ipv4/tcp_input.c                    | 78
-rw-r--r--  net/mac80211/rx.c                       |  7
-rw-r--r--  net/rfkill/rfkill.c                     |  2
-rw-r--r--  net/rxrpc/af_rxrpc.c                    |  2
-rw-r--r--  net/rxrpc/rxkad.c                       |  2
-rw-r--r--  net/sched/sch_api.c                     |  3
23 files changed, 328 insertions, 123 deletions
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index f1b2de170929..641d2afacffa 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -281,10 +281,10 @@ solution for a couple of reasons: | |||
281 | sa_family_t can_family; | 281 | sa_family_t can_family; |
282 | int can_ifindex; | 282 | int can_ifindex; |
283 | union { | 283 | union { |
284 | struct { canid_t rx_id, tx_id; } tp16; | 284 | /* transport protocol class address info (e.g. ISOTP) */ |
285 | struct { canid_t rx_id, tx_id; } tp20; | 285 | struct { canid_t rx_id, tx_id; } tp; |
286 | struct { canid_t rx_id, tx_id; } mcnet; | 286 | |
287 | struct { canid_t rx_id, tx_id; } isotp; | 287 | /* reserved for future CAN protocols address information */ |
288 | } can_addr; | 288 | } can_addr; |
289 | }; | 289 | }; |
290 | 290 | ||
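A rough user-space sketch of how the consolidated can_addr.tp member from the hunk above would be filled when binding a transport protocol socket; the helper name and the concrete CAN IDs are only illustrative, since no in-tree protocol fills these fields yet:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/can.h>

    /* Bind an already-created (hypothetical) CAN transport protocol socket
     * "s" to interface "ifindex", receiving on rx_id and sending to tx_id. */
    static int bind_tp_socket(int s, int ifindex)
    {
            struct sockaddr_can addr;

            memset(&addr, 0, sizeof(addr));
            addr.can_family  = AF_CAN;
            addr.can_ifindex = ifindex;
            addr.can_addr.tp.rx_id = 0x7E8;   /* CAN ID we receive on  */
            addr.can_addr.tp.tx_id = 0x7E0;   /* CAN ID we transmit to */

            return bind(s, (struct sockaddr *)&addr, sizeof(addr));
    }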
diff --git a/MAINTAINERS b/MAINTAINERS
index 90dcbbcad91c..e46775868019 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2116,7 +2116,7 @@ M: reinette.chatre@intel.com | |||
2116 | L: linux-wireless@vger.kernel.org | 2116 | L: linux-wireless@vger.kernel.org |
2117 | L: ipw3945-devel@lists.sourceforge.net | 2117 | L: ipw3945-devel@lists.sourceforge.net |
2118 | W: http://intellinuxwireless.org | 2118 | W: http://intellinuxwireless.org |
2119 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rchatre/iwlwifi-2.6.git | 2119 | T: git kernel.org:/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-2.6.git |
2120 | S: Supported | 2120 | S: Supported |
2121 | 2121 | ||
2122 | IOC3 ETHERNET DRIVER | 2122 | IOC3 ETHERNET DRIVER |
@@ -2197,7 +2197,7 @@ S: Maintained | |||
2197 | ISDN SUBSYSTEM | 2197 | ISDN SUBSYSTEM |
2198 | P: Karsten Keil | 2198 | P: Karsten Keil |
2199 | M: kkeil@suse.de | 2199 | M: kkeil@suse.de |
2200 | L: isdn4linux@listserv.isdn4linux.de | 2200 | L: isdn4linux@listserv.isdn4linux.de (subscribers-only) |
2201 | W: http://www.isdn4linux.de | 2201 | W: http://www.isdn4linux.de |
2202 | T: git kernel.org:/pub/scm/linux/kernel/kkeil/isdn-2.6.git | 2202 | T: git kernel.org:/pub/scm/linux/kernel/kkeil/isdn-2.6.git |
2203 | S: Maintained | 2203 | S: Maintained |
@@ -2205,7 +2205,7 @@ S: Maintained | |||
2205 | ISDN SUBSYSTEM (Eicon active card driver) | 2205 | ISDN SUBSYSTEM (Eicon active card driver) |
2206 | P: Armin Schindler | 2206 | P: Armin Schindler |
2207 | M: mac@melware.de | 2207 | M: mac@melware.de |
2208 | L: isdn4linux@listserv.isdn4linux.de | 2208 | L: isdn4linux@listserv.isdn4linux.de (subscribers-only) |
2209 | W: http://www.melware.de | 2209 | W: http://www.melware.de |
2210 | S: Maintained | 2210 | S: Maintained |
2211 | 2211 | ||
@@ -3280,6 +3280,7 @@ L: linux-wireless@vger.kernel.org | |||
3280 | L: rt2400-devel@lists.sourceforge.net | 3280 | L: rt2400-devel@lists.sourceforge.net |
3281 | W: http://rt2x00.serialmonkey.com/ | 3281 | W: http://rt2x00.serialmonkey.com/ |
3282 | S: Maintained | 3282 | S: Maintained |
3283 | T: git kernel.org:/pub/scm/linux/kernel/git/ivd/rt2x00.git | ||
3283 | F: drivers/net/wireless/rt2x00/ | 3284 | F: drivers/net/wireless/rt2x00/ |
3284 | 3285 | ||
3285 | RAMDISK RAM BLOCK DEVICE DRIVER | 3286 | RAMDISK RAM BLOCK DEVICE DRIVER |
@@ -3342,6 +3343,13 @@ L: reiserfs-devel@vger.kernel.org | |||
3342 | W: http://www.namesys.com | 3343 | W: http://www.namesys.com |
3343 | S: Supported | 3344 | S: Supported |
3344 | 3345 | ||
3346 | RFKILL | ||
3347 | P: Ivo van Doorn | ||
3348 | M: IvDoorn@gmail.com | ||
3349 | L: netdev@vger.kernel.org | ||
3350 | S: Maintained | ||
3351 | F: net/rfkill | ||
3352 | |||
3345 | ROCKETPORT DRIVER | 3353 | ROCKETPORT DRIVER |
3346 | P: Comtrol Corp. | 3354 | P: Comtrol Corp. |
3347 | W: http://www.comtrol.com | 3355 | W: http://www.comtrol.com |
diff --git a/block/Kconfig b/block/Kconfig
index 9bda7bc80307..7db9a411649d 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -38,7 +38,7 @@ config BLK_DEV_IO_TRACE | |||
38 | on a block device queue. For more information (and the user space | 38 | on a block device queue. For more information (and the user space |
39 | support tools needed), fetch the blktrace app from: | 39 | support tools needed), fetch the blktrace app from: |
40 | 40 | ||
41 | git://brick.kernel.dk/data/git/blktrace.git | 41 | git://git.kernel.dk/blktrace.git |
42 | 42 | ||
43 | config LSF | 43 | config LSF |
44 | bool "Support for Large Single Files" | 44 | bool "Support for Large Single Files" |
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 25f1337cd02c..59dce6aa0865 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -148,7 +148,7 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev, | |||
148 | unsigned long offset, | 148 | unsigned long offset, |
149 | enum dma_data_direction dir) | 149 | enum dma_data_direction dir) |
150 | { | 150 | { |
151 | dma_sync_single_range_for_device(sdev->dev, dma_base, | 151 | dma_sync_single_range_for_device(sdev->dma_dev, dma_base, |
152 | offset & dma_desc_align_mask, | 152 | offset & dma_desc_align_mask, |
153 | dma_desc_sync_size, dir); | 153 | dma_desc_sync_size, dir); |
154 | } | 154 | } |
@@ -158,7 +158,7 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev, | |||
158 | unsigned long offset, | 158 | unsigned long offset, |
159 | enum dma_data_direction dir) | 159 | enum dma_data_direction dir) |
160 | { | 160 | { |
161 | dma_sync_single_range_for_cpu(sdev->dev, dma_base, | 161 | dma_sync_single_range_for_cpu(sdev->dma_dev, dma_base, |
162 | offset & dma_desc_align_mask, | 162 | offset & dma_desc_align_mask, |
163 | dma_desc_sync_size, dir); | 163 | dma_desc_sync_size, dir); |
164 | } | 164 | } |
@@ -613,7 +613,7 @@ static void b44_tx(struct b44 *bp) | |||
613 | 613 | ||
614 | BUG_ON(skb == NULL); | 614 | BUG_ON(skb == NULL); |
615 | 615 | ||
616 | dma_unmap_single(bp->sdev->dev, | 616 | dma_unmap_single(bp->sdev->dma_dev, |
617 | rp->mapping, | 617 | rp->mapping, |
618 | skb->len, | 618 | skb->len, |
619 | DMA_TO_DEVICE); | 619 | DMA_TO_DEVICE); |
@@ -653,7 +653,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | |||
653 | if (skb == NULL) | 653 | if (skb == NULL) |
654 | return -ENOMEM; | 654 | return -ENOMEM; |
655 | 655 | ||
656 | mapping = dma_map_single(bp->sdev->dev, skb->data, | 656 | mapping = dma_map_single(bp->sdev->dma_dev, skb->data, |
657 | RX_PKT_BUF_SZ, | 657 | RX_PKT_BUF_SZ, |
658 | DMA_FROM_DEVICE); | 658 | DMA_FROM_DEVICE); |
659 | 659 | ||
@@ -663,19 +663,19 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | |||
663 | mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) { | 663 | mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) { |
664 | /* Sigh... */ | 664 | /* Sigh... */ |
665 | if (!dma_mapping_error(mapping)) | 665 | if (!dma_mapping_error(mapping)) |
666 | dma_unmap_single(bp->sdev->dev, mapping, | 666 | dma_unmap_single(bp->sdev->dma_dev, mapping, |
667 | RX_PKT_BUF_SZ, DMA_FROM_DEVICE); | 667 | RX_PKT_BUF_SZ, DMA_FROM_DEVICE); |
668 | dev_kfree_skb_any(skb); | 668 | dev_kfree_skb_any(skb); |
669 | skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); | 669 | skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); |
670 | if (skb == NULL) | 670 | if (skb == NULL) |
671 | return -ENOMEM; | 671 | return -ENOMEM; |
672 | mapping = dma_map_single(bp->sdev->dev, skb->data, | 672 | mapping = dma_map_single(bp->sdev->dma_dev, skb->data, |
673 | RX_PKT_BUF_SZ, | 673 | RX_PKT_BUF_SZ, |
674 | DMA_FROM_DEVICE); | 674 | DMA_FROM_DEVICE); |
675 | if (dma_mapping_error(mapping) || | 675 | if (dma_mapping_error(mapping) || |
676 | mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) { | 676 | mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) { |
677 | if (!dma_mapping_error(mapping)) | 677 | if (!dma_mapping_error(mapping)) |
678 | dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); | 678 | dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); |
679 | dev_kfree_skb_any(skb); | 679 | dev_kfree_skb_any(skb); |
680 | return -ENOMEM; | 680 | return -ENOMEM; |
681 | } | 681 | } |
@@ -750,7 +750,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | |||
750 | dest_idx * sizeof(dest_desc), | 750 | dest_idx * sizeof(dest_desc), |
751 | DMA_BIDIRECTIONAL); | 751 | DMA_BIDIRECTIONAL); |
752 | 752 | ||
753 | dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr), | 753 | dma_sync_single_for_device(bp->sdev->dma_dev, le32_to_cpu(src_desc->addr), |
754 | RX_PKT_BUF_SZ, | 754 | RX_PKT_BUF_SZ, |
755 | DMA_FROM_DEVICE); | 755 | DMA_FROM_DEVICE); |
756 | } | 756 | } |
@@ -772,7 +772,7 @@ static int b44_rx(struct b44 *bp, int budget) | |||
772 | struct rx_header *rh; | 772 | struct rx_header *rh; |
773 | u16 len; | 773 | u16 len; |
774 | 774 | ||
775 | dma_sync_single_for_cpu(bp->sdev->dev, map, | 775 | dma_sync_single_for_cpu(bp->sdev->dma_dev, map, |
776 | RX_PKT_BUF_SZ, | 776 | RX_PKT_BUF_SZ, |
777 | DMA_FROM_DEVICE); | 777 | DMA_FROM_DEVICE); |
778 | rh = (struct rx_header *) skb->data; | 778 | rh = (struct rx_header *) skb->data; |
@@ -806,7 +806,7 @@ static int b44_rx(struct b44 *bp, int budget) | |||
806 | skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); | 806 | skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); |
807 | if (skb_size < 0) | 807 | if (skb_size < 0) |
808 | goto drop_it; | 808 | goto drop_it; |
809 | dma_unmap_single(bp->sdev->dev, map, | 809 | dma_unmap_single(bp->sdev->dma_dev, map, |
810 | skb_size, DMA_FROM_DEVICE); | 810 | skb_size, DMA_FROM_DEVICE); |
811 | /* Leave out rx_header */ | 811 | /* Leave out rx_header */ |
812 | skb_put(skb, len + RX_PKT_OFFSET); | 812 | skb_put(skb, len + RX_PKT_OFFSET); |
@@ -966,24 +966,24 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
966 | goto err_out; | 966 | goto err_out; |
967 | } | 967 | } |
968 | 968 | ||
969 | mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE); | 969 | mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE); |
970 | if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { | 970 | if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { |
971 | struct sk_buff *bounce_skb; | 971 | struct sk_buff *bounce_skb; |
972 | 972 | ||
973 | /* Chip can't handle DMA to/from >1GB, use bounce buffer */ | 973 | /* Chip can't handle DMA to/from >1GB, use bounce buffer */ |
974 | if (!dma_mapping_error(mapping)) | 974 | if (!dma_mapping_error(mapping)) |
975 | dma_unmap_single(bp->sdev->dev, mapping, len, | 975 | dma_unmap_single(bp->sdev->dma_dev, mapping, len, |
976 | DMA_TO_DEVICE); | 976 | DMA_TO_DEVICE); |
977 | 977 | ||
978 | bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA); | 978 | bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA); |
979 | if (!bounce_skb) | 979 | if (!bounce_skb) |
980 | goto err_out; | 980 | goto err_out; |
981 | 981 | ||
982 | mapping = dma_map_single(bp->sdev->dev, bounce_skb->data, | 982 | mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data, |
983 | len, DMA_TO_DEVICE); | 983 | len, DMA_TO_DEVICE); |
984 | if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { | 984 | if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { |
985 | if (!dma_mapping_error(mapping)) | 985 | if (!dma_mapping_error(mapping)) |
986 | dma_unmap_single(bp->sdev->dev, mapping, | 986 | dma_unmap_single(bp->sdev->dma_dev, mapping, |
987 | len, DMA_TO_DEVICE); | 987 | len, DMA_TO_DEVICE); |
988 | dev_kfree_skb_any(bounce_skb); | 988 | dev_kfree_skb_any(bounce_skb); |
989 | goto err_out; | 989 | goto err_out; |
@@ -1082,7 +1082,7 @@ static void b44_free_rings(struct b44 *bp) | |||
1082 | 1082 | ||
1083 | if (rp->skb == NULL) | 1083 | if (rp->skb == NULL) |
1084 | continue; | 1084 | continue; |
1085 | dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ, | 1085 | dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ, |
1086 | DMA_FROM_DEVICE); | 1086 | DMA_FROM_DEVICE); |
1087 | dev_kfree_skb_any(rp->skb); | 1087 | dev_kfree_skb_any(rp->skb); |
1088 | rp->skb = NULL; | 1088 | rp->skb = NULL; |
@@ -1094,7 +1094,7 @@ static void b44_free_rings(struct b44 *bp) | |||
1094 | 1094 | ||
1095 | if (rp->skb == NULL) | 1095 | if (rp->skb == NULL) |
1096 | continue; | 1096 | continue; |
1097 | dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len, | 1097 | dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len, |
1098 | DMA_TO_DEVICE); | 1098 | DMA_TO_DEVICE); |
1099 | dev_kfree_skb_any(rp->skb); | 1099 | dev_kfree_skb_any(rp->skb); |
1100 | rp->skb = NULL; | 1100 | rp->skb = NULL; |
@@ -1117,12 +1117,12 @@ static void b44_init_rings(struct b44 *bp) | |||
1117 | memset(bp->tx_ring, 0, B44_TX_RING_BYTES); | 1117 | memset(bp->tx_ring, 0, B44_TX_RING_BYTES); |
1118 | 1118 | ||
1119 | if (bp->flags & B44_FLAG_RX_RING_HACK) | 1119 | if (bp->flags & B44_FLAG_RX_RING_HACK) |
1120 | dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma, | 1120 | dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma, |
1121 | DMA_TABLE_BYTES, | 1121 | DMA_TABLE_BYTES, |
1122 | DMA_BIDIRECTIONAL); | 1122 | DMA_BIDIRECTIONAL); |
1123 | 1123 | ||
1124 | if (bp->flags & B44_FLAG_TX_RING_HACK) | 1124 | if (bp->flags & B44_FLAG_TX_RING_HACK) |
1125 | dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma, | 1125 | dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma, |
1126 | DMA_TABLE_BYTES, | 1126 | DMA_TABLE_BYTES, |
1127 | DMA_TO_DEVICE); | 1127 | DMA_TO_DEVICE); |
1128 | 1128 | ||
@@ -1144,24 +1144,24 @@ static void b44_free_consistent(struct b44 *bp) | |||
1144 | bp->tx_buffers = NULL; | 1144 | bp->tx_buffers = NULL; |
1145 | if (bp->rx_ring) { | 1145 | if (bp->rx_ring) { |
1146 | if (bp->flags & B44_FLAG_RX_RING_HACK) { | 1146 | if (bp->flags & B44_FLAG_RX_RING_HACK) { |
1147 | dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma, | 1147 | dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma, |
1148 | DMA_TABLE_BYTES, | 1148 | DMA_TABLE_BYTES, |
1149 | DMA_BIDIRECTIONAL); | 1149 | DMA_BIDIRECTIONAL); |
1150 | kfree(bp->rx_ring); | 1150 | kfree(bp->rx_ring); |
1151 | } else | 1151 | } else |
1152 | dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES, | 1152 | dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, |
1153 | bp->rx_ring, bp->rx_ring_dma); | 1153 | bp->rx_ring, bp->rx_ring_dma); |
1154 | bp->rx_ring = NULL; | 1154 | bp->rx_ring = NULL; |
1155 | bp->flags &= ~B44_FLAG_RX_RING_HACK; | 1155 | bp->flags &= ~B44_FLAG_RX_RING_HACK; |
1156 | } | 1156 | } |
1157 | if (bp->tx_ring) { | 1157 | if (bp->tx_ring) { |
1158 | if (bp->flags & B44_FLAG_TX_RING_HACK) { | 1158 | if (bp->flags & B44_FLAG_TX_RING_HACK) { |
1159 | dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma, | 1159 | dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma, |
1160 | DMA_TABLE_BYTES, | 1160 | DMA_TABLE_BYTES, |
1161 | DMA_TO_DEVICE); | 1161 | DMA_TO_DEVICE); |
1162 | kfree(bp->tx_ring); | 1162 | kfree(bp->tx_ring); |
1163 | } else | 1163 | } else |
1164 | dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES, | 1164 | dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, |
1165 | bp->tx_ring, bp->tx_ring_dma); | 1165 | bp->tx_ring, bp->tx_ring_dma); |
1166 | bp->tx_ring = NULL; | 1166 | bp->tx_ring = NULL; |
1167 | bp->flags &= ~B44_FLAG_TX_RING_HACK; | 1167 | bp->flags &= ~B44_FLAG_TX_RING_HACK; |
@@ -1187,7 +1187,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) | |||
1187 | goto out_err; | 1187 | goto out_err; |
1188 | 1188 | ||
1189 | size = DMA_TABLE_BYTES; | 1189 | size = DMA_TABLE_BYTES; |
1190 | bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, gfp); | 1190 | bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->rx_ring_dma, gfp); |
1191 | if (!bp->rx_ring) { | 1191 | if (!bp->rx_ring) { |
1192 | /* Allocation may have failed due to pci_alloc_consistent | 1192 | /* Allocation may have failed due to pci_alloc_consistent |
1193 | insisting on use of GFP_DMA, which is more restrictive | 1193 | insisting on use of GFP_DMA, which is more restrictive |
@@ -1199,7 +1199,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) | |||
1199 | if (!rx_ring) | 1199 | if (!rx_ring) |
1200 | goto out_err; | 1200 | goto out_err; |
1201 | 1201 | ||
1202 | rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring, | 1202 | rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring, |
1203 | DMA_TABLE_BYTES, | 1203 | DMA_TABLE_BYTES, |
1204 | DMA_BIDIRECTIONAL); | 1204 | DMA_BIDIRECTIONAL); |
1205 | 1205 | ||
@@ -1214,7 +1214,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) | |||
1214 | bp->flags |= B44_FLAG_RX_RING_HACK; | 1214 | bp->flags |= B44_FLAG_RX_RING_HACK; |
1215 | } | 1215 | } |
1216 | 1216 | ||
1217 | bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, gfp); | 1217 | bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->tx_ring_dma, gfp); |
1218 | if (!bp->tx_ring) { | 1218 | if (!bp->tx_ring) { |
1219 | /* Allocation may have failed due to dma_alloc_coherent | 1219 | /* Allocation may have failed due to dma_alloc_coherent |
1220 | insisting on use of GFP_DMA, which is more restrictive | 1220 | insisting on use of GFP_DMA, which is more restrictive |
@@ -1226,7 +1226,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) | |||
1226 | if (!tx_ring) | 1226 | if (!tx_ring) |
1227 | goto out_err; | 1227 | goto out_err; |
1228 | 1228 | ||
1229 | tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring, | 1229 | tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring, |
1230 | DMA_TABLE_BYTES, | 1230 | DMA_TABLE_BYTES, |
1231 | DMA_TO_DEVICE); | 1231 | DMA_TO_DEVICE); |
1232 | 1232 | ||
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 501e451be911..665341e43055 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -730,7 +730,7 @@ static void write_msg(struct console *con, const char *msg, unsigned int len) | |||
730 | 730 | ||
731 | static struct console netconsole = { | 731 | static struct console netconsole = { |
732 | .name = "netcon", | 732 | .name = "netcon", |
733 | .flags = CON_ENABLED | CON_PRINTBUFFER, | 733 | .flags = CON_ENABLED, |
734 | .write = write_msg, | 734 | .write = write_msg, |
735 | }; | 735 | }; |
736 | 736 | ||
@@ -749,6 +749,9 @@ static int __init init_netconsole(void) | |||
749 | err = PTR_ERR(nt); | 749 | err = PTR_ERR(nt); |
750 | goto fail; | 750 | goto fail; |
751 | } | 751 | } |
752 | /* Dump existing printks when we register */ | ||
753 | netconsole.flags |= CON_PRINTBUFFER; | ||
754 | |||
752 | spin_lock_irqsave(&target_list_lock, flags); | 755 | spin_lock_irqsave(&target_list_lock, flags); |
753 | list_add(&nt->list, &target_list); | 756 | list_add(&nt->list, &target_list); |
754 | spin_unlock_irqrestore(&target_list_lock, flags); | 757 | spin_unlock_irqrestore(&target_list_lock, flags); |
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index ddbc6e475e28..c16de5129a71 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -512,13 +512,18 @@ static void gelic_wl_parse_ie(u8 *data, size_t len, | |||
512 | data, len); | 512 | data, len); |
513 | memset(ie_info, 0, sizeof(struct ie_info)); | 513 | memset(ie_info, 0, sizeof(struct ie_info)); |
514 | 514 | ||
515 | while (0 < data_left) { | 515 | while (2 <= data_left) { |
516 | item_id = *pos++; | 516 | item_id = *pos++; |
517 | item_len = *pos++; | 517 | item_len = *pos++; |
518 | data_left -= 2; | ||
519 | |||
520 | if (data_left < item_len) | ||
521 | break; | ||
518 | 522 | ||
519 | switch (item_id) { | 523 | switch (item_id) { |
520 | case MFIE_TYPE_GENERIC: | 524 | case MFIE_TYPE_GENERIC: |
521 | if (!memcmp(pos, wpa_oui, OUI_LEN) && | 525 | if ((OUI_LEN + 1 <= item_len) && |
526 | !memcmp(pos, wpa_oui, OUI_LEN) && | ||
522 | pos[OUI_LEN] == 0x01) { | 527 | pos[OUI_LEN] == 0x01) { |
523 | ie_info->wpa.data = pos - 2; | 528 | ie_info->wpa.data = pos - 2; |
524 | ie_info->wpa.len = item_len + 2; | 529 | ie_info->wpa.len = item_len + 2; |
@@ -535,7 +540,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len, | |||
535 | break; | 540 | break; |
536 | } | 541 | } |
537 | pos += item_len; | 542 | pos += item_len; |
538 | data_left -= item_len + 2; | 543 | data_left -= item_len; |
539 | } | 544 | } |
540 | pr_debug("%s: wpa=%p,%d wpa2=%p,%d\n", __func__, | 545 | pr_debug("%s: wpa=%p,%d wpa2=%p,%d\n", __func__, |
541 | ie_info->wpa.data, ie_info->wpa.len, | 546 | ie_info->wpa.data, ie_info->wpa.len, |
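The bounds checks added above follow a generic pattern for walking id/length/value elements: make sure the two header bytes are present before reading them, account for them, and stop if the advertised payload would run past the end of the buffer. A standalone sketch of that pattern (names are illustrative, not the driver's):

    #include <stddef.h>
    #include <stdint.h>

    /* Walk a buffer of IEs: 1 byte id, 1 byte length, then "length" bytes of
     * data.  Truncated or oversized elements terminate the walk instead of
     * being read past the end of the buffer. */
    static void walk_ies(const uint8_t *data, size_t len,
                         void (*cb)(uint8_t id, const uint8_t *val, size_t vlen))
    {
            const uint8_t *pos = data;
            size_t left = len;

            while (left >= 2) {             /* need id + length bytes */
                    uint8_t id   = *pos++;
                    uint8_t vlen = *pos++;

                    left -= 2;
                    if (left < vlen)        /* element claims more than remains */
                            break;

                    cb(id, pos, vlen);
                    pos  += vlen;
                    left -= vlen;
            }
    }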
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 948eb1fe916b..48e912487b16 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -373,10 +373,10 @@ static inline | |||
373 | dma_addr_t dmaaddr; | 373 | dma_addr_t dmaaddr; |
374 | 374 | ||
375 | if (tx) { | 375 | if (tx) { |
376 | dmaaddr = dma_map_single(ring->dev->dev->dev, | 376 | dmaaddr = dma_map_single(ring->dev->dev->dma_dev, |
377 | buf, len, DMA_TO_DEVICE); | 377 | buf, len, DMA_TO_DEVICE); |
378 | } else { | 378 | } else { |
379 | dmaaddr = dma_map_single(ring->dev->dev->dev, | 379 | dmaaddr = dma_map_single(ring->dev->dev->dma_dev, |
380 | buf, len, DMA_FROM_DEVICE); | 380 | buf, len, DMA_FROM_DEVICE); |
381 | } | 381 | } |
382 | 382 | ||
@@ -388,9 +388,10 @@ static inline | |||
388 | dma_addr_t addr, size_t len, int tx) | 388 | dma_addr_t addr, size_t len, int tx) |
389 | { | 389 | { |
390 | if (tx) { | 390 | if (tx) { |
391 | dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE); | 391 | dma_unmap_single(ring->dev->dev->dma_dev, |
392 | addr, len, DMA_TO_DEVICE); | ||
392 | } else { | 393 | } else { |
393 | dma_unmap_single(ring->dev->dev->dev, | 394 | dma_unmap_single(ring->dev->dev->dma_dev, |
394 | addr, len, DMA_FROM_DEVICE); | 395 | addr, len, DMA_FROM_DEVICE); |
395 | } | 396 | } |
396 | } | 397 | } |
@@ -400,7 +401,7 @@ static inline | |||
400 | dma_addr_t addr, size_t len) | 401 | dma_addr_t addr, size_t len) |
401 | { | 402 | { |
402 | B43_WARN_ON(ring->tx); | 403 | B43_WARN_ON(ring->tx); |
403 | dma_sync_single_for_cpu(ring->dev->dev->dev, | 404 | dma_sync_single_for_cpu(ring->dev->dev->dma_dev, |
404 | addr, len, DMA_FROM_DEVICE); | 405 | addr, len, DMA_FROM_DEVICE); |
405 | } | 406 | } |
406 | 407 | ||
@@ -409,7 +410,7 @@ static inline | |||
409 | dma_addr_t addr, size_t len) | 410 | dma_addr_t addr, size_t len) |
410 | { | 411 | { |
411 | B43_WARN_ON(ring->tx); | 412 | B43_WARN_ON(ring->tx); |
412 | dma_sync_single_for_device(ring->dev->dev->dev, | 413 | dma_sync_single_for_device(ring->dev->dev->dma_dev, |
413 | addr, len, DMA_FROM_DEVICE); | 414 | addr, len, DMA_FROM_DEVICE); |
414 | } | 415 | } |
415 | 416 | ||
@@ -425,7 +426,7 @@ static inline | |||
425 | 426 | ||
426 | static int alloc_ringmemory(struct b43_dmaring *ring) | 427 | static int alloc_ringmemory(struct b43_dmaring *ring) |
427 | { | 428 | { |
428 | struct device *dev = ring->dev->dev->dev; | 429 | struct device *dma_dev = ring->dev->dev->dma_dev; |
429 | gfp_t flags = GFP_KERNEL; | 430 | gfp_t flags = GFP_KERNEL; |
430 | 431 | ||
431 | /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K | 432 | /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K |
@@ -439,7 +440,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring) | |||
439 | */ | 440 | */ |
440 | if (ring->type == B43_DMA_64BIT) | 441 | if (ring->type == B43_DMA_64BIT) |
441 | flags |= GFP_DMA; | 442 | flags |= GFP_DMA; |
442 | ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE, | 443 | ring->descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE, |
443 | &(ring->dmabase), flags); | 444 | &(ring->dmabase), flags); |
444 | if (!ring->descbase) { | 445 | if (!ring->descbase) { |
445 | b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); | 446 | b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); |
@@ -452,9 +453,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring) | |||
452 | 453 | ||
453 | static void free_ringmemory(struct b43_dmaring *ring) | 454 | static void free_ringmemory(struct b43_dmaring *ring) |
454 | { | 455 | { |
455 | struct device *dev = ring->dev->dev->dev; | 456 | struct device *dma_dev = ring->dev->dev->dma_dev; |
456 | 457 | ||
457 | dma_free_coherent(dev, B43_DMA_RINGMEMSIZE, | 458 | dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE, |
458 | ring->descbase, ring->dmabase); | 459 | ring->descbase, ring->dmabase); |
459 | } | 460 | } |
460 | 461 | ||
@@ -854,7 +855,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | |||
854 | goto err_kfree_meta; | 855 | goto err_kfree_meta; |
855 | 856 | ||
856 | /* test for ability to dma to txhdr_cache */ | 857 | /* test for ability to dma to txhdr_cache */ |
857 | dma_test = dma_map_single(dev->dev->dev, | 858 | dma_test = dma_map_single(dev->dev->dma_dev, |
858 | ring->txhdr_cache, | 859 | ring->txhdr_cache, |
859 | b43_txhdr_size(dev), | 860 | b43_txhdr_size(dev), |
860 | DMA_TO_DEVICE); | 861 | DMA_TO_DEVICE); |
@@ -869,7 +870,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | |||
869 | if (!ring->txhdr_cache) | 870 | if (!ring->txhdr_cache) |
870 | goto err_kfree_meta; | 871 | goto err_kfree_meta; |
871 | 872 | ||
872 | dma_test = dma_map_single(dev->dev->dev, | 873 | dma_test = dma_map_single(dev->dev->dma_dev, |
873 | ring->txhdr_cache, | 874 | ring->txhdr_cache, |
874 | b43_txhdr_size(dev), | 875 | b43_txhdr_size(dev), |
875 | DMA_TO_DEVICE); | 876 | DMA_TO_DEVICE); |
@@ -883,7 +884,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | |||
883 | } | 884 | } |
884 | } | 885 | } |
885 | 886 | ||
886 | dma_unmap_single(dev->dev->dev, | 887 | dma_unmap_single(dev->dev->dma_dev, |
887 | dma_test, b43_txhdr_size(dev), | 888 | dma_test, b43_txhdr_size(dev), |
888 | DMA_TO_DEVICE); | 889 | DMA_TO_DEVICE); |
889 | } | 890 | } |
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index e87b427d5e43..c990f87b107a 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -393,11 +393,11 @@ dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring, | |||
393 | dma_addr_t dmaaddr; | 393 | dma_addr_t dmaaddr; |
394 | 394 | ||
395 | if (tx) | 395 | if (tx) |
396 | dmaaddr = dma_map_single(ring->dev->dev->dev, | 396 | dmaaddr = dma_map_single(ring->dev->dev->dma_dev, |
397 | buf, len, | 397 | buf, len, |
398 | DMA_TO_DEVICE); | 398 | DMA_TO_DEVICE); |
399 | else | 399 | else |
400 | dmaaddr = dma_map_single(ring->dev->dev->dev, | 400 | dmaaddr = dma_map_single(ring->dev->dev->dma_dev, |
401 | buf, len, | 401 | buf, len, |
402 | DMA_FROM_DEVICE); | 402 | DMA_FROM_DEVICE); |
403 | 403 | ||
@@ -411,11 +411,11 @@ void unmap_descbuffer(struct b43legacy_dmaring *ring, | |||
411 | int tx) | 411 | int tx) |
412 | { | 412 | { |
413 | if (tx) | 413 | if (tx) |
414 | dma_unmap_single(ring->dev->dev->dev, | 414 | dma_unmap_single(ring->dev->dev->dma_dev, |
415 | addr, len, | 415 | addr, len, |
416 | DMA_TO_DEVICE); | 416 | DMA_TO_DEVICE); |
417 | else | 417 | else |
418 | dma_unmap_single(ring->dev->dev->dev, | 418 | dma_unmap_single(ring->dev->dev->dma_dev, |
419 | addr, len, | 419 | addr, len, |
420 | DMA_FROM_DEVICE); | 420 | DMA_FROM_DEVICE); |
421 | } | 421 | } |
@@ -427,7 +427,7 @@ void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring, | |||
427 | { | 427 | { |
428 | B43legacy_WARN_ON(ring->tx); | 428 | B43legacy_WARN_ON(ring->tx); |
429 | 429 | ||
430 | dma_sync_single_for_cpu(ring->dev->dev->dev, | 430 | dma_sync_single_for_cpu(ring->dev->dev->dma_dev, |
431 | addr, len, DMA_FROM_DEVICE); | 431 | addr, len, DMA_FROM_DEVICE); |
432 | } | 432 | } |
433 | 433 | ||
@@ -438,7 +438,7 @@ void sync_descbuffer_for_device(struct b43legacy_dmaring *ring, | |||
438 | { | 438 | { |
439 | B43legacy_WARN_ON(ring->tx); | 439 | B43legacy_WARN_ON(ring->tx); |
440 | 440 | ||
441 | dma_sync_single_for_device(ring->dev->dev->dev, | 441 | dma_sync_single_for_device(ring->dev->dev->dma_dev, |
442 | addr, len, DMA_FROM_DEVICE); | 442 | addr, len, DMA_FROM_DEVICE); |
443 | } | 443 | } |
444 | 444 | ||
@@ -458,9 +458,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring, | |||
458 | 458 | ||
459 | static int alloc_ringmemory(struct b43legacy_dmaring *ring) | 459 | static int alloc_ringmemory(struct b43legacy_dmaring *ring) |
460 | { | 460 | { |
461 | struct device *dev = ring->dev->dev->dev; | 461 | struct device *dma_dev = ring->dev->dev->dma_dev; |
462 | 462 | ||
463 | ring->descbase = dma_alloc_coherent(dev, B43legacy_DMA_RINGMEMSIZE, | 463 | ring->descbase = dma_alloc_coherent(dma_dev, B43legacy_DMA_RINGMEMSIZE, |
464 | &(ring->dmabase), GFP_KERNEL); | 464 | &(ring->dmabase), GFP_KERNEL); |
465 | if (!ring->descbase) { | 465 | if (!ring->descbase) { |
466 | b43legacyerr(ring->dev->wl, "DMA ringmemory allocation" | 466 | b43legacyerr(ring->dev->wl, "DMA ringmemory allocation" |
@@ -474,9 +474,9 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring) | |||
474 | 474 | ||
475 | static void free_ringmemory(struct b43legacy_dmaring *ring) | 475 | static void free_ringmemory(struct b43legacy_dmaring *ring) |
476 | { | 476 | { |
477 | struct device *dev = ring->dev->dev->dev; | 477 | struct device *dma_dev = ring->dev->dev->dma_dev; |
478 | 478 | ||
479 | dma_free_coherent(dev, B43legacy_DMA_RINGMEMSIZE, | 479 | dma_free_coherent(dma_dev, B43legacy_DMA_RINGMEMSIZE, |
480 | ring->descbase, ring->dmabase); | 480 | ring->descbase, ring->dmabase); |
481 | } | 481 | } |
482 | 482 | ||
@@ -585,8 +585,9 @@ static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev, | |||
585 | 585 | ||
586 | /* Check if a DMA mapping address is invalid. */ | 586 | /* Check if a DMA mapping address is invalid. */ |
587 | static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring, | 587 | static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring, |
588 | dma_addr_t addr, | 588 | dma_addr_t addr, |
589 | size_t buffersize) | 589 | size_t buffersize, |
590 | bool dma_to_device) | ||
590 | { | 591 | { |
591 | if (unlikely(dma_mapping_error(addr))) | 592 | if (unlikely(dma_mapping_error(addr))) |
592 | return 1; | 593 | return 1; |
@@ -594,11 +595,11 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring, | |||
594 | switch (ring->type) { | 595 | switch (ring->type) { |
595 | case B43legacy_DMA_30BIT: | 596 | case B43legacy_DMA_30BIT: |
596 | if ((u64)addr + buffersize > (1ULL << 30)) | 597 | if ((u64)addr + buffersize > (1ULL << 30)) |
597 | return 1; | 598 | goto address_error; |
598 | break; | 599 | break; |
599 | case B43legacy_DMA_32BIT: | 600 | case B43legacy_DMA_32BIT: |
600 | if ((u64)addr + buffersize > (1ULL << 32)) | 601 | if ((u64)addr + buffersize > (1ULL << 32)) |
601 | return 1; | 602 | goto address_error; |
602 | break; | 603 | break; |
603 | case B43legacy_DMA_64BIT: | 604 | case B43legacy_DMA_64BIT: |
604 | /* Currently we can't have addresses beyond 64 bits in the kernel. */ | 605 | /* Currently we can't have addresses beyond 64 bits in the kernel. */ |
@@ -607,6 +608,12 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring, | |||
607 | 608 | ||
608 | /* The address is OK. */ | 609 | /* The address is OK. */ |
609 | return 0; | 610 | return 0; |
611 | |||
612 | address_error: | ||
613 | /* We can't support this address. Unmap it again. */ | ||
614 | unmap_descbuffer(ring, addr, buffersize, dma_to_device); | ||
615 | |||
616 | return 1; | ||
610 | } | 617 | } |
611 | 618 | ||
612 | static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, | 619 | static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, |
@@ -626,7 +633,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, | |||
626 | return -ENOMEM; | 633 | return -ENOMEM; |
627 | dmaaddr = map_descbuffer(ring, skb->data, | 634 | dmaaddr = map_descbuffer(ring, skb->data, |
628 | ring->rx_buffersize, 0); | 635 | ring->rx_buffersize, 0); |
629 | if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) { | 636 | if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { |
630 | /* ugh. try to realloc in zone_dma */ | 637 | /* ugh. try to realloc in zone_dma */ |
631 | gfp_flags |= GFP_DMA; | 638 | gfp_flags |= GFP_DMA; |
632 | 639 | ||
@@ -639,7 +646,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, | |||
639 | ring->rx_buffersize, 0); | 646 | ring->rx_buffersize, 0); |
640 | } | 647 | } |
641 | 648 | ||
642 | if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) { | 649 | if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { |
643 | dev_kfree_skb_any(skb); | 650 | dev_kfree_skb_any(skb); |
644 | return -EIO; | 651 | return -EIO; |
645 | } | 652 | } |
@@ -886,12 +893,12 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, | |||
886 | goto err_kfree_meta; | 893 | goto err_kfree_meta; |
887 | 894 | ||
888 | /* test for ability to dma to txhdr_cache */ | 895 | /* test for ability to dma to txhdr_cache */ |
889 | dma_test = dma_map_single(dev->dev->dev, ring->txhdr_cache, | 896 | dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, |
890 | sizeof(struct b43legacy_txhdr_fw3), | 897 | sizeof(struct b43legacy_txhdr_fw3), |
891 | DMA_TO_DEVICE); | 898 | DMA_TO_DEVICE); |
892 | 899 | ||
893 | if (b43legacy_dma_mapping_error(ring, dma_test, | 900 | if (b43legacy_dma_mapping_error(ring, dma_test, |
894 | sizeof(struct b43legacy_txhdr_fw3))) { | 901 | sizeof(struct b43legacy_txhdr_fw3), 1)) { |
895 | /* ugh realloc */ | 902 | /* ugh realloc */ |
896 | kfree(ring->txhdr_cache); | 903 | kfree(ring->txhdr_cache); |
897 | ring->txhdr_cache = kcalloc(nr_slots, | 904 | ring->txhdr_cache = kcalloc(nr_slots, |
@@ -900,17 +907,17 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, | |||
900 | if (!ring->txhdr_cache) | 907 | if (!ring->txhdr_cache) |
901 | goto err_kfree_meta; | 908 | goto err_kfree_meta; |
902 | 909 | ||
903 | dma_test = dma_map_single(dev->dev->dev, | 910 | dma_test = dma_map_single(dev->dev->dma_dev, |
904 | ring->txhdr_cache, | 911 | ring->txhdr_cache, |
905 | sizeof(struct b43legacy_txhdr_fw3), | 912 | sizeof(struct b43legacy_txhdr_fw3), |
906 | DMA_TO_DEVICE); | 913 | DMA_TO_DEVICE); |
907 | 914 | ||
908 | if (b43legacy_dma_mapping_error(ring, dma_test, | 915 | if (b43legacy_dma_mapping_error(ring, dma_test, |
909 | sizeof(struct b43legacy_txhdr_fw3))) | 916 | sizeof(struct b43legacy_txhdr_fw3), 1)) |
910 | goto err_kfree_txhdr_cache; | 917 | goto err_kfree_txhdr_cache; |
911 | } | 918 | } |
912 | 919 | ||
913 | dma_unmap_single(dev->dev->dev, | 920 | dma_unmap_single(dev->dev->dma_dev, |
914 | dma_test, sizeof(struct b43legacy_txhdr_fw3), | 921 | dma_test, sizeof(struct b43legacy_txhdr_fw3), |
915 | DMA_TO_DEVICE); | 922 | DMA_TO_DEVICE); |
916 | } | 923 | } |
@@ -1235,7 +1242,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, | |||
1235 | meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, | 1242 | meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, |
1236 | sizeof(struct b43legacy_txhdr_fw3), 1); | 1243 | sizeof(struct b43legacy_txhdr_fw3), 1); |
1237 | if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr, | 1244 | if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr, |
1238 | sizeof(struct b43legacy_txhdr_fw3))) { | 1245 | sizeof(struct b43legacy_txhdr_fw3), 1)) { |
1239 | ring->current_slot = old_top_slot; | 1246 | ring->current_slot = old_top_slot; |
1240 | ring->used_slots = old_used_slots; | 1247 | ring->used_slots = old_used_slots; |
1241 | return -EIO; | 1248 | return -EIO; |
@@ -1254,7 +1261,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, | |||
1254 | 1261 | ||
1255 | meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); | 1262 | meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); |
1256 | /* create a bounce buffer in zone_dma on mapping failure. */ | 1263 | /* create a bounce buffer in zone_dma on mapping failure. */ |
1257 | if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) { | 1264 | if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { |
1258 | bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); | 1265 | bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); |
1259 | if (!bounce_skb) { | 1266 | if (!bounce_skb) { |
1260 | ring->current_slot = old_top_slot; | 1267 | ring->current_slot = old_top_slot; |
@@ -1268,7 +1275,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, | |||
1268 | skb = bounce_skb; | 1275 | skb = bounce_skb; |
1269 | meta->skb = skb; | 1276 | meta->skb = skb; |
1270 | meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); | 1277 | meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); |
1271 | if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) { | 1278 | if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { |
1272 | ring->current_slot = old_top_slot; | 1279 | ring->current_slot = old_top_slot; |
1273 | ring->used_slots = old_used_slots; | 1280 | ring->used_slots = old_used_slots; |
1274 | err = -EIO; | 1281 | err = -EIO; |
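The extra dma_to_device argument above exists so that a failed address-range check can undo a mapping that technically succeeded. Reduced to a standalone sketch for a 30-bit DMA engine (the function name is illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Map "buf" for DMA and verify the whole buffer is reachable by a 30-bit
     * DMA engine.  If the range check fails, the mapping itself still
     * succeeded and must be unmapped before reporting the error, otherwise
     * the streaming-DMA state is left unbalanced. */
    static int map_checked_dma30(struct device *dma_dev, void *buf, size_t len,
                                 enum dma_data_direction dir, dma_addr_t *out)
    {
            dma_addr_t addr = dma_map_single(dma_dev, buf, len, dir);

            if (dma_mapping_error(addr))
                    return -EIO;

            if ((u64)addr + len > (1ULL << 30)) {
                    dma_unmap_single(dma_dev, addr, len, dir);
                    return -EIO;
            }

            *out = addr;
            return 0;
    }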
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 5f3f34e1dbfd..0f7a6e7bd96a 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1488,6 +1488,7 @@ static int b43legacy_request_firmware(struct b43legacy_wldev *dev) | |||
1488 | } | 1488 | } |
1489 | if (!fw->initvals) { | 1489 | if (!fw->initvals) { |
1490 | switch (dev->phy.type) { | 1490 | switch (dev->phy.type) { |
1491 | case B43legacy_PHYTYPE_B: | ||
1491 | case B43legacy_PHYTYPE_G: | 1492 | case B43legacy_PHYTYPE_G: |
1492 | if ((rev >= 5) && (rev <= 10)) | 1493 | if ((rev >= 5) && (rev <= 10)) |
1493 | filename = "b0g0initvals5"; | 1494 | filename = "b0g0initvals5"; |
@@ -1505,6 +1506,7 @@ static int b43legacy_request_firmware(struct b43legacy_wldev *dev) | |||
1505 | } | 1506 | } |
1506 | if (!fw->initvals_band) { | 1507 | if (!fw->initvals_band) { |
1507 | switch (dev->phy.type) { | 1508 | switch (dev->phy.type) { |
1509 | case B43legacy_PHYTYPE_B: | ||
1508 | case B43legacy_PHYTYPE_G: | 1510 | case B43legacy_PHYTYPE_G: |
1509 | if ((rev >= 5) && (rev <= 10)) | 1511 | if ((rev >= 5) && (rev <= 10)) |
1510 | filename = "b0g0bsinitvals5"; | 1512 | filename = "b0g0bsinitvals5"; |
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index f44505994a0e..133b3f39eeb6 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -509,6 +509,8 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev, | |||
509 | return -EOPNOTSUPP; | 509 | return -EOPNOTSUPP; |
510 | } | 510 | } |
511 | 511 | ||
512 | priv->vif = conf->vif; | ||
513 | |||
512 | rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); | 514 | rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); |
513 | for (i = 0; i < ETH_ALEN; i++) | 515 | for (i = 0; i < ETH_ALEN; i++) |
514 | rtl818x_iowrite8(priv, &priv->map->MAC[i], | 516 | rtl818x_iowrite8(priv, &priv->map->MAC[i], |
@@ -523,6 +525,7 @@ static void rtl8187_remove_interface(struct ieee80211_hw *dev, | |||
523 | { | 525 | { |
524 | struct rtl8187_priv *priv = dev->priv; | 526 | struct rtl8187_priv *priv = dev->priv; |
525 | priv->mode = IEEE80211_IF_TYPE_MNTR; | 527 | priv->mode = IEEE80211_IF_TYPE_MNTR; |
528 | priv->vif = NULL; | ||
526 | } | 529 | } |
527 | 530 | ||
528 | static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) | 531 | static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) |
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 72017bf2e577..8003a9e55ac4 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -436,15 +436,18 @@ static int ssb_devices_register(struct ssb_bus *bus) | |||
436 | #ifdef CONFIG_SSB_PCIHOST | 436 | #ifdef CONFIG_SSB_PCIHOST |
437 | sdev->irq = bus->host_pci->irq; | 437 | sdev->irq = bus->host_pci->irq; |
438 | dev->parent = &bus->host_pci->dev; | 438 | dev->parent = &bus->host_pci->dev; |
439 | sdev->dma_dev = &bus->host_pci->dev; | ||
439 | #endif | 440 | #endif |
440 | break; | 441 | break; |
441 | case SSB_BUSTYPE_PCMCIA: | 442 | case SSB_BUSTYPE_PCMCIA: |
442 | #ifdef CONFIG_SSB_PCMCIAHOST | 443 | #ifdef CONFIG_SSB_PCMCIAHOST |
443 | sdev->irq = bus->host_pcmcia->irq.AssignedIRQ; | 444 | sdev->irq = bus->host_pcmcia->irq.AssignedIRQ; |
444 | dev->parent = &bus->host_pcmcia->dev; | 445 | dev->parent = &bus->host_pcmcia->dev; |
446 | sdev->dma_dev = &bus->host_pcmcia->dev; | ||
445 | #endif | 447 | #endif |
446 | break; | 448 | break; |
447 | case SSB_BUSTYPE_SSB: | 449 | case SSB_BUSTYPE_SSB: |
450 | sdev->dma_dev = dev; | ||
448 | break; | 451 | break; |
449 | } | 452 | } |
450 | 453 | ||
@@ -1018,15 +1021,14 @@ EXPORT_SYMBOL(ssb_dma_translation); | |||
1018 | 1021 | ||
1019 | int ssb_dma_set_mask(struct ssb_device *ssb_dev, u64 mask) | 1022 | int ssb_dma_set_mask(struct ssb_device *ssb_dev, u64 mask) |
1020 | { | 1023 | { |
1021 | struct device *dev = ssb_dev->dev; | 1024 | struct device *dma_dev = ssb_dev->dma_dev; |
1022 | 1025 | ||
1023 | #ifdef CONFIG_SSB_PCIHOST | 1026 | #ifdef CONFIG_SSB_PCIHOST |
1024 | if (ssb_dev->bus->bustype == SSB_BUSTYPE_PCI && | 1027 | if (ssb_dev->bus->bustype == SSB_BUSTYPE_PCI) |
1025 | !dma_supported(dev, mask)) | 1028 | return dma_set_mask(dma_dev, mask); |
1026 | return -EIO; | ||
1027 | #endif | 1029 | #endif |
1028 | dev->coherent_dma_mask = mask; | 1030 | dma_dev->coherent_dma_mask = mask; |
1029 | dev->dma_mask = &dev->coherent_dma_mask; | 1031 | dma_dev->dma_mask = &dma_dev->coherent_dma_mask; |
1030 | 1032 | ||
1031 | return 0; | 1033 | return 0; |
1032 | } | 1034 | } |
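With the new dma_dev pointer in place, an SSB driver negotiates its mask through ssb_dma_set_mask() and then passes sdev->dma_dev (the PCI/PCMCIA host device, or the SSB device itself) to the generic DMA API, as b44 and b43 do in the hunks above. A condensed sketch of that calling pattern (the helper name is illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/ssb/ssb.h>

    /* Negotiate a 30-bit DMA mask for the core and map one TX buffer through
     * the host device that actually performs the DMA transfers. */
    static int example_map_tx(struct ssb_device *sdev, void *buf, size_t len,
                              dma_addr_t *mapping)
    {
            int err = ssb_dma_set_mask(sdev, DMA_30BIT_MASK);
            if (err)
                    return err;

            *mapping = dma_map_single(sdev->dma_dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(*mapping))
                    return -EIO;

            return 0;
    }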
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index a396fbbdc9c2..d101025a4c63 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -109,6 +109,22 @@ static int option_send_setup(struct usb_serial_port *port); | |||
109 | #define HUAWEI_PRODUCT_E600 0x1001 | 109 | #define HUAWEI_PRODUCT_E600 0x1001 |
110 | #define HUAWEI_PRODUCT_E220 0x1003 | 110 | #define HUAWEI_PRODUCT_E220 0x1003 |
111 | #define HUAWEI_PRODUCT_E220BIS 0x1004 | 111 | #define HUAWEI_PRODUCT_E220BIS 0x1004 |
112 | #define HUAWEI_PRODUCT_E1401 0x1401 | ||
113 | #define HUAWEI_PRODUCT_E1403 0x1403 | ||
114 | #define HUAWEI_PRODUCT_E1405 0x1405 | ||
115 | #define HUAWEI_PRODUCT_E1406 0x1406 | ||
116 | #define HUAWEI_PRODUCT_E1408 0x1408 | ||
117 | #define HUAWEI_PRODUCT_E1409 0x1409 | ||
118 | #define HUAWEI_PRODUCT_E1410 0x1410 | ||
119 | #define HUAWEI_PRODUCT_E1411 0x1411 | ||
120 | #define HUAWEI_PRODUCT_E1412 0x1412 | ||
121 | #define HUAWEI_PRODUCT_E1413 0x1413 | ||
122 | #define HUAWEI_PRODUCT_E1414 0x1414 | ||
123 | #define HUAWEI_PRODUCT_E1415 0x1415 | ||
124 | #define HUAWEI_PRODUCT_E1416 0x1416 | ||
125 | #define HUAWEI_PRODUCT_E1417 0x1417 | ||
126 | #define HUAWEI_PRODUCT_E1418 0x1418 | ||
127 | #define HUAWEI_PRODUCT_E1419 0x1419 | ||
112 | 128 | ||
113 | #define NOVATELWIRELESS_VENDOR_ID 0x1410 | 129 | #define NOVATELWIRELESS_VENDOR_ID 0x1410 |
114 | 130 | ||
@@ -129,6 +145,7 @@ static int option_send_setup(struct usb_serial_port *port); | |||
129 | #define NOVATELWIRELESS_PRODUCT_EV620 0x2100 | 145 | #define NOVATELWIRELESS_PRODUCT_EV620 0x2100 |
130 | #define NOVATELWIRELESS_PRODUCT_ES720 0x2110 | 146 | #define NOVATELWIRELESS_PRODUCT_ES720 0x2110 |
131 | #define NOVATELWIRELESS_PRODUCT_E725 0x2120 | 147 | #define NOVATELWIRELESS_PRODUCT_E725 0x2120 |
148 | #define NOVATELWIRELESS_PRODUCT_ES620 0x2130 | ||
132 | #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 | 149 | #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 |
133 | #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 | 150 | #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 |
134 | #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 | 151 | #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 |
@@ -137,6 +154,8 @@ static int option_send_setup(struct usb_serial_port *port); | |||
137 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 | 154 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 |
138 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 | 155 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 |
139 | 156 | ||
157 | #define NOVATELWIRELESS_PRODUCT_U727 0x5010 | ||
158 | |||
140 | /* FUTURE NOVATEL PRODUCTS */ | 159 | /* FUTURE NOVATEL PRODUCTS */ |
141 | #define NOVATELWIRELESS_PRODUCT_EVDO_1 0x6000 | 160 | #define NOVATELWIRELESS_PRODUCT_EVDO_1 0x6000 |
142 | #define NOVATELWIRELESS_PRODUCT_HSPA_1 0x7000 | 161 | #define NOVATELWIRELESS_PRODUCT_HSPA_1 0x7000 |
@@ -147,6 +166,12 @@ static int option_send_setup(struct usb_serial_port *port); | |||
147 | #define NOVATELWIRELESS_PRODUCT_EMBEDDED_2 0x8001 | 166 | #define NOVATELWIRELESS_PRODUCT_EMBEDDED_2 0x8001 |
148 | #define NOVATELWIRELESS_PRODUCT_GLOBAL_2 0x9001 | 167 | #define NOVATELWIRELESS_PRODUCT_GLOBAL_2 0x9001 |
149 | 168 | ||
169 | /* AMOI PRODUCTS */ | ||
170 | #define AMOI_VENDOR_ID 0x1614 | ||
171 | #define AMOI_PRODUCT_H01 0x0800 | ||
172 | #define AMOI_PRODUCT_H01A 0x7002 | ||
173 | #define AMOI_PRODUCT_H02 0x0802 | ||
174 | |||
150 | #define DELL_VENDOR_ID 0x413C | 175 | #define DELL_VENDOR_ID 0x413C |
151 | 176 | ||
152 | #define KYOCERA_VENDOR_ID 0x0c88 | 177 | #define KYOCERA_VENDOR_ID 0x0c88 |
@@ -163,8 +188,13 @@ static int option_send_setup(struct usb_serial_port *port); | |||
163 | #define BANDRICH_PRODUCT_C100_1 0x1002 | 188 | #define BANDRICH_PRODUCT_C100_1 0x1002 |
164 | #define BANDRICH_PRODUCT_C100_2 0x1003 | 189 | #define BANDRICH_PRODUCT_C100_2 0x1003 |
165 | 190 | ||
191 | #define AMOI_VENDOR_ID 0x1614 | ||
192 | #define AMOI_PRODUCT_9508 0x0800 | ||
193 | |||
166 | #define QUALCOMM_VENDOR_ID 0x05C6 | 194 | #define QUALCOMM_VENDOR_ID 0x05C6 |
167 | 195 | ||
196 | #define MAXON_VENDOR_ID 0x16d8 | ||
197 | |||
168 | static struct usb_device_id option_ids[] = { | 198 | static struct usb_device_id option_ids[] = { |
169 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 199 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
170 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 200 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
@@ -204,6 +234,23 @@ static struct usb_device_id option_ids[] = { | |||
204 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) }, | 234 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) }, |
205 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, | 235 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, |
206 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, | 236 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, |
237 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401) }, | ||
238 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403) }, | ||
239 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405) }, | ||
240 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406) }, | ||
241 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408) }, | ||
242 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409) }, | ||
243 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410) }, | ||
244 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411) }, | ||
245 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412) }, | ||
246 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413) }, | ||
247 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414) }, | ||
248 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415) }, | ||
249 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416) }, | ||
250 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417) }, | ||
251 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418) }, | ||
252 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419) }, | ||
253 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) }, | ||
207 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ | 254 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ |
208 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ | 255 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ |
209 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */ | 256 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */ |
@@ -216,13 +263,13 @@ static struct usb_device_id option_ids[] = { | |||
216 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */ | 263 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */ |
217 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */ | 264 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */ |
218 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */ | 265 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */ |
219 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2130) }, /* Novatel Merlin ES620 SM Bus */ | 266 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */ |
220 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */ | 267 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */ |
221 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */ | 268 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */ |
222 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ | 269 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ |
223 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ | 270 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ |
224 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ | 271 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ |
225 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x5010) }, /* Novatel U727 */ | 272 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel U727 */ |
226 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_1) }, /* Novatel EVDO product */ | 273 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_1) }, /* Novatel EVDO product */ |
227 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_1) }, /* Novatel HSPA product */ | 274 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_1) }, /* Novatel HSPA product */ |
228 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_1) }, /* Novatel Embedded product */ | 275 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_1) }, /* Novatel Embedded product */ |
@@ -232,6 +279,10 @@ static struct usb_device_id option_ids[] = { | |||
232 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_2) }, /* Novatel Embedded product */ | 279 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_2) }, /* Novatel Embedded product */ |
233 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_2) }, /* Novatel Global product */ | 280 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_2) }, /* Novatel Global product */ |
234 | 281 | ||
282 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, | ||
283 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, | ||
284 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) }, | ||
285 | |||
235 | { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ | 286 | { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ |
236 | { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ | 287 | { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ |
237 | { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ | 288 | { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ |
@@ -249,6 +300,7 @@ static struct usb_device_id option_ids[] = { | |||
249 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, | 300 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, |
250 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, | 301 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, |
251 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ | 302 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ |
303 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ | ||
252 | { } /* Terminating entry */ | 304 | { } /* Terminating entry */ |
253 | }; | 305 | }; |
254 | MODULE_DEVICE_TABLE(usb, option_ids); | 306 | MODULE_DEVICE_TABLE(usb, option_ids); |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 3ce98e8d7bce..2138ba8aeb69 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -854,6 +854,7 @@ int usb_serial_probe(struct usb_interface *interface, | |||
854 | serial->num_interrupt_in = num_interrupt_in; | 854 | serial->num_interrupt_in = num_interrupt_in; |
855 | serial->num_interrupt_out = num_interrupt_out; | 855 | serial->num_interrupt_out = num_interrupt_out; |
856 | 856 | ||
857 | #if 0 | ||
857 | /* check that the device meets the driver's requirements */ | 858 | /* check that the device meets the driver's requirements */ |
858 | if ((type->num_interrupt_in != NUM_DONT_CARE && | 859 | if ((type->num_interrupt_in != NUM_DONT_CARE && |
859 | type->num_interrupt_in != num_interrupt_in) | 860 | type->num_interrupt_in != num_interrupt_in) |
@@ -867,6 +868,7 @@ int usb_serial_probe(struct usb_interface *interface, | |||
867 | kfree(serial); | 868 | kfree(serial); |
868 | return -EIO; | 869 | return -EIO; |
869 | } | 870 | } |
871 | #endif | ||
870 | 872 | ||
871 | /* found all that we need */ | 873 | /* found all that we need */ |
872 | dev_info(&interface->dev, "%s converter detected\n", | 874 | dev_info(&interface->dev, "%s converter detected\n", |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index e5219a56947c..91252075e6e1 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1530,16 +1530,104 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100, | |||
1530 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1530 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
1531 | US_FL_IGNORE_RESIDUE ), | 1531 | US_FL_IGNORE_RESIDUE ), |
1532 | 1532 | ||
1533 | /* Reported by fangxiaozhi <fangxiaozhi60675@huawei.com> | 1533 | /* Reported by fangxiaozhi <huananhu@huawei.com> |
1534 | * and by linlei <linlei83@huawei.com> | 1534 | * This brings the HUAWEI data card devices into multi-port mode |
1535 | * Patch reworked by Johann Wilhelm <johann.wilhelm@student.tugraz.at> | ||
1536 | * This brings the HUAWEI E220 devices into multi-port mode | ||
1537 | */ | 1535 | */ |
1536 | UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, | ||
1537 | "HUAWEI MOBILE", | ||
1538 | "Mass Storage", | ||
1539 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1540 | 0), | ||
1538 | UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, | 1541 | UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, |
1539 | "HUAWEI MOBILE", | 1542 | "HUAWEI MOBILE", |
1540 | "Mass Storage", | 1543 | "Mass Storage", |
1541 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | 1544 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, |
1542 | 0), | 1545 | 0), |
1546 | UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, | ||
1547 | "HUAWEI MOBILE", | ||
1548 | "Mass Storage", | ||
1549 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1550 | 0), | ||
1551 | UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, | ||
1552 | "HUAWEI MOBILE", | ||
1553 | "Mass Storage", | ||
1554 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1555 | 0), | ||
1556 | UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, | ||
1557 | "HUAWEI MOBILE", | ||
1558 | "Mass Storage", | ||
1559 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1560 | 0), | ||
1561 | UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, | ||
1562 | "HUAWEI MOBILE", | ||
1563 | "Mass Storage", | ||
1564 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1565 | 0), | ||
1566 | UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, | ||
1567 | "HUAWEI MOBILE", | ||
1568 | "Mass Storage", | ||
1569 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1570 | 0), | ||
1571 | UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000, | ||
1572 | "HUAWEI MOBILE", | ||
1573 | "Mass Storage", | ||
1574 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1575 | 0), | ||
1576 | UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, | ||
1577 | "HUAWEI MOBILE", | ||
1578 | "Mass Storage", | ||
1579 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1580 | 0), | ||
1581 | UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, | ||
1582 | "HUAWEI MOBILE", | ||
1583 | "Mass Storage", | ||
1584 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1585 | 0), | ||
1586 | UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, | ||
1587 | "HUAWEI MOBILE", | ||
1588 | "Mass Storage", | ||
1589 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1590 | 0), | ||
1591 | UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, | ||
1592 | "HUAWEI MOBILE", | ||
1593 | "Mass Storage", | ||
1594 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1595 | 0), | ||
1596 | UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, | ||
1597 | "HUAWEI MOBILE", | ||
1598 | "Mass Storage", | ||
1599 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1600 | 0), | ||
1601 | UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, | ||
1602 | "HUAWEI MOBILE", | ||
1603 | "Mass Storage", | ||
1604 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1605 | 0), | ||
1606 | UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, | ||
1607 | "HUAWEI MOBILE", | ||
1608 | "Mass Storage", | ||
1609 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1610 | 0), | ||
1611 | UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, | ||
1612 | "HUAWEI MOBILE", | ||
1613 | "Mass Storage", | ||
1614 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1615 | 0), | ||
1616 | UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, | ||
1617 | "HUAWEI MOBILE", | ||
1618 | "Mass Storage", | ||
1619 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1620 | 0), | ||
1621 | UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, | ||
1622 | "HUAWEI MOBILE", | ||
1623 | "Mass Storage", | ||
1624 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1625 | 0), | ||
1626 | UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, | ||
1627 | "HUAWEI MOBILE", | ||
1628 | "Mass Storage", | ||
1629 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, | ||
1630 | 0), | ||
1543 | 1631 | ||
1544 | /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ | 1632 | /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ |
1545 | UNUSUAL_DEV( 0x132b, 0x000b, 0x0001, 0x0001, | 1633 | UNUSUAL_DEV( 0x132b, 0x000b, 0x0001, 0x0001, |
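Note: the unusual_devs.h block replaces the single Huawei entry with one UNUSUAL_DEV() line per listed product ID (0x12d1:0x1001, 0x1003, 0x1004, 0x1401, 0x1403, 0x1405, 0x1406, 0x1408, 0x1409 and 0x1410-0x1419), all sharing usb_stor_huawei_e220_init. A toy, compilable model of why the entries repeat; UNUSUAL_DEV() and the US_* flags are not reproduced here, everything below is a simplified stand-in:

    #include <stdio.h>

    /* Toy model of an unusual_devs.h entry: vendor/product plus an init hook. */
    struct unusual_entry {
        unsigned short vendor, product;
        int (*init)(void);
    };

    /* Stand-in for usb_stor_huawei_e220_init. */
    static int huawei_e220_init(void) { return 0; }

    /* One entry per product ID, all sharing the same init routine, which is
     * exactly the pattern the real table follows for 0x12d1. */
    #define HUAWEI_ENTRY(pid)  { 0x12d1, (pid), huawei_e220_init }

    static const struct unusual_entry huawei_entries[] = {
        HUAWEI_ENTRY(0x1001),
        HUAWEI_ENTRY(0x1003),
        HUAWEI_ENTRY(0x1004),
        HUAWEI_ENTRY(0x1401),
        /* ...remaining product IDs from the hunk follow the same pattern... */
    };

    int main(void)
    {
        printf("%zu Huawei entries in this toy table\n",
               sizeof(huawei_entries) / sizeof(huawei_entries[0]));
        return 0;
    }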
diff --git a/fs/afs/main.c b/fs/afs/main.c index 0f60f6b35769..2d3e5d4fb9f7 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c | |||
@@ -22,7 +22,7 @@ MODULE_LICENSE("GPL"); | |||
22 | 22 | ||
23 | unsigned afs_debug; | 23 | unsigned afs_debug; |
24 | module_param_named(debug, afs_debug, uint, S_IWUSR | S_IRUGO); | 24 | module_param_named(debug, afs_debug, uint, S_IWUSR | S_IRUGO); |
25 | MODULE_PARM_DESC(afs_debug, "AFS debugging mask"); | 25 | MODULE_PARM_DESC(debug, "AFS debugging mask"); |
26 | 26 | ||
27 | static char *rootcell; | 27 | static char *rootcell; |
28 | 28 | ||
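Note: the fs/afs/main.c change is a one-line fix with real effect. MODULE_PARM_DESC() must name the user-visible parameter ("debug", as registered by module_param_named()), not the C variable afs_debug, otherwise the description is attached to a parameter name that modinfo never shows. The same correction is applied to the rxrpc debug parameters further down. A minimal module-style sketch of the corrected pairing (module and variable names invented):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static unsigned int example_debug;

    /* The first argument of module_param_named() is what userspace sees
     * (/sys/module/<mod>/parameters/debug); the second is the C variable. */
    module_param_named(debug, example_debug, uint, S_IWUSR | S_IRUGO);

    /* MODULE_PARM_DESC() must use that same user-visible name, "debug". */
    MODULE_PARM_DESC(debug, "example debugging mask");

    static int __init example_init(void) { return 0; }
    static void __exit example_exit(void) { }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");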
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index cac4b364cd40..2b7a1187cb29 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h | |||
@@ -91,8 +91,10 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc) | |||
91 | * if ref count is zero, don't allow sharing (ioc is going away, it's | 91 | * if ref count is zero, don't allow sharing (ioc is going away, it's |
92 | * a race). | 92 | * a race). |
93 | */ | 93 | */ |
94 | if (ioc && atomic_inc_not_zero(&ioc->refcount)) | 94 | if (ioc && atomic_inc_not_zero(&ioc->refcount)) { |
95 | atomic_inc(&ioc->nr_tasks); | ||
95 | return ioc; | 96 | return ioc; |
97 | } | ||
96 | 98 | ||
97 | return NULL; | 99 | return NULL; |
98 | } | 100 | } |
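Note: the iocontext.h hunk fixes ioc_task_link(): when the reference count is successfully raised, nr_tasks must be bumped in the same step so that a task sharing the io_context is actually accounted for. A standalone C11 sketch of that take-both-counters-or-neither pattern, with simplified stand-ins for the kernel's atomic helpers:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Simplified stand-in for struct io_context: a lifetime refcount plus a
     * count of tasks currently sharing the context. */
    struct io_context {
        atomic_int refcount;
        atomic_int nr_tasks;
    };

    /* Model of atomic_inc_not_zero(): increment only if the current value is
     * non-zero; returns 1 on success, 0 if the object is already dying. */
    static int inc_not_zero(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old != 0)
            if (atomic_compare_exchange_weak(v, &old, old + 1))
                return 1;
        return 0;
    }

    /* The fixed pattern: the task count is raised together with the
     * reference, never on its own. */
    static struct io_context *ioc_task_link(struct io_context *ioc)
    {
        if (ioc && inc_not_zero(&ioc->refcount)) {
            atomic_fetch_add(&ioc->nr_tasks, 1);
            return ioc;
        }
        return NULL;
    }

    int main(void)
    {
        struct io_context ioc;

        atomic_init(&ioc.refcount, 1);
        atomic_init(&ioc.nr_tasks, 1);

        if (ioc_task_link(&ioc))
            printf("refcount=%d nr_tasks=%d\n",
                   atomic_load(&ioc.refcount), atomic_load(&ioc.nr_tasks));
        return 0;
    }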
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 20add65215af..db53defde5ee 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h | |||
@@ -129,6 +129,10 @@ struct ssb_device { | |||
129 | const struct ssb_bus_ops *ops; | 129 | const struct ssb_bus_ops *ops; |
130 | 130 | ||
131 | struct device *dev; | 131 | struct device *dev; |
132 | /* Pointer to the device that has to be used for | ||
133 | * any DMA related operation. */ | ||
134 | struct device *dma_dev; | ||
135 | |||
132 | struct ssb_bus *bus; | 136 | struct ssb_bus *bus; |
133 | struct ssb_device_id id; | 137 | struct ssb_device_id id; |
134 | 138 | ||
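Note: the new ssb_device.dma_dev field gives bus users a single struct device to hand to the DMA API, per the comment added above. A hedged kernel-style sketch of how a driver might use it; this is not code from the patch set, and the helper names and ring size are invented:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/ssb/ssb.h>

    #define MY_RING_SIZE 4096   /* illustrative descriptor-ring size */

    static void *my_alloc_ring(struct ssb_device *dev, dma_addr_t *dma_handle)
    {
        /* All DMA mappings go through dev->dma_dev, as the new comment in
         * struct ssb_device requires, rather than through dev->dev. */
        return dma_alloc_coherent(dev->dma_dev, MY_RING_SIZE, dma_handle,
                                  GFP_KERNEL);
    }

    static void my_free_ring(struct ssb_device *dev, void *ring,
                             dma_addr_t dma_handle)
    {
        dma_free_coherent(dev->dma_dev, MY_RING_SIZE, ring, dma_handle);
    }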
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 5119856017ab..bbb7d88a16b4 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -3841,8 +3841,28 @@ static void tcp_ofo_queue(struct sock *sk) | |||
3841 | } | 3841 | } |
3842 | } | 3842 | } |
3843 | 3843 | ||
3844 | static int tcp_prune_ofo_queue(struct sock *sk); | ||
3844 | static int tcp_prune_queue(struct sock *sk); | 3845 | static int tcp_prune_queue(struct sock *sk); |
3845 | 3846 | ||
3847 | static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) | ||
3848 | { | ||
3849 | if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || | ||
3850 | !sk_rmem_schedule(sk, size)) { | ||
3851 | |||
3852 | if (tcp_prune_queue(sk) < 0) | ||
3853 | return -1; | ||
3854 | |||
3855 | if (!sk_rmem_schedule(sk, size)) { | ||
3856 | if (!tcp_prune_ofo_queue(sk)) | ||
3857 | return -1; | ||
3858 | |||
3859 | if (!sk_rmem_schedule(sk, size)) | ||
3860 | return -1; | ||
3861 | } | ||
3862 | } | ||
3863 | return 0; | ||
3864 | } | ||
3865 | |||
3846 | static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) | 3866 | static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) |
3847 | { | 3867 | { |
3848 | struct tcphdr *th = tcp_hdr(skb); | 3868 | struct tcphdr *th = tcp_hdr(skb); |
@@ -3892,12 +3912,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) | |||
3892 | if (eaten <= 0) { | 3912 | if (eaten <= 0) { |
3893 | queue_and_out: | 3913 | queue_and_out: |
3894 | if (eaten < 0 && | 3914 | if (eaten < 0 && |
3895 | (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || | 3915 | tcp_try_rmem_schedule(sk, skb->truesize)) |
3896 | !sk_rmem_schedule(sk, skb->truesize))) { | 3916 | goto drop; |
3897 | if (tcp_prune_queue(sk) < 0 || | 3917 | |
3898 | !sk_rmem_schedule(sk, skb->truesize)) | ||
3899 | goto drop; | ||
3900 | } | ||
3901 | skb_set_owner_r(skb, sk); | 3918 | skb_set_owner_r(skb, sk); |
3902 | __skb_queue_tail(&sk->sk_receive_queue, skb); | 3919 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
3903 | } | 3920 | } |
@@ -3966,12 +3983,8 @@ drop: | |||
3966 | 3983 | ||
3967 | TCP_ECN_check_ce(tp, skb); | 3984 | TCP_ECN_check_ce(tp, skb); |
3968 | 3985 | ||
3969 | if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || | 3986 | if (tcp_try_rmem_schedule(sk, skb->truesize)) |
3970 | !sk_rmem_schedule(sk, skb->truesize)) { | 3987 | goto drop; |
3971 | if (tcp_prune_queue(sk) < 0 || | ||
3972 | !sk_rmem_schedule(sk, skb->truesize)) | ||
3973 | goto drop; | ||
3974 | } | ||
3975 | 3988 | ||
3976 | /* Disable header prediction. */ | 3989 | /* Disable header prediction. */ |
3977 | tp->pred_flags = 0; | 3990 | tp->pred_flags = 0; |
@@ -4198,6 +4211,32 @@ static void tcp_collapse_ofo_queue(struct sock *sk) | |||
4198 | } | 4211 | } |
4199 | } | 4212 | } |
4200 | 4213 | ||
4214 | /* | ||
4215 | * Purge the out-of-order queue. | ||
4216 | * Return true if queue was pruned. | ||
4217 | */ | ||
4218 | static int tcp_prune_ofo_queue(struct sock *sk) | ||
4219 | { | ||
4220 | struct tcp_sock *tp = tcp_sk(sk); | ||
4221 | int res = 0; | ||
4222 | |||
4223 | if (!skb_queue_empty(&tp->out_of_order_queue)) { | ||
4224 | NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED); | ||
4225 | __skb_queue_purge(&tp->out_of_order_queue); | ||
4226 | |||
4227 | /* Reset SACK state. A conforming SACK implementation will | ||
4228 | * do the same at a timeout based retransmit. When a connection | ||
4229 | * is in a sad state like this, we care only about integrity | ||
4230 | * of the connection not performance. | ||
4231 | */ | ||
4232 | if (tp->rx_opt.sack_ok) | ||
4233 | tcp_sack_reset(&tp->rx_opt); | ||
4234 | sk_mem_reclaim(sk); | ||
4235 | res = 1; | ||
4236 | } | ||
4237 | return res; | ||
4238 | } | ||
4239 | |||
4201 | /* Reduce allocated memory if we can, trying to get | 4240 | /* Reduce allocated memory if we can, trying to get |
4202 | * the socket within its memory limits again. | 4241 | * the socket within its memory limits again. |
4203 | * | 4242 | * |
@@ -4231,20 +4270,7 @@ static int tcp_prune_queue(struct sock *sk) | |||
4231 | /* Collapsing did not help, destructive actions follow. | 4270 | /* Collapsing did not help, destructive actions follow. |
4232 | * This must not ever occur. */ | 4271 | * This must not ever occur. */ |
4233 | 4272 | ||
4234 | /* First, purge the out_of_order queue. */ | 4273 | tcp_prune_ofo_queue(sk); |
4235 | if (!skb_queue_empty(&tp->out_of_order_queue)) { | ||
4236 | NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED); | ||
4237 | __skb_queue_purge(&tp->out_of_order_queue); | ||
4238 | |||
4239 | /* Reset SACK state. A conforming SACK implementation will | ||
4240 | * do the same at a timeout based retransmit. When a connection | ||
4241 | * is in a sad state like this, we care only about integrity | ||
4242 | * of the connection not performance. | ||
4243 | */ | ||
4244 | if (tcp_is_sack(tp)) | ||
4245 | tcp_sack_reset(&tp->rx_opt); | ||
4246 | sk_mem_reclaim(sk); | ||
4247 | } | ||
4248 | 4274 | ||
4249 | if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) | 4275 | if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) |
4250 | return 0; | 4276 | return 0; |
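Note: the tcp_input.c changes factor the duplicated over-rcvbuf/rmem-schedule test into tcp_try_rmem_schedule() and split the out-of-order-queue purge out of tcp_prune_queue() into tcp_prune_ofo_queue(), so the receive path can fall back to purging the ofo queue before giving up on a new segment. A standalone sketch of that decision order, with stub predicates standing in for the socket accounting and the two pruning helpers:

    #include <stdio.h>

    static int over_rcvbuf = 1;          /* pretend the receive buffer is full    */
    static int can_schedule_calls = 0;   /* scheduling succeeds on the 2nd try,
                                          * i.e. once the ofo queue is purged     */

    static int rmem_schedule(unsigned int size)
    {
        (void)size;
        return ++can_schedule_calls >= 2;
    }

    static int prune_queue(void)     { printf("collapse/prune receive queue\n"); return 0; }
    static int prune_ofo_queue(void) { printf("purge out-of-order queue\n");     return 1; }

    /* Mirrors the order of operations in tcp_try_rmem_schedule(): collapse
     * the receive queue first, purge the ofo queue only if that was not
     * enough, and drop the segment only when memory still cannot be found. */
    static int try_rmem_schedule(unsigned int size)
    {
        if (over_rcvbuf || !rmem_schedule(size)) {
            if (prune_queue() < 0)
                return -1;               /* pruning itself failed: drop      */
            if (!rmem_schedule(size)) {
                if (!prune_ofo_queue())
                    return -1;           /* nothing left to purge: drop      */
                if (!rmem_schedule(size))
                    return -1;           /* still no room: drop              */
            }
        }
        return 0;                        /* segment may be queued            */
    }

    int main(void)
    {
        printf("queue segment? %s\n",
               try_rmem_schedule(1500) == 0 ? "yes" : "drop");
        return 0;
    }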
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 535407d07fa4..a8a40aba846b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1050,12 +1050,9 @@ ieee80211_drop_unencrypted(struct ieee80211_txrx_data *rx) | |||
1050 | if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) && | 1050 | if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) && |
1051 | (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && | 1051 | (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && |
1052 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC && | 1052 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC && |
1053 | (rx->key || rx->sdata->drop_unencrypted))) { | 1053 | (rx->key || rx->sdata->drop_unencrypted))) |
1054 | if (net_ratelimit()) | ||
1055 | printk(KERN_DEBUG "%s: RX non-WEP frame, but expected " | ||
1056 | "encryption\n", rx->dev->name); | ||
1057 | return -EACCES; | 1054 | return -EACCES; |
1058 | } | 1055 | |
1059 | return 0; | 1056 | return 0; |
1060 | } | 1057 | } |
1061 | 1058 | ||
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index 140a0a8c6b02..4e10a95de832 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c | |||
@@ -92,7 +92,7 @@ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | |||
92 | rfkill_states[type] = state; | 92 | rfkill_states[type] = state; |
93 | 93 | ||
94 | list_for_each_entry(rfkill, &rfkill_list, node) { | 94 | list_for_each_entry(rfkill, &rfkill_list, node) { |
95 | if (!rfkill->user_claim) | 95 | if ((!rfkill->user_claim) && (rfkill->type == type)) |
96 | rfkill_toggle_radio(rfkill, state); | 96 | rfkill_toggle_radio(rfkill, state); |
97 | } | 97 | } |
98 | 98 | ||
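Note: the rfkill fix makes rfkill_switch_all() honour its type argument: only unclaimed switches of the requested type are toggled, instead of every unclaimed switch on the list. A standalone sketch of the corrected filter (the list, enum values and names below are invented for the example):

    #include <stdio.h>

    enum rfkill_type  { RFKILL_TYPE_WLAN, RFKILL_TYPE_BLUETOOTH };
    enum rfkill_state { RFKILL_STATE_OFF, RFKILL_STATE_ON };

    struct rfkill {
        const char *name;
        enum rfkill_type type;
        int user_claim;                 /* userspace owns this switch */
        enum rfkill_state state;
    };

    /* Corrected behaviour: skip switches that are user-claimed or that are
     * of a different type than the one being switched. */
    static void switch_all(struct rfkill *list, int n,
                           enum rfkill_type type, enum rfkill_state state)
    {
        for (int i = 0; i < n; i++)
            if (!list[i].user_claim && list[i].type == type)
                list[i].state = state;
    }

    int main(void)
    {
        struct rfkill devs[] = {
            { "wlan0", RFKILL_TYPE_WLAN,      0, RFKILL_STATE_ON },
            { "hci0",  RFKILL_TYPE_BLUETOOTH, 0, RFKILL_STATE_ON },
        };

        switch_all(devs, 2, RFKILL_TYPE_WLAN, RFKILL_STATE_OFF);
        for (int i = 0; i < 2; i++)
            printf("%s: %s\n", devs[i].name,
                   devs[i].state == RFKILL_STATE_ON ? "on" : "off");
        return 0;
    }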
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 2d0c29c837f7..4b2682feeedc 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -27,7 +27,7 @@ MODULE_ALIAS_NETPROTO(PF_RXRPC); | |||
27 | 27 | ||
28 | unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO; | 28 | unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO; |
29 | module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); | 29 | module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); |
30 | MODULE_PARM_DESC(rxrpc_debug, "RxRPC debugging mask"); | 30 | MODULE_PARM_DESC(debug, "RxRPC debugging mask"); |
31 | 31 | ||
32 | static int sysctl_rxrpc_max_qlen __read_mostly = 10; | 32 | static int sysctl_rxrpc_max_qlen __read_mostly = 10; |
33 | 33 | ||
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index d1c296f2d617..6d38a81b336d 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c | |||
@@ -31,7 +31,7 @@ | |||
31 | 31 | ||
32 | unsigned rxrpc_debug; | 32 | unsigned rxrpc_debug; |
33 | module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); | 33 | module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); |
34 | MODULE_PARM_DESC(rxrpc_debug, "rxkad debugging mask"); | 34 | MODULE_PARM_DESC(debug, "rxkad debugging mask"); |
35 | 35 | ||
36 | struct rxkad_level1_hdr { | 36 | struct rxkad_level1_hdr { |
37 | __be32 data_size; /* true data size (excluding padding) */ | 37 | __be32 data_size; /* true data size (excluding padding) */ |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 7e3c048ba9b1..fc8708a0a25e 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -386,6 +386,9 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | |||
386 | if (n == 0) | 386 | if (n == 0) |
387 | return; | 387 | return; |
388 | while ((parentid = sch->parent)) { | 388 | while ((parentid = sch->parent)) { |
389 | if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) | ||
390 | return; | ||
391 | |||
389 | sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid)); | 392 | sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid)); |
390 | if (sch == NULL) { | 393 | if (sch == NULL) { |
391 | WARN_ON(parentid != TC_H_ROOT); | 394 | WARN_ON(parentid != TC_H_ROOT); |