path: root/drivers/net/wireless/rt2x00/rt2x00queue.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2015-02-10 23:01:30 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-10 23:01:30 -0500
commit    c5ce28df0e7c01a1de23c36ebdefcd803f2b6cbb (patch)
tree      9830baf38832769e1cf621708889111bbe3c93df /drivers/net/wireless/rt2x00/rt2x00queue.c
parent    29afc4e9a408f2304e09c6dd0dbcfbd2356d0faa (diff)
parent    9399f0c51489ae8c16d6559b82a452fdc1895e91 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) More iov_iter conversion work from Al Viro.

    [ The "crypto: switch af_alg_make_sg() to iov_iter" commit was
      wrong, and this pull actually adds an extra commit on top of the
      branch I'm pulling to fix that up, so that the pre-merge state is
      ok. - Linus ]

 2) Various optimizations to the ipv4 forwarding information base trie
    lookup implementation.  From Alexander Duyck.

 3) Remove sock_iocb altogether, from Christoph Hellwig.

 4) Allow congestion control algorithm selection via routing metrics.
    From Daniel Borkmann.

 5) Make ipv4 uncached route list per-cpu, from Eric Dumazet.

 6) Handle rfs hash collisions more gracefully, also from Eric Dumazet.

 7) Add xmit_more support to r8169, e1000, and e1000e drivers.  From
    Florian Westphal.

 8) Transparent Ethernet Bridging support for GRO, from Jesse Gross.

 9) Add BPF packet actions to packet scheduler, from Jiri Pirko.

10) Add support for unique flow IDs to openvswitch, from Joe Stringer.

11) New NetCP ethernet driver, from Muralidharan Karicheri and Wingman
    Kwok.

12) More sanely handle out-of-window dupacks, which can result in
    serious ACK storms.  From Neal Cardwell.

13) Various rhashtable bug fixes and enhancements, from Herbert Xu,
    Patrick McHardy, and Thomas Graf.

14) Support xmit_more in be2net, from Sathya Perla.

15) Group Policy extensions for vxlan, from Thomas Graf.

16) Remove Checksum Offload support for vxlan, from Tom Herbert.

17) Like ipv4, support lockless transmit over ipv6 UDP sockets.  From
    Vlad Yasevich.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1494+1 commits)
  crypto: fix af_alg_make_sg() conversion to iov_iter
  ipv4: Namespecify TCP PMTU mechanism
  i40e: Fix for stats init function call in Rx setup
  tcp: don't include Fast Open option in SYN-ACK on pure SYN-data
  openvswitch: Only set TUNNEL_VXLAN_OPT if VXLAN-GBP metadata is set
  ipv6: Make __ipv6_select_ident static
  ipv6: Fix fragment id assignment on LE arches.
  bridge: Fix inability to add non-vlan fdb entry
  net: Mellanox: Delete unnecessary checks before the function call "vunmap"
  cxgb4: Add support in cxgb4 to get expansion rom version via ethtool
  ethtool: rename reserved1 memeber in ethtool_drvinfo for expansion ROM version
  net: dsa: Remove redundant phy_attach()
  IB/mlx4: Reset flow support for IB kernel ULPs
  IB/mlx4: Always use the correct port for mirrored multicast attachments
  net/bonding: Fix potential bad memory access during bonding events
  tipc: remove tipc_snprintf
  tipc: nl compat add noop and remove legacy nl framework
  tipc: convert legacy nl stats show to nl compat
  tipc: convert legacy nl net id get to nl compat
  tipc: convert legacy nl net id set to nl compat
  ...
Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 66ff36447b94..68b620b2462f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -85,7 +85,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
 	memset(skbdesc, 0, sizeof(*skbdesc));
 	skbdesc->entry = entry;
 
-	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
 		dma_addr_t skb_dma;
 
 		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
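Every hunk in this patch applies the same mechanical substitution: an open-coded test_bit(FLAG, &rt2x00dev->cap_flags) becomes a call to the rt2x00_has_cap_flag() helper. The helper itself lives in rt2x00.h and is not part of this diff; judging purely from the call sites, it is presumably a thin inline wrapper along these lines (a sketch, not the driver's verbatim definition):

/*
 * Sketch only: the real definition is in rt2x00.h, which this diff
 * does not touch.  The call sites imply a bool-returning wrapper
 * around the same test_bit() on the cap_flags bitmap.
 */
static inline bool rt2x00_has_cap_flag(struct rt2x00_dev *rt2x00dev,
				       enum rt2x00_capability_flags cap_flag)
{
	return test_bit(cap_flag, &rt2x00dev->cap_flags);
}

The payoff is at the call sites: callers no longer reach into cap_flags directly, and the result is a proper bool rather than a raw integer.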
@@ -198,7 +198,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
 
 	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 
-	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
+	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
 		/*
 		 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
 		 * seqno on retransmited data (non-QOS) frames. To workaround
@@ -484,7 +484,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
 	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
 	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
 
-	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
 		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
 						    sta, hwrate);
 	else
@@ -526,7 +526,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
 	/*
 	 * Map the skb to DMA.
 	 */
-	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
 	    rt2x00queue_map_txskb(entry))
 		return -ENOMEM;
 
@@ -646,7 +646,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	 */
 	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
 	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
-		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
+		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
 			rt2x00crypto_tx_copy_iv(skb, &txdesc);
 		else
 			rt2x00crypto_tx_remove_iv(skb, &txdesc);
@@ -660,9 +660,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	 * PCI and USB devices, while header alignment only is valid
 	 * for PCI devices.
 	 */
-	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
 		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
-	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
+	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
 		rt2x00queue_align_frame(skb);
 
 	/*
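The comment preserved in this hunk explains the split: L2 padding realigns the frame on both PCI and USB devices, while plain header alignment is only valid for PCI. As a toy illustration of the arithmetic (not the driver's actual rt2x00queue_insert_l2pad() implementation, which operates on an skb), the pad inserted between the 802.11 header and the payload is just enough to bring the payload back to a 4-byte boundary:

#include <stdio.h>

/* Toy model: bytes of L2 pad needed after a header of hdrlen bytes so
 * that the payload starts 4-byte aligned.  -hdrlen & 3 is the usual
 * idiom for "distance up to the next multiple of 4".
 */
static unsigned int l2pad_size(unsigned int hdrlen)
{
	return -hdrlen & 3;
}

int main(void)
{
	unsigned int hdrlen;

	for (hdrlen = 24; hdrlen <= 30; hdrlen += 2)
		printf("header %2u bytes -> %u pad byte(s)\n",
		       hdrlen, l2pad_size(hdrlen));
	return 0;
}

For a 26-byte QoS data header this yields 2 pad bytes; for a plain 24-byte header, none.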
@@ -1178,7 +1178,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
 	if (status)
 		goto exit;
 
-	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
 		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
 		if (status)
 			goto exit;
@@ -1234,7 +1234,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
 	struct data_queue *queue;
 	enum data_queue_qid qid;
 	unsigned int req_atim =
-		!!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
+		rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);
 
 	/*
 	 * We need the following queues:
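One detail in this last hunk: the !! double negation disappears along with the test_bit() call. test_bit() returns a plain int documented only as zero/non-zero, so callers storing the result in an integer variable traditionally normalize it to 0/1 with !!; a helper declared to return bool performs that normalization implicitly. A standalone toy (all names hypothetical stand-ins, not kernel APIs) showing the equivalence:

#include <stdbool.h>
#include <stdio.h>

static unsigned long cap_flags = 1UL << 5;

/* Stand-in for test_bit(): returns zero or non-zero, not necessarily 0/1. */
static int toy_test_bit(int nr, const unsigned long *addr)
{
	return *addr & (1UL << nr);
}

/* Stand-in for rt2x00_has_cap_flag(): the bool return type normalizes
 * the result implicitly, so callers need no !!.
 */
static bool toy_has_cap(int nr)
{
	return toy_test_bit(nr, &cap_flags);
}

int main(void)
{
	unsigned int old_style = !!toy_test_bit(5, &cap_flags);
	unsigned int new_style = toy_has_cap(5);

	printf("old=%u new=%u\n", old_style, new_style);	/* old=1 new=1 */
	return 0;
}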