Diffstat (limited to 'drivers/net/wireless/b43legacy/dma.c')

 -rw-r--r--  drivers/net/wireless/b43legacy/dma.c | 172
 1 file changed, 103 insertions(+), 69 deletions(-)
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 93ddc1cbcc8b..fb6819e40f38 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -393,13 +393,13 @@ dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
 	dma_addr_t dmaaddr;
 
 	if (tx)
-		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
-					 buf, len,
-					 DMA_TO_DEVICE);
+		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+					     buf, len,
+					     DMA_TO_DEVICE);
 	else
-		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
-					 buf, len,
-					 DMA_FROM_DEVICE);
+		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+					     buf, len,
+					     DMA_FROM_DEVICE);
 
 	return dmaaddr;
 }
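Throughout this patch the generic dma_* calls are replaced by ssb_dma_* wrappers that take the struct ssb_device instead of a raw struct device. A minimal sketch of the mapping wrapper, assuming the dispatch-by-bustype design of include/linux/ssb/ssb.h from this era (condensed, not the verbatim implementation):

    static inline
    dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
                                  size_t size, enum dma_data_direction dir)
    {
            switch (dev->bus->bustype) {
            case SSB_BUSTYPE_PCI:
                    /* SSB core behind a PCI host: map via the PCI device. */
                    return pci_map_single(dev->bus->host_pci, p, size, dir);
            case SSB_BUSTYPE_SSB:
                    /* Native SSB bus: use the generic DMA API directly. */
                    return dma_map_single(dev->dev, p, size, dir);
            default:
                    BUG();  /* bustype without DMA support */
            }
            return 0;
    }

This is why the call sites above drop the ring->dev->dev->dma_dev dereference: the wrapper picks the correct underlying device itself.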
@@ -411,13 +411,13 @@ void unmap_descbuffer(struct b43legacy_dmaring *ring,
 		      int tx)
 {
 	if (tx)
-		dma_unmap_single(ring->dev->dev->dma_dev,
-				 addr, len,
-				 DMA_TO_DEVICE);
+		ssb_dma_unmap_single(ring->dev->dev,
+				     addr, len,
+				     DMA_TO_DEVICE);
 	else
-		dma_unmap_single(ring->dev->dev->dma_dev,
-				 addr, len,
-				 DMA_FROM_DEVICE);
+		ssb_dma_unmap_single(ring->dev->dev,
+				     addr, len,
+				     DMA_FROM_DEVICE);
 }
 
 static inline
@@ -427,8 +427,8 @@ void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
 {
 	B43legacy_WARN_ON(ring->tx);
 
-	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
-				addr, len, DMA_FROM_DEVICE);
+	ssb_dma_sync_single_for_cpu(ring->dev->dev,
+				    addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -438,8 +438,8 @@ void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
 {
 	B43legacy_WARN_ON(ring->tx);
 
-	dma_sync_single_for_device(ring->dev->dev->dma_dev,
-				   addr, len, DMA_FROM_DEVICE);
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -458,10 +458,11 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
 
 static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 {
-	struct device *dma_dev = ring->dev->dev->dma_dev;
-
-	ring->descbase = dma_alloc_coherent(dma_dev, B43legacy_DMA_RINGMEMSIZE,
-					    &(ring->dmabase), GFP_KERNEL);
+	/* GFP flags must match the flags in free_ringmemory()! */
+	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
+						  B43legacy_DMA_RINGMEMSIZE,
+						  &(ring->dmabase),
+						  GFP_KERNEL);
 	if (!ring->descbase) {
 		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
 			     " failed\n");
@@ -474,10 +475,8 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 
 static void free_ringmemory(struct b43legacy_dmaring *ring)
 {
-	struct device *dma_dev = ring->dev->dev->dma_dev;
-
-	dma_free_coherent(dma_dev, B43legacy_DMA_RINGMEMSIZE,
-			  ring->descbase, ring->dmabase);
+	ssb_dma_free_consistent(ring->dev->dev, B43legacy_DMA_RINGMEMSIZE,
+				ring->descbase, ring->dmabase, GFP_KERNEL);
 }
 
 /* Reset the RX DMA channel */
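Note the extra GFP argument: unlike dma_free_coherent(), the SSB variant is passed the flags of the matching allocation, hence the "must match" comment added in alloc_ringmemory() above. The signature implied by the call site:

    void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
                                 void *vaddr, dma_addr_t dma_handle,
                                 gfp_t gfp);

Presumably this lets the SSB layer route the free through whichever backend (PCI consistent pool or generic coherent allocator) satisfied the original allocation.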
@@ -589,7 +588,7 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
 					size_t buffersize,
 					bool dma_to_device)
 {
-	if (unlikely(dma_mapping_error(addr)))
+	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
 		return 1;
 
 	switch (ring->type) {
@@ -860,6 +859,18 @@ static u64 supported_dma_mask(struct b43legacy_wldev *dev)
 	return DMA_30BIT_MASK;
 }
 
+static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
+{
+	if (dmamask == DMA_30BIT_MASK)
+		return B43legacy_DMA_30BIT;
+	if (dmamask == DMA_32BIT_MASK)
+		return B43legacy_DMA_32BIT;
+	if (dmamask == DMA_64BIT_MASK)
+		return B43legacy_DMA_64BIT;
+	B43legacy_WARN_ON(1);
+	return B43legacy_DMA_30BIT;
+}
+
 /* Main initialization function. */
 static
 struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
@@ -894,9 +905,9 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 		goto err_kfree_meta;
 
 	/* test for ability to dma to txhdr_cache */
-	dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
-				  sizeof(struct b43legacy_txhdr_fw3),
-				  DMA_TO_DEVICE);
+	dma_test = ssb_dma_map_single(dev->dev, ring->txhdr_cache,
+				      sizeof(struct b43legacy_txhdr_fw3),
+				      DMA_TO_DEVICE);
 
 	if (b43legacy_dma_mapping_error(ring, dma_test,
 					sizeof(struct b43legacy_txhdr_fw3), 1)) {
@@ -908,7 +919,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 		if (!ring->txhdr_cache)
 			goto err_kfree_meta;
 
-		dma_test = dma_map_single(dev->dev->dma_dev,
+		dma_test = ssb_dma_map_single(dev->dev,
 					      ring->txhdr_cache,
 					      sizeof(struct b43legacy_txhdr_fw3),
 					      DMA_TO_DEVICE);
@@ -918,9 +929,9 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 			goto err_kfree_txhdr_cache;
 		}
 
-		dma_unmap_single(dev->dev->dma_dev,
-				 dma_test, sizeof(struct b43legacy_txhdr_fw3),
-				 DMA_TO_DEVICE);
+		ssb_dma_unmap_single(dev->dev, dma_test,
+				     sizeof(struct b43legacy_txhdr_fw3),
+				     DMA_TO_DEVICE);
 	}
 
 	ring->nr_slots = nr_slots;
@@ -1019,6 +1030,43 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
 	dma->tx_ring0 = NULL;
 }
 
+static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
+{
+	u64 orig_mask = mask;
+	bool fallback = 0;
+	int err;
+
+	/* Try to set the DMA mask. If it fails, try falling back to a
+	 * lower mask, as we can always also support a lower one. */
+	while (1) {
+		err = ssb_dma_set_mask(dev->dev, mask);
+		if (!err)
+			break;
+		if (mask == DMA_64BIT_MASK) {
+			mask = DMA_32BIT_MASK;
+			fallback = 1;
+			continue;
+		}
+		if (mask == DMA_32BIT_MASK) {
+			mask = DMA_30BIT_MASK;
+			fallback = 1;
+			continue;
+		}
+		b43legacyerr(dev->wl, "The machine/kernel does not support "
+			     "the required %u-bit DMA mask\n",
+			     (unsigned int)dma_mask_to_engine_type(orig_mask));
+		return -EOPNOTSUPP;
+	}
+	if (fallback) {
+		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
+			      "bit\n",
+			      (unsigned int)dma_mask_to_engine_type(orig_mask),
+			      (unsigned int)dma_mask_to_engine_type(mask));
+	}
+
+	return 0;
+}
+
 int b43legacy_dma_init(struct b43legacy_wldev *dev)
 {
 	struct b43legacy_dma *dma = &dev->dma;
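The %u-bit messages above print dma_mask_to_engine_type() directly. That reads as a bit count only because the engine-type enum encodes the width in its values, as in (assumed from b43legacy's dma.h):

    enum b43legacy_dmatype {
            B43legacy_DMA_30BIT = 30,       /* 30-bit DMA engine */
            B43legacy_DMA_32BIT = 32,       /* 32-bit DMA engine */
            B43legacy_DMA_64BIT = 64,       /* 64-bit DMA engine */
    };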
@@ -1028,21 +1076,8 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
 	enum b43legacy_dmatype type;
 
 	dmamask = supported_dma_mask(dev);
-	switch (dmamask) {
-	default:
-		B43legacy_WARN_ON(1);
-	case DMA_30BIT_MASK:
-		type = B43legacy_DMA_30BIT;
-		break;
-	case DMA_32BIT_MASK:
-		type = B43legacy_DMA_32BIT;
-		break;
-	case DMA_64BIT_MASK:
-		type = B43legacy_DMA_64BIT;
-		break;
-	}
-
-	err = ssb_dma_set_mask(dev->dev, dmamask);
+	type = dma_mask_to_engine_type(dmamask);
+	err = b43legacy_dma_set_mask(dev, dmamask);
 	if (err) {
 #ifdef CONFIG_B43LEGACY_PIO
 		b43legacywarn(dev->wl, "DMA for this device not supported. "
@@ -1205,10 +1240,10 @@ struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
 }
 
 static int dma_tx_fragment(struct b43legacy_dmaring *ring,
-			   struct sk_buff *skb,
-			   struct ieee80211_tx_control *ctl)
+			   struct sk_buff *skb)
 {
 	const struct b43legacy_dma_ops *ops = ring->ops;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	u8 *header;
 	int slot, old_top_slot, old_used_slots;
 	int err;
@@ -1231,7 +1266,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 	header = &(ring->txhdr_cache[slot * sizeof(
 			       struct b43legacy_txhdr_fw3)]);
 	err = b43legacy_generate_txhdr(ring->dev, header,
-				       skb->data, skb->len, ctl,
+				       skb->data, skb->len, info,
 				       generate_cookie(ring, slot));
 	if (unlikely(err)) {
 		ring->current_slot = old_top_slot;
@@ -1255,7 +1290,6 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 	desc = ops->idx2desc(ring, slot, &meta);
 	memset(meta, 0, sizeof(*meta));
 
-	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
 	meta->skb = skb;
 	meta->is_last_fragment = 1;
 
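The removed memcpy() is the heart of the mac80211 API change in this patch: TX control data is no longer a separate structure to snapshot, it lives in the skb's own control buffer and is recovered later with IEEE80211_SKB_CB(). Roughly, per include/net/mac80211.h of this era:

    static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
    {
            /* The tx_info overlays skb->cb for the lifetime of the skb. */
            return (struct ieee80211_tx_info *)skb->cb;
    }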
@@ -1323,14 +1357,13 @@ int should_inject_overflow(struct b43legacy_dmaring *ring)
 }
 
 int b43legacy_dma_tx(struct b43legacy_wldev *dev,
-		     struct sk_buff *skb,
-		     struct ieee80211_tx_control *ctl)
+		     struct sk_buff *skb)
 {
 	struct b43legacy_dmaring *ring;
 	int err = 0;
 	unsigned long flags;
 
-	ring = priority_to_txring(dev, ctl->queue);
+	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
 	spin_lock_irqsave(&ring->lock, flags);
 	B43legacy_WARN_ON(!ring->tx);
 	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
@@ -1343,7 +1376,7 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
 	 * That would be a mac80211 bug. */
 	B43legacy_BUG_ON(ring->stopped);
 
-	err = dma_tx_fragment(ring, skb, ctl);
+	err = dma_tx_fragment(ring, skb);
 	if (unlikely(err == -ENOKEY)) {
 		/* Drop this packet, as we don't have the encryption key
 		 * anymore and must not transmit it unencrypted. */
@@ -1401,26 +1434,29 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
 				1);
 
 	if (meta->is_last_fragment) {
-		B43legacy_WARN_ON(!meta->skb);
+		struct ieee80211_tx_info *info;
+		BUG_ON(!meta->skb);
+		info = IEEE80211_SKB_CB(meta->skb);
 		/* Call back to inform the ieee80211 subsystem about the
 		 * status of the transmission.
 		 * Some fields of txstat are already filled in dma_tx().
 		 */
+
+		memset(&info->status, 0, sizeof(info->status));
+
 		if (status->acked) {
-			meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
+			info->flags |= IEEE80211_TX_STAT_ACK;
 		} else {
-			if (!(meta->txstat.control.flags
-			      & IEEE80211_TXCTL_NO_ACK))
-				meta->txstat.excessive_retries = 1;
+			if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+				info->status.excessive_retries = 1;
 		}
 		if (status->frame_count == 0) {
 			/* The frame was not transmitted at all. */
-			meta->txstat.retry_count = 0;
+			info->status.retry_count = 0;
 		} else
-			meta->txstat.retry_count = status->frame_count
+			info->status.retry_count = status->frame_count
 						   - 1;
-		ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
-					    &(meta->txstat));
+		ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
 		/* skb is freed by ieee80211_tx_status_irqsafe() */
 		meta->skb = NULL;
 	} else {
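Condensing the hunk above, the status-reporting sequence a driver follows after this conversion is (a restatement of the patch, not new API):

    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

    /* Clears only the status part; info->flags keeps the control
     * flags (e.g. IEEE80211_TX_CTL_NO_ACK) still tested below. */
    memset(&info->status, 0, sizeof(info->status));
    if (acked)
            info->flags |= IEEE80211_TX_STAT_ACK;
    info->status.retry_count = retries;
    ieee80211_tx_status_irqsafe(hw, skb);   /* consumes the skb */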
@@ -1455,18 +1491,16 @@ void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
 {
 	const int nr_queues = dev->wl->hw->queues;
 	struct b43legacy_dmaring *ring;
-	struct ieee80211_tx_queue_stats_data *data;
 	unsigned long flags;
 	int i;
 
 	for (i = 0; i < nr_queues; i++) {
-		data = &(stats->data[i]);
 		ring = priority_to_txring(dev, i);
 
 		spin_lock_irqsave(&ring->lock, flags);
-		data->len = ring->used_slots / SLOTS_PER_PACKET;
-		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
-		data->count = ring->nr_tx_packets;
+		stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
+		stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
+		stats[i].count = ring->nr_tx_packets;
 		spin_unlock_irqrestore(&ring->lock, flags);
 	}
 }
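This last hunk tracks a corresponding mac80211 change: the per-queue stats argument became a flat array, so the intermediate ieee80211_tx_queue_stats_data lookup disappears. The element layout assumed by the new stats[i] indexing:

    struct ieee80211_tx_queue_stats {
            unsigned int len;       /* packets currently queued */
            unsigned int limit;     /* queue length limit */
            unsigned int count;     /* total packets sent */
    };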