author	chas williams - CONTRACTOR <chas@cmf.nrl.navy.mil>	2015-01-16 08:57:21 -0500
committer	David S. Miller <davem@davemloft.net>	2015-01-18 00:28:41 -0500
commit	ede58ef28e105de94475b2b69fa069c9a2ce6933 (patch)
tree	f7a4a505d2aa25808605b6ff74c2aa7e058ffd39	/drivers/atm/he.c
parent	abee1cef7343197dd54e40df790ebbc8bd845d29 (diff)
atm: remove deprecated use of pci api
Signed-off-by: Chas Williams - CONTRACTOR <chas@cmf.nrl.navy.mil>
Signed-off-by: David S. Miller <davem@davemloft.net>
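
Note: the conversion follows the usual recipe for retiring the legacy pci_* DMA wrappers: the struct pci_dev argument becomes &pci_dev->dev, the coherent allocators take an explicit GFP flag, PCI_DMA_TODEVICE becomes DMA_TO_DEVICE, and GFP_DMA is dropped from pool allocations, presumably because the generic DMA API already returns memory the device's DMA mask can address. The sketch below only illustrates that mapping; example_setup_dma and its locals are hypothetical and not taken from he.c.

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper, for illustration only -- not part of he.c. */
static int example_setup_dma(struct pci_dev *pdev, struct sk_buff *skb)
{
	void *ring;
	dma_addr_t ring_dma, buf_dma;

	/* was: pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;

	/* was: pci_zalloc_consistent(pdev, PAGE_SIZE, &ring_dma) */
	ring = dma_zalloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
				   GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* was: pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE) */
	buf_dma = dma_map_single(&pdev->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buf_dma)) {
		dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
		return -EIO;
	}

	/* teardown mirrors the allocation calls */
	dma_unmap_single(&pdev->dev, buf_dma, skb->len, DMA_TO_DEVICE);
	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}

The pci_pool_* calls map onto dma_pool_* in the same way, with &pdev->dev replacing the pci_dev pointer, as the hunks below show.
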
Diffstat (limited to 'drivers/atm/he.c')
-rw-r--r--	drivers/atm/he.c	125
1 file changed, 64 insertions(+), 61 deletions(-)
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index c39702bc279d..93dca2e73bf5 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -359,7 +359,7 @@ static int he_init_one(struct pci_dev *pci_dev,
 
 	if (pci_enable_device(pci_dev))
 		return -EIO;
-	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
+	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
 		printk(KERN_WARNING "he: no suitable dma available\n");
 		err = -EIO;
 		goto init_one_failure;
@@ -533,9 +533,9 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
 
 static int he_init_tpdrq(struct he_dev *he_dev)
 {
-	he_dev->tpdrq_base = pci_zalloc_consistent(he_dev->pci_dev,
+	he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
 		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
-		&he_dev->tpdrq_phys);
+		&he_dev->tpdrq_phys, GFP_KERNEL);
 	if (he_dev->tpdrq_base == NULL) {
 		hprintk("failed to alloc tpdrq\n");
 		return -ENOMEM;
@@ -796,16 +796,16 @@ static int he_init_group(struct he_dev *he_dev, int group)
 	}
 
 	/* large buffer pool */
-	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
+	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
 					    CONFIG_RBPL_BUFSIZE, 64, 0);
 	if (he_dev->rbpl_pool == NULL) {
 		hprintk("unable to create rbpl pool\n");
 		goto out_free_rbpl_virt;
 	}
 
-	he_dev->rbpl_base = pci_zalloc_consistent(he_dev->pci_dev,
+	he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
 		CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
-		&he_dev->rbpl_phys);
+		&he_dev->rbpl_phys, GFP_KERNEL);
 	if (he_dev->rbpl_base == NULL) {
 		hprintk("failed to alloc rbpl_base\n");
 		goto out_destroy_rbpl_pool;
@@ -815,7 +815,7 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
 	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
 
-		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
+		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
 		if (!heb)
 			goto out_free_rbpl;
 		heb->mapping = mapping;
@@ -842,9 +842,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
 	/* rx buffer ready queue */
 
-	he_dev->rbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
+	he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
 		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
-		&he_dev->rbrq_phys);
+		&he_dev->rbrq_phys, GFP_KERNEL);
 	if (he_dev->rbrq_base == NULL) {
 		hprintk("failed to allocate rbrq\n");
 		goto out_free_rbpl;
@@ -866,9 +866,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
 	/* tx buffer ready queue */
 
-	he_dev->tbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
+	he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
 		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
-		&he_dev->tbrq_phys);
+		&he_dev->tbrq_phys, GFP_KERNEL);
 	if (he_dev->tbrq_base == NULL) {
 		hprintk("failed to allocate tbrq\n");
 		goto out_free_rbpq_base;
@@ -884,18 +884,18 @@ static int he_init_group(struct he_dev *he_dev, int group)
 	return 0;
 
 out_free_rbpq_base:
-	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
+	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
 			sizeof(struct he_rbrq), he_dev->rbrq_base,
 			he_dev->rbrq_phys);
 out_free_rbpl:
 	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
-		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 
-	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
+	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
 			sizeof(struct he_rbp), he_dev->rbpl_base,
 			he_dev->rbpl_phys);
 out_destroy_rbpl_pool:
-	pci_pool_destroy(he_dev->rbpl_pool);
+	dma_pool_destroy(he_dev->rbpl_pool);
 out_free_rbpl_virt:
 	kfree(he_dev->rbpl_virt);
 out_free_rbpl_table:
@@ -911,8 +911,11 @@ static int he_init_irq(struct he_dev *he_dev)
 	/* 2.9.3.5 tail offset for each interrupt queue is located after the
 	   end of the interrupt queue */
 
-	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
-			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
+	he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+					       (CONFIG_IRQ_SIZE + 1)
+					       * sizeof(struct he_irq),
+					       &he_dev->irq_phys,
+					       GFP_KERNEL);
 	if (he_dev->irq_base == NULL) {
 		hprintk("failed to allocate irq\n");
 		return -ENOMEM;
@@ -1419,10 +1422,10 @@ static int he_start(struct atm_dev *dev)
 
 	he_init_tpdrq(he_dev);
 
-	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
+	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
 					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
 	if (he_dev->tpd_pool == NULL) {
-		hprintk("unable to create tpd pci_pool\n");
+		hprintk("unable to create tpd dma_pool\n");
 		return -ENOMEM;
 	}
 
@@ -1459,9 +1462,9 @@ static int he_start(struct atm_dev *dev)
 
 	/* host status page */
 
-	he_dev->hsp = pci_zalloc_consistent(he_dev->pci_dev,
+	he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
 				sizeof(struct he_hsp),
-				&he_dev->hsp_phys);
+				&he_dev->hsp_phys, GFP_KERNEL);
 	if (he_dev->hsp == NULL) {
 		hprintk("failed to allocate host status page\n");
 		return -ENOMEM;
@@ -1558,41 +1561,41 @@ he_stop(struct he_dev *he_dev)
 	free_irq(he_dev->irq, he_dev);
 
 	if (he_dev->irq_base)
-		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
+		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
 			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
 
 	if (he_dev->hsp)
-		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
+		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
 			he_dev->hsp, he_dev->hsp_phys);
 
 	if (he_dev->rbpl_base) {
 		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
-			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 
-		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
+		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
 			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
 	}
 
 	kfree(he_dev->rbpl_virt);
 	kfree(he_dev->rbpl_table);
 
 	if (he_dev->rbpl_pool)
-		pci_pool_destroy(he_dev->rbpl_pool);
+		dma_pool_destroy(he_dev->rbpl_pool);
 
 	if (he_dev->rbrq_base)
-		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
 			he_dev->rbrq_base, he_dev->rbrq_phys);
 
 	if (he_dev->tbrq_base)
-		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
 			he_dev->tbrq_base, he_dev->tbrq_phys);
 
 	if (he_dev->tpdrq_base)
-		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
 			he_dev->tpdrq_base, he_dev->tpdrq_phys);
 
 	if (he_dev->tpd_pool)
-		pci_pool_destroy(he_dev->tpd_pool);
+		dma_pool_destroy(he_dev->tpd_pool);
 
 	if (he_dev->pci_dev) {
 		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
@@ -1610,7 +1613,7 @@ __alloc_tpd(struct he_dev *he_dev)
 	struct he_tpd *tpd;
 	dma_addr_t mapping;
 
-	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
+	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
 	if (tpd == NULL)
 		return NULL;
 
@@ -1681,7 +1684,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 		if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
 			clear_bit(i, he_dev->rbpl_table);
 			list_del(&heb->entry);
-			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 		}
 
 		goto next_rbrq_entry;
@@ -1774,7 +1777,7 @@ return_host_buffers:
 	++pdus_assembled;
 
 	list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
-		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 	INIT_LIST_HEAD(&he_vcc->buffers);
 	he_vcc->pdu_len = 0;
 
@@ -1843,10 +1846,10 @@ he_service_tbrq(struct he_dev *he_dev, int group)
 
 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
 			if (tpd->iovec[slot].addr)
-				pci_unmap_single(he_dev->pci_dev,
+				dma_unmap_single(&he_dev->pci_dev->dev,
 					tpd->iovec[slot].addr,
 					tpd->iovec[slot].len & TPD_LEN_MASK,
-					PCI_DMA_TODEVICE);
+					DMA_TO_DEVICE);
 			if (tpd->iovec[slot].len & TPD_LST)
 				break;
 
@@ -1861,7 +1864,7 @@ he_service_tbrq(struct he_dev *he_dev, int group)
 
 next_tbrq_entry:
 		if (tpd)
-			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
 		he_dev->tbrq_head = (struct he_tbrq *)
 				((unsigned long) he_dev->tbrq_base |
 					TBRQ_MASK(he_dev->tbrq_head + 1));
@@ -1905,7 +1908,7 @@ he_service_rbpl(struct he_dev *he_dev, int group)
 		}
 		he_dev->rbpl_hint = i + 1;
 
-		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
+		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
 		if (!heb)
 			break;
 		heb->mapping = mapping;
@@ -2084,10 +2087,10 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
 		 */
 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
 			if (tpd->iovec[slot].addr)
-				pci_unmap_single(he_dev->pci_dev,
+				dma_unmap_single(&he_dev->pci_dev->dev,
 					tpd->iovec[slot].addr,
 					tpd->iovec[slot].len & TPD_LEN_MASK,
-					PCI_DMA_TODEVICE);
+					DMA_TO_DEVICE);
 		}
 		if (tpd->skb) {
 			if (tpd->vcc->pop)
@@ -2096,7 +2099,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
 				dev_kfree_skb_any(tpd->skb);
 			atomic_inc(&tpd->vcc->stats->tx_err);
 		}
-		pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+		dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
 		return;
 	}
 }
@@ -2550,8 +2553,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 	}
 
 #ifdef USE_SCATTERGATHER
-	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
-		skb_headlen(skb), PCI_DMA_TODEVICE);
+	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
+		skb_headlen(skb), DMA_TO_DEVICE);
 	tpd->iovec[slot].len = skb_headlen(skb);
 	++slot;
 
@@ -2579,9 +2582,9 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 			slot = 0;
 		}
 
-		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
+		tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
 			(void *) page_address(frag->page) + frag->page_offset,
-			frag->size, PCI_DMA_TODEVICE);
+			frag->size, DMA_TO_DEVICE);
 		tpd->iovec[slot].len = frag->size;
 		++slot;
 
@@ -2589,7 +2592,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 
 	tpd->iovec[slot - 1].len |= TPD_LST;
 #else
-	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
 	tpd->length0 = skb->len | TPD_LST;
 #endif
 	tpd->status |= TPD_INT;