author     chas williams - CONTRACTOR <chas@cmf.nrl.navy.mil>	2010-05-29 05:04:25 -0400
committer  David S. Miller <davem@davemloft.net>	2010-05-31 03:27:46 -0400
commit     864a3ff635fa73dae15455524e048da5c89352ac (patch)
tree       61b9abd5a99ce1a944224efee8090b85327dfb1b /drivers/atm
parent     098fde114bf6655f4b75d71dbea208d039fc1de3 (diff)
atm: [nicstar] remove virt_to_bus() and support 64-bit platforms
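The NICStAR only accepts 32-bit buffer handles and bus addresses, so kernel
pointers can no longer be handed to the hardware directly on 64-bit hosts.
Receive buffers are therefore mapped through the PCI DMA API and tracked in a
per-card idr, which hands out small integer ids that fit in the 32-bit handle
words. A rough, illustrative fragment of that scheme (placeholder names only;
the real code lives in push_rxbufs()/dequeue_rx() and uses the NS_PRV_DMA()
and NS_PRV_BUFTYPE() helpers added to nicstar.h):

	/* push: give the card a DMA address plus a small integer handle */
	addr = pci_map_single(card->pcidev, skb->data, NS_SMSKBSIZE,
			      PCI_DMA_FROMDEVICE);
	if (idr_pre_get(&card->idr, GFP_ATOMIC) &&
	    idr_get_new_above(&card->idr, skb, 0, &id) == 0) {
		writel(addr, card->membase + DR1);	/* bus address */
		writel(id, card->membase + DR0);	/* 32-bit handle */
	}

	/* receive: turn the handle from the RSQ entry back into the skb */
	skb = idr_find(&card->idr, id);
	idr_remove(&card->idr, id);
	pci_unmap_single(card->pcidev, addr, NS_SMSKBSIZE, PCI_DMA_FROMDEVICE);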
Signed-off-by: Chas Williams - CONTRACTOR <chas@cmf.nrl.navy.mil>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/atm')
-rw-r--r--	drivers/atm/Kconfig	|   2
-rw-r--r--	drivers/atm/nicstar.c	| 382
-rw-r--r--	drivers/atm/nicstar.h	|  28
3 files changed, 237 insertions(+), 175 deletions(-)
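The status and segmentation queues (tsq_info, rsq_info and scq_info each gain
a dma member below) also move from kmalloc() plus virt_to_bus() to coherent
DMA allocations, so the device-visible address comes straight from the
allocator. A minimal sketch of that pattern, with placeholder names (pdev and
membase stand in for card->pcidev and card->membase):

	void *org;
	dma_addr_t dma;
	ns_tsi *base;

	org = pci_alloc_consistent(pdev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, &dma);
	if (org == NULL)
		return -ENOMEM;
	base = PTR_ALIGN(org, NS_TSQ_ALIGNMENT);		/* CPU view  */
	writel(ALIGN(dma, NS_TSQ_ALIGNMENT), membase + TSQB);	/* card view */
	/* ... */
	pci_free_consistent(pdev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, org, dma);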
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index f1a0a00b3b07..be7461c9a87e 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -177,7 +177,7 @@ config ATM_ZATM_DEBUG
 
 config ATM_NICSTAR
 	tristate "IDT 77201 (NICStAR) (ForeRunnerLE)"
-	depends on PCI && !64BIT && VIRT_TO_BUS
+	depends on PCI
 	help
 	  The NICStAR chipset family is used in a large number of ATM NICs for
 	  25 and for 155 Mbps, including IDT cards and the Fore ForeRunnerLE
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index a07b6b7fc7d8..59876c66a92a 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -38,6 +38,7 @@
 #include <linux/atmdev.h>
 #include <linux/atm.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/delay.h>
@@ -47,6 +48,7 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
+#include <linux/idr.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
@@ -58,10 +60,6 @@
 #include "idt77105.h"
 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
 
-#if BITS_PER_LONG != 32
-#  error FIXME: this driver requires a 32-bit platform
-#endif
-
 /* Additional code */
 
 #include "nicstarmac.c"
@@ -109,17 +107,15 @@
 
 #define NS_DELAY mdelay(1)
 
-#define ALIGN_BUS_ADDR(addr, alignment) \
-	((((u32) (addr)) + (((u32) (alignment)) - 1)) & ~(((u32) (alignment)) - 1))
-#define ALIGN_ADDRESS(addr, alignment) \
-	bus_to_virt(ALIGN_BUS_ADDR(virt_to_bus(addr), alignment))
-
-#undef CEIL
+#define PTR_DIFF(a, b)	((u32)((unsigned long)(a) - (unsigned long)(b)))
 
 #ifndef ATM_SKB
 #define ATM_SKB(s) (&(s)->atm)
 #endif
 
+#define scq_virt_to_bus(scq, p) \
+		(scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
+
 /* Function declarations */
 
 static u32 ns_read_sram(ns_dev * card, u32 sram_address);
@@ -127,8 +123,8 @@ static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
 			  int count);
 static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
 static void __devinit ns_init_card_error(ns_dev * card, int error);
-static scq_info *get_scq(int size, u32 scd);
-static void free_scq(scq_info * scq, struct atm_vcc *vcc);
+static scq_info *get_scq(ns_dev *card, int size, u32 scd);
+static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
 static void push_rxbufs(ns_dev *, struct sk_buff *);
 static irqreturn_t ns_irq_handler(int irq, void *dev_id);
 static int ns_open(struct atm_vcc *vcc);
@@ -153,7 +149,9 @@ static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb);
 static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb);
 static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page);
 static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
+#ifdef EXTRA_DEBUG
 static void which_list(ns_dev * card, struct sk_buff *skb);
+#endif
 static void ns_poll(unsigned long arg);
 static int ns_parse_mac(char *mac, unsigned char *esi);
 static short ns_h2i(char c);
@@ -249,13 +247,17 @@ static void __devexit nicstar_remove_one(struct pci_dev *pcidev)
 			dev_kfree_skb_any(lb);
 	while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
 		dev_kfree_skb_any(sb);
-	free_scq(card->scq0, NULL);
+	free_scq(card, card->scq0, NULL);
 	for (j = 0; j < NS_FRSCD_NUM; j++) {
 		if (card->scd2vc[j] != NULL)
-			free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
+			free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
 	}
-	kfree(card->rsq.org);
-	kfree(card->tsq.org);
+	idr_remove_all(&card->idr);
+	idr_destroy(&card->idr);
+	pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+			    card->rsq.org, card->rsq.dma);
+	pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+			    card->tsq.org, card->tsq.dma);
 	free_irq(card->pcidev->irq, card);
 	iounmap(card->membase);
 	kfree(card);
@@ -371,6 +373,14 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 		ns_init_card_error(card, error);
 		return error;
 	}
+	if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) ||
+	    (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) {
+		printk(KERN_WARNING
+		       "nicstar%d: No suitable DMA available.\n", i);
+		error = 2;
+		ns_init_card_error(card, error);
+		return error;
+	}
 
 	if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) {
 		printk
@@ -397,7 +407,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 		ns_init_card_error(card, error);
 		return error;
 	}
-	PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase);
+	PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase);
 
 	pci_set_master(pcidev);
 
@@ -528,54 +538,54 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 	writel(0x00000000, card->membase + VPM);
 
 	/* Initialize TSQ */
-	card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL);
+	card->tsq.org = pci_alloc_consistent(card->pcidev,
+					     NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+					     &card->tsq.dma);
 	if (card->tsq.org == NULL) {
 		printk("nicstar%d: can't allocate TSQ.\n", i);
 		error = 10;
 		ns_init_card_error(card, error);
 		return error;
 	}
-	card->tsq.base =
-	    (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT);
+	card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT);
 	card->tsq.next = card->tsq.base;
 	card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
 	for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
 		ns_tsi_init(card->tsq.base + j);
 	writel(0x00000000, card->membase + TSQH);
-	writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB);
-	PRINTK("nicstar%d: TSQ base at 0x%x 0x%x 0x%x.\n", i,
-	       (u32) card->tsq.base, (u32) virt_to_bus(card->tsq.base),
-	       readl(card->membase + TSQB));
+	writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB);
+	PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);
 
 	/* Initialize RSQ */
-	card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL);
+	card->rsq.org = pci_alloc_consistent(card->pcidev,
+					     NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+					     &card->rsq.dma);
 	if (card->rsq.org == NULL) {
 		printk("nicstar%d: can't allocate RSQ.\n", i);
 		error = 11;
 		ns_init_card_error(card, error);
 		return error;
 	}
-	card->rsq.base =
-	    (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT);
+	card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT);
 	card->rsq.next = card->rsq.base;
 	card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
 	for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
 		ns_rsqe_init(card->rsq.base + j);
 	writel(0x00000000, card->membase + RSQH);
-	writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB);
-	PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base);
+	writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB);
+	PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base);
 
 	/* Initialize SCQ0, the only VBR SCQ used */
 	card->scq1 = NULL;
 	card->scq2 = NULL;
-	card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0);
+	card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0);
 	if (card->scq0 == NULL) {
 		printk("nicstar%d: can't get SCQ0.\n", i);
 		error = 12;
 		ns_init_card_error(card, error);
 		return error;
 	}
-	u32d[0] = (u32) virt_to_bus(card->scq0->base);
+	u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base);
 	u32d[1] = (u32) 0x00000000;
 	u32d[2] = (u32) 0xffffffff;
 	u32d[3] = (u32) 0x00000000;
@@ -583,8 +593,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 	ns_write_sram(card, NS_VRSCD1, u32d, 4);	/* These last two won't be used */
 	ns_write_sram(card, NS_VRSCD2, u32d, 4);	/* but are initialized, just in case... */
 	card->scq0->scd = NS_VRSCD0;
-	PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i,
-	       (u32) card->scq0->base);
+	PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base);
 
 	/* Initialize TSTs */
 	card->tst_addr = NS_TST0;
@@ -640,6 +649,8 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 
 	card->efbie = 1;	/* To prevent push_rxbufs from enabling the interrupt */
 
+	idr_init(&card->idr);
+
 	/* Pre-allocate some huge buffers */
 	skb_queue_head_init(&card->hbpool.queue);
 	card->hbpool.count = 0;
@@ -654,7 +665,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 			ns_init_card_error(card, error);
 			return error;
 		}
-		NS_SKB_CB(hb)->buf_type = BUF_NONE;
+		NS_PRV_BUFTYPE(hb) = BUF_NONE;
 		skb_queue_tail(&card->hbpool.queue, hb);
 		card->hbpool.count++;
 	}
@@ -673,14 +684,15 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 			ns_init_card_error(card, error);
 			return error;
 		}
-		NS_SKB_CB(lb)->buf_type = BUF_LG;
+		NS_PRV_BUFTYPE(lb) = BUF_LG;
 		skb_queue_tail(&card->lbpool.queue, lb);
 		skb_reserve(lb, NS_SMBUFSIZE);
 		push_rxbufs(card, lb);
 		/* Due to the implementation of push_rxbufs() this is 1, not 0 */
 		if (j == 1) {
 			card->rcbuf = lb;
-			card->rawch = (u32) virt_to_bus(lb->data);
+			card->rawcell = (struct ns_rcqe *) lb->data;
+			card->rawch = NS_PRV_DMA(lb);
 		}
 	}
 	/* Test for strange behaviour which leads to crashes */
@@ -708,7 +720,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 			ns_init_card_error(card, error);
 			return error;
 		}
-		NS_SKB_CB(sb)->buf_type = BUF_SM;
+		NS_PRV_BUFTYPE(sb) = BUF_SM;
 		skb_queue_tail(&card->sbpool.queue, sb);
 		skb_reserve(sb, NS_AAL0_HEADER);
 		push_rxbufs(card, sb);
@@ -738,7 +750,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 			ns_init_card_error(card, error);
 			return error;
 		}
-		NS_SKB_CB(iovb)->buf_type = BUF_NONE;
+		NS_PRV_BUFTYPE(iovb) = BUF_NONE;
 		skb_queue_tail(&card->iovpool.queue, iovb);
 		card->iovpool.count++;
 	}
@@ -825,7 +837,7 @@ static void __devinit ns_init_card_error(ns_dev * card, int error)
 		struct sk_buff *sb;
 		while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
 			dev_kfree_skb_any(sb);
-		free_scq(card->scq0, NULL);
+		free_scq(card, card->scq0, NULL);
 	}
 	if (error >= 14) {
 		struct sk_buff *lb;
@@ -855,7 +867,7 @@ static void __devinit ns_init_card_error(ns_dev * card, int error)
 	}
 }
 
-static scq_info *get_scq(int size, u32 scd)
+static scq_info *get_scq(ns_dev *card, int size, u32 scd)
 {
 	scq_info *scq;
 	int i;
@@ -864,22 +876,22 @@ static scq_info *get_scq(int size, u32 scd)
 		return NULL;
 
 	scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
-	if (scq == NULL)
+	if (!scq)
 		return NULL;
-	scq->org = kmalloc(2 * size, GFP_KERNEL);
-	if (scq->org == NULL) {
+	scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma);
+	if (!scq->org) {
 		kfree(scq);
 		return NULL;
 	}
 	scq->skb = kmalloc(sizeof(struct sk_buff *) *
 			   (size / NS_SCQE_SIZE), GFP_KERNEL);
-	if (scq->skb == NULL) {
+	if (!scq->skb) {
 		kfree(scq->org);
 		kfree(scq);
 		return NULL;
 	}
 	scq->num_entries = size / NS_SCQE_SIZE;
-	scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size);
+	scq->base = PTR_ALIGN(scq->org, size);
 	scq->next = scq->base;
 	scq->last = scq->base + (scq->num_entries - 1);
 	scq->tail = scq->last;
@@ -897,7 +909,7 @@ static scq_info *get_scq(int size, u32 scd)
 }
 
 /* For variable rate SCQ vcc must be NULL */
-static void free_scq(scq_info * scq, struct atm_vcc *vcc)
+static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
 {
 	int i;
 
@@ -928,7 +940,10 @@ static void free_scq(scq_info * scq, struct atm_vcc *vcc)
 		}
 	}
 	kfree(scq->skb);
-	kfree(scq->org);
+	pci_free_consistent(card->pcidev,
+			    2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
+				 VBR_SCQSIZE : CBR_SCQSIZE),
+			    scq->org, scq->dma);
 	kfree(scq);
 }
 
@@ -936,16 +951,23 @@ static void free_scq(scq_info * scq, struct atm_vcc *vcc)
    or large buffer(s) cast to u32. */
 static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
 {
-	struct ns_skb_cb *cb = NS_SKB_CB(skb);
-	u32 handle1, addr1;
-	u32 handle2, addr2;
+	struct sk_buff *handle1, *handle2;
+	u32 id1 = 0, id2 = 0;
+	u32 addr1, addr2;
 	u32 stat;
 	unsigned long flags;
+	int err;
 
 	/* *BARF* */
-	handle2 = addr2 = 0;
-	handle1 = (u32) skb;
-	addr1 = (u32) virt_to_bus(skb->data);
+	handle2 = NULL;
+	addr2 = 0;
+	handle1 = skb;
+	addr1 = pci_map_single(card->pcidev,
+			       skb->data,
+			       (NS_PRV_BUFTYPE(skb) == BUF_SM
+				? NS_SMSKBSIZE : NS_LGSKBSIZE),
+			       PCI_DMA_TODEVICE);
+	NS_PRV_DMA(skb) = addr1;	/* save so we can unmap later */
 
 #ifdef GENERAL_DEBUG
 	if (!addr1)
@@ -956,7 +978,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
 	stat = readl(card->membase + STAT);
 	card->sbfqc = ns_stat_sfbqc_get(stat);
 	card->lbfqc = ns_stat_lfbqc_get(stat);
-	if (cb->buf_type == BUF_SM) {
+	if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
 		if (!addr2) {
 			if (card->sm_addr) {
 				addr2 = card->sm_addr;
@@ -986,47 +1008,60 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
 	}
 
 	if (addr2) {
-		if (cb->buf_type == BUF_SM) {
+		if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
 			if (card->sbfqc >= card->sbnr.max) {
-				skb_unlink((struct sk_buff *)handle1,
-					   &card->sbpool.queue);
-				dev_kfree_skb_any((struct sk_buff *)handle1);
-				skb_unlink((struct sk_buff *)handle2,
-					   &card->sbpool.queue);
-				dev_kfree_skb_any((struct sk_buff *)handle2);
+				skb_unlink(handle1, &card->sbpool.queue);
+				dev_kfree_skb_any(handle1);
+				skb_unlink(handle2, &card->sbpool.queue);
+				dev_kfree_skb_any(handle2);
 				return;
 			} else
 				card->sbfqc += 2;
 		} else {	/* (buf_type == BUF_LG) */
 
 			if (card->lbfqc >= card->lbnr.max) {
-				skb_unlink((struct sk_buff *)handle1,
-					   &card->lbpool.queue);
-				dev_kfree_skb_any((struct sk_buff *)handle1);
-				skb_unlink((struct sk_buff *)handle2,
-					   &card->lbpool.queue);
-				dev_kfree_skb_any((struct sk_buff *)handle2);
+				skb_unlink(handle1, &card->lbpool.queue);
+				dev_kfree_skb_any(handle1);
+				skb_unlink(handle2, &card->lbpool.queue);
+				dev_kfree_skb_any(handle2);
 				return;
 			} else
 				card->lbfqc += 2;
 		}
 
-		spin_lock_irqsave(&card->res_lock, flags);
+		do {
+			if (!idr_pre_get(&card->idr, GFP_ATOMIC)) {
+				printk(KERN_ERR
+				       "nicstar%d: no free memory for idr\n",
+				       card->index);
+				goto out;
+			}
+
+			if (!id1)
+				err = idr_get_new_above(&card->idr, handle1, 0, &id1);
+
+			if (!id2 && err == 0)
+				err = idr_get_new_above(&card->idr, handle2, 0, &id2);
+
+		} while (err == -EAGAIN);
 
+		if (err)
+			goto out;
+
+		spin_lock_irqsave(&card->res_lock, flags);
 		while (CMD_BUSY(card)) ;
 		writel(addr2, card->membase + DR3);
-		writel(handle2, card->membase + DR2);
+		writel(id2, card->membase + DR2);
 		writel(addr1, card->membase + DR1);
-		writel(handle1, card->membase + DR0);
-		writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type,
+		writel(id1, card->membase + DR0);
+		writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb),
 		       card->membase + CMD);
-
 		spin_unlock_irqrestore(&card->res_lock, flags);
 
 		XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n",
 			card->index,
-			(cb->buf_type == BUF_SM ? "small" : "large"), addr1,
-			addr2);
+			(NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"),
+			addr1, addr2);
 	}
 
 	if (!card->efbie && card->sbfqc >= card->sbnr.min &&
@@ -1036,6 +1071,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
 		       card->membase + CFG);
 	}
 
+out:
 	return;
 }
 
@@ -1131,21 +1167,21 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id)
 		   next interrupt. As this preliminary support is only meant to
 		   avoid buffer leakage, this is not an issue. */
 		while (readl(card->membase + RAWCT) != card->rawch) {
-			ns_rcqe *rawcell;
 
-			rawcell = (ns_rcqe *) bus_to_virt(card->rawch);
-			if (ns_rcqe_islast(rawcell)) {
+			if (ns_rcqe_islast(card->rawcell)) {
 				struct sk_buff *oldbuf;
 
 				oldbuf = card->rcbuf;
-				card->rcbuf =
-				    (struct sk_buff *)
-				    ns_rcqe_nextbufhandle(rawcell);
-				card->rawch =
-				    (u32) virt_to_bus(card->rcbuf->data);
+				card->rcbuf = idr_find(&card->idr,
+						       ns_rcqe_nextbufhandle(card->rawcell));
+				card->rawch = NS_PRV_DMA(card->rcbuf);
+				card->rawcell = (struct ns_rcqe *)
+						card->rcbuf->data;
 				recycle_rx_buf(card, oldbuf);
-			} else
+			} else {
 				card->rawch += NS_RCQE_SIZE;
+				card->rawcell++;
+			}
 		}
 	}
 
@@ -1165,7 +1201,7 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id)
 				card->efbie = 0;
 				break;
 			}
-			NS_SKB_CB(sb)->buf_type = BUF_SM;
+			NS_PRV_BUFTYPE(sb) = BUF_SM;
 			skb_queue_tail(&card->sbpool.queue, sb);
 			skb_reserve(sb, NS_AAL0_HEADER);
 			push_rxbufs(card, sb);
@@ -1190,7 +1226,7 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id)
 				card->efbie = 0;
 				break;
 			}
-			NS_SKB_CB(lb)->buf_type = BUF_LG;
+			NS_PRV_BUFTYPE(lb) = BUF_LG;
 			skb_queue_tail(&card->lbpool.queue, lb);
 			skb_reserve(lb, NS_SMBUFSIZE);
 			push_rxbufs(card, lb);
@@ -1338,7 +1374,7 @@ static int ns_open(struct atm_vcc *vcc)
 
 		vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
 
-		scq = get_scq(CBR_SCQSIZE, vc->cbr_scd);
+		scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd);
 		if (scq == NULL) {
 			PRINTK("nicstar%d: can't get fixed rate SCQ.\n",
 			       card->index);
@@ -1349,7 +1385,7 @@ static int ns_open(struct atm_vcc *vcc)
 			return -ENOMEM;
 		}
 		vc->scq = scq;
-		u32d[0] = (u32) virt_to_bus(scq->base);
+		u32d[0] = scq_virt_to_bus(scq, scq->base);
 		u32d[1] = (u32) 0x00000000;
 		u32d[2] = (u32) 0xffffffff;
 		u32d[3] = (u32) 0x00000000;
@@ -1434,9 +1470,8 @@ static void ns_close(struct atm_vcc *vcc)
 			       card->index);
 			iovb = vc->rx_iov;
 			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
-					      NS_SKB(iovb)->iovcnt);
-			NS_SKB(iovb)->iovcnt = 0;
-			NS_SKB(iovb)->vcc = NULL;
+					      NS_PRV_IOVCNT(iovb));
+			NS_PRV_IOVCNT(iovb) = 0;
 			spin_lock_irqsave(&card->int_lock, flags);
 			recycle_iov_buf(card, iovb);
 			spin_unlock_irqrestore(&card->int_lock, flags);
@@ -1487,7 +1522,7 @@ static void ns_close(struct atm_vcc *vcc)
 				scq->next = scq->base;
 			else
 				scq->next++;
-			data = (u32) virt_to_bus(scq->next);
+			data = scq_virt_to_bus(scq, scq->next);
 			ns_write_sram(card, scq->scd, &data, 1);
 		}
 		spin_unlock_irqrestore(&scq->lock, flags);
@@ -1506,7 +1541,7 @@ static void ns_close(struct atm_vcc *vcc)
 		}
 
 		card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
-		free_scq(vc->scq, vcc);
+		free_scq(card, vc->scq, vcc);
 	}
 
 	/* remove all references to vcc before deleting it */
@@ -1539,13 +1574,13 @@ static void ns_close(struct atm_vcc *vcc)
 		cfg = readl(card->membase + CFG);
 		printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg);
 		printk
-		    ("TSQ: base = 0x%08X next = 0x%08X last = 0x%08X TSQT = 0x%08X \n",
-		     (u32) card->tsq.base, (u32) card->tsq.next,
-		     (u32) card->tsq.last, readl(card->membase + TSQT));
+		    ("TSQ: base = 0x%p next = 0x%p last = 0x%p TSQT = 0x%08X \n",
+		     card->tsq.base, card->tsq.next,
+		     card->tsq.last, readl(card->membase + TSQT));
 		printk
-		    ("RSQ: base = 0x%08X next = 0x%08X last = 0x%08X RSQT = 0x%08X \n",
-		     (u32) card->rsq.base, (u32) card->rsq.next,
-		     (u32) card->rsq.last, readl(card->membase + RSQT));
+		    ("RSQ: base = 0x%p next = 0x%p last = 0x%p RSQT = 0x%08X \n",
+		     card->rsq.base, card->rsq.next,
+		     card->rsq.last, readl(card->membase + RSQT));
 		printk("Empty free buffer queue interrupt %s \n",
 		       card->efbie ? "enabled" : "disabled");
 		printk("SBCNT = %d count = %d LBCNT = %d count = %d \n",
@@ -1651,11 +1686,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
 
 	ATM_SKB(skb)->vcc = vcc;
 
+	NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
+					 skb->len, PCI_DMA_TODEVICE);
+
 	if (vcc->qos.aal == ATM_AAL5) {
 		buflen = (skb->len + 47 + 8) / 48 * 48;	/* Multiple of 48 */
 		flags = NS_TBD_AAL5;
-		scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data));
-		scqe.word_3 = cpu_to_le32((u32) skb->len);
+		scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb));
+		scqe.word_3 = cpu_to_le32(skb->len);
 		scqe.word_4 =
 		    ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
 				    ATM_SKB(skb)->
@@ -1665,8 +1703,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
 
 		buflen = ATM_CELL_PAYLOAD;	/* i.e., 48 bytes */
 		flags = NS_TBD_AAL0;
-		scqe.word_2 =
-		    cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER);
+		scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER);
 		scqe.word_3 = cpu_to_le32(0x00000000);
 		if (*skb->data & 0x02)	/* Payload type 1 - end of pdu */
 			flags |= NS_TBD_EOPDU;
@@ -1733,12 +1770,12 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
 	*scq->next = *tbd;
 	index = (int)(scq->next - scq->base);
 	scq->skb[index] = skb;
-	XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n",
-		card->index, (u32) skb, index);
-	XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
+	XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n",
+		card->index, skb, index);
+	XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
 		card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
 		le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
-		(u32) scq->next);
+		scq->next);
 	if (scq->next == scq->last)
 		scq->next = scq->base;
 	else
@@ -1757,7 +1794,7 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
 
 	while (scq->tail == scq->next) {
 		if (in_interrupt()) {
-			data = (u32) virt_to_bus(scq->next);
+			data = scq_virt_to_bus(scq, scq->next);
 			ns_write_sram(card, scq->scd, &data, 1);
 			spin_unlock_irqrestore(&scq->lock, flags);
 			printk("nicstar%d: Error pushing TSR.\n",
@@ -1789,10 +1826,10 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
 		index = (int)scqi;
 		scq->skb[index] = NULL;
 		XPRINTK
-		    ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
+		    ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
 		     card->index, le32_to_cpu(tsr.word_1),
 		     le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3),
-		     le32_to_cpu(tsr.word_4), (u32) scq->next);
+		     le32_to_cpu(tsr.word_4), scq->next);
 		if (scq->next == scq->last)
 			scq->next = scq->base;
 		else
@@ -1803,7 +1840,7 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
 			PRINTK("nicstar%d: Timeout pushing TSR.\n",
 			       card->index);
 	}
-	data = (u32) virt_to_bus(scq->next);
+	data = scq_virt_to_bus(scq, scq->next);
 	ns_write_sram(card, scq->scd, &data, 1);
 
 	spin_unlock_irqrestore(&scq->lock, flags);
@@ -1881,10 +1918,9 @@ static void process_tsq(ns_dev * card)
 			two_ahead = one_ahead + 1;
 	}
 
-	if (serviced_entries) {
-		writel((((u32) previous) - ((u32) card->tsq.base)),
+	if (serviced_entries)
+		writel(PTR_DIFF(previous, card->tsq.base),
 		       card->membase + TSQH);
-	}
 }
 
 static void drain_scq(ns_dev * card, scq_info * scq, int pos)
@@ -1894,8 +1930,8 @@ static void drain_scq(ns_dev * card, scq_info * scq, int pos)
 	int i;
 	unsigned long flags;
 
-	XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n",
-		card->index, (u32) scq, pos);
+	XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n",
+		card->index, scq, pos);
 	if (pos >= scq->num_entries) {
 		printk("nicstar%d: Bad index on drain_scq().\n", card->index);
 		return;
@@ -1907,9 +1943,13 @@ static void drain_scq(ns_dev * card, scq_info * scq, int pos)
 	i = 0;
 	while (i != pos) {
 		skb = scq->skb[i];
-		XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n",
-			card->index, (u32) skb, i);
+		XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
+			card->index, skb, i);
 		if (skb != NULL) {
+			pci_unmap_single(card->pcidev,
+					 NS_PRV_DMA(skb),
+					 skb->len,
+					 PCI_DMA_TODEVICE);
 			vcc = ATM_SKB(skb)->vcc;
 			if (vcc && vcc->pop != NULL) {
 				vcc->pop(vcc, skb);
@@ -1940,8 +1980,7 @@ static void process_rsq(ns_dev * card)
 		else
 			card->rsq.next++;
 	} while (ns_rsqe_valid(card->rsq.next));
-	writel((((u32) previous) - ((u32) card->rsq.base)),
-	       card->membase + RSQH);
+	writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH);
 }
 
 static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
@@ -1955,12 +1994,30 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 	unsigned short aal5_len;
 	int len;
 	u32 stat;
+	u32 id;
 
 	stat = readl(card->membase + STAT);
 	card->sbfqc = ns_stat_sfbqc_get(stat);
 	card->lbfqc = ns_stat_lfbqc_get(stat);
 
-	skb = (struct sk_buff *)le32_to_cpu(rsqe->buffer_handle);
+	id = le32_to_cpu(rsqe->buffer_handle);
+	skb = idr_find(&card->idr, id);
+	if (!skb) {
+		RXPRINTK(KERN_ERR
+			 "nicstar%d: idr_find() failed!\n", card->index);
+		return;
+	}
+	idr_remove(&card->idr, id);
+	pci_dma_sync_single_for_cpu(card->pcidev,
+				    NS_PRV_DMA(skb),
+				    (NS_PRV_BUFTYPE(skb) == BUF_SM
+				     ? NS_SMSKBSIZE : NS_LGSKBSIZE),
+				    PCI_DMA_FROMDEVICE);
+	pci_unmap_single(card->pcidev,
+			 NS_PRV_DMA(skb),
+			 (NS_PRV_BUFTYPE(skb) == BUF_SM
+			  ? NS_SMSKBSIZE : NS_LGSKBSIZE),
+			 PCI_DMA_FROMDEVICE);
 	vpi = ns_rsqe_vpi(rsqe);
 	vci = ns_rsqe_vci(rsqe);
 	if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {
@@ -2034,43 +2091,42 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 			recycle_rx_buf(card, skb);
 			return;
 		}
-		NS_SKB_CB(iovb)->buf_type = BUF_NONE;
+		NS_PRV_BUFTYPE(iovb) = BUF_NONE;
 	} else if (--card->iovpool.count < card->iovnr.min) {
 		struct sk_buff *new_iovb;
 		if ((new_iovb =
 		     alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) {
-			NS_SKB_CB(iovb)->buf_type = BUF_NONE;
+			NS_PRV_BUFTYPE(iovb) = BUF_NONE;
 			skb_queue_tail(&card->iovpool.queue, new_iovb);
 			card->iovpool.count++;
 		}
 	}
 	vc->rx_iov = iovb;
-	NS_SKB(iovb)->iovcnt = 0;
+	NS_PRV_IOVCNT(iovb) = 0;
 	iovb->len = 0;
 	iovb->data = iovb->head;
 	skb_reset_tail_pointer(iovb);
-	NS_SKB(iovb)->vcc = vcc;
 	/* IMPORTANT: a pointer to the sk_buff containing the small or large
 	   buffer is stored as iovec base, NOT a pointer to the
 	   small or large buffer itself. */
-	} else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS) {
+	} else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
 		printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
 		atomic_inc(&vcc->stats->rx_err);
 		recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
 				      NS_MAX_IOVECS);
-		NS_SKB(iovb)->iovcnt = 0;
+		NS_PRV_IOVCNT(iovb) = 0;
 		iovb->len = 0;
 		iovb->data = iovb->head;
 		skb_reset_tail_pointer(iovb);
-		NS_SKB(iovb)->vcc = vcc;
 	}
-	iov = &((struct iovec *)iovb->data)[NS_SKB(iovb)->iovcnt++];
+	iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++];
 	iov->iov_base = (void *)skb;
 	iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
 	iovb->len += iov->iov_len;
 
-	if (NS_SKB(iovb)->iovcnt == 1) {
-		if (NS_SKB_CB(skb)->buf_type != BUF_SM) {
+#ifdef EXTRA_DEBUG
+	if (NS_PRV_IOVCNT(iovb) == 1) {
+		if (NS_PRV_BUFTYPE(skb) != BUF_SM) {
 			printk
 			    ("nicstar%d: Expected a small buffer, and this is not one.\n",
 			     card->index);
@@ -2081,26 +2137,27 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 			recycle_iov_buf(card, iovb);
 			return;
 		}
-	} else {		/* NS_SKB(iovb)->iovcnt >= 2 */
+	} else {		/* NS_PRV_IOVCNT(iovb) >= 2 */
 
-		if (NS_SKB_CB(skb)->buf_type != BUF_LG) {
+		if (NS_PRV_BUFTYPE(skb) != BUF_LG) {
 			printk
 			    ("nicstar%d: Expected a large buffer, and this is not one.\n",
 			     card->index);
 			which_list(card, skb);
 			atomic_inc(&vcc->stats->rx_err);
 			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
-					      NS_SKB(iovb)->iovcnt);
+					      NS_PRV_IOVCNT(iovb));
 			vc->rx_iov = NULL;
 			recycle_iov_buf(card, iovb);
 			return;
 		}
 	}
+#endif /* EXTRA_DEBUG */
 
 	if (ns_rsqe_eopdu(rsqe)) {
 		/* This works correctly regardless of the endianness of the host */
-		unsigned char *L1L2 = (unsigned char *)((u32) skb->data +
-							iov->iov_len - 6);
+		unsigned char *L1L2 = (unsigned char *)
+				      (skb->data + iov->iov_len - 6);
 		aal5_len = L1L2[0] << 8 | L1L2[1];
 		len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
 		if (ns_rsqe_crcerr(rsqe) ||
@@ -2112,7 +2169,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 			printk(".\n");
 			atomic_inc(&vcc->stats->rx_err);
 			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
-					      NS_SKB(iovb)->iovcnt);
+					      NS_PRV_IOVCNT(iovb));
 			vc->rx_iov = NULL;
 			recycle_iov_buf(card, iovb);
 			return;
@@ -2120,7 +2177,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 
 	/* By this point we (hopefully) have a complete SDU without errors. */
 
-	if (NS_SKB(iovb)->iovcnt == 1) {	/* Just a small buffer */
+	if (NS_PRV_IOVCNT(iovb) == 1) {	/* Just a small buffer */
 		/* skb points to a small buffer */
 		if (!atm_charge(vcc, skb->truesize)) {
 			push_rxbufs(card, skb);
@@ -2136,7 +2193,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 			vcc->push(vcc, skb);
 			atomic_inc(&vcc->stats->rx);
 		}
-	} else if (NS_SKB(iovb)->iovcnt == 2) {	/* One small plus one large buffer */
+	} else if (NS_PRV_IOVCNT(iovb) == 2) {	/* One small plus one large buffer */
 		struct sk_buff *sb;
 
 		sb = (struct sk_buff *)(iov - 1)->iov_base;
@@ -2202,8 +2259,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 				recycle_iovec_rx_bufs(card,
 						      (struct iovec *)
 						      iovb->data,
-						      NS_SKB(iovb)->
-						      iovcnt);
+						      NS_PRV_IOVCNT(iovb));
 				vc->rx_iov = NULL;
 				recycle_iov_buf(card, iovb);
 				return;
@@ -2217,12 +2273,12 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 					card->hbpool.count++;
 				}
 			}
-			NS_SKB_CB(hb)->buf_type = BUF_NONE;
+			NS_PRV_BUFTYPE(hb) = BUF_NONE;
 		} else if (--card->hbpool.count < card->hbnr.min) {
 			struct sk_buff *new_hb;
 			if ((new_hb =
 			     dev_alloc_skb(NS_HBUFSIZE)) != NULL) {
-				NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
+				NS_PRV_BUFTYPE(new_hb) = BUF_NONE;
 				skb_queue_tail(&card->hbpool.queue,
 					       new_hb);
 				card->hbpool.count++;
@@ -2231,7 +2287,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 				if ((new_hb =
 				     dev_alloc_skb(NS_HBUFSIZE)) !=
 				    NULL) {
-					NS_SKB_CB(new_hb)->buf_type =
+					NS_PRV_BUFTYPE(new_hb) =
 					    BUF_NONE;
 					skb_queue_tail(&card->hbpool.
 						       queue, new_hb);
@@ -2244,7 +2300,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 
 			if (!atm_charge(vcc, hb->truesize)) {
 				recycle_iovec_rx_bufs(card, iov,
-						      NS_SKB(iovb)->iovcnt);
+						      NS_PRV_IOVCNT(iovb));
 				if (card->hbpool.count < card->hbnr.max) {
 					skb_queue_tail(&card->hbpool.queue, hb);
 					card->hbpool.count++;
@@ -2263,7 +2319,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 			push_rxbufs(card, sb);
 
 			/* Copy all large buffers to the huge buffer and free them */
-			for (j = 1; j < NS_SKB(iovb)->iovcnt; j++) {
+			for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) {
 				lb = (struct sk_buff *)iov->iov_base;
 				tocopy =
 				    min_t(int, remaining, iov->iov_len);
@@ -2313,7 +2369,7 @@ static void ns_sb_destructor(struct sk_buff *sb)
 		sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
 		if (sb == NULL)
 			break;
-		NS_SKB_CB(sb)->buf_type = BUF_SM;
+		NS_PRV_BUFTYPE(sb) = BUF_SM;
 		skb_queue_tail(&card->sbpool.queue, sb);
 		skb_reserve(sb, NS_AAL0_HEADER);
 		push_rxbufs(card, sb);
@@ -2334,7 +2390,7 @@ static void ns_lb_destructor(struct sk_buff *lb)
 		lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
 		if (lb == NULL)
 			break;
-		NS_SKB_CB(lb)->buf_type = BUF_LG;
+		NS_PRV_BUFTYPE(lb) = BUF_LG;
 		skb_queue_tail(&card->lbpool.queue, lb);
 		skb_reserve(lb, NS_SMBUFSIZE);
 		push_rxbufs(card, lb);
@@ -2351,7 +2407,7 @@ static void ns_hb_destructor(struct sk_buff *hb)
 		hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
 		if (hb == NULL)
 			break;
-		NS_SKB_CB(hb)->buf_type = BUF_NONE;
+		NS_PRV_BUFTYPE(hb) = BUF_NONE;
 		skb_queue_tail(&card->hbpool.queue, hb);
 		card->hbpool.count++;
 	}
@@ -2361,9 +2417,7 @@ static void ns_hb_destructor(struct sk_buff *hb)
 
 static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
 {
-	struct ns_skb_cb *cb = NS_SKB_CB(skb);
-
-	if (unlikely(cb->buf_type == BUF_NONE)) {
+	if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
 		printk("nicstar%d: What kind of rx buffer is this?\n",
 		       card->index);
 		dev_kfree_skb_any(skb);
@@ -2395,7 +2449,7 @@ static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
 	if (card->sbfqc < card->sbnr.init) {
 		struct sk_buff *new_sb;
 		if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
-			NS_SKB_CB(new_sb)->buf_type = BUF_SM;
+			NS_PRV_BUFTYPE(new_sb) = BUF_SM;
 			skb_queue_tail(&card->sbpool.queue, new_sb);
 			skb_reserve(new_sb, NS_AAL0_HEADER);
 			push_rxbufs(card, new_sb);
@@ -2406,7 +2460,7 @@ static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
 	{
 		struct sk_buff *new_sb;
 		if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
-			NS_SKB_CB(new_sb)->buf_type = BUF_SM;
+			NS_PRV_BUFTYPE(new_sb) = BUF_SM;
 			skb_queue_tail(&card->sbpool.queue, new_sb);
 			skb_reserve(new_sb, NS_AAL0_HEADER);
 			push_rxbufs(card, new_sb);
@@ -2423,7 +2477,7 @@ static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
 	if (card->lbfqc < card->lbnr.init) {
 		struct sk_buff *new_lb;
 		if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
-			NS_SKB_CB(new_lb)->buf_type = BUF_LG;
+			NS_PRV_BUFTYPE(new_lb) = BUF_LG;
 			skb_queue_tail(&card->lbpool.queue, new_lb);
 			skb_reserve(new_lb, NS_SMBUFSIZE);
 			push_rxbufs(card, new_lb);
@@ -2434,7 +2488,7 @@ static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
 	{
 		struct sk_buff *new_lb;
 		if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
-			NS_SKB_CB(new_lb)->buf_type = BUF_LG;
+			NS_PRV_BUFTYPE(new_lb) = BUF_LG;
 			skb_queue_tail(&card->lbpool.queue, new_lb);
 			skb_reserve(new_lb, NS_SMBUFSIZE);
 			push_rxbufs(card, new_lb);
@@ -2625,7 +2679,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg)
 			sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
 			if (sb == NULL)
 				return -ENOMEM;
-			NS_SKB_CB(sb)->buf_type = BUF_SM;
+			NS_PRV_BUFTYPE(sb) = BUF_SM;
 			skb_queue_tail(&card->sbpool.queue, sb);
 			skb_reserve(sb, NS_AAL0_HEADER);
 			push_rxbufs(card, sb);
@@ -2639,7 +2693,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg)
 			lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
 			if (lb == NULL)
 				return -ENOMEM;
-			NS_SKB_CB(lb)->buf_type = BUF_LG;
+			NS_PRV_BUFTYPE(lb) = BUF_LG;
 			skb_queue_tail(&card->lbpool.queue, lb);
 			skb_reserve(lb, NS_SMBUFSIZE);
 			push_rxbufs(card, lb);
@@ -2668,7 +2722,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg)
 			hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
 			if (hb == NULL)
 				return -ENOMEM;
-			NS_SKB_CB(hb)->buf_type = BUF_NONE;
+			NS_PRV_BUFTYPE(hb) = BUF_NONE;
 			spin_lock_irqsave(&card->int_lock, flags);
 			skb_queue_tail(&card->hbpool.queue, hb);
 			card->hbpool.count++;
@@ -2698,7 +2752,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg)
 			iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
 			if (iovb == NULL)
 				return -ENOMEM;
-			NS_SKB_CB(iovb)->buf_type = BUF_NONE;
+			NS_PRV_BUFTYPE(iovb) = BUF_NONE;
 			spin_lock_irqsave(&card->int_lock, flags);
 			skb_queue_tail(&card->iovpool.queue, iovb);
 			card->iovpool.count++;
@@ -2723,10 +2777,12 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg)
 	}
 }
 
+#ifdef EXTRA_DEBUG
 static void which_list(ns_dev * card, struct sk_buff *skb)
 {
-	printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type);
+	printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb));
 }
+#endif /* EXTRA_DEBUG */
 
 static void ns_poll(unsigned long arg)
 {
@@ -2803,7 +2859,7 @@ static void ns_phy_put(struct atm_dev *dev, unsigned char value,
 	card = dev->dev_data;
 	spin_lock_irqsave(&card->res_lock, flags);
 	while (CMD_BUSY(card)) ;
-	writel((unsigned long)value, card->membase + DR0);
+	writel((u32) value, card->membase + DR0);
 	writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
 	       card->membase + CMD);
 	spin_unlock_irqrestore(&card->res_lock, flags);
@@ -2813,7 +2869,7 @@ static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr)
 {
 	ns_dev *card;
 	unsigned long flags;
-	unsigned long data;
+	u32 data;
 
 	card = dev->dev_data;
 	spin_lock_irqsave(&card->res_lock, flags);
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h
index 43eb2db1fb88..9bc27ea5088e 100644
--- a/drivers/atm/nicstar.h
+++ b/drivers/atm/nicstar.h
@@ -16,6 +16,7 @@
 
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <linux/idr.h>
 #include <linux/uio.h>
 #include <linux/skbuff.h>
 #include <linux/atmdev.h>
@@ -636,14 +637,22 @@ enum ns_regs {
 
 /* Device driver structures */
 
-struct ns_skb_cb {
+struct ns_skb_prv {
 	u32 buf_type;		/* BUF_SM/BUF_LG/BUF_NONE */
+	u32 dma;
+	int iovcnt;
 };
 
-#define NS_SKB_CB(skb)	((struct ns_skb_cb *)((skb)->cb))
+#define NS_PRV_BUFTYPE(skb) \
+	(((struct ns_skb_prv *)(ATM_SKB(skb)+1))->buf_type)
+#define NS_PRV_DMA(skb) \
+	(((struct ns_skb_prv *)(ATM_SKB(skb)+1))->dma)
+#define NS_PRV_IOVCNT(skb) \
+	(((struct ns_skb_prv *)(ATM_SKB(skb)+1))->iovcnt)
 
 typedef struct tsq_info {
 	void *org;
+	dma_addr_t dma;
 	ns_tsi *base;
 	ns_tsi *next;
 	ns_tsi *last;
@@ -651,6 +660,7 @@ typedef struct tsq_info {
 
 typedef struct scq_info {
 	void *org;
+	dma_addr_t dma;
 	ns_scqe *base;
 	ns_scqe *last;
 	ns_scqe *next;
@@ -668,6 +678,7 @@ typedef struct scq_info {
 
 typedef struct rsq_info {
 	void *org;
+	dma_addr_t dma;
 	ns_rsqe *base;
 	ns_rsqe *next;
 	ns_rsqe *last;
@@ -693,13 +704,6 @@ typedef struct vc_map {
 	int tbd_count;
 } vc_map;
 
-struct ns_skb_data {
-	struct atm_vcc *vcc;
-	int iovcnt;
-};
-
-#define NS_SKB(skb) (((struct ns_skb_data *) (skb)->cb))
-
 typedef struct ns_dev {
 	int index;		/* Card ID to the device driver */
 	int sram_size;		/* In k x 32bit words. 32 or 128 */
@@ -709,6 +713,7 @@ typedef struct ns_dev {
 	int vpibits;
 	int vcibits;
 	struct pci_dev *pcidev;
+	struct idr idr;
 	struct atm_dev *atmdev;
 	tsq_info tsq;
 	rsq_info rsq;
@@ -729,11 +734,12 @@ typedef struct ns_dev {
 	buf_nr iovnr;
 	int sbfqc;
 	int lbfqc;
-	u32 sm_handle;
+	struct sk_buff *sm_handle;
 	u32 sm_addr;
-	u32 lg_handle;
+	struct sk_buff *lg_handle;
 	u32 lg_addr;
 	struct sk_buff *rcbuf;	/* Current raw cell buffer */
+	struct ns_rcqe *rawcell;
 	u32 rawch;		/* Raw cell queue head */
 	unsigned intcnt;	/* Interrupt counter */
 	spinlock_t int_lock;	/* Interrupt lock */