Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/ambassador.c | 2
-rw-r--r--  drivers/atm/atmtcp.c | 2
-rw-r--r--  drivers/atm/eni.c | 2
-rw-r--r--  drivers/atm/firestream.c | 2
-rw-r--r--  drivers/atm/fore200e.c | 2
-rw-r--r--  drivers/atm/he.c | 2
-rw-r--r--  drivers/atm/horizon.c | 2
-rw-r--r--  drivers/atm/idt77252.c | 8
-rw-r--r--  drivers/atm/lanai.c | 2
-rw-r--r--  drivers/atm/nicstar.c | 167
-rw-r--r--  drivers/atm/nicstar.h | 16
-rw-r--r--  drivers/atm/zatm.c | 10
-rw-r--r--  drivers/block/aoe/aoenet.c | 2
-rw-r--r--  drivers/bluetooth/bfusb.c | 16
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 24
-rw-r--r--  drivers/bluetooth/bpa10x.c | 17
-rw-r--r--  drivers/bluetooth/bt3c_cs.c | 12
-rw-r--r--  drivers/bluetooth/btuart_cs.c | 10
-rw-r--r--  drivers/bluetooth/dtl1_cs.c | 10
-rw-r--r--  drivers/bluetooth/hci_bcsp.c | 18
-rw-r--r--  drivers/bluetooth/hci_h4.c | 4
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 4
-rw-r--r--  drivers/bluetooth/hci_usb.c | 23
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 386
-rw-r--r--  drivers/bluetooth/hci_vhci.h | 50
-rw-r--r--  drivers/char/random.c | 34
-rw-r--r--  drivers/ieee1394/ieee1394_core.c | 4
-rw-r--r--  drivers/isdn/act2000/capi.c | 2
-rw-r--r--  drivers/isdn/i4l/isdn_net.c | 1
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 1
-rw-r--r--  drivers/net/bnx2.c | 229
-rw-r--r--  drivers/net/bnx2.h | 10
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 11
-rw-r--r--  drivers/net/bonding/bond_3ad.h | 2
-rw-r--r--  drivers/net/bonding/bond_alb.c | 5
-rw-r--r--  drivers/net/hamradio/bpqether.c | 4
-rw-r--r--  drivers/net/ppp_generic.c | 1
-rw-r--r--  drivers/net/pppoe.c | 6
-rw-r--r--  drivers/net/rrunner.c | 3
-rw-r--r--  drivers/net/shaper.c | 50
-rw-r--r--  drivers/net/tg3.c | 325
-rw-r--r--  drivers/net/tg3.h | 10
-rw-r--r--  drivers/net/wan/hdlc_generic.c | 2
-rw-r--r--  drivers/net/wan/lapbether.c | 2
-rw-r--r--  drivers/net/wan/sdla_fr.c | 22
-rw-r--r--  drivers/net/wan/syncppp.c | 2
-rw-r--r--  drivers/usb/net/usbnet.c | 21
-rw-r--r--  drivers/w1/w1_int.c | 6
-rw-r--r--  drivers/w1/w1_netlink.c | 2
49 files changed, 846 insertions, 702 deletions
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 73c6b85299c1..d74a7c5e75dd 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -513,7 +513,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
 
 // VC layer stats
 atomic_inc(&atm_vcc->stats->rx);
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 // end of our responsability
 atm_vcc->push (atm_vcc, skb);
 return;
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index f2f01cb82cb4..57f1810fdccd 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -325,7 +325,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
 result = -ENOBUFS;
 goto done;
 }
-do_gettimeofday(&new_skb->stamp);
+__net_timestamp(new_skb);
 memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
 out_vcc->push(out_vcc,new_skb);
 atomic_inc(&vcc->stats->tx);
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 10da36934769..c13c4d736ef5 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -537,7 +537,7 @@ static int rx_aal0(struct atm_vcc *vcc)
 return 0;
 }
 skb_put(skb,length);
-skb->stamp = eni_vcc->timestamp;
+skb_set_timestamp(skb, &eni_vcc->timestamp);
 DPRINTK("got len %ld\n",length);
 if (do_rx_dma(vcc,skb,1,length >> 2,length >> 2)) return 1;
 eni_vcc->rxing++;
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index b078fa548ebf..58219744f5db 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
 skb_put (skb, qe->p1 & 0xffff);
 ATM_SKB(skb)->vcc = atm_vcc;
 atomic_inc(&atm_vcc->stats->rx);
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
 atm_vcc->push (atm_vcc, skb);
 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 5f702199543a..2bf723a7b6e6 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1176,7 +1176,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
 return -ENOMEM;
 }
 
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 
 #ifdef FORE200E_52BYTE_AAL0_SDU
 if (cell_header) {
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 28250c9b32d6..fde9334059af 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1886,7 +1886,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 if (rx_skb_reserve > 0)
 skb_reserve(skb, rx_skb_reserve);
 
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 
 for (iov = he_vcc->iov_head;
 iov < he_vcc->iov_tail; ++iov) {
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 924a2c8988bd..0cded0468003 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
 // VC layer stats
 atomic_inc(&vcc->stats->rx);
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 // end of our responsability
 vcc->push (vcc, skb);
 }
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 30b7e990ed0b..b4a76cade646 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1101,7 +1101,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 cell, ATM_CELL_PAYLOAD);
 
 ATM_SKB(sb)->vcc = vcc;
-do_gettimeofday(&sb->stamp);
+__net_timestamp(sb);
 vcc->push(vcc, sb);
 atomic_inc(&vcc->stats->rx);
 
@@ -1179,7 +1179,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 
 skb_trim(skb, len);
 ATM_SKB(skb)->vcc = vcc;
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 
 vcc->push(vcc, skb);
 atomic_inc(&vcc->stats->rx);
@@ -1201,7 +1201,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 
 skb_trim(skb, len);
 ATM_SKB(skb)->vcc = vcc;
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 
 vcc->push(vcc, skb);
 atomic_inc(&vcc->stats->rx);
@@ -1340,7 +1340,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
 ATM_CELL_PAYLOAD);
 
 ATM_SKB(sb)->vcc = vcc;
-do_gettimeofday(&sb->stamp);
+__net_timestamp(sb);
 vcc->push(vcc, sb);
 atomic_inc(&vcc->stats->rx);
 
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index ffe3afa723b8..51ec14787293 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1427,7 +1427,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
 skb_put(skb, size);
 vcc_rx_memcpy(skb->data, lvcc, size);
 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
 atomic_inc(&lvcc->rx.atmvcc->stats->rx);
 out:
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index b2a7b754fd14..c57e20dcb0f8 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -214,8 +214,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
 static void __devinit ns_init_card_error(ns_dev *card, int error);
 static scq_info *get_scq(int size, u32 scd);
 static void free_scq(scq_info *scq, struct atm_vcc *vcc);
-static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
-u32 handle2, u32 addr2);
+static void push_rxbufs(ns_dev *, struct sk_buff *);
 static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
 static int ns_open(struct atm_vcc *vcc);
 static void ns_close(struct atm_vcc *vcc);
@@ -766,6 +765,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 ns_init_card_error(card, error);
 return error;
 }
+NS_SKB_CB(hb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->hbpool.queue, hb);
 card->hbpool.count++;
 }
@@ -786,9 +786,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 ns_init_card_error(card, error);
 return error;
 }
+NS_SKB_CB(lb)->buf_type = BUF_LG;
 skb_queue_tail(&card->lbpool.queue, lb);
 skb_reserve(lb, NS_SMBUFSIZE);
-push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+push_rxbufs(card, lb);
 /* Due to the implementation of push_rxbufs() this is 1, not 0 */
 if (j == 1)
 {
@@ -822,9 +823,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 ns_init_card_error(card, error);
 return error;
 }
+NS_SKB_CB(sb)->buf_type = BUF_SM;
 skb_queue_tail(&card->sbpool.queue, sb);
 skb_reserve(sb, NS_AAL0_HEADER);
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+push_rxbufs(card, sb);
 }
 /* Test for strange behaviour which leads to crashes */
 if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
@@ -852,6 +854,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 ns_init_card_error(card, error);
 return error;
 }
+NS_SKB_CB(iovb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->iovpool.queue, iovb);
 card->iovpool.count++;
 }
@@ -1078,12 +1081,18 @@ static void free_scq(scq_info *scq, struct atm_vcc *vcc)
 
 /* The handles passed must be pointers to the sk_buff containing the small
 or large buffer(s) cast to u32. */
-static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
-u32 handle2, u32 addr2)
+static void push_rxbufs(ns_dev *card, struct sk_buff *skb)
 {
+struct ns_skb_cb *cb = NS_SKB_CB(skb);
+u32 handle1, addr1;
+u32 handle2, addr2;
 u32 stat;
 unsigned long flags;
 
+/* *BARF* */
+handle2 = addr2 = 0;
+handle1 = (u32)skb;
+addr1 = (u32)virt_to_bus(skb->data);
 
 #ifdef GENERAL_DEBUG
 if (!addr1)
@@ -1093,7 +1102,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
 stat = readl(card->membase + STAT);
 card->sbfqc = ns_stat_sfbqc_get(stat);
 card->lbfqc = ns_stat_lfbqc_get(stat);
-if (type == BUF_SM)
+if (cb->buf_type == BUF_SM)
 {
 if (!addr2)
 {
@@ -1111,7 +1120,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
 }
 }
 }
-else /* type == BUF_LG */
+else /* buf_type == BUF_LG */
 {
 if (!addr2)
 {
@@ -1132,26 +1141,26 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
 
 if (addr2)
 {
-if (type == BUF_SM)
+if (cb->buf_type == BUF_SM)
 {
 if (card->sbfqc >= card->sbnr.max)
 {
-skb_unlink((struct sk_buff *) handle1);
+skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue);
 dev_kfree_skb_any((struct sk_buff *) handle1);
-skb_unlink((struct sk_buff *) handle2);
+skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue);
 dev_kfree_skb_any((struct sk_buff *) handle2);
 return;
 }
 else
 card->sbfqc += 2;
 }
-else /* (type == BUF_LG) */
+else /* (buf_type == BUF_LG) */
 {
 if (card->lbfqc >= card->lbnr.max)
 {
-skb_unlink((struct sk_buff *) handle1);
+skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue);
 dev_kfree_skb_any((struct sk_buff *) handle1);
-skb_unlink((struct sk_buff *) handle2);
+skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue);
 dev_kfree_skb_any((struct sk_buff *) handle2);
 return;
 }
@@ -1166,12 +1175,12 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
 writel(handle2, card->membase + DR2);
 writel(addr1, card->membase + DR1);
 writel(handle1, card->membase + DR0);
-writel(NS_CMD_WRITE_FREEBUFQ | (u32) type, card->membase + CMD);
+writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD);
 
 spin_unlock_irqrestore(&card->res_lock, flags);
 
 XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
-(type == BUF_SM ? "small" : "large"), addr1, addr2);
+(cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2);
 }
 
 if (!card->efbie && card->sbfqc >= card->sbnr.min &&
@@ -1322,9 +1331,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
 card->efbie = 0;
 break;
 }
+NS_SKB_CB(sb)->buf_type = BUF_SM;
 skb_queue_tail(&card->sbpool.queue, sb);
 skb_reserve(sb, NS_AAL0_HEADER);
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+push_rxbufs(card, sb);
 }
 card->sbfqc = i;
 process_rsq(card);
@@ -1348,9 +1358,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
 card->efbie = 0;
 break;
 }
+NS_SKB_CB(lb)->buf_type = BUF_LG;
 skb_queue_tail(&card->lbpool.queue, lb);
 skb_reserve(lb, NS_SMBUFSIZE);
-push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+push_rxbufs(card, lb);
 }
 card->lbfqc = i;
 process_rsq(card);
@@ -2202,7 +2213,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 memcpy(sb->tail, cell, ATM_CELL_PAYLOAD);
 skb_put(sb, ATM_CELL_PAYLOAD);
 ATM_SKB(sb)->vcc = vcc;
-do_gettimeofday(&sb->stamp);
+__net_timestamp(sb);
 vcc->push(vcc, sb);
 atomic_inc(&vcc->stats->rx);
 cell += ATM_CELL_PAYLOAD;
@@ -2227,6 +2238,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 recycle_rx_buf(card, skb);
 return;
 }
+NS_SKB_CB(iovb)->buf_type = BUF_NONE;
 }
 else
 if (--card->iovpool.count < card->iovnr.min)
@@ -2234,6 +2246,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 struct sk_buff *new_iovb;
 if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
 {
+NS_SKB_CB(iovb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->iovpool.queue, new_iovb);
 card->iovpool.count++;
 }
@@ -2264,7 +2277,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 
 if (NS_SKB(iovb)->iovcnt == 1)
 {
-if (skb->list != &card->sbpool.queue)
+if (NS_SKB_CB(skb)->buf_type != BUF_SM)
 {
 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
 card->index);
@@ -2278,7 +2291,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 }
 else /* NS_SKB(iovb)->iovcnt >= 2 */
 {
-if (skb->list != &card->lbpool.queue)
+if (NS_SKB_CB(skb)->buf_type != BUF_LG)
 {
 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
 card->index);
@@ -2322,8 +2335,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 /* skb points to a small buffer */
 if (!atm_charge(vcc, skb->truesize))
 {
-push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
-0, 0);
+push_rxbufs(card, skb);
 atomic_inc(&vcc->stats->rx_drop);
 }
 else
@@ -2334,7 +2346,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 skb->destructor = ns_sb_destructor;
 #endif /* NS_USE_DESTRUCTORS */
 ATM_SKB(skb)->vcc = vcc;
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 vcc->push(vcc, skb);
 atomic_inc(&vcc->stats->rx);
 }
@@ -2350,8 +2362,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 {
 if (!atm_charge(vcc, sb->truesize))
 {
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-0, 0);
+push_rxbufs(card, sb);
 atomic_inc(&vcc->stats->rx_drop);
 }
 else
@@ -2362,21 +2373,19 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 sb->destructor = ns_sb_destructor;
 #endif /* NS_USE_DESTRUCTORS */
 ATM_SKB(sb)->vcc = vcc;
-do_gettimeofday(&sb->stamp);
+__net_timestamp(sb);
 vcc->push(vcc, sb);
 atomic_inc(&vcc->stats->rx);
 }
 
-push_rxbufs(card, BUF_LG, (u32) skb,
-(u32) virt_to_bus(skb->data), 0, 0);
+push_rxbufs(card, skb);
 
 }
 else /* len > NS_SMBUFSIZE, the usual case */
 {
 if (!atm_charge(vcc, skb->truesize))
 {
-push_rxbufs(card, BUF_LG, (u32) skb,
-(u32) virt_to_bus(skb->data), 0, 0);
+push_rxbufs(card, skb);
 atomic_inc(&vcc->stats->rx_drop);
 }
 else
@@ -2389,13 +2398,12 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 memcpy(skb->data, sb->data, NS_SMBUFSIZE);
 skb_put(skb, len - NS_SMBUFSIZE);
 ATM_SKB(skb)->vcc = vcc;
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 vcc->push(vcc, skb);
 atomic_inc(&vcc->stats->rx);
 }
 
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-0, 0);
+push_rxbufs(card, sb);
 
 }
 
@@ -2430,6 +2438,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 card->hbpool.count++;
 }
 }
+NS_SKB_CB(hb)->buf_type = BUF_NONE;
 }
 else
 if (--card->hbpool.count < card->hbnr.min)
@@ -2437,6 +2446,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 struct sk_buff *new_hb;
 if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
 {
+NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->hbpool.queue, new_hb);
 card->hbpool.count++;
 }
@@ -2444,6 +2454,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 {
 if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
 {
+NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->hbpool.queue, new_hb);
 card->hbpool.count++;
 }
@@ -2473,8 +2484,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 remaining = len - iov->iov_len;
 iov++;
 /* Free the small buffer */
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-0, 0);
+push_rxbufs(card, sb);
 
 /* Copy all large buffers to the huge buffer and free them */
 for (j = 1; j < NS_SKB(iovb)->iovcnt; j++)
@@ -2485,8 +2495,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 skb_put(hb, tocopy);
 iov++;
 remaining -= tocopy;
-push_rxbufs(card, BUF_LG, (u32) lb,
-(u32) virt_to_bus(lb->data), 0, 0);
+push_rxbufs(card, lb);
 }
 #ifdef EXTRA_DEBUG
 if (remaining != 0 || hb->len != len)
@@ -2496,7 +2505,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 #ifdef NS_USE_DESTRUCTORS
 hb->destructor = ns_hb_destructor;
 #endif /* NS_USE_DESTRUCTORS */
-do_gettimeofday(&hb->stamp);
+__net_timestamp(hb);
 vcc->push(vcc, hb);
 atomic_inc(&vcc->stats->rx);
 }
@@ -2527,9 +2536,10 @@ static void ns_sb_destructor(struct sk_buff *sb)
 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
 if (sb == NULL)
 break;
+NS_SKB_CB(sb)->buf_type = BUF_SM;
 skb_queue_tail(&card->sbpool.queue, sb);
 skb_reserve(sb, NS_AAL0_HEADER);
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+push_rxbufs(card, sb);
 } while (card->sbfqc < card->sbnr.min);
 }
 
@@ -2550,9 +2560,10 @@ static void ns_lb_destructor(struct sk_buff *lb)
 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
 if (lb == NULL)
 break;
+NS_SKB_CB(lb)->buf_type = BUF_LG;
 skb_queue_tail(&card->lbpool.queue, lb);
 skb_reserve(lb, NS_SMBUFSIZE);
-push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+push_rxbufs(card, lb);
 } while (card->lbfqc < card->lbnr.min);
 }
 
@@ -2569,6 +2580,7 @@ static void ns_hb_destructor(struct sk_buff *hb)
 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
 if (hb == NULL)
 break;
+NS_SKB_CB(hb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->hbpool.queue, hb);
 card->hbpool.count++;
 }
@@ -2577,45 +2589,25 @@ static void ns_hb_destructor(struct sk_buff *hb)
 #endif /* NS_USE_DESTRUCTORS */
 
 
-
 static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
 {
-if (skb->list == &card->sbpool.queue)
-push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
-else if (skb->list == &card->lbpool.queue)
-push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
-else
-{
-printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
-dev_kfree_skb_any(skb);
-}
-}
+struct ns_skb_cb *cb = NS_SKB_CB(skb);
 
+if (unlikely(cb->buf_type == BUF_NONE)) {
+printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
+dev_kfree_skb_any(skb);
+} else
+push_rxbufs(card, skb);
+}
 
 
 static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
 {
-struct sk_buff *skb;
-
-for (; count > 0; count--)
-{
-skb = (struct sk_buff *) (iov++)->iov_base;
-if (skb->list == &card->sbpool.queue)
-push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
-0, 0);
-else if (skb->list == &card->lbpool.queue)
-push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data),
-0, 0);
-else
-{
-printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
-dev_kfree_skb_any(skb);
-}
-}
+while (count-- > 0)
+recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base);
 }
 
 
-
 static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
 {
 if (card->iovpool.count < card->iovnr.max)
@@ -2631,7 +2623,7 @@ static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
 
 static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 {
-skb_unlink(sb);
+skb_unlink(sb, &card->sbpool.queue);
 #ifdef NS_USE_DESTRUCTORS
 if (card->sbfqc < card->sbnr.min)
 #else
@@ -2640,10 +2632,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 struct sk_buff *new_sb;
 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
 {
+NS_SKB_CB(new_sb)->buf_type = BUF_SM;
 skb_queue_tail(&card->sbpool.queue, new_sb);
 skb_reserve(new_sb, NS_AAL0_HEADER);
-push_rxbufs(card, BUF_SM, (u32) new_sb,
-(u32) virt_to_bus(new_sb->data), 0, 0);
+push_rxbufs(card, new_sb);
 }
 }
 if (card->sbfqc < card->sbnr.init)
@@ -2652,10 +2644,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 struct sk_buff *new_sb;
 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
 {
+NS_SKB_CB(new_sb)->buf_type = BUF_SM;
 skb_queue_tail(&card->sbpool.queue, new_sb);
 skb_reserve(new_sb, NS_AAL0_HEADER);
-push_rxbufs(card, BUF_SM, (u32) new_sb,
-(u32) virt_to_bus(new_sb->data), 0, 0);
+push_rxbufs(card, new_sb);
 }
 }
 }
@@ -2664,7 +2656,7 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 
 static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
 {
-skb_unlink(lb);
+skb_unlink(lb, &card->lbpool.queue);
 #ifdef NS_USE_DESTRUCTORS
 if (card->lbfqc < card->lbnr.min)
 #else
@@ -2673,10 +2665,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
 struct sk_buff *new_lb;
 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
 {
+NS_SKB_CB(new_lb)->buf_type = BUF_LG;
 skb_queue_tail(&card->lbpool.queue, new_lb);
 skb_reserve(new_lb, NS_SMBUFSIZE);
-push_rxbufs(card, BUF_LG, (u32) new_lb,
-(u32) virt_to_bus(new_lb->data), 0, 0);
+push_rxbufs(card, new_lb);
 }
 }
 if (card->lbfqc < card->lbnr.init)
@@ -2685,10 +2677,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
 struct sk_buff *new_lb;
 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
 {
+NS_SKB_CB(new_lb)->buf_type = BUF_LG;
 skb_queue_tail(&card->lbpool.queue, new_lb);
 skb_reserve(new_lb, NS_SMBUFSIZE);
-push_rxbufs(card, BUF_LG, (u32) new_lb,
-(u32) virt_to_bus(new_lb->data), 0, 0);
+push_rxbufs(card, new_lb);
 }
 }
 }
@@ -2880,9 +2872,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
 if (sb == NULL)
 return -ENOMEM;
+NS_SKB_CB(sb)->buf_type = BUF_SM;
 skb_queue_tail(&card->sbpool.queue, sb);
 skb_reserve(sb, NS_AAL0_HEADER);
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+push_rxbufs(card, sb);
 }
 break;
 
@@ -2894,9 +2887,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
 if (lb == NULL)
 return -ENOMEM;
+NS_SKB_CB(lb)->buf_type = BUF_LG;
 skb_queue_tail(&card->lbpool.queue, lb);
 skb_reserve(lb, NS_SMBUFSIZE);
-push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+push_rxbufs(card, lb);
 }
 break;
 
@@ -2923,6 +2917,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
 if (hb == NULL)
 return -ENOMEM;
+NS_SKB_CB(hb)->buf_type = BUF_NONE;
 ns_grab_int_lock(card, flags);
 skb_queue_tail(&card->hbpool.queue, hb);
 card->hbpool.count++;
@@ -2953,6 +2948,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
 if (iovb == NULL)
 return -ENOMEM;
+NS_SKB_CB(iovb)->buf_type = BUF_NONE;
 ns_grab_int_lock(card, flags);
 skb_queue_tail(&card->iovpool.queue, iovb);
 card->iovpool.count++;
@@ -2979,17 +2975,12 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 }
 
 
-
 static void which_list(ns_dev *card, struct sk_buff *skb)
 {
-printk("It's a %s buffer.\n", skb->list == &card->sbpool.queue ?
-"small" : skb->list == &card->lbpool.queue ? "large" :
-skb->list == &card->hbpool.queue ? "huge" :
-skb->list == &card->iovpool.queue ? "iovec" : "unknown");
+printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type);
 }
 
 
-
 static void ns_poll(unsigned long arg)
 {
 int i;
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h
index ea83c46c8ba5..5997bcb45b59 100644
--- a/drivers/atm/nicstar.h
+++ b/drivers/atm/nicstar.h
@@ -103,8 +103,14 @@
 
 #define NS_IOREMAP_SIZE 4096
 
-#define BUF_SM 0x00000000 /* These two are used for push_rxbufs() */
-#define BUF_LG 0x00000001 /* CMD, Write_FreeBufQ, LBUF bit */
+/*
+ * BUF_XX distinguish the Rx buffers depending on their (small/large) size.
+ * BUG_SM and BUG_LG are both used by the driver and the device.
+ * BUF_NONE is only used by the driver.
+ */
+#define BUF_SM 0x00000000 /* These two are used for push_rxbufs() */
+#define BUF_LG 0x00000001 /* CMD, Write_FreeBufQ, LBUF bit */
+#define BUF_NONE 0xffffffff /* Software only: */
 
 #define NS_HBUFSIZE 65568 /* Size of max. AAL5 PDU */
 #define NS_MAX_IOVECS (2 + (65568 - NS_SMBUFSIZE) / \
@@ -684,6 +690,12 @@ enum ns_regs
 /* Device driver structures ***************************************************/
 
 
+struct ns_skb_cb {
+u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */
+};
+
+#define NS_SKB_CB(skb) ((struct ns_skb_cb *)((skb)->cb))
+
 typedef struct tsq_info
 {
 void *org;
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a2b236a966e0..c4b75ecf9460 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -400,7 +400,7 @@ unsigned long *x;
 EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >>
 uPD98401_AAL5_ES_SHIFT,error);
 skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 #if 0
 printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
 ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
@@ -417,10 +417,12 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
 chan = (here[3] & uPD98401_AAL5_CHAN) >>
 uPD98401_AAL5_CHAN_SHIFT;
 if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
+int pos = ZATM_VCC(vcc)->pool;
+
 vcc = zatm_dev->rx_map[chan];
-if (skb == zatm_dev->last_free[ZATM_VCC(vcc)->pool])
-zatm_dev->last_free[ZATM_VCC(vcc)->pool] = NULL;
-skb_unlink(skb);
+if (skb == zatm_dev->last_free[pos])
+zatm_dev->last_free[pos] = NULL;
+skb_unlink(skb, zatm_dev->pool + pos);
 }
 else {
 printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 9e6f51c528b0..4be976940f69 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -120,7 +120,7 @@ aoenet_xmit(struct sk_buff *sl)
 * (1) len doesn't include the header by default. I want this.
 */
 static int
-aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt)
+aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
 {
 struct aoe_hdr *h;
 u32 n;
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index c42d7e6ac1c5..1e9db0156ea7 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -158,7 +158,7 @@ static int bfusb_send_bulk(struct bfusb *bfusb, struct sk_buff *skb)
 if (err) {
 BT_ERR("%s bulk tx submit failed urb %p err %d",
 bfusb->hdev->name, urb, err);
-skb_unlink(skb);
+skb_unlink(skb, &bfusb->pending_q);
 usb_free_urb(urb);
 } else
 atomic_inc(&bfusb->pending_tx);
@@ -212,7 +212,7 @@ static void bfusb_tx_complete(struct urb *urb, struct pt_regs *regs)
 
 read_lock(&bfusb->lock);
 
-skb_unlink(skb);
+skb_unlink(skb, &bfusb->pending_q);
 skb_queue_tail(&bfusb->completed_q, skb);
 
 bfusb_tx_wakeup(bfusb);
@@ -253,7 +253,7 @@ static int bfusb_rx_submit(struct bfusb *bfusb, struct urb *urb)
 if (err) {
 BT_ERR("%s bulk rx submit failed urb %p err %d",
 bfusb->hdev->name, urb, err);
-skb_unlink(skb);
+skb_unlink(skb, &bfusb->pending_q);
 kfree_skb(skb);
 usb_free_urb(urb);
 }
@@ -330,7 +330,7 @@ static inline int bfusb_recv_block(struct bfusb *bfusb, int hdr, unsigned char *
 }
 
 skb->dev = (void *) bfusb->hdev;
-skb->pkt_type = pkt_type;
+bt_cb(skb)->pkt_type = pkt_type;
 
 bfusb->reassembly = skb;
 } else {
@@ -398,7 +398,7 @@ static void bfusb_rx_complete(struct urb *urb, struct pt_regs *regs)
 buf += len;
 }
 
-skb_unlink(skb);
+skb_unlink(skb, &bfusb->pending_q);
 kfree_skb(skb);
 
 bfusb_rx_submit(bfusb, urb);
@@ -485,7 +485,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
 unsigned char buf[3];
 int sent = 0, size, count;
 
-BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, skb->pkt_type, skb->len);
+BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
 
 if (!hdev) {
 BT_ERR("Frame for unknown HCI device (hdev=NULL)");
@@ -497,7 +497,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
 
 bfusb = (struct bfusb *) hdev->driver_data;
 
-switch (skb->pkt_type) {
+switch (bt_cb(skb)->pkt_type) {
 case HCI_COMMAND_PKT:
 hdev->stat.cmd_tx++;
 break;
@@ -510,7 +510,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
 };
 
 /* Prepend skb with frame type */
-memcpy(skb_push(skb, 1), &(skb->pkt_type), 1);
+memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
 
 count = skb->len;
 
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index bd2ec7e284cc..26fe9c0e1d20 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -270,7 +270,7 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
 if (!(skb = skb_dequeue(&(info->txq))))
 break;
 
-if (skb->pkt_type & 0x80) {
+if (bt_cb(skb)->pkt_type & 0x80) {
 /* Disable RTS */
 info->ctrl_reg |= REG_CONTROL_RTS;
 outb(info->ctrl_reg, iobase + REG_CONTROL);
@@ -288,13 +288,13 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
 /* Mark the buffer as dirty */
 clear_bit(ready_bit, &(info->tx_state));
 
-if (skb->pkt_type & 0x80) {
+if (bt_cb(skb)->pkt_type & 0x80) {
 DECLARE_WAIT_QUEUE_HEAD(wq);
 DEFINE_WAIT(wait);
 
 unsigned char baud_reg;
 
-switch (skb->pkt_type) {
+switch (bt_cb(skb)->pkt_type) {
 case PKT_BAUD_RATE_460800:
 baud_reg = REG_CONTROL_BAUD_RATE_460800;
 break;
@@ -410,9 +410,9 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
 if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
 
 info->rx_skb->dev = (void *) info->hdev;
-info->rx_skb->pkt_type = buf[i];
+bt_cb(info->rx_skb)->pkt_type = buf[i];
 
-switch (info->rx_skb->pkt_type) {
+switch (bt_cb(info->rx_skb)->pkt_type) {
 
 case 0x00:
 /* init packet */
@@ -444,7 +444,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
 
 default:
 /* unknown packet */
-BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type);
+BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
 info->hdev->stat.err_rx++;
 
 kfree_skb(info->rx_skb);
@@ -586,21 +586,21 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
 switch (baud) {
 case 460800:
 cmd[4] = 0x00;
-skb->pkt_type = PKT_BAUD_RATE_460800;
+bt_cb(skb)->pkt_type = PKT_BAUD_RATE_460800;
 break;
 case 230400:
 cmd[4] = 0x01;
-skb->pkt_type = PKT_BAUD_RATE_230400;
+bt_cb(skb)->pkt_type = PKT_BAUD_RATE_230400;
 break;
 case 115200:
 cmd[4] = 0x02;
-skb->pkt_type = PKT_BAUD_RATE_115200;
+bt_cb(skb)->pkt_type = PKT_BAUD_RATE_115200;
 break;
 case 57600:
 /* Fall through... */
 default:
 cmd[4] = 0x03;
-skb->pkt_type = PKT_BAUD_RATE_57600;
+bt_cb(skb)->pkt_type = PKT_BAUD_RATE_57600;
 break;
 }
 
@@ -680,7 +680,7 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
 
 info = (bluecard_info_t *)(hdev->driver_data);
 
-switch (skb->pkt_type) {
+switch (bt_cb(skb)->pkt_type) {
 case HCI_COMMAND_PKT:
 hdev->stat.cmd_tx++;
 break;
@@ -693,7 +693,7 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
 };
 
 /* Prepend skb with frame type */
-memcpy(skb_push(skb, 1), &(skb->pkt_type), 1);
+memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
 skb_queue_tail(&(info->txq), skb);
 
 bluecard_write_wakeup(info);
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index f696da6f417b..a1bf8f066c88 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -105,7 +105,7 @@ static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int c
 if (skb) {
 memcpy(skb_put(skb, len), buf, len);
 skb->dev = (void *) data->hdev;
-skb->pkt_type = HCI_ACLDATA_PKT;
+bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
 hci_recv_frame(skb);
 }
 break;
@@ -117,7 +117,7 @@ static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int c
 if (skb) {
 memcpy(skb_put(skb, len), buf, len);
 skb->dev = (void *) data->hdev;
-skb->pkt_type = HCI_SCODATA_PKT;
+bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
 hci_recv_frame(skb);
 }
 break;
@@ -129,7 +129,7 @@ static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int c
 if (skb) {
 memcpy(skb_put(skb, len), buf, len);
 skb->dev = (void *) data->hdev;
-skb->pkt_type = HCI_VENDOR_PKT;
+bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
 hci_recv_frame(skb);
 }
 break;
@@ -190,7 +190,7 @@ static int bpa10x_recv_event(struct bpa10x_data *data, unsigned char *buf, int s
 }
 
 skb->dev = (void *) data->hdev;
-skb->pkt_type = pkt_type;
+bt_cb(skb)->pkt_type = pkt_type;
 
 memcpy(skb_put(skb, size), buf, size);
 
@@ -307,7 +307,8 @@ unlock:
 read_unlock(&data->lock);
 }
 
-static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe, size_t size, int flags, void *data)
+static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe,
+size_t size, unsigned int __nocast flags, void *data)
 {
 struct urb *urb;
 struct usb_ctrlrequest *cr;
@@ -487,7 +488,7 @@ static int bpa10x_send_frame(struct sk_buff *skb)
 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
 struct bpa10x_data *data;
 
-BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, skb->pkt_type, skb->len);
+BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
 
 if (!hdev) {
 BT_ERR("Frame for unknown HCI device");
@@ -500,9 +501,9 @@ static int bpa10x_send_frame(struct sk_buff *skb)
 
 data = hdev->driver_data;
 
 /* Prepend skb with frame type */
-memcpy(skb_push(skb, 1), &(skb->pkt_type), 1);
+memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
 
-switch (skb->pkt_type) {
+switch (bt_cb(skb)->pkt_type) {
 case HCI_COMMAND_PKT:
 hdev->stat.cmd_tx++;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index adf1750ea58d..2e0338d80f32 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -259,11 +259,11 @@ static void bt3c_receive(bt3c_info_t *info)
 if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
 
 info->rx_skb->dev = (void *) info->hdev;
-info->rx_skb->pkt_type = inb(iobase + DATA_L);
+bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
 inb(iobase + DATA_H);
-//printk("bt3c: PACKET_TYPE=%02x\n", info->rx_skb->pkt_type);
+//printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
 
-switch (info->rx_skb->pkt_type) {
+switch (bt_cb(info->rx_skb)->pkt_type) {
 
 case HCI_EVENT_PKT:
 info->rx_state = RECV_WAIT_EVENT_HEADER;
@@ -282,7 +282,7 @@ static void bt3c_receive(bt3c_info_t *info)
 
 default:
 /* Unknown packet */
-BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type);
+BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
 info->hdev->stat.err_rx++;
 clear_bit(HCI_RUNNING, &(info->hdev->flags));
 
@@ -439,7 +439,7 @@ static int bt3c_hci_send_frame(struct sk_buff *skb)
 
 info = (bt3c_info_t *) (hdev->driver_data);
 
-switch (skb->pkt_type) {
+switch (bt_cb(skb)->pkt_type) {
 case HCI_COMMAND_PKT:
 hdev->stat.cmd_tx++;
 break;
@@ -452,7 +452,7 @@ static int bt3c_hci_send_frame(struct sk_buff *skb)
 };
 
 /* Prepend skb with frame type */
-memcpy(skb_push(skb, 1), &(skb->pkt_type), 1);
+memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
 skb_queue_tail(&(info->txq), skb);
 
 spin_lock_irqsave(&(info->lock), flags);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index e4c59fdc0e12..89486ea7a021 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -211,9 +211,9 @@ static void btuart_receive(btuart_info_t *info)
 if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
 
 info->rx_skb->dev = (void *) info->hdev;
-info->rx_skb->pkt_type = inb(iobase + UART_RX);
+bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX);
 
-switch (info->rx_skb->pkt_type) {
+switch (bt_cb(info->rx_skb)->pkt_type) {
 
 case HCI_EVENT_PKT:
 info->rx_state = RECV_WAIT_EVENT_HEADER;
@@ -232,7 +232,7 @@ static void btuart_receive(btuart_info_t *info)
 
 default:
 /* Unknown packet */
-BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type);
+BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
 info->hdev->stat.err_rx++;
 clear_bit(HCI_RUNNING, &(info->hdev->flags));
 
@@ -447,7 +447,7 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
 
 info = (btuart_info_t *)(hdev->driver_data);
 
-switch (skb->pkt_type) {
+switch (bt_cb(skb)->pkt_type) {
 case HCI_COMMAND_PKT:
 hdev->stat.cmd_tx++;
 break;
@@ -460,7 +460,7 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
 };
 
 /* Prepend skb with frame type */
-memcpy(skb_push(skb, 1), &(skb->pkt_type), 1);
+memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
 skb_queue_tail(&(info->txq), skb);
 
 btuart_write_wakeup(info);
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index e39868c3da48..84c1f8839422 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -251,7 +251,7 @@ static void dtl1_receive(dtl1_info_t *info)
 info->rx_count = nsh->len + (nsh->len & 0x0001);
 break;
 case RECV_WAIT_DATA:
-info->rx_skb->pkt_type = nsh->type;
+bt_cb(info->rx_skb)->pkt_type = nsh->type;
 
 /* remove PAD byte if it exists */
 if (nsh->len & 0x0001) {
@@ -262,7 +262,7 @@ static void dtl1_receive(dtl1_info_t *info)
 /* remove NSH */
 skb_pull(info->rx_skb, NSHL);
 
-switch (info->rx_skb->pkt_type) {
+switch (bt_cb(info->rx_skb)->pkt_type) {
 case 0x80:
 /* control data for the Nokia Card */
 dtl1_control(info, info->rx_skb);
@@ -272,12 +272,12 @@ static void dtl1_receive(dtl1_info_t *info)
272 case 0x84: 272 case 0x84:
273 /* send frame to the HCI layer */ 273 /* send frame to the HCI layer */
274 info->rx_skb->dev = (void *) info->hdev; 274 info->rx_skb->dev = (void *) info->hdev;
275 info->rx_skb->pkt_type &= 0x0f; 275 bt_cb(info->rx_skb)->pkt_type &= 0x0f;
276 hci_recv_frame(info->rx_skb); 276 hci_recv_frame(info->rx_skb);
277 break; 277 break;
278 default: 278 default:
279 /* unknown packet */ 279 /* unknown packet */
280 BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type); 280 BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
281 kfree_skb(info->rx_skb); 281 kfree_skb(info->rx_skb);
282 break; 282 break;
283 } 283 }
@@ -410,7 +410,7 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
410 410
411 info = (dtl1_info_t *)(hdev->driver_data); 411 info = (dtl1_info_t *)(hdev->driver_data);
412 412
413 switch (skb->pkt_type) { 413 switch (bt_cb(skb)->pkt_type) {
414 case HCI_COMMAND_PKT: 414 case HCI_COMMAND_PKT:
415 hdev->stat.cmd_tx++; 415 hdev->stat.cmd_tx++;
416 nsh.type = 0x81; 416 nsh.type = 0x81;
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 858fddb046de..0ee324e1265d 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -149,7 +149,7 @@ static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb)
149 return 0; 149 return 0;
150 } 150 }
151 151
152 switch (skb->pkt_type) { 152 switch (bt_cb(skb)->pkt_type) {
153 case HCI_ACLDATA_PKT: 153 case HCI_ACLDATA_PKT:
154 case HCI_COMMAND_PKT: 154 case HCI_COMMAND_PKT:
155 skb_queue_tail(&bcsp->rel, skb); 155 skb_queue_tail(&bcsp->rel, skb);
@@ -227,7 +227,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
227 if (!nskb) 227 if (!nskb)
228 return NULL; 228 return NULL;
229 229
230 nskb->pkt_type = pkt_type; 230 bt_cb(nskb)->pkt_type = pkt_type;
231 231
232 bcsp_slip_msgdelim(nskb); 232 bcsp_slip_msgdelim(nskb);
233 233
@@ -286,7 +286,7 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
286 since they have priority */ 286 since they have priority */
287 287
288 if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) { 288 if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) {
289 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, skb->pkt_type); 289 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
290 if (nskb) { 290 if (nskb) {
291 kfree_skb(skb); 291 kfree_skb(skb);
292 return nskb; 292 return nskb;
@@ -303,7 +303,7 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
303 spin_lock_irqsave(&bcsp->unack.lock, flags); 303 spin_lock_irqsave(&bcsp->unack.lock, flags);
304 304
305 if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) { 305 if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) {
306 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, skb->pkt_type); 306 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
307 if (nskb) { 307 if (nskb) {
308 __skb_queue_tail(&bcsp->unack, skb); 308 __skb_queue_tail(&bcsp->unack, skb);
309 mod_timer(&bcsp->tbcsp, jiffies + HZ / 4); 309 mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
@@ -401,7 +401,7 @@ static void bcsp_handle_le_pkt(struct hci_uart *hu)
401 if (!nskb) 401 if (!nskb)
402 return; 402 return;
403 memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4); 403 memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4);
404 nskb->pkt_type = BCSP_LE_PKT; 404 bt_cb(nskb)->pkt_type = BCSP_LE_PKT;
405 405
406 skb_queue_head(&bcsp->unrel, nskb); 406 skb_queue_head(&bcsp->unrel, nskb);
407 hci_uart_tx_wakeup(hu); 407 hci_uart_tx_wakeup(hu);
@@ -483,14 +483,14 @@ static inline void bcsp_complete_rx_pkt(struct hci_uart *hu)
483 bcsp_pkt_cull(bcsp); 483 bcsp_pkt_cull(bcsp);
484 if ((bcsp->rx_skb->data[1] & 0x0f) == 6 && 484 if ((bcsp->rx_skb->data[1] & 0x0f) == 6 &&
485 bcsp->rx_skb->data[0] & 0x80) { 485 bcsp->rx_skb->data[0] & 0x80) {
486 bcsp->rx_skb->pkt_type = HCI_ACLDATA_PKT; 486 bt_cb(bcsp->rx_skb)->pkt_type = HCI_ACLDATA_PKT;
487 pass_up = 1; 487 pass_up = 1;
488 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 && 488 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 &&
489 bcsp->rx_skb->data[0] & 0x80) { 489 bcsp->rx_skb->data[0] & 0x80) {
490 bcsp->rx_skb->pkt_type = HCI_EVENT_PKT; 490 bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
491 pass_up = 1; 491 pass_up = 1;
492 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) { 492 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) {
493 bcsp->rx_skb->pkt_type = HCI_SCODATA_PKT; 493 bt_cb(bcsp->rx_skb)->pkt_type = HCI_SCODATA_PKT;
494 pass_up = 1; 494 pass_up = 1;
495 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 && 495 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 &&
496 !(bcsp->rx_skb->data[0] & 0x80)) { 496 !(bcsp->rx_skb->data[0] & 0x80)) {
@@ -512,7 +512,7 @@ static inline void bcsp_complete_rx_pkt(struct hci_uart *hu)
512 hdr.evt = 0xff; 512 hdr.evt = 0xff;
513 hdr.plen = bcsp->rx_skb->len; 513 hdr.plen = bcsp->rx_skb->len;
514 memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE); 514 memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE);
515 bcsp->rx_skb->pkt_type = HCI_EVENT_PKT; 515 bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
516 516
517 hci_recv_frame(bcsp->rx_skb); 517 hci_recv_frame(bcsp->rx_skb);
518 } else { 518 } else {
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 533323b60e63..cf8a22d58d96 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -112,7 +112,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
112 BT_DBG("hu %p skb %p", hu, skb); 112 BT_DBG("hu %p skb %p", hu, skb);
113 113
114 /* Prepend skb with frame type */ 114 /* Prepend skb with frame type */
115 memcpy(skb_push(skb, 1), &skb->pkt_type, 1); 115 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
116 skb_queue_tail(&h4->txq, skb); 116 skb_queue_tail(&h4->txq, skb);
117 return 0; 117 return 0;
118} 118}
@@ -239,7 +239,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
239 return 0; 239 return 0;
240 } 240 }
241 h4->rx_skb->dev = (void *) hu->hdev; 241 h4->rx_skb->dev = (void *) hu->hdev;
242 h4->rx_skb->pkt_type = type; 242 bt_cb(h4->rx_skb)->pkt_type = type;
243 } 243 }
244 return count; 244 return count;
245} 245}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 90be2eae52e0..aed80cc22890 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -153,7 +153,7 @@ restart:
153 break; 153 break;
154 } 154 }
155 155
156 hci_uart_tx_complete(hu, skb->pkt_type); 156 hci_uart_tx_complete(hu, bt_cb(skb)->pkt_type);
157 kfree_skb(skb); 157 kfree_skb(skb);
158 } 158 }
159 159
@@ -229,7 +229,7 @@ static int hci_uart_send_frame(struct sk_buff *skb)
229 hu = (struct hci_uart *) hdev->driver_data; 229 hu = (struct hci_uart *) hdev->driver_data;
230 tty = hu->tty; 230 tty = hu->tty;
231 231
232 BT_DBG("%s: type %d len %d", hdev->name, skb->pkt_type, skb->len); 232 BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
233 233
234 hu->proto->enqueue(hu, skb); 234 hu->proto->enqueue(hu, skb);
235 235
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 657719b8254f..67d96b5cbb96 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -127,7 +127,7 @@ static struct usb_device_id blacklist_ids[] = {
127 { } /* Terminating entry */ 127 { } /* Terminating entry */
128}; 128};
129 129
130static struct _urb *_urb_alloc(int isoc, int gfp) 130static struct _urb *_urb_alloc(int isoc, unsigned int __nocast gfp)
131{ 131{
132 struct _urb *_urb = kmalloc(sizeof(struct _urb) + 132 struct _urb *_urb = kmalloc(sizeof(struct _urb) +
133 sizeof(struct usb_iso_packet_descriptor) * isoc, gfp); 133 sizeof(struct usb_iso_packet_descriptor) * isoc, gfp);
@@ -443,7 +443,7 @@ static int __tx_submit(struct hci_usb *husb, struct _urb *_urb)
443 443
444static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb) 444static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb)
445{ 445{
446 struct _urb *_urb = __get_completed(husb, skb->pkt_type); 446 struct _urb *_urb = __get_completed(husb, bt_cb(skb)->pkt_type);
447 struct usb_ctrlrequest *dr; 447 struct usb_ctrlrequest *dr;
448 struct urb *urb; 448 struct urb *urb;
449 449
@@ -451,7 +451,7 @@ static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb)
451 _urb = _urb_alloc(0, GFP_ATOMIC); 451 _urb = _urb_alloc(0, GFP_ATOMIC);
452 if (!_urb) 452 if (!_urb)
453 return -ENOMEM; 453 return -ENOMEM;
454 _urb->type = skb->pkt_type; 454 _urb->type = bt_cb(skb)->pkt_type;
455 455
456 dr = kmalloc(sizeof(*dr), GFP_ATOMIC); 456 dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
457 if (!dr) { 457 if (!dr) {
@@ -479,7 +479,7 @@ static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb)
479 479
480static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb) 480static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb)
481{ 481{
482 struct _urb *_urb = __get_completed(husb, skb->pkt_type); 482 struct _urb *_urb = __get_completed(husb, bt_cb(skb)->pkt_type);
483 struct urb *urb; 483 struct urb *urb;
484 int pipe; 484 int pipe;
485 485
@@ -487,7 +487,7 @@ static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb)
487 _urb = _urb_alloc(0, GFP_ATOMIC); 487 _urb = _urb_alloc(0, GFP_ATOMIC);
488 if (!_urb) 488 if (!_urb)
489 return -ENOMEM; 489 return -ENOMEM;
490 _urb->type = skb->pkt_type; 490 _urb->type = bt_cb(skb)->pkt_type;
491 } 491 }
492 492
493 urb = &_urb->urb; 493 urb = &_urb->urb;
@@ -505,14 +505,14 @@ static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb)
505#ifdef CONFIG_BT_HCIUSB_SCO 505#ifdef CONFIG_BT_HCIUSB_SCO
506static inline int hci_usb_send_isoc(struct hci_usb *husb, struct sk_buff *skb) 506static inline int hci_usb_send_isoc(struct hci_usb *husb, struct sk_buff *skb)
507{ 507{
508 struct _urb *_urb = __get_completed(husb, skb->pkt_type); 508 struct _urb *_urb = __get_completed(husb, bt_cb(skb)->pkt_type);
509 struct urb *urb; 509 struct urb *urb;
510 510
511 if (!_urb) { 511 if (!_urb) {
512 _urb = _urb_alloc(HCI_MAX_ISOC_FRAMES, GFP_ATOMIC); 512 _urb = _urb_alloc(HCI_MAX_ISOC_FRAMES, GFP_ATOMIC);
513 if (!_urb) 513 if (!_urb)
514 return -ENOMEM; 514 return -ENOMEM;
515 _urb->type = skb->pkt_type; 515 _urb->type = bt_cb(skb)->pkt_type;
516 } 516 }
517 517
518 BT_DBG("%s skb %p len %d", husb->hdev->name, skb, skb->len); 518 BT_DBG("%s skb %p len %d", husb->hdev->name, skb, skb->len);
@@ -601,11 +601,11 @@ static int hci_usb_send_frame(struct sk_buff *skb)
601 if (!test_bit(HCI_RUNNING, &hdev->flags)) 601 if (!test_bit(HCI_RUNNING, &hdev->flags))
602 return -EBUSY; 602 return -EBUSY;
603 603
604 BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len); 604 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
605 605
606 husb = (struct hci_usb *) hdev->driver_data; 606 husb = (struct hci_usb *) hdev->driver_data;
607 607
608 switch (skb->pkt_type) { 608 switch (bt_cb(skb)->pkt_type) {
609 case HCI_COMMAND_PKT: 609 case HCI_COMMAND_PKT:
610 hdev->stat.cmd_tx++; 610 hdev->stat.cmd_tx++;
611 break; 611 break;
@@ -627,7 +627,7 @@ static int hci_usb_send_frame(struct sk_buff *skb)
627 627
628 read_lock(&husb->completion_lock); 628 read_lock(&husb->completion_lock);
629 629
630 skb_queue_tail(__transmit_q(husb, skb->pkt_type), skb); 630 skb_queue_tail(__transmit_q(husb, bt_cb(skb)->pkt_type), skb);
631 hci_usb_tx_wakeup(husb); 631 hci_usb_tx_wakeup(husb);
632 632
633 read_unlock(&husb->completion_lock); 633 read_unlock(&husb->completion_lock);
@@ -682,7 +682,7 @@ static inline int __recv_frame(struct hci_usb *husb, int type, void *data, int c
682 return -ENOMEM; 682 return -ENOMEM;
683 } 683 }
684 skb->dev = (void *) husb->hdev; 684 skb->dev = (void *) husb->hdev;
685 skb->pkt_type = type; 685 bt_cb(skb)->pkt_type = type;
686 686
687 __reassembly(husb, type) = skb; 687 __reassembly(husb, type) = skb;
688 688
@@ -702,6 +702,7 @@ static inline int __recv_frame(struct hci_usb *husb, int type, void *data, int c
702 if (!scb->expect) { 702 if (!scb->expect) {
703 /* Complete frame */ 703 /* Complete frame */
704 __reassembly(husb, type) = NULL; 704 __reassembly(husb, type) = NULL;
705 bt_cb(skb)->pkt_type = type;
705 hci_recv_frame(skb); 706 hci_recv_frame(skb);
706 } 707 }
707 708
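
Note: the Bluetooth hunks above all move the HCI packet type out of the generic skb->pkt_type field and into the skb control block. A minimal sketch of that idiom, assuming a simplified bt_skb_cb layout along the lines of net/bluetooth/bluetooth.h from this era (the struct shown here is illustrative, not the full definition):

	struct bt_skb_cb {
		__u8 pkt_type;	/* HCI_COMMAND_PKT, HCI_ACLDATA_PKT, HCI_SCODATA_PKT, HCI_EVENT_PKT */
		__u8 incoming;
	};
	#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))

	/* drivers then tag frames via the control block instead of skb->pkt_type: */
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);	/* H4-style framing */
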
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index f9b956fb2b8b..52cbd45c308f 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -1,229 +1,220 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* 1/*
26 * Bluetooth HCI virtual device driver.
27 * 2 *
28 * $Id: hci_vhci.c,v 1.3 2002/04/17 17:37:20 maxk Exp $ 3 * Bluetooth virtual HCI driver
4 *
5 * Copyright (C) 2000-2001 Qualcomm Incorporated
6 * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
7 * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org>
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
29 */ 24 */
30#define VERSION "1.1"
31 25
32#include <linux/config.h> 26#include <linux/config.h>
33#include <linux/module.h> 27#include <linux/module.h>
34 28
35#include <linux/errno.h>
36#include <linux/kernel.h> 29#include <linux/kernel.h>
37#include <linux/major.h> 30#include <linux/init.h>
38#include <linux/sched.h>
39#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/sched.h>
40#include <linux/poll.h> 35#include <linux/poll.h>
41#include <linux/fcntl.h>
42#include <linux/init.h>
43#include <linux/random.h>
44 36
45#include <linux/skbuff.h> 37#include <linux/skbuff.h>
46#include <linux/miscdevice.h> 38#include <linux/miscdevice.h>
47 39
48#include <asm/system.h>
49#include <asm/uaccess.h>
50
51#include <net/bluetooth/bluetooth.h> 40#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h> 41#include <net/bluetooth/hci_core.h>
53#include "hci_vhci.h"
54 42
55/* HCI device part */ 43#ifndef CONFIG_BT_HCIVHCI_DEBUG
44#undef BT_DBG
45#define BT_DBG(D...)
46#endif
47
48#define VERSION "1.2"
49
50static int minor = MISC_DYNAMIC_MINOR;
51
52struct vhci_data {
53 struct hci_dev *hdev;
54
55 unsigned long flags;
56
57 wait_queue_head_t read_wait;
58 struct sk_buff_head readq;
59
60 struct fasync_struct *fasync;
61};
56 62
57static int hci_vhci_open(struct hci_dev *hdev) 63#define VHCI_FASYNC 0x0010
64
65static struct miscdevice vhci_miscdev;
66
67static int vhci_open_dev(struct hci_dev *hdev)
58{ 68{
59 set_bit(HCI_RUNNING, &hdev->flags); 69 set_bit(HCI_RUNNING, &hdev->flags);
60 return 0;
61}
62 70
63static int hci_vhci_flush(struct hci_dev *hdev)
64{
65 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) hdev->driver_data;
66 skb_queue_purge(&hci_vhci->readq);
67 return 0; 71 return 0;
68} 72}
69 73
70static int hci_vhci_close(struct hci_dev *hdev) 74static int vhci_close_dev(struct hci_dev *hdev)
71{ 75{
76 struct vhci_data *vhci = hdev->driver_data;
77
72 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 78 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
73 return 0; 79 return 0;
74 80
75 hci_vhci_flush(hdev); 81 skb_queue_purge(&vhci->readq);
82
76 return 0; 83 return 0;
77} 84}
78 85
79static void hci_vhci_destruct(struct hci_dev *hdev) 86static int vhci_flush(struct hci_dev *hdev)
80{ 87{
81 struct hci_vhci_struct *vhci; 88 struct vhci_data *vhci = hdev->driver_data;
82 89
83 if (!hdev) return; 90 skb_queue_purge(&vhci->readq);
84 91
85 vhci = (struct hci_vhci_struct *) hdev->driver_data; 92 return 0;
86 kfree(vhci);
87} 93}
88 94
89static int hci_vhci_send_frame(struct sk_buff *skb) 95static int vhci_send_frame(struct sk_buff *skb)
90{ 96{
91 struct hci_dev* hdev = (struct hci_dev *) skb->dev; 97 struct hci_dev* hdev = (struct hci_dev *) skb->dev;
92 struct hci_vhci_struct *hci_vhci; 98 struct vhci_data *vhci;
93 99
94 if (!hdev) { 100 if (!hdev) {
95 BT_ERR("Frame for uknown device (hdev=NULL)"); 101 BT_ERR("Frame for unknown HCI device (hdev=NULL)");
96 return -ENODEV; 102 return -ENODEV;
97 } 103 }
98 104
99 if (!test_bit(HCI_RUNNING, &hdev->flags)) 105 if (!test_bit(HCI_RUNNING, &hdev->flags))
100 return -EBUSY; 106 return -EBUSY;
101 107
102 hci_vhci = (struct hci_vhci_struct *) hdev->driver_data; 108 vhci = hdev->driver_data;
109
110 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
111 skb_queue_tail(&vhci->readq, skb);
103 112
104 memcpy(skb_push(skb, 1), &skb->pkt_type, 1); 113 if (vhci->flags & VHCI_FASYNC)
105 skb_queue_tail(&hci_vhci->readq, skb); 114 kill_fasync(&vhci->fasync, SIGIO, POLL_IN);
106 115
107 if (hci_vhci->flags & VHCI_FASYNC) 116 wake_up_interruptible(&vhci->read_wait);
108 kill_fasync(&hci_vhci->fasync, SIGIO, POLL_IN);
109 wake_up_interruptible(&hci_vhci->read_wait);
110 117
111 return 0; 118 return 0;
112} 119}
113 120
114/* Character device part */ 121static void vhci_destruct(struct hci_dev *hdev)
115 122{
116/* Poll */ 123 kfree(hdev->driver_data);
117static unsigned int hci_vhci_chr_poll(struct file *file, poll_table * wait)
118{
119 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data;
120
121 poll_wait(file, &hci_vhci->read_wait, wait);
122
123 if (!skb_queue_empty(&hci_vhci->readq))
124 return POLLIN | POLLRDNORM;
125
126 return POLLOUT | POLLWRNORM;
127} 124}
128 125
129/* Get packet from user space buffer(already verified) */ 126static inline ssize_t vhci_get_user(struct vhci_data *vhci,
130static inline ssize_t hci_vhci_get_user(struct hci_vhci_struct *hci_vhci, const char __user *buf, size_t count) 127 const char __user *buf, size_t count)
131{ 128{
132 struct sk_buff *skb; 129 struct sk_buff *skb;
133 130
134 if (count > HCI_MAX_FRAME_SIZE) 131 if (count > HCI_MAX_FRAME_SIZE)
135 return -EINVAL; 132 return -EINVAL;
136 133
137 if (!(skb = bt_skb_alloc(count, GFP_KERNEL))) 134 skb = bt_skb_alloc(count, GFP_KERNEL);
135 if (!skb)
138 return -ENOMEM; 136 return -ENOMEM;
139 137
140 if (copy_from_user(skb_put(skb, count), buf, count)) { 138 if (copy_from_user(skb_put(skb, count), buf, count)) {
141 kfree_skb(skb); 139 kfree_skb(skb);
142 return -EFAULT; 140 return -EFAULT;
143 } 141 }
144 142
145 skb->dev = (void *) hci_vhci->hdev; 143 skb->dev = (void *) vhci->hdev;
146 skb->pkt_type = *((__u8 *) skb->data); 144 bt_cb(skb)->pkt_type = *((__u8 *) skb->data);
147 skb_pull(skb, 1); 145 skb_pull(skb, 1);
148 146
149 hci_recv_frame(skb); 147 hci_recv_frame(skb);
150 148
151 return count; 149 return count;
152}
153
154/* Write */
155static ssize_t hci_vhci_chr_write(struct file * file, const char __user * buf,
156 size_t count, loff_t *pos)
157{
158 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data;
159
160 if (!access_ok(VERIFY_READ, buf, count))
161 return -EFAULT;
162
163 return hci_vhci_get_user(hci_vhci, buf, count);
164} 150}
165 151
166/* Put packet to user space buffer(already verified) */ 152static inline ssize_t vhci_put_user(struct vhci_data *vhci,
167static inline ssize_t hci_vhci_put_user(struct hci_vhci_struct *hci_vhci, 153 struct sk_buff *skb, char __user *buf, int count)
168 struct sk_buff *skb, char __user *buf,
169 int count)
170{ 154{
171 int len = count, total = 0;
172 char __user *ptr = buf; 155 char __user *ptr = buf;
156 int len, total = 0;
157
158 len = min_t(unsigned int, skb->len, count);
173 159
174 len = min_t(unsigned int, skb->len, len);
175 if (copy_to_user(ptr, skb->data, len)) 160 if (copy_to_user(ptr, skb->data, len))
176 return -EFAULT; 161 return -EFAULT;
162
177 total += len; 163 total += len;
178 164
179 hci_vhci->hdev->stat.byte_tx += len; 165 vhci->hdev->stat.byte_tx += len;
180 switch (skb->pkt_type) {
181 case HCI_COMMAND_PKT:
182 hci_vhci->hdev->stat.cmd_tx++;
183 break;
184 166
185 case HCI_ACLDATA_PKT: 167 switch (bt_cb(skb)->pkt_type) {
186 hci_vhci->hdev->stat.acl_tx++; 168 case HCI_COMMAND_PKT:
187 break; 169 vhci->hdev->stat.cmd_tx++;
170 break;
171
172 case HCI_ACLDATA_PKT:
173 vhci->hdev->stat.acl_tx++;
174 break;
188 175
189 case HCI_SCODATA_PKT: 176 case HCI_SCODATA_PKT:
190 hci_vhci->hdev->stat.cmd_tx++; 177 vhci->hdev->stat.cmd_tx++;
191 break; 178 break;
192 }; 179 };
193 180
194 return total; 181 return total;
195} 182}
196 183
197/* Read */ 184static loff_t vhci_llseek(struct file * file, loff_t offset, int origin)
198static ssize_t hci_vhci_chr_read(struct file * file, char __user * buf, size_t count, loff_t *pos) 185{
186 return -ESPIPE;
187}
188
189static ssize_t vhci_read(struct file * file, char __user * buf, size_t count, loff_t *pos)
199{ 190{
200 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data;
201 DECLARE_WAITQUEUE(wait, current); 191 DECLARE_WAITQUEUE(wait, current);
192 struct vhci_data *vhci = file->private_data;
202 struct sk_buff *skb; 193 struct sk_buff *skb;
203 ssize_t ret = 0; 194 ssize_t ret = 0;
204 195
205 add_wait_queue(&hci_vhci->read_wait, &wait); 196 add_wait_queue(&vhci->read_wait, &wait);
206 while (count) { 197 while (count) {
207 set_current_state(TASK_INTERRUPTIBLE); 198 set_current_state(TASK_INTERRUPTIBLE);
208 199
209 /* Read frames from device queue */ 200 skb = skb_dequeue(&vhci->readq);
210 if (!(skb = skb_dequeue(&hci_vhci->readq))) { 201 if (!skb) {
211 if (file->f_flags & O_NONBLOCK) { 202 if (file->f_flags & O_NONBLOCK) {
212 ret = -EAGAIN; 203 ret = -EAGAIN;
213 break; 204 break;
214 } 205 }
206
215 if (signal_pending(current)) { 207 if (signal_pending(current)) {
216 ret = -ERESTARTSYS; 208 ret = -ERESTARTSYS;
217 break; 209 break;
218 } 210 }
219 211
220 /* Nothing to read, let's sleep */
221 schedule(); 212 schedule();
222 continue; 213 continue;
223 } 214 }
224 215
225 if (access_ok(VERIFY_WRITE, buf, count)) 216 if (access_ok(VERIFY_WRITE, buf, count))
226 ret = hci_vhci_put_user(hci_vhci, skb, buf, count); 217 ret = vhci_put_user(vhci, skb, buf, count);
227 else 218 else
228 ret = -EFAULT; 219 ret = -EFAULT;
229 220
@@ -231,84 +222,90 @@ static ssize_t hci_vhci_chr_read(struct file * file, char __user * buf, size_t c
231 break; 222 break;
232 } 223 }
233 set_current_state(TASK_RUNNING); 224 set_current_state(TASK_RUNNING);
234 remove_wait_queue(&hci_vhci->read_wait, &wait); 225 remove_wait_queue(&vhci->read_wait, &wait);
235 226
236 return ret; 227 return ret;
237} 228}
238 229
239static loff_t hci_vhci_chr_lseek(struct file * file, loff_t offset, int origin) 230static ssize_t vhci_write(struct file *file,
231 const char __user *buf, size_t count, loff_t *pos)
240{ 232{
241 return -ESPIPE; 233 struct vhci_data *vhci = file->private_data;
242}
243 234
244static int hci_vhci_chr_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 235 if (!access_ok(VERIFY_READ, buf, count))
245{ 236 return -EFAULT;
246 return -EINVAL; 237
238 return vhci_get_user(vhci, buf, count);
247} 239}
248 240
249static int hci_vhci_chr_fasync(int fd, struct file *file, int on) 241static unsigned int vhci_poll(struct file *file, poll_table *wait)
250{ 242{
251 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data; 243 struct vhci_data *vhci = file->private_data;
252 int ret;
253 244
254 if ((ret = fasync_helper(fd, file, on, &hci_vhci->fasync)) < 0) 245 poll_wait(file, &vhci->read_wait, wait);
255 return ret;
256
257 if (on)
258 hci_vhci->flags |= VHCI_FASYNC;
259 else
260 hci_vhci->flags &= ~VHCI_FASYNC;
261 246
262 return 0; 247 if (!skb_queue_empty(&vhci->readq))
248 return POLLIN | POLLRDNORM;
249
250 return POLLOUT | POLLWRNORM;
263} 251}
264 252
265static int hci_vhci_chr_open(struct inode *inode, struct file * file) 253static int vhci_ioctl(struct inode *inode, struct file *file,
254 unsigned int cmd, unsigned long arg)
266{ 255{
267 struct hci_vhci_struct *hci_vhci = NULL; 256 return -EINVAL;
257}
258
259static int vhci_open(struct inode *inode, struct file *file)
260{
261 struct vhci_data *vhci;
268 struct hci_dev *hdev; 262 struct hci_dev *hdev;
269 263
270 if (!(hci_vhci = kmalloc(sizeof(struct hci_vhci_struct), GFP_KERNEL))) 264 vhci = kmalloc(sizeof(struct vhci_data), GFP_KERNEL);
265 if (!vhci)
271 return -ENOMEM; 266 return -ENOMEM;
272 267
273 memset(hci_vhci, 0, sizeof(struct hci_vhci_struct)); 268 memset(vhci, 0, sizeof(struct vhci_data));
274 269
275 skb_queue_head_init(&hci_vhci->readq); 270 skb_queue_head_init(&vhci->readq);
276 init_waitqueue_head(&hci_vhci->read_wait); 271 init_waitqueue_head(&vhci->read_wait);
277 272
278 /* Initialize and register HCI device */
279 hdev = hci_alloc_dev(); 273 hdev = hci_alloc_dev();
280 if (!hdev) { 274 if (!hdev) {
281 kfree(hci_vhci); 275 kfree(vhci);
282 return -ENOMEM; 276 return -ENOMEM;
283 } 277 }
284 278
285 hci_vhci->hdev = hdev; 279 vhci->hdev = hdev;
286 280
287 hdev->type = HCI_VHCI; 281 hdev->type = HCI_VHCI;
288 hdev->driver_data = hci_vhci; 282 hdev->driver_data = vhci;
283 SET_HCIDEV_DEV(hdev, vhci_miscdev.dev);
289 284
290 hdev->open = hci_vhci_open; 285 hdev->open = vhci_open_dev;
291 hdev->close = hci_vhci_close; 286 hdev->close = vhci_close_dev;
292 hdev->flush = hci_vhci_flush; 287 hdev->flush = vhci_flush;
293 hdev->send = hci_vhci_send_frame; 288 hdev->send = vhci_send_frame;
294 hdev->destruct = hci_vhci_destruct; 289 hdev->destruct = vhci_destruct;
295 290
296 hdev->owner = THIS_MODULE; 291 hdev->owner = THIS_MODULE;
297 292
298 if (hci_register_dev(hdev) < 0) { 293 if (hci_register_dev(hdev) < 0) {
299 kfree(hci_vhci); 294 BT_ERR("Can't register HCI device");
295 kfree(vhci);
300 hci_free_dev(hdev); 296 hci_free_dev(hdev);
301 return -EBUSY; 297 return -EBUSY;
302 } 298 }
303 299
304 file->private_data = hci_vhci; 300 file->private_data = vhci;
305 return nonseekable_open(inode, file); 301
302 return nonseekable_open(inode, file);
306} 303}
307 304
308static int hci_vhci_chr_close(struct inode *inode, struct file *file) 305static int vhci_release(struct inode *inode, struct file *file)
309{ 306{
310 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data; 307 struct vhci_data *vhci = file->private_data;
311 struct hci_dev *hdev = hci_vhci->hdev; 308 struct hci_dev *hdev = vhci->hdev;
312 309
313 if (hci_unregister_dev(hdev) < 0) { 310 if (hci_unregister_dev(hdev) < 0) {
314 BT_ERR("Can't unregister HCI device %s", hdev->name); 311 BT_ERR("Can't unregister HCI device %s", hdev->name);
@@ -317,48 +314,71 @@ static int hci_vhci_chr_close(struct inode *inode, struct file *file)
317 hci_free_dev(hdev); 314 hci_free_dev(hdev);
318 315
319 file->private_data = NULL; 316 file->private_data = NULL;
317
320 return 0; 318 return 0;
321} 319}
322 320
323static struct file_operations hci_vhci_fops = { 321static int vhci_fasync(int fd, struct file *file, int on)
324 .owner = THIS_MODULE, 322{
325 .llseek = hci_vhci_chr_lseek, 323 struct vhci_data *vhci = file->private_data;
326 .read = hci_vhci_chr_read, 324 int err;
327 .write = hci_vhci_chr_write, 325
328 .poll = hci_vhci_chr_poll, 326 err = fasync_helper(fd, file, on, &vhci->fasync);
329 .ioctl = hci_vhci_chr_ioctl, 327 if (err < 0)
330 .open = hci_vhci_chr_open, 328 return err;
331 .release = hci_vhci_chr_close, 329
332 .fasync = hci_vhci_chr_fasync 330 if (on)
331 vhci->flags |= VHCI_FASYNC;
332 else
333 vhci->flags &= ~VHCI_FASYNC;
334
335 return 0;
336}
337
338static struct file_operations vhci_fops = {
339 .owner = THIS_MODULE,
340 .llseek = vhci_llseek,
341 .read = vhci_read,
342 .write = vhci_write,
343 .poll = vhci_poll,
344 .ioctl = vhci_ioctl,
345 .open = vhci_open,
346 .release = vhci_release,
347 .fasync = vhci_fasync,
333}; 348};
334 349
335static struct miscdevice hci_vhci_miscdev= 350static struct miscdevice vhci_miscdev= {
336{ 351 .name = "vhci",
337 VHCI_MINOR, 352 .fops = &vhci_fops,
338 "hci_vhci",
339 &hci_vhci_fops
340}; 353};
341 354
342static int __init hci_vhci_init(void) 355static int __init vhci_init(void)
343{ 356{
344 BT_INFO("VHCI driver ver %s", VERSION); 357 BT_INFO("Virtual HCI driver ver %s", VERSION);
345 358
346 if (misc_register(&hci_vhci_miscdev)) { 359 vhci_miscdev.minor = minor;
347 BT_ERR("Can't register misc device %d\n", VHCI_MINOR); 360
361 if (misc_register(&vhci_miscdev) < 0) {
362 BT_ERR("Can't register misc device with minor %d", minor);
348 return -EIO; 363 return -EIO;
349 } 364 }
350 365
351 return 0; 366 return 0;
352} 367}
353 368
354static void hci_vhci_cleanup(void) 369static void __exit vhci_exit(void)
355{ 370{
356 misc_deregister(&hci_vhci_miscdev); 371 if (misc_deregister(&vhci_miscdev) < 0)
372 BT_ERR("Can't unregister misc device with minor %d", minor);
357} 373}
358 374
359module_init(hci_vhci_init); 375module_init(vhci_init);
360module_exit(hci_vhci_cleanup); 376module_exit(vhci_exit);
377
378module_param(minor, int, 0444);
379MODULE_PARM_DESC(minor, "Miscellaneous minor device number");
361 380
362MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>"); 381MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
363MODULE_DESCRIPTION("Bluetooth VHCI driver ver " VERSION); 382MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
364MODULE_LICENSE("GPL"); 383MODULE_VERSION(VERSION);
384MODULE_LICENSE("GPL");
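
For context on what the rewritten driver exposes: every frame crossing the character device is a one-byte HCI packet type followed by the packet itself, as vhci_get_user() and vhci_send_frame() above show. A hedged userspace sketch (the /dev/vhci path and the event bytes are illustrative assumptions, not taken from this patch):

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/vhci", O_RDWR);	/* misc node created by this driver */
		if (fd < 0)
			return 1;

		/* Inject an HCI event: 0x04 = HCI_EVENT_PKT, then the event payload. */
		uint8_t evt[] = { 0x04, 0x0e, 0x04, 0x01, 0x03, 0x0c, 0x00 };
		if (write(fd, evt, sizeof(evt)) < 0)
			return 1;

		/* Frames queued by the stack come back the same way: type byte first. */
		uint8_t buf[260];
		read(fd, buf, sizeof(buf));

		close(fd);
		return 0;
	}
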
diff --git a/drivers/bluetooth/hci_vhci.h b/drivers/bluetooth/hci_vhci.h
deleted file mode 100644
index 53b11f9ef76d..000000000000
--- a/drivers/bluetooth/hci_vhci.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/*
26 * $Id: hci_vhci.h,v 1.1.1.1 2002/03/08 21:03:15 maxk Exp $
27 */
28
29#ifndef __HCI_VHCI_H
30#define __HCI_VHCI_H
31
32#ifdef __KERNEL__
33
34struct hci_vhci_struct {
35 struct hci_dev *hdev;
36 __u32 flags;
37 wait_queue_head_t read_wait;
38 struct sk_buff_head readq;
39 struct fasync_struct *fasync;
40};
41
42/* VHCI device flags */
43#define VHCI_FASYNC 0x0010
44
45#endif /* __KERNEL__ */
46
47#define VHCI_DEV "/dev/vhci"
48#define VHCI_MINOR 250
49
50#endif /* __HCI_VHCI_H */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6b11d6b2129f..7999da25fe40 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1589,6 +1589,40 @@ u32 secure_tcpv6_port_ephemeral(const __u32 *saddr, const __u32 *daddr, __u16 dp
1589EXPORT_SYMBOL(secure_tcpv6_port_ephemeral); 1589EXPORT_SYMBOL(secure_tcpv6_port_ephemeral);
1590#endif 1590#endif
1591 1591
1592#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
1593/* Similar to secure_tcp_sequence_number but generates a 48 bit value
1594 * bits 32-47 increase every key exchange
1595 * 0-31 hash(source, dest)
1596 */
1597u64 secure_dccp_sequence_number(__u32 saddr, __u32 daddr,
1598 __u16 sport, __u16 dport)
1599{
1600 struct timeval tv;
1601 u64 seq;
1602 __u32 hash[4];
1603 struct keydata *keyptr = get_keyptr();
1604
1605 hash[0] = saddr;
1606 hash[1] = daddr;
1607 hash[2] = (sport << 16) + dport;
1608 hash[3] = keyptr->secret[11];
1609
1610 seq = half_md4_transform(hash, keyptr->secret);
1611 seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
1612
1613 do_gettimeofday(&tv);
1614 seq += tv.tv_usec + tv.tv_sec * 1000000;
1615 seq &= (1ull << 48) - 1;
1616#if 0
1617 printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
1618 saddr, daddr, sport, dport, seq);
1619#endif
1620 return seq;
1621}
1622
1623EXPORT_SYMBOL(secure_dccp_sequence_number);
1624#endif
1625
1592#endif /* CONFIG_INET */ 1626#endif /* CONFIG_INET */
1593 1627
1594 1628
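
The new secure_dccp_sequence_number() above builds its 48-bit value from three pieces; a hedged, self-contained sketch of that composition (names, the shift, and the hash input are illustrative, not half_md4_transform() or the kernel's HASH_BITS):

	#include <stdint.h>

	static uint64_t dccp_iss_sketch(uint32_t hash32, uint16_t rekey_count,
					uint64_t usec_now)
	{
		uint64_t seq = hash32;			/* bits 0-31: keyed hash of the 4-tuple */

		seq |= (uint64_t)rekey_count << 32;	/* bits 32-47: bumped on each rekey */
		seq += usec_now;			/* time-based offset, as in the code above */
		return seq & ((1ull << 48) - 1);	/* keep only 48 bits */
	}
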
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index b248d89de8b4..d633770fac8e 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -681,7 +681,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
681 return; 681 return;
682 } 682 }
683 683
684 __skb_unlink(skb, skb->list); 684 __skb_unlink(skb, &host->pending_packet_queue);
685 685
686 if (packet->state == hpsb_queued) { 686 if (packet->state == hpsb_queued) {
687 packet->sendtime = jiffies; 687 packet->sendtime = jiffies;
@@ -989,7 +989,7 @@ void abort_timedouts(unsigned long __opaque)
989 packet = (struct hpsb_packet *)skb->data; 989 packet = (struct hpsb_packet *)skb->data;
990 990
991 if (time_before(packet->sendtime + expire, jiffies)) { 991 if (time_before(packet->sendtime + expire, jiffies)) {
992 __skb_unlink(skb, skb->list); 992 __skb_unlink(skb, &host->pending_packet_queue);
993 packet->state = hpsb_complete; 993 packet->state = hpsb_complete;
994 packet->ack_code = ACKX_TIMEOUT; 994 packet->ack_code = ACKX_TIMEOUT;
995 queue_packet_complete(packet); 995 queue_packet_complete(packet);
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c
index afa46681f983..6ae6eb322111 100644
--- a/drivers/isdn/act2000/capi.c
+++ b/drivers/isdn/act2000/capi.c
@@ -606,7 +606,7 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) {
606 if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) && 606 if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) &&
607 (m->msg.data_b3_req.blocknr == blocknr)) { 607 (m->msg.data_b3_req.blocknr == blocknr)) {
608 /* found corresponding DATA_B3_REQ */ 608 /* found corresponding DATA_B3_REQ */
609 skb_unlink(tmp); 609 skb_unlink(tmp, &card->ackq);
610 chan->queued -= m->msg.data_b3_req.datalen; 610 chan->queued -= m->msg.data_b3_req.datalen;
611 if (m->msg.data_b3_req.flags) 611 if (m->msg.data_b3_req.flags)
612 ret = m->msg.data_b3_req.datalen; 612 ret = m->msg.data_b3_req.datalen;
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index f30e8e63ae0d..96c115e13389 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1786,7 +1786,6 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
1786 lp->stats.rx_bytes += skb->len; 1786 lp->stats.rx_bytes += skb->len;
1787 } 1787 }
1788 skb->dev = ndev; 1788 skb->dev = ndev;
1789 skb->input_dev = ndev;
1790 skb->pkt_type = PACKET_HOST; 1789 skb->pkt_type = PACKET_HOST;
1791 skb->mac.raw = skb->data; 1790 skb->mac.raw = skb->data;
1792#ifdef ISDN_DEBUG_NET_DUMP 1791#ifdef ISDN_DEBUG_NET_DUMP
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 260a323a96d3..d97a9be5469c 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1177,7 +1177,6 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
1177 mlp->huptimer = 0; 1177 mlp->huptimer = 0;
1178#endif /* CONFIG_IPPP_FILTER */ 1178#endif /* CONFIG_IPPP_FILTER */
1179 skb->dev = dev; 1179 skb->dev = dev;
1180 skb->input_dev = dev;
1181 skb->mac.raw = skb->data; 1180 skb->mac.raw = skb->data;
1182 netif_rx(skb); 1181 netif_rx(skb);
1183 /* net_dev->local->stats.rx_packets++; done in isdn_net.c */ 1182 /* net_dev->local->stats.rx_packets++; done in isdn_net.c */
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 8acc655ec1e8..7babf6af4e28 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -14,8 +14,8 @@
14 14
15#define DRV_MODULE_NAME "bnx2" 15#define DRV_MODULE_NAME "bnx2"
16#define PFX DRV_MODULE_NAME ": " 16#define PFX DRV_MODULE_NAME ": "
17#define DRV_MODULE_VERSION "1.2.19" 17#define DRV_MODULE_VERSION "1.2.20"
18#define DRV_MODULE_RELDATE "May 23, 2005" 18#define DRV_MODULE_RELDATE "August 22, 2005"
19 19
20#define RUN_AT(x) (jiffies + (x)) 20#define RUN_AT(x) (jiffies + (x))
21 21
@@ -52,7 +52,6 @@ static struct {
52 { "HP NC370i Multifunction Gigabit Server Adapter" }, 52 { "HP NC370i Multifunction Gigabit Server Adapter" },
53 { "Broadcom NetXtreme II BCM5706 1000Base-SX" }, 53 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
54 { "HP NC370F Multifunction Gigabit Server Adapter" }, 54 { "HP NC370F Multifunction Gigabit Server Adapter" },
55 { 0 },
56 }; 55 };
57 56
58static struct pci_device_id bnx2_pci_tbl[] = { 57static struct pci_device_id bnx2_pci_tbl[] = {
@@ -108,6 +107,15 @@ static struct flash_spec flash_table[] =
108 107
109MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); 108MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
110 109
110static inline u32 bnx2_tx_avail(struct bnx2 *bp)
111{
112 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
113
114 if (diff > MAX_TX_DESC_CNT)
115 diff = (diff & MAX_TX_DESC_CNT) - 1;
116 return (bp->tx_ring_size - diff);
117}
118
111static u32 119static u32
112bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) 120bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
113{ 121{
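
The new bnx2_tx_avail() derives the free descriptor count from the producer and consumer indices instead of the separate atomic counter removed later in this patch. A minimal sketch of that producer/consumer arithmetic on a generic power-of-two ring (the names and size are illustrative, not the bnx2 macros):

	#include <stdint.h>

	#define RING_SIZE 256u			/* power of two */
	#define RING_MASK (RING_SIZE - 1)

	static uint32_t ring_tx_avail(uint32_t prod, uint32_t cons)
	{
		uint32_t used = (prod - cons) & RING_MASK;	/* entries in flight */

		return (RING_SIZE - 1) - used;	/* keep one slot so prod == cons means empty */
	}
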
@@ -807,7 +815,19 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
807 bnx2_write_phy(bp, MII_ADVERTISE, new_adv); 815 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
808 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | 816 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
809 BMCR_ANENABLE); 817 BMCR_ANENABLE);
810 bp->serdes_an_pending = SERDES_AN_TIMEOUT / bp->timer_interval; 818 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
819 /* Speed up link-up time when the link partner
820 * does not autonegotiate which is very common
821 * in blade servers. Some blade servers use
822 * IPMI for keyboard input and it's important
823 * to minimize link disruptions. Autoneg. involves
824 * exchanging base pages plus 3 next pages and
825 * normally completes in about 120 msec.
826 */
827 bp->current_interval = SERDES_AN_TIMEOUT;
828 bp->serdes_an_pending = 1;
829 mod_timer(&bp->timer, jiffies + bp->current_interval);
830 }
811 } 831 }
812 832
813 return 0; 833 return 0;
@@ -1327,22 +1347,17 @@ bnx2_tx_int(struct bnx2 *bp)
1327 } 1347 }
1328 } 1348 }
1329 1349
1330 atomic_add(tx_free_bd, &bp->tx_avail_bd); 1350 bp->tx_cons = sw_cons;
1331 1351
1332 if (unlikely(netif_queue_stopped(bp->dev))) { 1352 if (unlikely(netif_queue_stopped(bp->dev))) {
1333 unsigned long flags; 1353 spin_lock(&bp->tx_lock);
1334
1335 spin_lock_irqsave(&bp->tx_lock, flags);
1336 if ((netif_queue_stopped(bp->dev)) && 1354 if ((netif_queue_stopped(bp->dev)) &&
1337 (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)) { 1355 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1338 1356
1339 netif_wake_queue(bp->dev); 1357 netif_wake_queue(bp->dev);
1340 } 1358 }
1341 spin_unlock_irqrestore(&bp->tx_lock, flags); 1359 spin_unlock(&bp->tx_lock);
1342 } 1360 }
1343
1344 bp->tx_cons = sw_cons;
1345
1346} 1361}
1347 1362
1348static inline void 1363static inline void
@@ -1523,15 +1538,12 @@ bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1523 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 1538 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1524 1539
1525 /* Return here if interrupt is disabled. */ 1540 /* Return here if interrupt is disabled. */
1526 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 1541 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1527 return IRQ_RETVAL(1); 1542 return IRQ_HANDLED;
1528 }
1529 1543
1530 if (netif_rx_schedule_prep(dev)) { 1544 netif_rx_schedule(dev);
1531 __netif_rx_schedule(dev);
1532 }
1533 1545
1534 return IRQ_RETVAL(1); 1546 return IRQ_HANDLED;
1535} 1547}
1536 1548
1537static irqreturn_t 1549static irqreturn_t
@@ -1549,22 +1561,19 @@ bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1549 if ((bp->status_blk->status_idx == bp->last_status_idx) || 1561 if ((bp->status_blk->status_idx == bp->last_status_idx) ||
1550 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & 1562 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1551 BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) 1563 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1552 return IRQ_RETVAL(0); 1564 return IRQ_NONE;
1553 1565
1554 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 1566 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1555 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 1567 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1556 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 1568 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1557 1569
1558 /* Return here if interrupt is shared and is disabled. */ 1570 /* Return here if interrupt is shared and is disabled. */
1559 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 1571 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1560 return IRQ_RETVAL(1); 1572 return IRQ_HANDLED;
1561 }
1562 1573
1563 if (netif_rx_schedule_prep(dev)) { 1574 netif_rx_schedule(dev);
1564 __netif_rx_schedule(dev);
1565 }
1566 1575
1567 return IRQ_RETVAL(1); 1576 return IRQ_HANDLED;
1568} 1577}
1569 1578
1570static int 1579static int
@@ -1581,11 +1590,9 @@ bnx2_poll(struct net_device *dev, int *budget)
1581 (bp->status_blk->status_attn_bits_ack & 1590 (bp->status_blk->status_attn_bits_ack &
1582 STATUS_ATTN_BITS_LINK_STATE)) { 1591 STATUS_ATTN_BITS_LINK_STATE)) {
1583 1592
1584 unsigned long flags; 1593 spin_lock(&bp->phy_lock);
1585
1586 spin_lock_irqsave(&bp->phy_lock, flags);
1587 bnx2_phy_int(bp); 1594 bnx2_phy_int(bp);
1588 spin_unlock_irqrestore(&bp->phy_lock, flags); 1595 spin_unlock(&bp->phy_lock);
1589 } 1596 }
1590 1597
1591 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_cons) { 1598 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_cons) {
@@ -1628,9 +1635,8 @@ bnx2_set_rx_mode(struct net_device *dev)
1628 struct bnx2 *bp = dev->priv; 1635 struct bnx2 *bp = dev->priv;
1629 u32 rx_mode, sort_mode; 1636 u32 rx_mode, sort_mode;
1630 int i; 1637 int i;
1631 unsigned long flags;
1632 1638
1633 spin_lock_irqsave(&bp->phy_lock, flags); 1639 spin_lock_bh(&bp->phy_lock);
1634 1640
1635 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | 1641 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
1636 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); 1642 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
@@ -1691,7 +1697,7 @@ bnx2_set_rx_mode(struct net_device *dev)
1691 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); 1697 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
1692 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); 1698 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
1693 1699
1694 spin_unlock_irqrestore(&bp->phy_lock, flags); 1700 spin_unlock_bh(&bp->phy_lock);
1695} 1701}
1696 1702
1697static void 1703static void
@@ -2960,7 +2966,6 @@ bnx2_init_tx_ring(struct bnx2 *bp)
2960 bp->tx_prod = 0; 2966 bp->tx_prod = 0;
2961 bp->tx_cons = 0; 2967 bp->tx_cons = 0;
2962 bp->tx_prod_bseq = 0; 2968 bp->tx_prod_bseq = 0;
2963 atomic_set(&bp->tx_avail_bd, bp->tx_ring_size);
2964 2969
2965 val = BNX2_L2CTX_TYPE_TYPE_L2; 2970 val = BNX2_L2CTX_TYPE_TYPE_L2;
2966 val |= BNX2_L2CTX_TYPE_SIZE_L2; 2971 val |= BNX2_L2CTX_TYPE_SIZE_L2;
@@ -3507,11 +3512,11 @@ bnx2_test_registers(struct bnx2 *bp)
3507 rw_mask = reg_tbl[i].rw_mask; 3512 rw_mask = reg_tbl[i].rw_mask;
3508 ro_mask = reg_tbl[i].ro_mask; 3513 ro_mask = reg_tbl[i].ro_mask;
3509 3514
3510 save_val = readl((u8 *) bp->regview + offset); 3515 save_val = readl(bp->regview + offset);
3511 3516
3512 writel(0, (u8 *) bp->regview + offset); 3517 writel(0, bp->regview + offset);
3513 3518
3514 val = readl((u8 *) bp->regview + offset); 3519 val = readl(bp->regview + offset);
3515 if ((val & rw_mask) != 0) { 3520 if ((val & rw_mask) != 0) {
3516 goto reg_test_err; 3521 goto reg_test_err;
3517 } 3522 }
@@ -3520,9 +3525,9 @@ bnx2_test_registers(struct bnx2 *bp)
3520 goto reg_test_err; 3525 goto reg_test_err;
3521 } 3526 }
3522 3527
3523 writel(0xffffffff, (u8 *) bp->regview + offset); 3528 writel(0xffffffff, bp->regview + offset);
3524 3529
3525 val = readl((u8 *) bp->regview + offset); 3530 val = readl(bp->regview + offset);
3526 if ((val & rw_mask) != rw_mask) { 3531 if ((val & rw_mask) != rw_mask) {
3527 goto reg_test_err; 3532 goto reg_test_err;
3528 } 3533 }
@@ -3531,11 +3536,11 @@ bnx2_test_registers(struct bnx2 *bp)
3531 goto reg_test_err; 3536 goto reg_test_err;
3532 } 3537 }
3533 3538
3534 writel(save_val, (u8 *) bp->regview + offset); 3539 writel(save_val, bp->regview + offset);
3535 continue; 3540 continue;
3536 3541
3537reg_test_err: 3542reg_test_err:
3538 writel(save_val, (u8 *) bp->regview + offset); 3543 writel(save_val, bp->regview + offset);
3539 ret = -ENODEV; 3544 ret = -ENODEV;
3540 break; 3545 break;
3541 } 3546 }
@@ -3752,10 +3757,10 @@ bnx2_test_link(struct bnx2 *bp)
3752{ 3757{
3753 u32 bmsr; 3758 u32 bmsr;
3754 3759
3755 spin_lock_irq(&bp->phy_lock); 3760 spin_lock_bh(&bp->phy_lock);
3756 bnx2_read_phy(bp, MII_BMSR, &bmsr); 3761 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3757 bnx2_read_phy(bp, MII_BMSR, &bmsr); 3762 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3758 spin_unlock_irq(&bp->phy_lock); 3763 spin_unlock_bh(&bp->phy_lock);
3759 3764
3760 if (bmsr & BMSR_LSTATUS) { 3765 if (bmsr & BMSR_LSTATUS) {
3761 return 0; 3766 return 0;
@@ -3801,6 +3806,9 @@ bnx2_timer(unsigned long data)
3801 struct bnx2 *bp = (struct bnx2 *) data; 3806 struct bnx2 *bp = (struct bnx2 *) data;
3802 u32 msg; 3807 u32 msg;
3803 3808
3809 if (!netif_running(bp->dev))
3810 return;
3811
3804 if (atomic_read(&bp->intr_sem) != 0) 3812 if (atomic_read(&bp->intr_sem) != 0)
3805 goto bnx2_restart_timer; 3813 goto bnx2_restart_timer;
3806 3814
@@ -3809,15 +3817,16 @@ bnx2_timer(unsigned long data)
3809 3817
3810 if ((bp->phy_flags & PHY_SERDES_FLAG) && 3818 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
3811 (CHIP_NUM(bp) == CHIP_NUM_5706)) { 3819 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
3812 unsigned long flags;
3813 3820
3814 spin_lock_irqsave(&bp->phy_lock, flags); 3821 spin_lock(&bp->phy_lock);
3815 if (bp->serdes_an_pending) { 3822 if (bp->serdes_an_pending) {
3816 bp->serdes_an_pending--; 3823 bp->serdes_an_pending--;
3817 } 3824 }
3818 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { 3825 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
3819 u32 bmcr; 3826 u32 bmcr;
3820 3827
3828 bp->current_interval = bp->timer_interval;
3829
3821 bnx2_read_phy(bp, MII_BMCR, &bmcr); 3830 bnx2_read_phy(bp, MII_BMCR, &bmcr);
3822 3831
3823 if (bmcr & BMCR_ANENABLE) { 3832 if (bmcr & BMCR_ANENABLE) {
@@ -3860,14 +3869,14 @@ bnx2_timer(unsigned long data)
3860 3869
3861 } 3870 }
3862 } 3871 }
3872 else
3873 bp->current_interval = bp->timer_interval;
3863 3874
3864 spin_unlock_irqrestore(&bp->phy_lock, flags); 3875 spin_unlock(&bp->phy_lock);
3865 } 3876 }
3866 3877
3867bnx2_restart_timer: 3878bnx2_restart_timer:
3868 bp->timer.expires = RUN_AT(bp->timer_interval); 3879 mod_timer(&bp->timer, jiffies + bp->current_interval);
3869
3870 add_timer(&bp->timer);
3871} 3880}
3872 3881
3873/* Called with rtnl_lock */ 3882/* Called with rtnl_lock */
@@ -3920,12 +3929,7 @@ bnx2_open(struct net_device *dev)
3920 return rc; 3929 return rc;
3921 } 3930 }
3922 3931
3923 init_timer(&bp->timer); 3932 mod_timer(&bp->timer, jiffies + bp->current_interval);
3924
3925 bp->timer.expires = RUN_AT(bp->timer_interval);
3926 bp->timer.data = (unsigned long) bp;
3927 bp->timer.function = bnx2_timer;
3928 add_timer(&bp->timer);
3929 3933
3930 atomic_set(&bp->intr_sem, 0); 3934 atomic_set(&bp->intr_sem, 0);
3931 3935
@@ -3976,12 +3980,17 @@ bnx2_reset_task(void *data)
3976{ 3980{
3977 struct bnx2 *bp = data; 3981 struct bnx2 *bp = data;
3978 3982
3983 if (!netif_running(bp->dev))
3984 return;
3985
3986 bp->in_reset_task = 1;
3979 bnx2_netif_stop(bp); 3987 bnx2_netif_stop(bp);
3980 3988
3981 bnx2_init_nic(bp); 3989 bnx2_init_nic(bp);
3982 3990
3983 atomic_set(&bp->intr_sem, 1); 3991 atomic_set(&bp->intr_sem, 1);
3984 bnx2_netif_start(bp); 3992 bnx2_netif_start(bp);
3993 bp->in_reset_task = 0;
3985} 3994}
3986 3995
3987static void 3996static void
@@ -4041,9 +4050,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4041 u16 prod, ring_prod; 4050 u16 prod, ring_prod;
4042 int i; 4051 int i;
4043 4052
4044 if (unlikely(atomic_read(&bp->tx_avail_bd) < 4053 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4045 (skb_shinfo(skb)->nr_frags + 1))) {
4046
4047 netif_stop_queue(dev); 4054 netif_stop_queue(dev);
4048 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n", 4055 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4049 dev->name); 4056 dev->name);
@@ -4140,8 +4147,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4140 prod = NEXT_TX_BD(prod); 4147 prod = NEXT_TX_BD(prod);
4141 bp->tx_prod_bseq += skb->len; 4148 bp->tx_prod_bseq += skb->len;
4142 4149
4143 atomic_sub(last_frag + 1, &bp->tx_avail_bd);
4144
4145 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod); 4150 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4146 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq); 4151 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4147 4152
@@ -4150,17 +4155,13 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4150 bp->tx_prod = prod; 4155 bp->tx_prod = prod;
4151 dev->trans_start = jiffies; 4156 dev->trans_start = jiffies;
4152 4157
4153 if (unlikely(atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS)) { 4158 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4154 unsigned long flags; 4159 spin_lock(&bp->tx_lock);
4155 4160 netif_stop_queue(dev);
4156 spin_lock_irqsave(&bp->tx_lock, flags); 4161
4157 if (atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS) { 4162 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4158 netif_stop_queue(dev); 4163 netif_wake_queue(dev);
4159 4164 spin_unlock(&bp->tx_lock);
4160 if (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)
4161 netif_wake_queue(dev);
4162 }
4163 spin_unlock_irqrestore(&bp->tx_lock, flags);
4164 } 4165 }
4165 4166
4166 return NETDEV_TX_OK; 4167 return NETDEV_TX_OK;
@@ -4173,7 +4174,13 @@ bnx2_close(struct net_device *dev)
4173 struct bnx2 *bp = dev->priv; 4174 struct bnx2 *bp = dev->priv;
4174 u32 reset_code; 4175 u32 reset_code;
4175 4176
4176 flush_scheduled_work(); 4177 /* Calling flush_scheduled_work() may deadlock because
4178 * linkwatch_event() may be on the workqueue and it will try to get
4179 * the rtnl_lock which we are holding.
4180 */
4181 while (bp->in_reset_task)
4182 msleep(1);
4183
4177 bnx2_netif_stop(bp); 4184 bnx2_netif_stop(bp);
4178 del_timer_sync(&bp->timer); 4185 del_timer_sync(&bp->timer);
4179 if (bp->wol) 4186 if (bp->wol)
@@ -4390,11 +4397,11 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4390 bp->req_line_speed = req_line_speed; 4397 bp->req_line_speed = req_line_speed;
4391 bp->req_duplex = req_duplex; 4398 bp->req_duplex = req_duplex;
4392 4399
4393 spin_lock_irq(&bp->phy_lock); 4400 spin_lock_bh(&bp->phy_lock);
4394 4401
4395 bnx2_setup_phy(bp); 4402 bnx2_setup_phy(bp);
4396 4403
4397 spin_unlock_irq(&bp->phy_lock); 4404 spin_unlock_bh(&bp->phy_lock);
4398 4405
4399 return 0; 4406 return 0;
4400} 4407}
@@ -4464,19 +4471,20 @@ bnx2_nway_reset(struct net_device *dev)
4464 return -EINVAL; 4471 return -EINVAL;
4465 } 4472 }
4466 4473
4467 spin_lock_irq(&bp->phy_lock); 4474 spin_lock_bh(&bp->phy_lock);
4468 4475
4469 /* Force a link down visible on the other side */ 4476 /* Force a link down visible on the other side */
4470 if (bp->phy_flags & PHY_SERDES_FLAG) { 4477 if (bp->phy_flags & PHY_SERDES_FLAG) {
4471 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK); 4478 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4472 spin_unlock_irq(&bp->phy_lock); 4479 spin_unlock_bh(&bp->phy_lock);
4473 4480
4474 msleep(20); 4481 msleep(20);
4475 4482
4476 spin_lock_irq(&bp->phy_lock); 4483 spin_lock_bh(&bp->phy_lock);
4477 if (CHIP_NUM(bp) == CHIP_NUM_5706) { 4484 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4478 bp->serdes_an_pending = SERDES_AN_TIMEOUT / 4485 bp->current_interval = SERDES_AN_TIMEOUT;
4479 bp->timer_interval; 4486 bp->serdes_an_pending = 1;
4487 mod_timer(&bp->timer, jiffies + bp->current_interval);
4480 } 4488 }
4481 } 4489 }
4482 4490
@@ -4484,7 +4492,7 @@ bnx2_nway_reset(struct net_device *dev)
4484 bmcr &= ~BMCR_LOOPBACK; 4492 bmcr &= ~BMCR_LOOPBACK;
4485 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); 4493 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4486 4494
4487 spin_unlock_irq(&bp->phy_lock); 4495 spin_unlock_bh(&bp->phy_lock);
4488 4496
4489 return 0; 4497 return 0;
4490} 4498}
@@ -4670,11 +4678,11 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4670 bp->autoneg &= ~AUTONEG_FLOW_CTRL; 4678 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4671 } 4679 }
4672 4680
4673 spin_lock_irq(&bp->phy_lock); 4681 spin_lock_bh(&bp->phy_lock);
4674 4682
4675 bnx2_setup_phy(bp); 4683 bnx2_setup_phy(bp);
4676 4684
4677 spin_unlock_irq(&bp->phy_lock); 4685 spin_unlock_bh(&bp->phy_lock);
4678 4686
4679 return 0; 4687 return 0;
4680} 4688}
@@ -4698,7 +4706,7 @@ bnx2_set_rx_csum(struct net_device *dev, u32 data)
4698 4706
4699#define BNX2_NUM_STATS 45 4707#define BNX2_NUM_STATS 45
4700 4708
4701struct { 4709static struct {
4702 char string[ETH_GSTRING_LEN]; 4710 char string[ETH_GSTRING_LEN];
4703} bnx2_stats_str_arr[BNX2_NUM_STATS] = { 4711} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
4704 { "rx_bytes" }, 4712 { "rx_bytes" },
@@ -4750,7 +4758,7 @@ struct {
4750 4758
4751#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) 4759#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
4752 4760
4753unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { 4761static unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
4754 STATS_OFFSET32(stat_IfHCInOctets_hi), 4762 STATS_OFFSET32(stat_IfHCInOctets_hi),
4755 STATS_OFFSET32(stat_IfHCInBadOctets_hi), 4763 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
4756 STATS_OFFSET32(stat_IfHCOutOctets_hi), 4764 STATS_OFFSET32(stat_IfHCOutOctets_hi),
@@ -4801,7 +4809,7 @@ unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
4801/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are 4809/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
4802 * skipped because of errata. 4810 * skipped because of errata.
4803 */ 4811 */
4804u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = { 4812static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
4805 8,0,8,8,8,8,8,8,8,8, 4813 8,0,8,8,8,8,8,8,8,8,
4806 4,0,4,4,4,4,4,4,4,4, 4814 4,0,4,4,4,4,4,4,4,4,
4807 4,4,4,4,4,4,4,4,4,4, 4815 4,4,4,4,4,4,4,4,4,4,
@@ -4811,7 +4819,7 @@ u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
4811 4819
4812#define BNX2_NUM_TESTS 6 4820#define BNX2_NUM_TESTS 6
4813 4821
4814struct { 4822static struct {
4815 char string[ETH_GSTRING_LEN]; 4823 char string[ETH_GSTRING_LEN];
4816} bnx2_tests_str_arr[BNX2_NUM_TESTS] = { 4824} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
4817 { "register_test (offline)" }, 4825 { "register_test (offline)" },
@@ -4910,7 +4918,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
4910 struct bnx2 *bp = dev->priv; 4918 struct bnx2 *bp = dev->priv;
4911 int i; 4919 int i;
4912 u32 *hw_stats = (u32 *) bp->stats_blk; 4920 u32 *hw_stats = (u32 *) bp->stats_blk;
4913 u8 *stats_len_arr = 0; 4921 u8 *stats_len_arr = NULL;
4914 4922
4915 if (hw_stats == NULL) { 4923 if (hw_stats == NULL) {
4916 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); 4924 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
@@ -5012,7 +5020,7 @@ static struct ethtool_ops bnx2_ethtool_ops = {
5012static int 5020static int
5013bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 5021bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5014{ 5022{
5015 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data; 5023 struct mii_ioctl_data *data = if_mii(ifr);
5016 struct bnx2 *bp = dev->priv; 5024 struct bnx2 *bp = dev->priv;
5017 int err; 5025 int err;
5018 5026
@@ -5024,9 +5032,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5024 case SIOCGMIIREG: { 5032 case SIOCGMIIREG: {
5025 u32 mii_regval; 5033 u32 mii_regval;
5026 5034
5027 spin_lock_irq(&bp->phy_lock); 5035 spin_lock_bh(&bp->phy_lock);
5028 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval); 5036 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5029 spin_unlock_irq(&bp->phy_lock); 5037 spin_unlock_bh(&bp->phy_lock);
5030 5038
5031 data->val_out = mii_regval; 5039 data->val_out = mii_regval;
5032 5040
@@ -5037,9 +5045,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5037 if (!capable(CAP_NET_ADMIN)) 5045 if (!capable(CAP_NET_ADMIN))
5038 return -EPERM; 5046 return -EPERM;
5039 5047
5040 spin_lock_irq(&bp->phy_lock); 5048 spin_lock_bh(&bp->phy_lock);
5041 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in); 5049 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5042 spin_unlock_irq(&bp->phy_lock); 5050 spin_unlock_bh(&bp->phy_lock);
5043 5051
5044 return err; 5052 return err;
5045 5053
@@ -5057,6 +5065,9 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
5057 struct sockaddr *addr = p; 5065 struct sockaddr *addr = p;
5058 struct bnx2 *bp = dev->priv; 5066 struct bnx2 *bp = dev->priv;
5059 5067
5068 if (!is_valid_ether_addr(addr->sa_data))
5069 return -EINVAL;
5070
5060 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 5071 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5061 if (netif_running(dev)) 5072 if (netif_running(dev))
5062 bnx2_set_mac_addr(bp); 5073 bnx2_set_mac_addr(bp);
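
The added check rejects a new MAC address before it is copied into dev->dev_addr: is_valid_ether_addr() requires a unicast address (low bit of the first octet clear) that is not all zeroes. A minimal userspace sketch of the same predicate follows, for illustration only; mac_is_valid is an invented name.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same rule as is_valid_ether_addr(): unicast and not 00:00:00:00:00:00. */
static bool mac_is_valid(const uint8_t a[6])
{
	static const uint8_t zero[6];

	if (a[0] & 0x01)			/* multicast or broadcast */
		return false;
	return memcmp(a, zero, 6) != 0;		/* reject the all-zero address */
}

int main(void)
{
	uint8_t good[6]  = { 0x00, 0x10, 0x18, 0x01, 0x02, 0x03 };
	uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("good=%d bcast=%d\n", mac_is_valid(good), mac_is_valid(bcast));
	return 0;
}
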
@@ -5305,6 +5316,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5305 bp->stats_ticks = 1000000 & 0xffff00; 5316 bp->stats_ticks = 1000000 & 0xffff00;
5306 5317
5307 bp->timer_interval = HZ; 5318 bp->timer_interval = HZ;
5319 bp->current_interval = HZ;
5308 5320
5309 /* Disable WOL support if we are running on a SERDES chip. */ 5321 /* Disable WOL support if we are running on a SERDES chip. */
5310 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) { 5322 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
@@ -5328,6 +5340,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5328 bp->req_line_speed = 0; 5340 bp->req_line_speed = 0;
5329 if (bp->phy_flags & PHY_SERDES_FLAG) { 5341 if (bp->phy_flags & PHY_SERDES_FLAG) {
5330 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg; 5342 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5343
5344 reg = REG_RD_IND(bp, HOST_VIEW_SHMEM_BASE +
5345 BNX2_PORT_HW_CFG_CONFIG);
5346 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5347 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5348 bp->autoneg = 0;
5349 bp->req_line_speed = bp->line_speed = SPEED_1000;
5350 bp->req_duplex = DUPLEX_FULL;
5351 }
5331 } 5352 }
5332 else { 5353 else {
5333 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg; 5354 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
@@ -5335,11 +5356,17 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5335 5356
5336 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 5357 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5337 5358
5359 init_timer(&bp->timer);
5360 bp->timer.expires = RUN_AT(bp->timer_interval);
5361 bp->timer.data = (unsigned long) bp;
5362 bp->timer.function = bnx2_timer;
5363
5338 return 0; 5364 return 0;
5339 5365
5340err_out_unmap: 5366err_out_unmap:
5341 if (bp->regview) { 5367 if (bp->regview) {
5342 iounmap(bp->regview); 5368 iounmap(bp->regview);
5369 bp->regview = NULL;
5343 } 5370 }
5344 5371
5345err_out_release: 5372err_out_release:
@@ -5454,6 +5481,8 @@ bnx2_remove_one(struct pci_dev *pdev)
5454 struct net_device *dev = pci_get_drvdata(pdev); 5481 struct net_device *dev = pci_get_drvdata(pdev);
5455 struct bnx2 *bp = dev->priv; 5482 struct bnx2 *bp = dev->priv;
5456 5483
5484 flush_scheduled_work();
5485
5457 unregister_netdev(dev); 5486 unregister_netdev(dev);
5458 5487
5459 if (bp->regview) 5488 if (bp->regview)
@@ -5505,12 +5534,12 @@ bnx2_resume(struct pci_dev *pdev)
5505} 5534}
5506 5535
5507static struct pci_driver bnx2_pci_driver = { 5536static struct pci_driver bnx2_pci_driver = {
5508 name: DRV_MODULE_NAME, 5537 .name = DRV_MODULE_NAME,
5509 id_table: bnx2_pci_tbl, 5538 .id_table = bnx2_pci_tbl,
5510 probe: bnx2_init_one, 5539 .probe = bnx2_init_one,
5511 remove: __devexit_p(bnx2_remove_one), 5540 .remove = __devexit_p(bnx2_remove_one),
5512 suspend: bnx2_suspend, 5541 .suspend = bnx2_suspend,
5513 resume: bnx2_resume, 5542 .resume = bnx2_resume,
5514}; 5543};
5515 5544
5516static int __init bnx2_init(void) 5545static int __init bnx2_init(void)
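
This hunk only changes initializer syntax: the GNU "field: value" extension becomes standard C99 designated initializers (".field = value"), which stays correct if the structure's fields are ever reordered. A self-contained example of the same conversion on a hypothetical ops structure; the names are illustrative, not kernel APIs.

#include <stdio.h>

/* Hypothetical ops structure, only for illustrating the syntax change. */
struct demo_ops {
	const char *name;
	int (*probe)(int id);
	void (*remove)(int id);
};

static int demo_probe(int id)
{
	printf("probe %d\n", id);
	return 0;
}

static void demo_remove(int id)
{
	printf("remove %d\n", id);
}

/* Old GNU extension (what the patch drops):
 *	static struct demo_ops ops = { name: "demo", probe: demo_probe };
 * Standard C99 designated initializers (what the patch adopts): */
static struct demo_ops ops = {
	.name	= "demo",
	.probe	= demo_probe,
	.remove	= demo_remove,
};

int main(void)
{
	return ops.probe(1);
}
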
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 8214a2853d0d..9ad3f5740cd8 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3841,12 +3841,12 @@ struct bnx2 {
3841 struct status_block *status_blk; 3841 struct status_block *status_blk;
3842 u32 last_status_idx; 3842 u32 last_status_idx;
3843 3843
3844 atomic_t tx_avail_bd;
3845 struct tx_bd *tx_desc_ring; 3844 struct tx_bd *tx_desc_ring;
3846 struct sw_bd *tx_buf_ring; 3845 struct sw_bd *tx_buf_ring;
3847 u32 tx_prod_bseq; 3846 u32 tx_prod_bseq;
3848 u16 tx_prod; 3847 u16 tx_prod;
3849 u16 tx_cons; 3848 u16 tx_cons;
3849 int tx_ring_size;
3850 3850
3851#ifdef BCM_VLAN 3851#ifdef BCM_VLAN
3852 struct vlan_group *vlgrp; 3852 struct vlan_group *vlgrp;
@@ -3872,8 +3872,10 @@ struct bnx2 {
3872 char *name; 3872 char *name;
3873 3873
3874 int timer_interval; 3874 int timer_interval;
3875 int current_interval;
3875 struct timer_list timer; 3876 struct timer_list timer;
3876 struct work_struct reset_task; 3877 struct work_struct reset_task;
3878 int in_reset_task;
3877 3879
3878 /* Used to synchronize phy accesses. */ 3880 /* Used to synchronize phy accesses. */
3879 spinlock_t phy_lock; 3881 spinlock_t phy_lock;
@@ -3927,7 +3929,6 @@ struct bnx2 {
3927 u16 fw_wr_seq; 3929 u16 fw_wr_seq;
3928 u16 fw_drv_pulse_wr_seq; 3930 u16 fw_drv_pulse_wr_seq;
3929 3931
3930 int tx_ring_size;
3931 dma_addr_t tx_desc_mapping; 3932 dma_addr_t tx_desc_mapping;
3932 3933
3933 3934
@@ -3985,7 +3986,7 @@ struct bnx2 {
3985#define PHY_LOOPBACK 2 3986#define PHY_LOOPBACK 2
3986 3987
3987 u8 serdes_an_pending; 3988 u8 serdes_an_pending;
3988#define SERDES_AN_TIMEOUT (2 * HZ) 3989#define SERDES_AN_TIMEOUT (HZ / 3)
3989 3990
3990 u8 mac_addr[8]; 3991 u8 mac_addr[8];
3991 3992
@@ -4171,6 +4172,9 @@ struct fw_info {
4171 4172
4172#define BNX2_PORT_HW_CFG_MAC_LOWER 0x00000054 4173#define BNX2_PORT_HW_CFG_MAC_LOWER 0x00000054
4173#define BNX2_PORT_HW_CFG_CONFIG 0x00000058 4174#define BNX2_PORT_HW_CFG_CONFIG 0x00000058
4175#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK 0x001f0000
4176#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_AN 0x00000000
4177#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G 0x00030000
4174 4178
4175#define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER 0x00000068 4179#define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER 0x00000068
4176#define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER 0x0000006c 4180#define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER 0x0000006c
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a2e8dda5afac..d2f34d5a8083 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2419,22 +2419,19 @@ out:
2419 return 0; 2419 return 0;
2420} 2420}
2421 2421
2422int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype) 2422int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev)
2423{ 2423{
2424 struct bonding *bond = dev->priv; 2424 struct bonding *bond = dev->priv;
2425 struct slave *slave = NULL; 2425 struct slave *slave = NULL;
2426 int ret = NET_RX_DROP; 2426 int ret = NET_RX_DROP;
2427 2427
2428 if (!(dev->flags & IFF_MASTER)) { 2428 if (!(dev->flags & IFF_MASTER))
2429 goto out; 2429 goto out;
2430 }
2431 2430
2432 read_lock(&bond->lock); 2431 read_lock(&bond->lock);
2433 slave = bond_get_slave_by_dev((struct bonding *)dev->priv, 2432 slave = bond_get_slave_by_dev((struct bonding *)dev->priv, orig_dev);
2434 skb->real_dev); 2433 if (!slave)
2435 if (slave == NULL) {
2436 goto out_unlock; 2434 goto out_unlock;
2437 }
2438 2435
2439 bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); 2436 bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
2440 2437
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index f46823894187..673a30af5660 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -295,6 +295,6 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
295void bond_3ad_handle_link_change(struct slave *slave, char link); 295void bond_3ad_handle_link_change(struct slave *slave, char link);
296int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); 296int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
297int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); 297int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
298int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype); 298int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev);
299#endif //__BOND_3AD_H__ 299#endif //__BOND_3AD_H__
300 300
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 19e829b567d0..f8fce3961197 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -354,15 +354,14 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
354 _unlock_rx_hashtbl(bond); 354 _unlock_rx_hashtbl(bond);
355} 355}
356 356
357static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype) 357static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype, struct net_device *orig_dev)
358{ 358{
359 struct bonding *bond = bond_dev->priv; 359 struct bonding *bond = bond_dev->priv;
360 struct arp_pkt *arp = (struct arp_pkt *)skb->data; 360 struct arp_pkt *arp = (struct arp_pkt *)skb->data;
361 int res = NET_RX_DROP; 361 int res = NET_RX_DROP;
362 362
363 if (!(bond_dev->flags & IFF_MASTER)) { 363 if (!(bond_dev->flags & IFF_MASTER))
364 goto out; 364 goto out;
365 }
366 365
367 if (!arp) { 366 if (!arp) {
368 dprintk("Packet has no ARP data\n"); 367 dprintk("Packet has no ARP data\n");
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ba9f0580e1f9..2946e037a9b1 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -98,7 +98,7 @@ static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
98 98
99static char bpq_eth_addr[6]; 99static char bpq_eth_addr[6];
100 100
101static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *); 101static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
102static int bpq_device_event(struct notifier_block *, unsigned long, void *); 102static int bpq_device_event(struct notifier_block *, unsigned long, void *);
103static const char *bpq_print_ethaddr(const unsigned char *); 103static const char *bpq_print_ethaddr(const unsigned char *);
104 104
@@ -165,7 +165,7 @@ static inline int dev_is_ethdev(struct net_device *dev)
165/* 165/*
166 * Receive an AX.25 frame via an ethernet interface. 166 * Receive an AX.25 frame via an ethernet interface.
167 */ 167 */
168static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype) 168static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
169{ 169{
170 int len; 170 int len;
171 char * ptr; 171 char * ptr;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index a32668e88e09..bb71638a7c44 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1657,7 +1657,6 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1657 skb->dev = ppp->dev; 1657 skb->dev = ppp->dev;
1658 skb->protocol = htons(npindex_to_ethertype[npi]); 1658 skb->protocol = htons(npindex_to_ethertype[npi]);
1659 skb->mac.raw = skb->data; 1659 skb->mac.raw = skb->data;
1660 skb->input_dev = ppp->dev;
1661 netif_rx(skb); 1660 netif_rx(skb);
1662 ppp->dev->last_rx = jiffies; 1661 ppp->dev->last_rx = jiffies;
1663 } 1662 }
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index ce1a9bf7b9a7..82f236cc3b9b 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -377,7 +377,8 @@ abort_kfree:
377 ***********************************************************************/ 377 ***********************************************************************/
378static int pppoe_rcv(struct sk_buff *skb, 378static int pppoe_rcv(struct sk_buff *skb,
379 struct net_device *dev, 379 struct net_device *dev,
380 struct packet_type *pt) 380 struct packet_type *pt,
381 struct net_device *orig_dev)
381 382
382{ 383{
383 struct pppoe_hdr *ph; 384 struct pppoe_hdr *ph;
@@ -426,7 +427,8 @@ out:
426 ***********************************************************************/ 427 ***********************************************************************/
427static int pppoe_disc_rcv(struct sk_buff *skb, 428static int pppoe_disc_rcv(struct sk_buff *skb,
428 struct net_device *dev, 429 struct net_device *dev,
429 struct packet_type *pt) 430 struct packet_type *pt,
431 struct net_device *orig_dev)
430 432
431{ 433{
432 struct pppoe_hdr *ph; 434 struct pppoe_hdr *ph;
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 12a86f96d973..ec1a18d189a1 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1429,6 +1429,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
1429{ 1429{
1430 struct rr_private *rrpriv = netdev_priv(dev); 1430 struct rr_private *rrpriv = netdev_priv(dev);
1431 struct rr_regs __iomem *regs = rrpriv->regs; 1431 struct rr_regs __iomem *regs = rrpriv->regs;
1432 struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
1432 struct ring_ctrl *txctrl; 1433 struct ring_ctrl *txctrl;
1433 unsigned long flags; 1434 unsigned long flags;
1434 u32 index, len = skb->len; 1435 u32 index, len = skb->len;
@@ -1460,7 +1461,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
1460 ifield = (u32 *)skb_push(skb, 8); 1461 ifield = (u32 *)skb_push(skb, 8);
1461 1462
1462 ifield[0] = 0; 1463 ifield[0] = 0;
1463 ifield[1] = skb->private.ifield; 1464 ifield[1] = hcb->ifield;
1464 1465
1465 /* 1466 /*
1466 * We don't need the lock before we are actually going to start 1467 * We don't need the lock before we are actually going to start
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
index 3ad0b6751f6f..221354eea21f 100644
--- a/drivers/net/shaper.c
+++ b/drivers/net/shaper.c
@@ -156,52 +156,6 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
156 156
157 SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb); 157 SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);
158 158
159#ifdef SHAPER_COMPLEX /* and broken.. */
160
161 while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
162 {
163 if(ptr->pri<skb->pri
164 && jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
165 {
166 struct sk_buff *tmp=ptr->prev;
167
168 /*
169 * It goes before us therefore we slip the length
170 * of the new frame.
171 */
172
173 SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen;
174 SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen;
175
176 /*
177 * The packet may have slipped so far back it
178 * fell off.
179 */
180 if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
181 {
182 skb_unlink(ptr);
183 dev_kfree_skb(ptr);
184 }
185 ptr=tmp;
186 }
187 else
188 break;
189 }
190 if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
191 skb_queue_head(&shaper->sendq,skb);
192 else
193 {
194 struct sk_buff *tmp;
195 /*
196 * Set the packet clock out time according to the
197 * frames ahead. Im sure a bit of thought could drop
198 * this loop.
199 */
200 for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
201 SHAPERCB(skb)->shapeclock+=tmp->shapelen;
202 skb_append(ptr,skb);
203 }
204#else
205 { 159 {
206 struct sk_buff *tmp; 160 struct sk_buff *tmp;
207 /* 161 /*
@@ -220,7 +174,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
220 } else 174 } else
221 skb_queue_tail(&shaper->sendq, skb); 175 skb_queue_tail(&shaper->sendq, skb);
222 } 176 }
223#endif 177
224 if(sh_debug) 178 if(sh_debug)
225 printk("Frame queued.\n"); 179 printk("Frame queued.\n");
226 if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN) 180 if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
@@ -302,7 +256,7 @@ static void shaper_kick(struct shaper *shaper)
302 * Pull the frame and get interrupts back on. 256 * Pull the frame and get interrupts back on.
303 */ 257 */
304 258
305 skb_unlink(skb); 259 skb_unlink(skb, &shaper->sendq);
306 if (shaper->recovery < 260 if (shaper->recovery <
307 SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen) 261 SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
308 shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen; 262 shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6d4ab1e333b5..af8263a1580e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -340,41 +340,92 @@ static struct {
340 340
341static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 341static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342{ 342{
343 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { 343 unsigned long flags;
344 spin_lock_bh(&tp->indirect_lock); 344
345 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 345 spin_lock_irqsave(&tp->indirect_lock, flags);
346 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 346 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
347 spin_unlock_bh(&tp->indirect_lock); 347 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
348 } else { 348 spin_unlock_irqrestore(&tp->indirect_lock, flags);
349 writel(val, tp->regs + off); 349}
350 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0) 350
351 readl(tp->regs + off); 351static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
352{
353 writel(val, tp->regs + off);
354 readl(tp->regs + off);
355}
356
357static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
358{
359 unsigned long flags;
360 u32 val;
361
362 spin_lock_irqsave(&tp->indirect_lock, flags);
363 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
364 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
365 spin_unlock_irqrestore(&tp->indirect_lock, flags);
366 return val;
367}
368
369static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
370{
371 unsigned long flags;
372
373 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
374 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
375 TG3_64BIT_REG_LOW, val);
376 return;
377 }
378 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
379 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
380 TG3_64BIT_REG_LOW, val);
381 return;
382 }
383
384 spin_lock_irqsave(&tp->indirect_lock, flags);
385 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
386 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
387 spin_unlock_irqrestore(&tp->indirect_lock, flags);
388
389 /* In indirect mode when disabling interrupts, we also need
390 * to clear the interrupt bit in the GRC local ctrl register.
391 */
392 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
393 (val == 0x1)) {
394 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
395 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
352 } 396 }
353} 397}
354 398
399static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
400{
401 unsigned long flags;
402 u32 val;
403
404 spin_lock_irqsave(&tp->indirect_lock, flags);
405 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
406 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
407 spin_unlock_irqrestore(&tp->indirect_lock, flags);
408 return val;
409}
410
355static void _tw32_flush(struct tg3 *tp, u32 off, u32 val) 411static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
356{ 412{
357 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { 413 tp->write32(tp, off, val);
358 spin_lock_bh(&tp->indirect_lock); 414 if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
359 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 415 !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 416 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
361 spin_unlock_bh(&tp->indirect_lock); 417 tp->read32(tp, off); /* flush */
362 } else {
363 void __iomem *dest = tp->regs + off;
364 writel(val, dest);
365 readl(dest); /* always flush PCI write */
366 }
367} 418}
368 419
369static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val) 420static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
370{ 421{
371 void __iomem *mbox = tp->regs + off; 422 tp->write32_mbox(tp, off, val);
372 writel(val, mbox); 423 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
373 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) 424 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
374 readl(mbox); 425 tp->read32_mbox(tp, off);
375} 426}
376 427
377static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val) 428static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
378{ 429{
379 void __iomem *mbox = tp->regs + off; 430 void __iomem *mbox = tp->regs + off;
380 writel(val, mbox); 431 writel(val, mbox);
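
The rewritten accessors reach registers through a PCI config-space window pair (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA): the target offset goes into the address register, then the data register is read or written, with indirect_lock held so the two steps cannot interleave between CPUs. Below is a userspace stand-in for that pattern, assuming a simulated register file and a pthread mutex in place of the spinlock; all names are invented for the sketch.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated device: 64 registers reachable only through an address/data
 * window pair, roughly how the base-address/data config registers work. */
struct fake_dev {
	uint32_t window;		/* last offset written to the address reg */
	uint32_t regs[64];		/* backing register file */
	pthread_mutex_t indirect_lock;	/* stands in for tp->indirect_lock */
};

static void indirect_write32(struct fake_dev *d, uint32_t off, uint32_t val)
{
	pthread_mutex_lock(&d->indirect_lock);
	d->window = off;		/* step 1: select the register */
	d->regs[d->window / 4] = val;	/* step 2: write through the data reg */
	pthread_mutex_unlock(&d->indirect_lock);
}

static uint32_t indirect_read32(struct fake_dev *d, uint32_t off)
{
	uint32_t val;

	pthread_mutex_lock(&d->indirect_lock);	/* the two steps must not interleave */
	d->window = off;
	val = d->regs[d->window / 4];
	pthread_mutex_unlock(&d->indirect_lock);
	return val;
}

int main(void)
{
	struct fake_dev dev = { .indirect_lock = PTHREAD_MUTEX_INITIALIZER };

	indirect_write32(&dev, 0x10, 0xdeadbeef);
	printf("0x%x\n", indirect_read32(&dev, 0x10));
	return 0;
}
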
@@ -384,46 +435,57 @@ static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
384 readl(mbox); 435 readl(mbox);
385} 436}
386 437
387#define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg)) 438static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
388#define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val) 439{
389#define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val) 440 writel(val, tp->regs + off);
441}
442
443static u32 tg3_read32(struct tg3 *tp, u32 off)
444{
445 return (readl(tp->regs + off));
446}
447
448#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
449#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
450#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
451#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
452#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
390 453
391#define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val)) 454#define tw32(reg,val) tp->write32(tp, reg, val)
392#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val)) 455#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
393#define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg)) 456#define tr32(reg) tp->read32(tp, reg)
394#define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
395#define tr32(reg) readl(tp->regs + (reg))
396#define tr16(reg) readw(tp->regs + (reg))
397#define tr8(reg) readb(tp->regs + (reg))
398 457
399static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) 458static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
400{ 459{
401 spin_lock_bh(&tp->indirect_lock); 460 unsigned long flags;
461
462 spin_lock_irqsave(&tp->indirect_lock, flags);
402 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); 463 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
403 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 464 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
404 465
405 /* Always leave this as zero. */ 466 /* Always leave this as zero. */
406 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 467 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
407 spin_unlock_bh(&tp->indirect_lock); 468 spin_unlock_irqrestore(&tp->indirect_lock, flags);
408} 469}
409 470
410static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) 471static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
411{ 472{
412 spin_lock_bh(&tp->indirect_lock); 473 unsigned long flags;
474
475 spin_lock_irqsave(&tp->indirect_lock, flags);
413 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); 476 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
414 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 477 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
415 478
416 /* Always leave this as zero. */ 479 /* Always leave this as zero. */
417 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 480 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
418 spin_unlock_bh(&tp->indirect_lock); 481 spin_unlock_irqrestore(&tp->indirect_lock, flags);
419} 482}
420 483
421static void tg3_disable_ints(struct tg3 *tp) 484static void tg3_disable_ints(struct tg3 *tp)
422{ 485{
423 tw32(TG3PCI_MISC_HOST_CTRL, 486 tw32(TG3PCI_MISC_HOST_CTRL,
424 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); 487 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
425 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 488 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
426 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
427} 489}
428 490
429static inline void tg3_cond_int(struct tg3 *tp) 491static inline void tg3_cond_int(struct tg3 *tp)
@@ -439,9 +501,8 @@ static void tg3_enable_ints(struct tg3 *tp)
439 501
440 tw32(TG3PCI_MISC_HOST_CTRL, 502 tw32(TG3PCI_MISC_HOST_CTRL,
441 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); 503 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
442 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 504 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
443 (tp->last_tag << 24)); 505 (tp->last_tag << 24));
444 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
445 tg3_cond_int(tp); 506 tg3_cond_int(tp);
446} 507}
447 508
@@ -472,8 +533,6 @@ static inline unsigned int tg3_has_work(struct tg3 *tp)
472 */ 533 */
473static void tg3_restart_ints(struct tg3 *tp) 534static void tg3_restart_ints(struct tg3 *tp)
474{ 535{
475 tw32(TG3PCI_MISC_HOST_CTRL,
476 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
477 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 536 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
478 tp->last_tag << 24); 537 tp->last_tag << 24);
479 mmiowb(); 538 mmiowb();
@@ -3278,9 +3337,8 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3278 /* No work, shared interrupt perhaps? re-enable 3337 /* No work, shared interrupt perhaps? re-enable
3279 * interrupts, and flush that PCI write 3338 * interrupts, and flush that PCI write
3280 */ 3339 */
3281 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3340 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3282 0x00000000); 3341 0x00000000);
3283 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3284 } 3342 }
3285 } else { /* shared interrupt */ 3343 } else { /* shared interrupt */
3286 handled = 0; 3344 handled = 0;
@@ -3323,9 +3381,8 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *r
3323 /* no work, shared interrupt perhaps? re-enable 3381 /* no work, shared interrupt perhaps? re-enable
3324 * interrupts, and flush that PCI write 3382 * interrupts, and flush that PCI write
3325 */ 3383 */
3326 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3384 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3327 tp->last_tag << 24); 3385 tp->last_tag << 24);
3328 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3329 } 3386 }
3330 } else { /* shared interrupt */ 3387 } else { /* shared interrupt */
3331 handled = 0; 3388 handled = 0;
@@ -4216,7 +4273,7 @@ static void tg3_stop_fw(struct tg3 *);
4216static int tg3_chip_reset(struct tg3 *tp) 4273static int tg3_chip_reset(struct tg3 *tp)
4217{ 4274{
4218 u32 val; 4275 u32 val;
4219 u32 flags_save; 4276 void (*write_op)(struct tg3 *, u32, u32);
4220 int i; 4277 int i;
4221 4278
4222 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) 4279 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
@@ -4228,8 +4285,9 @@ static int tg3_chip_reset(struct tg3 *tp)
4228 * fun things. So, temporarily disable the 5701 4285 * fun things. So, temporarily disable the 5701
4229 * hardware workaround, while we do the reset. 4286 * hardware workaround, while we do the reset.
4230 */ 4287 */
4231 flags_save = tp->tg3_flags; 4288 write_op = tp->write32;
4232 tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG; 4289 if (write_op == tg3_write_flush_reg32)
4290 tp->write32 = tg3_write32;
4233 4291
4234 /* do the reset */ 4292 /* do the reset */
4235 val = GRC_MISC_CFG_CORECLK_RESET; 4293 val = GRC_MISC_CFG_CORECLK_RESET;
@@ -4248,8 +4306,8 @@ static int tg3_chip_reset(struct tg3 *tp)
4248 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 4306 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4249 tw32(GRC_MISC_CFG, val); 4307 tw32(GRC_MISC_CFG, val);
4250 4308
4251 /* restore 5701 hardware bug workaround flag */ 4309 /* restore 5701 hardware bug workaround write method */
4252 tp->tg3_flags = flags_save; 4310 tp->write32 = write_op;
4253 4311
4254 /* Unfortunately, we have to delay before the PCI read back. 4312 /* Unfortunately, we have to delay before the PCI read back.
4255 * Some 575X chips even will not respond to a PCI cfg access 4313 * Some 575X chips even will not respond to a PCI cfg access
@@ -4635,7 +4693,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4635 int cpu_scratch_size, struct fw_info *info) 4693 int cpu_scratch_size, struct fw_info *info)
4636{ 4694{
4637 int err, i; 4695 int err, i;
4638 u32 orig_tg3_flags = tp->tg3_flags;
4639 void (*write_op)(struct tg3 *, u32, u32); 4696 void (*write_op)(struct tg3 *, u32, u32);
4640 4697
4641 if (cpu_base == TX_CPU_BASE && 4698 if (cpu_base == TX_CPU_BASE &&
@@ -4651,11 +4708,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4651 else 4708 else
4652 write_op = tg3_write_indirect_reg32; 4709 write_op = tg3_write_indirect_reg32;
4653 4710
4654 /* Force use of PCI config space for indirect register
4655 * write calls.
4656 */
4657 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4658
4659 /* It is possible that bootcode is still loading at this point. 4711 /* It is possible that bootcode is still loading at this point.
4660 * Get the nvram lock first before halting the cpu. 4712 * Get the nvram lock first before halting the cpu.
4661 */ 4713 */
@@ -4691,7 +4743,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4691 err = 0; 4743 err = 0;
4692 4744
4693out: 4745out:
4694 tp->tg3_flags = orig_tg3_flags;
4695 return err; 4746 return err;
4696} 4747}
4697 4748
@@ -5808,8 +5859,7 @@ static int tg3_reset_hw(struct tg3 *tp)
5808 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 5859 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5809 udelay(100); 5860 udelay(100);
5810 5861
5811 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); 5862 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5812 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5813 tp->last_tag = 0; 5863 tp->last_tag = 0;
5814 5864
5815 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 5865 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
@@ -6198,7 +6248,8 @@ static int tg3_test_interrupt(struct tg3 *tp)
6198 HOSTCC_MODE_NOW); 6248 HOSTCC_MODE_NOW);
6199 6249
6200 for (i = 0; i < 5; i++) { 6250 for (i = 0; i < 5; i++) {
6201 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); 6251 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6252 TG3_64BIT_REG_LOW);
6202 if (int_mbox != 0) 6253 if (int_mbox != 0)
6203 break; 6254 break;
6204 msleep(10); 6255 msleep(10);
@@ -6598,10 +6649,10 @@ static int tg3_open(struct net_device *dev)
6598 6649
6599 /* Mailboxes */ 6650 /* Mailboxes */
6600 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n", 6651 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6601 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0), 6652 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6602 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4), 6653 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6603 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0), 6654 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6604 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4)); 6655 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6605 6656
6606 /* NIC side send descriptors. */ 6657 /* NIC side send descriptors. */
6607 for (i = 0; i < 6; i++) { 6658 for (i = 0; i < 6; i++) {
@@ -7901,7 +7952,7 @@ static int tg3_test_loopback(struct tg3 *tp)
7901 num_pkts++; 7952 num_pkts++;
7902 7953
7903 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx); 7954 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7904 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW); 7955 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7905 7956
7906 udelay(10); 7957 udelay(10);
7907 7958
@@ -9153,14 +9204,6 @@ static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9153static int __devinit tg3_get_invariants(struct tg3 *tp) 9204static int __devinit tg3_get_invariants(struct tg3 *tp)
9154{ 9205{
9155 static struct pci_device_id write_reorder_chipsets[] = { 9206 static struct pci_device_id write_reorder_chipsets[] = {
9156 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9157 PCI_DEVICE_ID_INTEL_82801AA_8) },
9158 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9159 PCI_DEVICE_ID_INTEL_82801AB_8) },
9160 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9161 PCI_DEVICE_ID_INTEL_82801BA_11) },
9162 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9163 PCI_DEVICE_ID_INTEL_82801BA_6) },
9164 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 9207 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9165 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 9208 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9166 { }, 9209 { },
@@ -9177,7 +9220,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9177 tp->tg3_flags2 |= TG3_FLG2_SUN_570X; 9220 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9178#endif 9221#endif
9179 9222
9180 /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write 9223 /* If we have an AMD 762 chipset, write
9181 * reordering to the mailbox registers done by the host 9224 * reordering to the mailbox registers done by the host
9182 * controller can cause major troubles. We read back from 9225 * controller can cause major troubles. We read back from
9183 * every mailbox register write to force the writes to be 9226 * every mailbox register write to force the writes to be
@@ -9215,6 +9258,69 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9215 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) 9258 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9216 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 9259 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9217 9260
9261 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9262 * we need to disable memory and use config. cycles
9263 * only to access all registers. The 5702/03 chips
9264 * can mistakenly decode the special cycles from the
9265 * ICH chipsets as memory write cycles, causing corruption
9266 * of register and memory space. Only certain ICH bridges
9267 * will drive special cycles with non-zero data during the
9268 * address phase which can fall within the 5703's address
9269 * range. This is not an ICH bug as the PCI spec allows
9270 * non-zero address during special cycles. However, only
9271 * these ICH bridges are known to drive non-zero addresses
9272 * during special cycles.
9273 *
9274 * Since special cycles do not cross PCI bridges, we only
9275 * enable this workaround if the 5703 is on the secondary
9276 * bus of these ICH bridges.
9277 */
9278 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9279 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9280 static struct tg3_dev_id {
9281 u32 vendor;
9282 u32 device;
9283 u32 rev;
9284 } ich_chipsets[] = {
9285 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9286 PCI_ANY_ID },
9287 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9288 PCI_ANY_ID },
9289 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9290 0xa },
9291 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9292 PCI_ANY_ID },
9293 { },
9294 };
9295 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9296 struct pci_dev *bridge = NULL;
9297
9298 while (pci_id->vendor != 0) {
9299 bridge = pci_get_device(pci_id->vendor, pci_id->device,
9300 bridge);
9301 if (!bridge) {
9302 pci_id++;
9303 continue;
9304 }
9305 if (pci_id->rev != PCI_ANY_ID) {
9306 u8 rev;
9307
9308 pci_read_config_byte(bridge, PCI_REVISION_ID,
9309 &rev);
9310 if (rev > pci_id->rev)
9311 continue;
9312 }
9313 if (bridge->subordinate &&
9314 (bridge->subordinate->number ==
9315 tp->pdev->bus->number)) {
9316
9317 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9318 pci_dev_put(bridge);
9319 break;
9320 }
9321 }
9322 }
9323
9218 /* Find msi capability. */ 9324 /* Find msi capability. */
9219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9220 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 9326 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
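
The new block walks a small table of bridge IDs (optionally bounded by revision) and enables the ICH workaround only when a matching bridge sits in front of the NIC. A rough userspace sketch of the table walk follows, with a made-up device list standing in for the pci_get_device() iteration and placeholder IDs rather than the real table.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One entry per bridge that needs the workaround; max_rev < 0 means any
 * revision. The IDs below are placeholders. */
struct quirk_id {
	uint16_t vendor, device;
	int max_rev;
};

struct fake_pci_dev {
	uint16_t vendor, device;
	uint8_t rev;
};

static const struct quirk_id quirks[] = {
	{ 0x8086, 0x2410, -1 },
	{ 0x8086, 0x2440, 0x0a },
	{ 0, 0, 0 },
};

/* Scan the devices "found on the bus" against the quirk table; a match
 * within the revision bound enables the workaround. */
static bool needs_ich_workaround(const struct fake_pci_dev *devs, int n)
{
	const struct quirk_id *q;
	int i;

	for (q = quirks; q->vendor; q++) {
		for (i = 0; i < n; i++) {
			if (devs[i].vendor != q->vendor ||
			    devs[i].device != q->device)
				continue;
			if (q->max_rev >= 0 && devs[i].rev > q->max_rev)
				continue;	/* bridge too new, quirk absent */
			return true;
		}
	}
	return false;
}

int main(void)
{
	struct fake_pci_dev bus[] = { { 0x8086, 0x2440, 0x05 } };

	printf("workaround=%d\n", needs_ich_workaround(bus, 1));
	return 0;
}
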
@@ -9302,6 +9408,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9302 } 9408 }
9303 } 9409 }
9304 9410
9411 /* 5700 BX chips need to have their TX producer index mailboxes
9412 * written twice to workaround a bug.
9413 */
9414 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9415 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9416
9305 /* Back to back register writes can cause problems on this chip, 9417 /* Back to back register writes can cause problems on this chip,
9306 * the workaround is to read back all reg writes except those to 9418 * the workaround is to read back all reg writes except those to
9307 * mailbox regs. See tg3_write_indirect_reg32(). 9419 * mailbox regs. See tg3_write_indirect_reg32().
@@ -9325,6 +9437,43 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9325 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 9437 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9326 } 9438 }
9327 9439
9440 /* Default fast path register access methods */
9441 tp->read32 = tg3_read32;
9442 tp->write32 = tg3_write32;
9443 tp->read32_mbox = tg3_read32;
9444 tp->write32_mbox = tg3_write32;
9445 tp->write32_tx_mbox = tg3_write32;
9446 tp->write32_rx_mbox = tg3_write32;
9447
9448 /* Various workaround register access methods */
9449 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9450 tp->write32 = tg3_write_indirect_reg32;
9451 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9452 tp->write32 = tg3_write_flush_reg32;
9453
9454 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9455 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9456 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9457 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9458 tp->write32_rx_mbox = tg3_write_flush_reg32;
9459 }
9460
9461 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9462 tp->read32 = tg3_read_indirect_reg32;
9463 tp->write32 = tg3_write_indirect_reg32;
9464 tp->read32_mbox = tg3_read_indirect_mbox;
9465 tp->write32_mbox = tg3_write_indirect_mbox;
9466 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9467 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9468
9469 iounmap(tp->regs);
9470 tp->regs = 0;
9471
9472 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9473 pci_cmd &= ~PCI_COMMAND_MEMORY;
9474 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9475 }
9476
9328 /* Get eeprom hw config before calling tg3_set_power_state(). 9477 /* Get eeprom hw config before calling tg3_set_power_state().
9329 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be 9478 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9330 * determined before calling tg3_set_power_state() so that 9479 * determined before calling tg3_set_power_state() so that
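
With this hunk, register access becomes a set of per-device method pointers chosen once from the quirk flags, instead of re-testing the flags on every tw32()/tr32(). A compact userspace analogue follows, assuming invented flag names and a fake register file; the flush variant reads the register back so a posted write completes before execution continues.

#include <stdint.h>
#include <stdio.h>

/* Invented quirk flag; the real driver keys off TG3_FLAG_* bits. */
#define QUIRK_WRITE_FLUSH	0x1

struct dev {
	uint32_t regs[16];
	unsigned int quirks;
	uint32_t (*read32)(struct dev *d, uint32_t off);
	void (*write32)(struct dev *d, uint32_t off, uint32_t val);
};

static uint32_t read32_direct(struct dev *d, uint32_t off)
{
	return d->regs[off / 4];
}

static void write32_direct(struct dev *d, uint32_t off, uint32_t val)
{
	d->regs[off / 4] = val;
}

/* Where posted writes may be delayed, reading the register back forces
 * the write to complete before we continue. */
static void write32_flush(struct dev *d, uint32_t off, uint32_t val)
{
	write32_direct(d, off, val);
	(void)read32_direct(d, off);
}

/* Pick the access methods once, at "probe" time, from the quirk flags. */
static void setup_accessors(struct dev *d)
{
	d->read32 = read32_direct;
	d->write32 = (d->quirks & QUIRK_WRITE_FLUSH) ?
			write32_flush : write32_direct;
}

int main(void)
{
	struct dev d = { .quirks = QUIRK_WRITE_FLUSH };

	setup_accessors(&d);
	d.write32(&d, 0x8, 42);
	printf("%u\n", d.read32(&d, 0x8));
	return 0;
}
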
@@ -9539,14 +9688,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9539 else 9688 else
9540 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 9689 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9541 9690
9542 /* 5700 BX chips need to have their TX producer index mailboxes
9543 * written twice to workaround a bug.
9544 */
9545 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9546 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9547 else
9548 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9549
9550 /* It seems all chips can get confused if TX buffers 9691 /* It seems all chips can get confused if TX buffers
9551 * straddle the 4GB address boundary in some cases. 9692 * straddle the 4GB address boundary in some cases.
9552 */ 9693 */
@@ -10469,7 +10610,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10469 return 0; 10610 return 0;
10470 10611
10471err_out_iounmap: 10612err_out_iounmap:
10472 iounmap(tp->regs); 10613 if (tp->regs) {
10614 iounmap(tp->regs);
10615 tp->regs = 0;
10616 }
10473 10617
10474err_out_free_dev: 10618err_out_free_dev:
10475 free_netdev(dev); 10619 free_netdev(dev);
@@ -10491,7 +10635,10 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
10491 struct tg3 *tp = netdev_priv(dev); 10635 struct tg3 *tp = netdev_priv(dev);
10492 10636
10493 unregister_netdev(dev); 10637 unregister_netdev(dev);
10494 iounmap(tp->regs); 10638 if (tp->regs) {
10639 iounmap(tp->regs);
10640 tp->regs = 0;
10641 }
10495 free_netdev(dev); 10642 free_netdev(dev);
10496 pci_release_regions(pdev); 10643 pci_release_regions(pdev);
10497 pci_disable_device(pdev); 10644 pci_disable_device(pdev);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 5c4433c147fa..c184b773e585 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2049,6 +2049,11 @@ struct tg3 {
2049 spinlock_t lock; 2049 spinlock_t lock;
2050 spinlock_t indirect_lock; 2050 spinlock_t indirect_lock;
2051 2051
2052 u32 (*read32) (struct tg3 *, u32);
2053 void (*write32) (struct tg3 *, u32, u32);
2054 u32 (*read32_mbox) (struct tg3 *, u32);
2055 void (*write32_mbox) (struct tg3 *, u32,
2056 u32);
2052 void __iomem *regs; 2057 void __iomem *regs;
2053 struct net_device *dev; 2058 struct net_device *dev;
2054 struct pci_dev *pdev; 2059 struct pci_dev *pdev;
@@ -2060,6 +2065,8 @@ struct tg3 {
2060 u32 msg_enable; 2065 u32 msg_enable;
2061 2066
2062 /* begin "tx thread" cacheline section */ 2067 /* begin "tx thread" cacheline section */
2068 void (*write32_tx_mbox) (struct tg3 *, u32,
2069 u32);
2063 u32 tx_prod; 2070 u32 tx_prod;
2064 u32 tx_cons; 2071 u32 tx_cons;
2065 u32 tx_pending; 2072 u32 tx_pending;
@@ -2071,6 +2078,8 @@ struct tg3 {
2071 dma_addr_t tx_desc_mapping; 2078 dma_addr_t tx_desc_mapping;
2072 2079
2073 /* begin "rx thread" cacheline section */ 2080 /* begin "rx thread" cacheline section */
2081 void (*write32_rx_mbox) (struct tg3 *, u32,
2082 u32);
2074 u32 rx_rcb_ptr; 2083 u32 rx_rcb_ptr;
2075 u32 rx_std_ptr; 2084 u32 rx_std_ptr;
2076 u32 rx_jumbo_ptr; 2085 u32 rx_jumbo_ptr;
@@ -2165,6 +2174,7 @@ struct tg3 {
2165#define TG3_FLG2_ANY_SERDES (TG3_FLG2_PHY_SERDES | \ 2174#define TG3_FLG2_ANY_SERDES (TG3_FLG2_PHY_SERDES | \
2166 TG3_FLG2_MII_SERDES) 2175 TG3_FLG2_MII_SERDES)
2167#define TG3_FLG2_PARALLEL_DETECT 0x01000000 2176#define TG3_FLG2_PARALLEL_DETECT 0x01000000
2177#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2168 2178
2169 u32 split_mode_max_reqs; 2179 u32 split_mode_max_reqs;
2170#define SPLIT_MODE_5704_MAX_REQ 3 2180#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c
index a63f6a2cc4f7..cdd4c09c2d90 100644
--- a/drivers/net/wan/hdlc_generic.c
+++ b/drivers/net/wan/hdlc_generic.c
@@ -61,7 +61,7 @@ static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
61 61
62 62
63static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, 63static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
64 struct packet_type *p) 64 struct packet_type *p, struct net_device *orig_dev)
65{ 65{
66 hdlc_device *hdlc = dev_to_hdlc(dev); 66 hdlc_device *hdlc = dev_to_hdlc(dev);
67 if (hdlc->proto.netif_rx) 67 if (hdlc->proto.netif_rx)
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 7f2e3653c5e5..6c302e9dbca2 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -86,7 +86,7 @@ static __inline__ int dev_is_ethdev(struct net_device *dev)
86/* 86/*
87 * Receive a LAPB frame via an ethernet interface. 87 * Receive a LAPB frame via an ethernet interface.
88 */ 88 */
89static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype) 89static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
90{ 90{
91 int len, err; 91 int len, err;
92 struct lapbethdev *lapbeth; 92 struct lapbethdev *lapbeth;
diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c
index c5f5e62aab8b..0497dbdb8631 100644
--- a/drivers/net/wan/sdla_fr.c
+++ b/drivers/net/wan/sdla_fr.c
@@ -445,7 +445,7 @@ void s508_s514_unlock(sdla_t *card, unsigned long *smp_flags);
445void s508_s514_lock(sdla_t *card, unsigned long *smp_flags); 445void s508_s514_lock(sdla_t *card, unsigned long *smp_flags);
446 446
447unsigned short calc_checksum (char *, int); 447unsigned short calc_checksum (char *, int);
448static int setup_fr_header(struct sk_buff** skb, 448static int setup_fr_header(struct sk_buff *skb,
449 struct net_device* dev, char op_mode); 449 struct net_device* dev, char op_mode);
450 450
451 451
@@ -1372,7 +1372,7 @@ static int if_send(struct sk_buff* skb, struct net_device* dev)
1372 /* Move the if_header() code to here. By inserting frame 1372 /* Move the if_header() code to here. By inserting frame
1373 * relay header in if_header() we would break the 1373 * relay header in if_header() we would break the
1374 * tcpdump and other packet sniffers */ 1374 * tcpdump and other packet sniffers */
1375 chan->fr_header_len = setup_fr_header(&skb,dev,chan->common.usedby); 1375 chan->fr_header_len = setup_fr_header(skb,dev,chan->common.usedby);
1376 if (chan->fr_header_len < 0 ){ 1376 if (chan->fr_header_len < 0 ){
1377 ++chan->ifstats.tx_dropped; 1377 ++chan->ifstats.tx_dropped;
1378 ++card->wandev.stats.tx_dropped; 1378 ++card->wandev.stats.tx_dropped;
@@ -1597,8 +1597,6 @@ static int setup_for_delayed_transmit(struct net_device* dev,
1597 return 1; 1597 return 1;
1598 } 1598 }
1599 1599
1600 skb_unlink(skb);
1601
1602 chan->transmit_length = len; 1600 chan->transmit_length = len;
1603 chan->delay_skb = skb; 1601 chan->delay_skb = skb;
1604 1602
@@ -4871,18 +4869,15 @@ static void unconfig_fr (sdla_t *card)
4871 } 4869 }
4872} 4870}
4873 4871
4874static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev, 4872static int setup_fr_header(struct sk_buff *skb, struct net_device* dev,
4875 char op_mode) 4873 char op_mode)
4876{ 4874{
4877 struct sk_buff *skb = *skb_orig;
4878 fr_channel_t *chan=dev->priv; 4875 fr_channel_t *chan=dev->priv;
4879 4876
4880 if (op_mode == WANPIPE){ 4877 if (op_mode == WANPIPE) {
4881
4882 chan->fr_header[0]=Q922_UI; 4878 chan->fr_header[0]=Q922_UI;
4883 4879
4884 switch (htons(skb->protocol)){ 4880 switch (htons(skb->protocol)){
4885
4886 case ETH_P_IP: 4881 case ETH_P_IP:
4887 chan->fr_header[1]=NLPID_IP; 4882 chan->fr_header[1]=NLPID_IP;
4888 break; 4883 break;
@@ -4894,16 +4889,14 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
4894 } 4889 }
4895 4890
4896 /* If we are in bridging mode, we must apply 4891 /* If we are in bridging mode, we must apply
4897 * an Ethernet header */ 4892 * an Ethernet header
4898 if (op_mode == BRIDGE || op_mode == BRIDGE_NODE){ 4893 */
4899 4894 if (op_mode == BRIDGE || op_mode == BRIDGE_NODE) {
4900
4901 /* Encapsulate the packet as a bridged Ethernet frame. */ 4895 /* Encapsulate the packet as a bridged Ethernet frame. */
4902#ifdef DEBUG 4896#ifdef DEBUG
4903 printk(KERN_INFO "%s: encapsulating skb for frame relay\n", 4897 printk(KERN_INFO "%s: encapsulating skb for frame relay\n",
4904 dev->name); 4898 dev->name);
4905#endif 4899#endif
4906
4907 chan->fr_header[0] = 0x03; 4900 chan->fr_header[0] = 0x03;
4908 chan->fr_header[1] = 0x00; 4901 chan->fr_header[1] = 0x00;
4909 chan->fr_header[2] = 0x80; 4902 chan->fr_header[2] = 0x80;
@@ -4916,7 +4909,6 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
4916 /* Yuck. */ 4909 /* Yuck. */
4917 skb->protocol = ETH_P_802_3; 4910 skb->protocol = ETH_P_802_3;
4918 return 8; 4911 return 8;
4919
4920 } 4912 }
4921 4913
4922 return 0; 4914 return 0;
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 84b65c60c799..f58c794a963a 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -1447,7 +1447,7 @@ static void sppp_print_bytes (u_char *p, u16 len)
1447 * after interrupt servicing to process frames queued via netif_rx. 1447 * after interrupt servicing to process frames queued via netif_rx.
1448 */ 1448 */
1449 1449
1450static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p) 1450static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev)
1451{ 1451{
1452 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 1452 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
1453 return NET_RX_DROP; 1453 return NET_RX_DROP;
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 4528a00c45b0..a2f67245f6da 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -2903,19 +2903,18 @@ static struct net_device_stats *usbnet_get_stats (struct net_device *net)
2903 * completion callbacks. 2.5 should have fixed those bugs... 2903 * completion callbacks. 2.5 should have fixed those bugs...
2904 */ 2904 */
2905 2905
2906static void defer_bh (struct usbnet *dev, struct sk_buff *skb) 2906static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
2907{ 2907{
2908 struct sk_buff_head *list = skb->list;
2909 unsigned long flags; 2908 unsigned long flags;
2910 2909
2911 spin_lock_irqsave (&list->lock, flags); 2910 spin_lock_irqsave(&list->lock, flags);
2912 __skb_unlink (skb, list); 2911 __skb_unlink(skb, list);
2913 spin_unlock (&list->lock); 2912 spin_unlock(&list->lock);
2914 spin_lock (&dev->done.lock); 2913 spin_lock(&dev->done.lock);
2915 __skb_queue_tail (&dev->done, skb); 2914 __skb_queue_tail(&dev->done, skb);
2916 if (dev->done.qlen == 1) 2915 if (dev->done.qlen == 1)
2917 tasklet_schedule (&dev->bh); 2916 tasklet_schedule(&dev->bh);
2918 spin_unlock_irqrestore (&dev->done.lock, flags); 2917 spin_unlock_irqrestore(&dev->done.lock, flags);
2919} 2918}
2920 2919
2921/* some work can't be done in tasklets, so we use keventd 2920/* some work can't be done in tasklets, so we use keventd
@@ -3120,7 +3119,7 @@ block:
3120 break; 3119 break;
3121 } 3120 }
3122 3121
3123 defer_bh (dev, skb); 3122 defer_bh(dev, skb, &dev->rxq);
3124 3123
3125 if (urb) { 3124 if (urb) {
3126 if (netif_running (dev->net) 3125 if (netif_running (dev->net)
@@ -3490,7 +3489,7 @@ static void tx_complete (struct urb *urb, struct pt_regs *regs)
3490 3489
3491 urb->dev = NULL; 3490 urb->dev = NULL;
3492 entry->state = tx_done; 3491 entry->state = tx_done;
3493 defer_bh (dev, skb); 3492 defer_bh(dev, skb, &dev->txq);
3494} 3493}
3495 3494
3496/*-------------------------------------------------------------------------*/ 3495/*-------------------------------------------------------------------------*/
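
defer_bh() now takes the queue the skb currently sits on as an explicit argument, since the skb->list back-pointer it used to rely on is gone; the buffer is unlinked from that queue and appended to dev->done under the done-queue lock. Below is a simplified userspace model of the same hand-off, with plain linked lists and pthread mutexes standing in for sk_buff_head and its spinlocks; it is a sketch, not the driver code.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for sk_buff / sk_buff_head; the point is the calling
 * convention: the caller names the queue the buffer sits on. */
struct buf {
	struct buf *next;
	int id;
};

struct queue {
	struct buf *head, *tail;
	int len;
	pthread_mutex_t lock;
};

static void q_init(struct queue *q)
{
	q->head = q->tail = NULL;
	q->len = 0;
	pthread_mutex_init(&q->lock, NULL);
}

static void q_append(struct queue *q, struct buf *b)
{
	b->next = NULL;
	if (q->tail)
		q->tail->next = b;
	else
		q->head = b;
	q->tail = b;
	q->len++;
}

/* Unlink b from src (it must be queued there) and move it to done. */
static void defer_done(struct queue *src, struct queue *done, struct buf *b)
{
	struct buf *cur, *prev = NULL;

	pthread_mutex_lock(&src->lock);
	for (cur = src->head; cur; prev = cur, cur = cur->next) {
		if (cur != b)
			continue;
		if (prev)
			prev->next = b->next;
		else
			src->head = b->next;
		if (src->tail == b)
			src->tail = prev;
		src->len--;
		break;
	}
	pthread_mutex_unlock(&src->lock);

	pthread_mutex_lock(&done->lock);
	q_append(done, b);
	pthread_mutex_unlock(&done->lock);
}

int main(void)
{
	struct queue rxq, done;
	struct buf a = { .id = 1 }, b = { .id = 2 };

	q_init(&rxq);
	q_init(&done);
	q_append(&rxq, &a);
	q_append(&rxq, &b);

	defer_done(&rxq, &done, &a);
	printf("rxq=%d done=%d\n", rxq.len, done.len);
	return 0;
}
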
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index b5a5e04b6d37..498ad505fa5f 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -86,9 +86,9 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
86 86
87 dev->driver = driver; 87 dev->driver = driver;
88 88
89 dev->groups = 23; 89 dev->groups = 1;
90 dev->seq = 1; 90 dev->seq = 1;
91 dev->nls = netlink_kernel_create(NETLINK_W1, NULL); 91 dev->nls = netlink_kernel_create(NETLINK_W1, 1, NULL, THIS_MODULE);
92 if (!dev->nls) { 92 if (!dev->nls) {
93 printk(KERN_ERR "Failed to create new netlink socket(%u) for w1 master %s.\n", 93 printk(KERN_ERR "Failed to create new netlink socket(%u) for w1 master %s.\n",
94 NETLINK_NFLOG, dev->dev.bus_id); 94 NETLINK_NFLOG, dev->dev.bus_id);
@@ -225,3 +225,5 @@ void w1_remove_master_device(struct w1_bus_master *bm)
225 225
226EXPORT_SYMBOL(w1_add_master_device); 226EXPORT_SYMBOL(w1_add_master_device);
227EXPORT_SYMBOL(w1_remove_master_device); 227EXPORT_SYMBOL(w1_remove_master_device);
228
229MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_W1);
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 2a82fb055c70..e7b774423dd6 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -51,7 +51,7 @@ void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
51 51
52 memcpy(data, msg, sizeof(struct w1_netlink_msg)); 52 memcpy(data, msg, sizeof(struct w1_netlink_msg));
53 53
54 NETLINK_CB(skb).dst_groups = dev->groups; 54 NETLINK_CB(skb).dst_group = dev->groups;
55 netlink_broadcast(dev->nls, skb, 0, dev->groups, GFP_ATOMIC); 55 netlink_broadcast(dev->nls, skb, 0, dev->groups, GFP_ATOMIC);
56 56
57nlmsg_failure: 57nlmsg_failure: