Diffstat (limited to 'drivers')
380 files changed, 8248 insertions, 7595 deletions
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 0ae8b9310cbf..589b98b7b216 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -758,7 +758,8 @@ static void acpi_thermal_check(void *data)
 		del_timer(&(tz->timer));
 	} else {
 		if (timer_pending(&(tz->timer)))
-			mod_timer(&(tz->timer), (HZ * sleep_time) / 1000);
+			mod_timer(&(tz->timer),
+				jiffies + (HZ * sleep_time) / 1000);
 		else {
 			tz->timer.data = (unsigned long)tz;
 			tz->timer.function = acpi_thermal_run;
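Note (illustration, not part of the patch): the thermal.c hunk fixes a common mod_timer() pitfall, since the second argument is an absolute expiry time in jiffies rather than a relative delay. A minimal sketch of the corrected pattern; the timer variable, function name, and delay_ms parameter below are hypothetical:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list example_timer;

/* Re-arm the timer to fire delay_ms milliseconds from now. */
static void example_rearm(unsigned long delay_ms)
{
	/* jiffies + ... turns the relative delay into an absolute deadline */
	mod_timer(&example_timer, jiffies + msecs_to_jiffies(delay_ms));
}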
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index f48207865930..8dc3bc4f5863 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -878,6 +878,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct ata_port_info *port;
 	struct pci_dev *host = NULL;
 	struct sis_chipset *chipset = NULL;
+	struct sis_chipset *sets;
 
 	static struct sis_chipset sis_chipsets[] = {
 
@@ -932,10 +933,11 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* We have to find the bridge first */
 
-	for (chipset = &sis_chipsets[0]; chipset->device; chipset++) {
-		host = pci_get_device(PCI_VENDOR_ID_SI, chipset->device, NULL);
+	for (sets = &sis_chipsets[0]; sets->device; sets++) {
+		host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL);
 		if (host != NULL) {
-			if (chipset->device == 0x630) {	/* SIS630 */
+			chipset = sets;			/* Match found */
+			if (sets->device == 0x630) {	/* SIS630 */
 				u8 host_rev;
 				pci_read_config_byte(host, PCI_REVISION_ID, &host_rev);
 				if (host_rev >= 0x30)	/* 630 ET */
@@ -946,7 +948,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	/* Look for concealed bridges */
-	if (host == NULL) {
+	if (chipset == NULL) {
 		/* Second check */
 		u32 idemisc;
 		u16 trueid;
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 3c372e08f77d..59651abfa4f8 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -821,7 +821,7 @@ static inline void fill_rx_pool (amb_dev * dev, unsigned char pool,
   }
   // cast needed as there is no %? for pointer differences
   PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
-	  skb, skb->head, (long) (skb->end - skb->head));
+	  skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
   rx.handle = virt_to_bus (skb);
   rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
   if (rx_give (dev, &rx, pool))
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index fc518d85543d..02ad83d6b562 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -221,7 +221,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
 	hdr->vpi = htons(vcc->vpi);
 	hdr->vci = htons(vcc->vci);
 	hdr->length = htonl(skb->len);
-	memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
+	skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
 	if (vcc->pop) vcc->pop(vcc,skb);
 	else dev_kfree_skb(skb);
 	out_vcc->push(out_vcc,new_skb);
@@ -310,7 +310,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
 		goto done;
 	}
 	__net_timestamp(new_skb);
-	memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
+	skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
 	out_vcc->push(out_vcc,new_skb);
 	atomic_inc(&vcc->stats->tx);
 	atomic_inc(&out_vcc->stats->rx);
@@ -352,7 +352,7 @@ static struct atm_dev atmtcp_control_dev = {
 	.ops		= &atmtcp_c_dev_ops,
 	.type		= "atmtcp",
 	.number		= 999,
-	.lock		= SPIN_LOCK_UNLOCKED
+	.lock		= __SPIN_LOCK_UNLOCKED(atmtcp_control_dev.lock)
 };
 
 
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 8fccf018f165..0d3a38b1cb0b 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -536,7 +536,7 @@ static int rx_aal0(struct atm_vcc *vcc)
 		return 0;
 	}
 	skb_put(skb,length);
-	skb_set_timestamp(skb, &eni_vcc->timestamp);
+	skb->tstamp = eni_vcc->timestamp;
 	DPRINTK("got len %ld\n",length);
 	if (do_rx_dma(vcc,skb,1,length >> 2,length >> 2)) return 1;
 	eni_vcc->rxing++;
@@ -701,7 +701,7 @@ static void get_service(struct atm_dev *dev)
 			DPRINTK("Grr, servicing VCC %ld twice\n",vci);
 			continue;
 		}
-		do_gettimeofday(&ENI_VCC(vcc)->timestamp);
+		ENI_VCC(vcc)->timestamp = ktime_get_real();
 		ENI_VCC(vcc)->next = NULL;
 		if (vcc->qos.rxtp.traffic_class == ATM_CBR) {
 			if (eni_dev->fast)
diff --git a/drivers/atm/eni.h b/drivers/atm/eni.h
index 385090c2a580..d04fefb0841f 100644
--- a/drivers/atm/eni.h
+++ b/drivers/atm/eni.h
@@ -59,7 +59,7 @@ struct eni_vcc {
 	int rxing;			/* number of pending PDUs */
 	int servicing;			/* number of waiting VCs (0 or 1) */
 	int txing;			/* number of pending TX bytes */
-	struct timeval timestamp;	/* for RX timing */
+	ktime_t timestamp;		/* for RX timing */
 	struct atm_vcc *next;		/* next pending RX */
 	struct sk_buff *last;		/* last PDU being DMAed (used to carry
 					   discard information) */
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index a7c0ed3107e3..405ee5e09221 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1,6 +1,4 @@
 /*
-  $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
-
   A FORE Systems 200E-series driver for ATM on Linux.
   Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
 
@@ -1502,9 +1500,9 @@ fore200e_open(struct atm_vcc *vcc)
     /* pseudo-CBR bandwidth requested? */
     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
 
-	down(&fore200e->rate_sf);
+	mutex_lock(&fore200e->rate_mtx);
 	if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
-	    up(&fore200e->rate_sf);
+	    mutex_unlock(&fore200e->rate_mtx);
 
 	    kfree(fore200e_vcc);
 	    vc_map->vcc = NULL;
@@ -1513,7 +1511,7 @@ fore200e_open(struct atm_vcc *vcc)
 
 	/* reserve bandwidth */
 	fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
-	up(&fore200e->rate_sf);
+	mutex_unlock(&fore200e->rate_mtx);
     }
 
     vcc->itf = vcc->dev->number;
@@ -1599,9 +1597,9 @@ fore200e_close(struct atm_vcc* vcc)
     /* release reserved bandwidth, if any */
     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
 
-	down(&fore200e->rate_sf);
+	mutex_lock(&fore200e->rate_mtx);
 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
-	up(&fore200e->rate_sf);
+	mutex_unlock(&fore200e->rate_mtx);
 
 	clear_bit(ATM_VF_HASQOS, &vcc->flags);
     }
@@ -2064,16 +2062,16 @@ fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
 
     if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
 
-	down(&fore200e->rate_sf);
+	mutex_lock(&fore200e->rate_mtx);
 	if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
-	    up(&fore200e->rate_sf);
+	    mutex_unlock(&fore200e->rate_mtx);
 	    return -EAGAIN;
 	}
 
 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
 	fore200e->available_cell_rate -= qos->txtp.max_pcr;
 
-	up(&fore200e->rate_sf);
+	mutex_unlock(&fore200e->rate_mtx);
 
 	memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
 
@@ -2459,7 +2457,7 @@ fore200e_initialize(struct fore200e* fore200e)
 
     DPRINTK(2, "device %s being initialized\n", fore200e->name);
 
-    init_MUTEX(&fore200e->rate_sf);
+    mutex_init(&fore200e->rate_mtx);
     spin_lock_init(&fore200e->q_lock);
 
     cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
diff --git a/drivers/atm/fore200e.h b/drivers/atm/fore200e.h
index f9abfdac33e4..b85a54613dea 100644
--- a/drivers/atm/fore200e.h
+++ b/drivers/atm/fore200e.h
@@ -869,7 +869,7 @@ typedef struct fore200e {
 
     struct stats*           stats;        /* last snapshot of the stats    */
 
-    struct semaphore        rate_sf;      /* protects rate reservation ops */
+    struct mutex            rate_mtx;     /* protects rate reservation ops */
     spinlock_t              q_lock;       /* protects queue ops            */
 #ifdef FORE200E_USE_TASKLET
     struct tasklet_struct   tx_tasklet;   /* performs tx interrupt work    */
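Note (illustration, not part of the patch): the fore200e changes are a mechanical semaphore-to-mutex conversion, where a semaphore used purely for mutual exclusion becomes a struct mutex and init_MUTEX()/down()/up() become mutex_init()/mutex_lock()/mutex_unlock(). A minimal sketch of that pattern; the structure and function names below are hypothetical:

#include <linux/mutex.h>

struct rate_pool {
	struct mutex lock;	/* protects "reserved" below */
	int reserved;
};

static void rate_pool_init(struct rate_pool *p)
{
	mutex_init(&p->lock);	/* was: init_MUTEX(&sem) */
	p->reserved = 0;
}

static void rate_pool_reserve(struct rate_pool *p, int amount)
{
	mutex_lock(&p->lock);	/* was: down(&sem) */
	p->reserved += amount;
	mutex_unlock(&p->lock);	/* was: up(&sem) */
}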
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 8510026b690a..d33aba6864c2 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1901,13 +1901,13 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 		case ATM_AAL0:
 			/* 2.10.1.5 raw cell receive */
 			skb->len = ATM_AAL0_SDU;
-			skb->tail = skb->data + skb->len;
+			skb_set_tail_pointer(skb, skb->len);
 			break;
 		case ATM_AAL5:
 			/* 2.10.1.2 aal5 receive */
 
 			skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
-			skb->tail = skb->data + skb->len;
+			skb_set_tail_pointer(skb, skb->len);
 #ifdef USE_CHECKSUM_HW
 			if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
 				skb->ip_summed = CHECKSUM_COMPLETE;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index b4b80140c398..057efbc55d38 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1065,7 +1065,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 	vcc = vc->rx_vcc;
 
 	pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(skb),
-				    skb->end - skb->data, PCI_DMA_FROMDEVICE);
+				    skb_end_pointer(skb) - skb->data,
+				    PCI_DMA_FROMDEVICE);
 
 	if ((vcc->qos.aal == ATM_AAL0) ||
 	    (vcc->qos.aal == ATM_AAL34)) {
@@ -1194,7 +1195,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 	}
 
 	pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-			 skb->end - skb->data, PCI_DMA_FROMDEVICE);
+			 skb_end_pointer(skb) - skb->data,
+			 PCI_DMA_FROMDEVICE);
 	sb_pool_remove(card, skb);
 
 	skb_trim(skb, len);
@@ -1267,7 +1269,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
 	tail = readl(SAR_REG_RAWCT);
 
 	pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
-				    queue->end - queue->head - 16,
+				    skb_end_pointer(queue) - queue->head - 16,
 				    PCI_DMA_FROMDEVICE);
 
 	while (head != tail) {
@@ -1363,7 +1365,8 @@ drop:
 			queue = card->raw_cell_head;
 			pci_dma_sync_single_for_cpu(card->pcidev,
 						    IDT77252_PRV_PADDR(queue),
-						    queue->end - queue->data,
+						    (skb_end_pointer(queue) -
+						     queue->data),
 						    PCI_DMA_FROMDEVICE);
 		} else {
 			card->raw_cell_head = NULL;
@@ -1816,7 +1819,8 @@ push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue)
 	u32 handle;
 	u32 addr;
 
-	skb->data = skb->tail = skb->head;
+	skb->data = skb->head;
+	skb_reset_tail_pointer(skb);
 	skb->len = 0;
 
 	skb_reserve(skb, 16);
@@ -1835,7 +1839,6 @@ push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue)
 		skb_put(skb, SAR_FB_SIZE_3);
 		break;
 	default:
-		dev_kfree_skb(skb);
 		return -1;
 	}
 
@@ -1874,7 +1877,7 @@ add_rx_skb(struct idt77252_dev *card, int queue,
 	}
 
 	paddr = pci_map_single(card->pcidev, skb->data,
-			       skb->end - skb->data,
+			       skb_end_pointer(skb) - skb->data,
 			       PCI_DMA_FROMDEVICE);
 	IDT77252_PRV_PADDR(skb) = paddr;
 
@@ -1888,7 +1891,7 @@ add_rx_skb(struct idt77252_dev *card, int queue,
 
 outunmap:
 	pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-			 skb->end - skb->data, PCI_DMA_FROMDEVICE);
+			 skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE);
 
 	handle = IDT77252_PRV_POOL(skb);
 	card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
@@ -1905,12 +1908,14 @@ recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
 	int err;
 
 	pci_dma_sync_single_for_device(card->pcidev, IDT77252_PRV_PADDR(skb),
-				       skb->end - skb->data, PCI_DMA_FROMDEVICE);
+				       skb_end_pointer(skb) - skb->data,
+				       PCI_DMA_FROMDEVICE);
 
 	err = push_rx_skb(card, skb, POOL_QUEUE(handle));
 	if (err) {
 		pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-				 skb->end - skb->data, PCI_DMA_FROMDEVICE);
+				 skb_end_pointer(skb) - skb->data,
+				 PCI_DMA_FROMDEVICE);
 		sb_pool_remove(card, skb);
 		dev_kfree_skb(skb);
 	}
@@ -3122,7 +3127,8 @@ deinit_card(struct idt77252_dev *card)
 			if (skb) {
 				pci_unmap_single(card->pcidev,
 						 IDT77252_PRV_PADDR(skb),
-						 skb->end - skb->data,
+						 (skb_end_pointer(skb) -
+						  skb->data),
 						 PCI_DMA_FROMDEVICE);
 				card->sbpool[i].skb[j] = NULL;
 				dev_kfree_skb(skb);
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index aab9b3733d52..14ced85b3f54 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -2208,7 +2208,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 		if (i == 1 && ns_rsqe_eopdu(rsqe))
 			*((u32 *) sb->data) |= 0x00000002;
 		skb_put(sb, NS_AAL0_HEADER);
-		memcpy(sb->tail, cell, ATM_CELL_PAYLOAD);
+		memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
 		skb_put(sb, ATM_CELL_PAYLOAD);
 		ATM_SKB(sb)->vcc = vcc;
 		__net_timestamp(sb);
@@ -2252,7 +2252,8 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 		vc->rx_iov = iovb;
 		NS_SKB(iovb)->iovcnt = 0;
 		iovb->len = 0;
-		iovb->tail = iovb->data = iovb->head;
+		iovb->data = iovb->head;
+		skb_reset_tail_pointer(iovb);
 		NS_SKB(iovb)->vcc = vcc;
 		/* IMPORTANT: a pointer to the sk_buff containing the small or large
 		   buffer is stored as iovec base, NOT a pointer to the
@@ -2265,7 +2266,8 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 		recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
 		NS_SKB(iovb)->iovcnt = 0;
 		iovb->len = 0;
-		iovb->tail = iovb->data = iovb->head;
+		iovb->data = iovb->head;
+		skb_reset_tail_pointer(iovb);
 		NS_SKB(iovb)->vcc = vcc;
 	}
 	iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++];
@@ -2393,7 +2395,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 			skb->destructor = ns_lb_destructor;
 #endif /* NS_USE_DESTRUCTORS */
 			skb_push(skb, NS_SMBUFSIZE);
-			memcpy(skb->data, sb->data, NS_SMBUFSIZE);
+			skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE);
 			skb_put(skb, len - NS_SMBUFSIZE);
 			ATM_SKB(skb)->vcc = vcc;
 			__net_timestamp(skb);
@@ -2477,7 +2479,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 			{
 				/* Copy the small buffer to the huge buffer */
 				sb = (struct sk_buff *) iov->iov_base;
-				memcpy(hb->data, sb->data, iov->iov_len);
+				skb_copy_from_linear_data(sb, hb->data, iov->iov_len);
 				skb_put(hb, iov->iov_len);
 				remaining = len - iov->iov_len;
 				iov++;
@@ -2489,7 +2491,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 				{
 					lb = (struct sk_buff *) iov->iov_base;
 					tocopy = min_t(int, remaining, iov->iov_len);
-					memcpy(hb->tail, lb->data, tocopy);
+					skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy);
 					skb_put(hb, tocopy);
 					iov++;
 					remaining -= tocopy;
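Note (illustration, not part of the patch): the ATM driver hunks above all follow the same sk_buff conversion, replacing direct use of skb->tail and skb->end, and raw memcpy() from skb->data, with the accessor helpers so the code no longer depends on how those fields are represented internally. A small sketch of the replacement idioms; the wrapper functions below are hypothetical, only the skb helpers themselves are taken from the patch:

#include <linux/skbuff.h>

/* was: memcpy(dst->tail, src->data, len); skb_put(dst, len); */
static void copy_linear(struct sk_buff *dst, const struct sk_buff *src,
			unsigned int len)
{
	skb_copy_from_linear_data(src, skb_tail_pointer(dst), len);
	skb_put(dst, len);
}

/* was: skb->end - skb->data */
static unsigned int linear_buffer_size(const struct sk_buff *skb)
{
	return skb_end_pointer(skb) - skb->data;
}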
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 2308e83e5f33..1d8466817943 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -48,6 +48,15 @@ struct aoe_hdr {
 	__be32 tag;
 };
 
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct aoe_hdr *aoe_hdr(const struct sk_buff *skb)
+{
+	return (struct aoe_hdr *)skb_mac_header(skb);
+}
+#endif
+
 struct aoe_atahdr {
 	unsigned char aflags;
 	unsigned char errfeat;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8d17d8df3662..1a6aeac5a1c3 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -27,7 +27,8 @@ new_skb(ulong len)
 
 	skb = alloc_skb(len, GFP_ATOMIC);
 	if (skb) {
-		skb->nh.raw = skb->mac.raw = skb->data;
+		skb_reset_mac_header(skb);
+		skb_reset_network_header(skb);
 		skb->protocol = __constant_htons(ETH_P_AOE);
 		skb->priority = 0;
 		skb->next = skb->prev = NULL;
@@ -118,7 +119,7 @@ aoecmd_ata_rw(struct aoedev *d, struct frame *f)
 
 	/* initialize the headers & frame */
 	skb = f->skb;
-	h = (struct aoe_hdr *) skb->mac.raw;
+	h = aoe_hdr(skb);
 	ah = (struct aoe_atahdr *) (h+1);
 	skb_put(skb, sizeof *h + sizeof *ah);
 	memset(h, 0, skb->len);
@@ -207,7 +208,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
 		skb->dev = ifp;
 		if (sl_tail == NULL)
 			sl_tail = skb;
-		h = (struct aoe_hdr *) skb->mac.raw;
+		h = aoe_hdr(skb);
 		memset(h, 0, sizeof *h + sizeof *ch);
 
 		memset(h->dst, 0xff, sizeof h->dst);
@@ -300,7 +301,7 @@ rexmit(struct aoedev *d, struct frame *f)
 	aoechr_error(buf);
 
 	skb = f->skb;
-	h = (struct aoe_hdr *) skb->mac.raw;
+	h = aoe_hdr(skb);
 	ah = (struct aoe_atahdr *) (h+1);
 	f->tag = n;
 	h->tag = cpu_to_be32(n);
@@ -529,7 +530,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 	char ebuf[128];
 	u16 aoemajor;
 
-	hin = (struct aoe_hdr *) skb->mac.raw;
+	hin = aoe_hdr(skb);
 	aoemajor = be16_to_cpu(get_unaligned(&hin->major));
 	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
 	if (d == NULL) {
@@ -561,7 +562,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 	calc_rttavg(d, tsince(f->tag));
 
 	ahin = (struct aoe_atahdr *) (hin+1);
-	hout = (struct aoe_hdr *) f->skb->mac.raw;
+	hout = aoe_hdr(f->skb);
 	ahout = (struct aoe_atahdr *) (hout+1);
 	buf = f->buf;
 
@@ -695,7 +696,7 @@ aoecmd_ata_id(struct aoedev *d)
 
 	/* initialize the headers & frame */
 	skb = f->skb;
-	h = (struct aoe_hdr *) skb->mac.raw;
+	h = aoe_hdr(skb);
 	ah = (struct aoe_atahdr *) (h+1);
 	skb_put(skb, sizeof *h + sizeof *ah);
 	memset(h, 0, skb->len);
@@ -726,7 +727,7 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
 	enum { MAXFRAMES = 16 };
 	u16 n;
 
-	h = (struct aoe_hdr *) skb->mac.raw;
+	h = aoe_hdr(skb);
 	ch = (struct aoe_cfghdr *) (h+1);
 
 	/*
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index aab6d91a2c22..f9ddfda4d9cb 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -123,7 +123,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
 		goto exit;
 	skb_push(skb, ETH_HLEN);	/* (1) */
 
-	h = (struct aoe_hdr *) skb->mac.raw;
+	h = aoe_hdr(skb);
 	n = be32_to_cpu(get_unaligned(&h->tag));
 	if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
 		goto exit;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index c852eed91e4b..1eeb8f2cde71 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -140,7 +140,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
 #include <linux/blkdev.h>
 #include <asm/uaccess.h>
 
-static spinlock_t pcd_lock;
+static DEFINE_SPINLOCK(pcd_lock);
 
 module_param(verbose, bool, 0644);
 module_param(major, int, 0);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 7cdaa1951260..5826508f6731 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -154,7 +154,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
 #include <linux/blkpg.h>
 #include <asm/uaccess.h>
 
-static spinlock_t pf_spin_lock;
+static DEFINE_SPINLOCK(pf_spin_lock);
 
 module_param(verbose, bool, 0644);
 module_param(major, int, 0);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a4fb70383188..f1b9dd7d47d6 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -777,7 +777,8 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 	rq->cmd_flags |= REQ_QUIET;
 
 	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
-	ret = rq->errors;
+	if (rq->errors)
+		ret = -EIO;
 out:
 	blk_put_request(rq);
 	return ret;
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 4c766f36d884..b990805806af 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -527,7 +527,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
 		buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size;
 
 		memcpy(skb_put(nskb, 3), buf, 3);
-		memcpy(skb_put(nskb, size), skb->data + sent, size);
+		skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size);
 
 		sent  += size;
 		count -= size;
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index acfb6a430dcc..851de4d5b7de 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -461,20 +461,20 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
 			switch (info->rx_state) {
 
 			case RECV_WAIT_EVENT_HEADER:
-				eh = (struct hci_event_hdr *)(info->rx_skb->data);
+				eh = hci_event_hdr(info->rx_skb);
 				info->rx_state = RECV_WAIT_DATA;
 				info->rx_count = eh->plen;
 				break;
 
 			case RECV_WAIT_ACL_HEADER:
-				ah = (struct hci_acl_hdr *)(info->rx_skb->data);
+				ah = hci_acl_hdr(info->rx_skb);
 				dlen = __le16_to_cpu(ah->dlen);
 				info->rx_state = RECV_WAIT_DATA;
 				info->rx_count = dlen;
 				break;
 
 			case RECV_WAIT_SCO_HEADER:
-				sh = (struct hci_sco_hdr *)(info->rx_skb->data);
+				sh = hci_sco_hdr(info->rx_skb);
 				info->rx_state = RECV_WAIT_DATA;
 				info->rx_count = sh->dlen;
 				break;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 9fca6513562d..e8ebd5d3de86 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -231,7 +231,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data)
 		cr = (struct usb_ctrlrequest *) urb->setup_packet;
 		cr->wLength = __cpu_to_le16(skb->len);
 
-		memcpy(urb->transfer_buffer, skb->data, skb->len);
+		skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
 		urb->transfer_buffer_length = skb->len;
 
 		err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -250,7 +250,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data)
 		skb = skb_dequeue(&data->tx_queue);
 
 		if (skb) {
-			memcpy(urb->transfer_buffer, skb->data, skb->len);
+			skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
 			urb->transfer_buffer_length = skb->len;
 
 			err = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 18b0f3992c5b..39516074636b 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -303,20 +303,20 @@ static void bt3c_receive(bt3c_info_t *info)
 			switch (info->rx_state) {
 
 			case RECV_WAIT_EVENT_HEADER:
-				eh = (struct hci_event_hdr *)(info->rx_skb->data);
+				eh = hci_event_hdr(info->rx_skb);
 				info->rx_state = RECV_WAIT_DATA;
 				info->rx_count = eh->plen;
 				break;
 
 			case RECV_WAIT_ACL_HEADER:
-				ah = (struct hci_acl_hdr *)(info->rx_skb->data);
+				ah = hci_acl_hdr(info->rx_skb);
 				dlen = __le16_to_cpu(ah->dlen);
 				info->rx_state = RECV_WAIT_DATA;
 				info->rx_count = dlen;
 				break;
 
 			case RECV_WAIT_SCO_HEADER:
-				sh = (struct hci_sco_hdr *)(info->rx_skb->data);
+				sh = hci_sco_hdr(info->rx_skb);
 				info->rx_state = RECV_WAIT_DATA;
 				info->rx_count = sh->dlen;
 				break;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index c1bce75148fe..d7d2ea0d86a1 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -250,20 +250,20 @@ static void btuart_receive(btuart_info_t *info)
 			switch (info->rx_state) {
 
 			case RECV_WAIT_EVENT_HEADER:
-				eh = (struct hci_event_hdr *)(info->rx_skb->data);
+				eh = hci_event_hdr(info->rx_skb);
 				info->rx_state = RECV_WAIT_DATA;
 				info->rx_count = eh->plen;
 				break;
 
 			case RECV_WAIT_ACL_HEADER:
-				ah = (struct hci_acl_hdr *)(info->rx_skb->data);
+				ah = hci_acl_hdr(info->rx_skb);
 				dlen = __le16_to_cpu(ah->dlen);
 				info->rx_state = RECV_WAIT_DATA;
 				info->rx_count = dlen;
 				break;
 
 			case RECV_WAIT_SCO_HEADER:
-				sh = (struct hci_sco_hdr *)(info->rx_skb->data);
+				sh = hci_sco_hdr(info->rx_skb);
 				info->rx_state = RECV_WAIT_DATA;
 				info->rx_count = sh->dlen;
 				break;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 459aa97937ab..7f9c54b9964a 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -425,7 +425,7 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
 		return -ENOMEM;
 
 	skb_reserve(s, NSHL);
-	memcpy(skb_put(s, skb->len), skb->data, skb->len);
+	skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len);
 	if (skb->len & 0x0001)
 		*skb_put(s, 1) = 0;	/* PAD */
 
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 34f0afc42407..bfbae14cf93d 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -188,7 +188,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
 			continue;
 
 		case H4_W4_EVENT_HDR:
-			eh = (struct hci_event_hdr *) h4->rx_skb->data;
+			eh = hci_event_hdr(h4->rx_skb);
 
 			BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
 
@@ -196,7 +196,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
 			continue;
 
 		case H4_W4_ACL_HDR:
-			ah = (struct hci_acl_hdr *) h4->rx_skb->data;
+			ah = hci_acl_hdr(h4->rx_skb);
 			dlen = __le16_to_cpu(ah->dlen);
 
 			BT_DBG("ACL header: dlen %d", dlen);
@@ -205,7 +205,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
 			continue;
 
 		case H4_W4_SCO_HDR:
-			sh = (struct hci_sco_hdr *) h4->rx_skb->data;
+			sh = hci_sco_hdr(h4->rx_skb);
 
 			BT_DBG("SCO header: dlen %d", sh->dlen);
 
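Note (illustration, not part of the patch): the Bluetooth driver hunks above swap open-coded casts of skb->data for the hci_event_hdr()/hci_acl_hdr()/hci_sco_hdr() accessors from <net/bluetooth/hci.h>, which return a typed pointer to the header at the start of the skb's linear data. A minimal sketch, where the wrapper function itself is hypothetical:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>

/* was: eh = (struct hci_event_hdr *)(skb->data); */
static unsigned int event_payload_len(struct sk_buff *skb)
{
	struct hci_event_hdr *eh = hci_event_hdr(skb);

	return eh->plen;
}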
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index a61fb6da5d03..80a01150b86c 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1338,43 +1338,23 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
 	 * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
 	 * Caller should use TIOCGICOUNT to see which one it was
 	 */
-	case TIOCMIWAIT: {
-		DECLARE_WAITQUEUE(wait, current);
-		int ret;
+	case TIOCMIWAIT:
+		spin_lock_irqsave(&info->slock, flags);
+		cnow = info->icount;	/* note the counters on entry */
+		spin_unlock_irqrestore(&info->slock, flags);
+
+		wait_event_interruptible(info->delta_msr_wait, ({
+			cprev = cnow;
 		spin_lock_irqsave(&info->slock, flags);
-		cprev = info->icount;	/* note the counters on entry */
+			cnow = info->icount;	/* atomic copy */
 		spin_unlock_irqrestore(&info->slock, flags);
 
-		add_wait_queue(&info->delta_msr_wait, &wait);
-		while (1) {
-			spin_lock_irqsave(&info->slock, flags);
-			cnow = info->icount;	/* atomic copy */
-			spin_unlock_irqrestore(&info->slock, flags);
-
-			set_current_state(TASK_INTERRUPTIBLE);
-			if (((arg & TIOCM_RNG) &&
-					(cnow.rng != cprev.rng)) ||
-					((arg & TIOCM_DSR) &&
-					(cnow.dsr != cprev.dsr)) ||
-					((arg & TIOCM_CD) &&
-					(cnow.dcd != cprev.dcd)) ||
-					((arg & TIOCM_CTS) &&
-					(cnow.cts != cprev.cts))) {
-				ret = 0;
-				break;
-			}
-			/* see if a signal did it */
-			if (signal_pending(current)) {
-				ret = -ERESTARTSYS;
-				break;
-			}
-			cprev = cnow;
-		}
-		current->state = TASK_RUNNING;
-		remove_wait_queue(&info->delta_msr_wait, &wait);
-		break;
-	}
-	/* NOTREACHED */
+			((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+			((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+			((arg & TIOCM_CD)  && (cnow.dcd != cprev.dcd)) ||
+			((arg & TIOCM_CTS) && (cnow.cts != cprev.cts));
+		}));
+		break;
 	/*
 	 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
 	 * Return: write counters to the user passed counter struct
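Note (illustration, not part of the patch): this TIOCMIWAIT rewrite, and the similar one in mxser_new.c below, replaces a hand-rolled add_wait_queue()/set_current_state() loop with wait_event_interruptible(), which handles the wait-queue bookkeeping and signal checks itself. A minimal sketch of the idiom with hypothetical names:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(status_wait);
static int status_changed;

/* Returns 0 once status_changed becomes non-zero, -ERESTARTSYS on a signal. */
static int wait_for_status(void)
{
	return wait_event_interruptible(status_wait, status_changed);
}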
diff --git a/drivers/char/mxser_new.c b/drivers/char/mxser_new.c
index 9af07e4999d5..f7603b6aeb87 100644
--- a/drivers/char/mxser_new.c
+++ b/drivers/char/mxser_new.c
@@ -1758,43 +1758,23 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
 	 * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
 	 * Caller should use TIOCGICOUNT to see which one it was
 	 */
-	case TIOCMIWAIT: {
-		DECLARE_WAITQUEUE(wait, current);
-		int ret;
+	case TIOCMIWAIT:
 		spin_lock_irqsave(&info->slock, flags);
-		cprev = info->icount;	/* note the counters on entry */
+		cnow = info->icount;	/* note the counters on entry */
 		spin_unlock_irqrestore(&info->slock, flags);
 
-		add_wait_queue(&info->delta_msr_wait, &wait);
-		while (1) {
+		wait_event_interruptible(info->delta_msr_wait, ({
+			cprev = cnow;
 			spin_lock_irqsave(&info->slock, flags);
 			cnow = info->icount;	/* atomic copy */
 			spin_unlock_irqrestore(&info->slock, flags);
 
-			set_current_state(TASK_INTERRUPTIBLE);
-			if (((arg & TIOCM_RNG) &&
-					(cnow.rng != cprev.rng)) ||
-					((arg & TIOCM_DSR) &&
-					(cnow.dsr != cprev.dsr)) ||
-					((arg & TIOCM_CD) &&
-					(cnow.dcd != cprev.dcd)) ||
-					((arg & TIOCM_CTS) &&
-					(cnow.cts != cprev.cts))) {
-				ret = 0;
-				break;
-			}
-			/* see if a signal did it */
-			if (signal_pending(current)) {
-				ret = -ERESTARTSYS;
-				break;
-			}
-			cprev = cnow;
-		}
-		current->state = TASK_RUNNING;
-		remove_wait_queue(&info->delta_msr_wait, &wait);
+			((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+			((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+			((arg & TIOCM_CD)  && (cnow.dcd != cprev.dcd)) ||
+			((arg & TIOCM_CTS) && (cnow.cts != cprev.cts));
+		}));
 		break;
-	}
-	/* NOTREACHED */
 	/*
 	 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
 	 * Return: write counters to the user passed counter struct
@@ -2230,7 +2210,14 @@ end_intr:
 	port->mon_data.rxcnt += cnt;
 	port->mon_data.up_rxcnt += cnt;
 
+	/*
+	 * We are called from an interrupt context with &port->slock
+	 * being held. Drop it temporarily in order to prevent
+	 * recursive locking.
+	 */
+	spin_unlock(&port->slock);
 	tty_flip_buffer_push(tty);
+	spin_lock(&port->slock);
 }
 
 static void mxser_transmit_chars(struct mxser_port *port)
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 8d025e9b5bce..157b1d09ab55 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4169,7 +4169,7 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
 	netif_stop_queue(dev);
 
 	/* copy data to device buffers */
-	memcpy(info->tx_buf, skb->data, skb->len);
+	skb_copy_from_linear_data(skb, info->tx_buf, skb->len);
 	info->tx_get = 0;
 	info->tx_put = info->tx_count = skb->len;
 
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b9dc7aa1dfb3..46c1b97748b6 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -881,15 +881,15 @@ EXPORT_SYMBOL(get_random_bytes);
  */
 static void init_std_data(struct entropy_store *r)
 {
-	struct timeval tv;
+	ktime_t now;
 	unsigned long flags;
 
 	spin_lock_irqsave(&r->lock, flags);
 	r->entropy_count = 0;
 	spin_unlock_irqrestore(&r->lock, flags);
 
-	do_gettimeofday(&tv);
-	add_entropy_words(r, (__u32 *)&tv, sizeof(tv)/4);
+	now = ktime_get_real();
+	add_entropy_words(r, (__u32 *)&now, sizeof(now)/4);
 	add_entropy_words(r, (__u32 *)utsname(),
 			  sizeof(*(utsname()))/4);
 }
@@ -911,14 +911,12 @@ void rand_initialize_irq(int irq)
 		return;
 
 	/*
-	 * If kmalloc returns null, we just won't use that entropy
+	 * If kzalloc returns null, we just won't use that entropy
 	 * source.
 	 */
-	state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
-	if (state) {
-		memset(state, 0, sizeof(struct timer_rand_state));
+	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+	if (state)
 		irq_timer_state[irq] = state;
-	}
 }
 
 #ifdef CONFIG_BLOCK
@@ -927,14 +925,12 @@ void rand_initialize_disk(struct gendisk *disk)
 	struct timer_rand_state *state;
 
 	/*
-	 * If kmalloc returns null, we just won't use that entropy
+	 * If kzalloc returns null, we just won't use that entropy
 	 * source.
 	 */
-	state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
-	if (state) {
-		memset(state, 0, sizeof(struct timer_rand_state));
+	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+	if (state)
 		disk->random = state;
-	}
 }
 #endif
 
@@ -1469,7 +1465,6 @@ late_initcall(seqgen_init);
 __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
 				   __be16 sport, __be16 dport)
 {
-	struct timeval tv;
 	__u32 seq;
 	__u32 hash[12];
 	struct keydata *keyptr = get_keyptr();
@@ -1485,8 +1480,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
 	seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
 	seq += keyptr->count;
 
-	do_gettimeofday(&tv);
-	seq += tv.tv_usec + tv.tv_sec * 1000000;
+	seq += ktime_get_real().tv64;
 
 	return seq;
 }
@@ -1521,7 +1515,6 @@ __u32 secure_ip_id(__be32 daddr)
 __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 				 __be16 sport, __be16 dport)
 {
-	struct timeval tv;
 	__u32 seq;
 	__u32 hash[4];
 	struct keydata *keyptr = get_keyptr();
@@ -1543,12 +1536,11 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 	 *	As close as possible to RFC 793, which
 	 *	suggests using a 250 kHz clock.
 	 *	Further reading shows this assumes 2 Mb/s networks.
-	 *	For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
+	 *	For 10 Gb/s Ethernet, a 1 GHz clock is appropriate.
 	 *	That's funny, Linux has one built in!  Use it!
 	 *	(Networks are faster now - should this be increased?)
 	 */
-	do_gettimeofday(&tv);
-	seq += tv.tv_usec + tv.tv_sec * 1000000;
+	seq += ktime_get_real().tv64;
 #if 0
 	printk("init_seq(%lx, %lx, %d, %d) = %d\n",
 	       saddr, daddr, sport, dport, seq);
@@ -1556,8 +1548,6 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 	return seq;
 }
 
-EXPORT_SYMBOL(secure_tcp_sequence_number);
-
 /* Generate secure starting point for ephemeral IPV4 transport port search */
 u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 {
@@ -1598,7 +1588,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16
 u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
 				__be16 sport, __be16 dport)
 {
-	struct timeval tv;
 	u64 seq;
 	__u32 hash[4];
 	struct keydata *keyptr = get_keyptr();
@@ -1611,8 +1600,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
 	seq = half_md4_transform(hash, keyptr->secret);
 	seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
 
-	do_gettimeofday(&tv);
-	seq += tv.tv_usec + tv.tv_sec * 1000000;
+	seq += ktime_get_real().tv64;
 	seq &= (1ull << 48) - 1;
 #if 0
 	printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
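Note (illustration, not part of the patch): the random.c hunks drop struct timeval and do_gettimeofday() in favour of ktime_get_real(), reading the raw 64-bit value through the tv64 member exactly as the patch itself does. A minimal sketch with a hypothetical function name:

#include <linux/ktime.h>
#include <linux/types.h>

static u64 wallclock_sample(void)
{
	ktime_t now = ktime_get_real();	/* wall-clock time as ktime_t */

	return now.tv64;	/* raw 64-bit scalar value, used here only as a time-varying quantity */
}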
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index a905f7820331..a7b9e9bb3e8d 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -212,7 +212,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
 	skb = skb_get(__skb);
 
 	if (skb->len >= NLMSG_SPACE(0)) {
-		nlh = (struct nlmsghdr *)skb->data;
+		nlh = nlmsg_hdr(skb);
 
 		if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
 		    skb->len < nlh->nlmsg_len ||
@@ -448,7 +448,7 @@ static int __devinit cn_init(void)
 
 	dev->nls = netlink_kernel_create(NETLINK_CONNECTOR,
 					 CN_NETLINK_USERS + 0xf,
-					 dev->input, THIS_MODULE);
+					 dev->input, NULL, THIS_MODULE);
 	if (!dev->nls)
 		return -EIO;
 
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 01206ebb1cf2..30a76404f0af 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -121,9 +121,9 @@ superio_exit(void)
  * ISA constants
  */
 
-#define REGION_ALIGNMENT	~7
-#define REGION_OFFSET		5
-#define REGION_LENGTH		2
+#define IOREGION_ALIGNMENT	~7
+#define IOREGION_OFFSET		5
+#define IOREGION_LENGTH		2
 #define ADDR_REG_OFFSET		5
 #define DATA_REG_OFFSET		6
 
@@ -1194,7 +1194,7 @@ static int w83627ehf_detect(struct i2c_adapter *adapter)
 	u8 fan4pin, fan5pin;
 	int i, err = 0;
 
-	if (!request_region(address + REGION_OFFSET, REGION_LENGTH,
+	if (!request_region(address + IOREGION_OFFSET, IOREGION_LENGTH,
 			    w83627ehf_driver.driver.name)) {
 		err = -EBUSY;
 		goto exit;
@@ -1322,7 +1322,7 @@ exit_remove:
 exit_free:
 	kfree(data);
 exit_release:
-	release_region(address + REGION_OFFSET, REGION_LENGTH);
+	release_region(address + IOREGION_OFFSET, IOREGION_LENGTH);
 exit:
 	return err;
 }
@@ -1337,7 +1337,7 @@ static int w83627ehf_detach_client(struct i2c_client *client)
 
 	if ((err = i2c_detach_client(client)))
 		return err;
-	release_region(client->addr + REGION_OFFSET, REGION_LENGTH);
+	release_region(client->addr + IOREGION_OFFSET, IOREGION_LENGTH);
 	kfree(data);
 
 	return 0;
@@ -1380,7 +1380,7 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr)
 	superio_select(W83627EHF_LD_HWM);
 	val = (superio_inb(SIO_REG_ADDR) << 8)
 	    | superio_inb(SIO_REG_ADDR + 1);
-	*addr = val & REGION_ALIGNMENT;
+	*addr = val & IOREGION_ALIGNMENT;
 	if (*addr == 0) {
 		superio_exit();
 		return -ENODEV;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index ca2e4f830c39..5bdf64b77913 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -57,6 +57,7 @@ if IDE | |||
57 | config IDE_MAX_HWIFS | 57 | config IDE_MAX_HWIFS |
58 | int "Max IDE interfaces" | 58 | int "Max IDE interfaces" |
59 | depends on ALPHA || SUPERH || IA64 || EMBEDDED | 59 | depends on ALPHA || SUPERH || IA64 || EMBEDDED |
60 | range 1 10 | ||
60 | default 4 | 61 | default 4 |
61 | help | 62 | help |
62 | This is the maximum number of IDE hardware interfaces that will | 63 | This is the maximum number of IDE hardware interfaces that will |
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c index d4b753e70119..dd7ec37fdeab 100644 --- a/drivers/ide/pci/delkin_cb.c +++ b/drivers/ide/pci/delkin_cb.c | |||
@@ -108,6 +108,7 @@ delkin_cb_remove (struct pci_dev *dev) | |||
108 | 108 | ||
109 | static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = { | 109 | static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = { |
110 | { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 110 | { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, |
111 | { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | ||
111 | { 0, }, | 112 | { 0, }, |
112 | }; | 113 | }; |
113 | MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl); | 114 | MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl); |
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index 60ecdc258c7c..ab6fa271aeb3 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c | |||
@@ -1,10 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/ide/pci/hpt366.c Version 1.01 Dec 23, 2006 | 2 | * linux/drivers/ide/pci/hpt366.c Version 1.02 Apr 18, 2007 |
3 | * | 3 | * |
4 | * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> | 4 | * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> |
5 | * Portions Copyright (C) 2001 Sun Microsystems, Inc. | 5 | * Portions Copyright (C) 2001 Sun Microsystems, Inc. |
6 | * Portions Copyright (C) 2003 Red Hat Inc | 6 | * Portions Copyright (C) 2003 Red Hat Inc |
7 | * Portions Copyright (C) 2005-2006 MontaVista Software, Inc. | 7 | * Portions Copyright (C) 2005-2007 MontaVista Software, Inc. |
8 | * | 8 | * |
9 | * Thanks to HighPoint Technologies for their assistance, and hardware. | 9 | * Thanks to HighPoint Technologies for their assistance, and hardware. |
10 | * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his | 10 | * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his |
@@ -494,6 +494,7 @@ static struct hpt_info hpt302n __devinitdata = { | |||
494 | .chip_type = HPT302N, | 494 | .chip_type = HPT302N, |
495 | .max_mode = HPT302_ALLOW_ATA133_6 ? 4 : 3, | 495 | .max_mode = HPT302_ALLOW_ATA133_6 ? 4 : 3, |
496 | .dpll_clk = 77, | 496 | .dpll_clk = 77, |
497 | .settings = hpt37x_settings | ||
497 | }; | 498 | }; |
498 | 499 | ||
499 | static struct hpt_info hpt371n __devinitdata = { | 500 | static struct hpt_info hpt371n __devinitdata = { |
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index 03e44b337eb0..a364003ba47f 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c | |||
@@ -834,7 +834,7 @@ static inline u16 ether1394_type_trans(struct sk_buff *skb, | |||
834 | struct eth1394hdr *eth; | 834 | struct eth1394hdr *eth; |
835 | unsigned char *rawp; | 835 | unsigned char *rawp; |
836 | 836 | ||
837 | skb->mac.raw = skb->data; | 837 | skb_reset_mac_header(skb); |
838 | skb_pull (skb, ETH1394_HLEN); | 838 | skb_pull (skb, ETH1394_HLEN); |
839 | eth = eth1394_hdr(skb); | 839 | eth = eth1394_hdr(skb); |
840 | 840 | ||
@@ -1668,7 +1668,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev) | |||
1668 | if (memcmp(eth->h_dest, dev->broadcast, ETH1394_ALEN) == 0 || | 1668 | if (memcmp(eth->h_dest, dev->broadcast, ETH1394_ALEN) == 0 || |
1669 | proto == htons(ETH_P_ARP) || | 1669 | proto == htons(ETH_P_ARP) || |
1670 | (proto == htons(ETH_P_IP) && | 1670 | (proto == htons(ETH_P_IP) && |
1671 | IN_MULTICAST(ntohl(skb->nh.iph->daddr)))) { | 1671 | IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) { |
1672 | tx_type = ETH1394_GASP; | 1672 | tx_type = ETH1394_GASP; |
1673 | dest_node = LOCAL_BUS | ALL_NODES; | 1673 | dest_node = LOCAL_BUS | ALL_NODES; |
1674 | max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD; | 1674 | max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD; |
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h index c45cbff9138d..1e8356535149 100644 --- a/drivers/ieee1394/eth1394.h +++ b/drivers/ieee1394/eth1394.h | |||
@@ -90,7 +90,7 @@ struct eth1394hdr { | |||
90 | 90 | ||
91 | static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb) | 91 | static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb) |
92 | { | 92 | { |
93 | return (struct eth1394hdr *)skb->mac.raw; | 93 | return (struct eth1394hdr *)skb_mac_header(skb); |
94 | } | 94 | } |
95 | #endif | 95 | #endif |
96 | 96 | ||
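eth1394 is converted from skb->mac.raw to the skb_reset_mac_header()/skb_mac_header() pair, the same accessor change applied throughout this series. A minimal sketch of the receive-path idiom, assuming the post-conversion sk_buff helpers; the header struct is a placeholder, not the eth1394 header.

#include <linux/skbuff.h>

struct demo_hdr {			/* placeholder link-layer header */
	unsigned char dest[6];
	unsigned char src[6];
};

/* Sketch: record where the link-layer header starts before pulling it,
 * then read it back through the accessor instead of skb->mac.raw. */
static struct demo_hdr *demo_type_trans(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);	/* was: skb->mac.raw = skb->data */
	skb_pull(skb, sizeof(struct demo_hdr));
	return (struct demo_hdr *)skb_mac_header(skb);
}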
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c index 59243d9aedd6..58bc272bd407 100644 --- a/drivers/infiniband/hw/amso1100/c2.c +++ b/drivers/infiniband/hw/amso1100/c2.c | |||
@@ -439,7 +439,8 @@ static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem) | |||
439 | } | 439 | } |
440 | 440 | ||
441 | /* Setup the skb for reuse since we're dropping this pkt */ | 441 | /* Setup the skb for reuse since we're dropping this pkt */ |
442 | elem->skb->tail = elem->skb->data = elem->skb->head; | 442 | elem->skb->data = elem->skb->head; |
443 | skb_reset_tail_pointer(elem->skb); | ||
443 | 444 | ||
444 | /* Zero out the rxp hdr in the sk_buff */ | 445 | /* Zero out the rxp hdr in the sk_buff */ |
445 | memset(elem->skb->data, 0, sizeof(*rxp_hdr)); | 446 | memset(elem->skb->data, 0, sizeof(*rxp_hdr)); |
@@ -521,9 +522,8 @@ static void c2_rx_interrupt(struct net_device *netdev) | |||
521 | * "sizeof(struct c2_rxp_hdr)". | 522 | * "sizeof(struct c2_rxp_hdr)". |
522 | */ | 523 | */ |
523 | skb->data += sizeof(*rxp_hdr); | 524 | skb->data += sizeof(*rxp_hdr); |
524 | skb->tail = skb->data + buflen; | 525 | skb_set_tail_pointer(skb, buflen); |
525 | skb->len = buflen; | 526 | skb->len = buflen; |
526 | skb->dev = netdev; | ||
527 | skb->protocol = eth_type_trans(skb, netdev); | 527 | skb->protocol = eth_type_trans(skb, netdev); |
528 | 528 | ||
529 | netif_rx(skb); | 529 | netif_rx(skb); |
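The amso1100 hunks stop writing skb->tail directly and use skb_reset_tail_pointer() / skb_set_tail_pointer(), which keep working once tail becomes an offset on some configurations. A small sketch of both call sites, assuming the helpers shown above; buflen is illustrative.

#include <linux/skbuff.h>

/* Sketch: recycle an skb for reuse (tail back to head), and mark a
 * received buffer's payload length without touching skb->tail. */
static void demo_recycle(struct sk_buff *skb)
{
	skb->data = skb->head;
	skb_reset_tail_pointer(skb);	/* was: skb->tail = skb->data = skb->head */
}

static void demo_mark_len(struct sk_buff *skb, unsigned int buflen)
{
	skb_set_tail_pointer(skb, buflen);	/* was: skb->tail = skb->data + buflen */
	skb->len = buflen;
}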
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 2d2de9b8b729..3b4b0acd707f 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -477,7 +477,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) | |||
477 | BUG_ON(skb_cloned(skb)); | 477 | BUG_ON(skb_cloned(skb)); |
478 | 478 | ||
479 | mpalen = sizeof(*mpa) + ep->plen; | 479 | mpalen = sizeof(*mpa) + ep->plen; |
480 | if (skb->data + mpalen + sizeof(*req) > skb->end) { | 480 | if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) { |
481 | kfree_skb(skb); | 481 | kfree_skb(skb); |
482 | skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL); | 482 | skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL); |
483 | if (!skb) { | 483 | if (!skb) { |
@@ -507,7 +507,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) | |||
507 | */ | 507 | */ |
508 | skb_get(skb); | 508 | skb_get(skb); |
509 | set_arp_failure_handler(skb, arp_failure_discard); | 509 | set_arp_failure_handler(skb, arp_failure_discard); |
510 | skb->h.raw = skb->data; | 510 | skb_reset_transport_header(skb); |
511 | len = skb->len; | 511 | len = skb->len; |
512 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); | 512 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); |
513 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); | 513 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); |
@@ -559,7 +559,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen) | |||
559 | skb_get(skb); | 559 | skb_get(skb); |
560 | skb->priority = CPL_PRIORITY_DATA; | 560 | skb->priority = CPL_PRIORITY_DATA; |
561 | set_arp_failure_handler(skb, arp_failure_discard); | 561 | set_arp_failure_handler(skb, arp_failure_discard); |
562 | skb->h.raw = skb->data; | 562 | skb_reset_transport_header(skb); |
563 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); | 563 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); |
564 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); | 564 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); |
565 | req->wr_lo = htonl(V_WR_TID(ep->hwtid)); | 565 | req->wr_lo = htonl(V_WR_TID(ep->hwtid)); |
@@ -610,7 +610,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) | |||
610 | */ | 610 | */ |
611 | skb_get(skb); | 611 | skb_get(skb); |
612 | set_arp_failure_handler(skb, arp_failure_discard); | 612 | set_arp_failure_handler(skb, arp_failure_discard); |
613 | skb->h.raw = skb->data; | 613 | skb_reset_transport_header(skb); |
614 | len = skb->len; | 614 | len = skb->len; |
615 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); | 615 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); |
616 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); | 616 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); |
@@ -821,7 +821,8 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb) | |||
821 | /* | 821 | /* |
822 | * copy the new data into our accumulation buffer. | 822 | * copy the new data into our accumulation buffer. |
823 | */ | 823 | */ |
824 | memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len); | 824 | skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), |
825 | skb->len); | ||
825 | ep->mpa_pkt_len += skb->len; | 826 | ep->mpa_pkt_len += skb->len; |
826 | 827 | ||
827 | /* | 828 | /* |
@@ -940,7 +941,8 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb) | |||
940 | /* | 941 | /* |
941 | * Copy the new data into our accumulation buffer. | 942 | * Copy the new data into our accumulation buffer. |
942 | */ | 943 | */ |
943 | memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len); | 944 | skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), |
945 | skb->len); | ||
944 | ep->mpa_pkt_len += skb->len; | 946 | ep->mpa_pkt_len += skb->len; |
945 | 947 | ||
946 | /* | 948 | /* |
@@ -1619,7 +1621,8 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1619 | PDBG("%s ep %p\n", __FUNCTION__, ep); | 1621 | PDBG("%s ep %p\n", __FUNCTION__, ep); |
1620 | skb_pull(skb, sizeof(struct cpl_rdma_terminate)); | 1622 | skb_pull(skb, sizeof(struct cpl_rdma_terminate)); |
1621 | PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len); | 1623 | PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len); |
1622 | memcpy(ep->com.qp->attr.terminate_buffer, skb->data, skb->len); | 1624 | skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, |
1625 | skb->len); | ||
1623 | ep->com.qp->attr.terminate_msg_len = skb->len; | 1626 | ep->com.qp->attr.terminate_msg_len = skb->len; |
1624 | ep->com.qp->attr.is_terminate_local = 0; | 1627 | ep->com.qp->attr.is_terminate_local = 0; |
1625 | return CPL_RET_BUF_DONE; | 1628 | return CPL_RET_BUF_DONE; |
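Three helpers recur in the cxgb3 hunks: skb_end_pointer() for the room check, skb_reset_transport_header() in place of skb->h.raw = skb->data, and skb_copy_from_linear_data() for copies out of the linear area. A condensed sketch combining them, assuming the era's <linux/skbuff.h>; the function, buffer and sizes are illustrative.

#include <linux/skbuff.h>
#include <linux/errno.h>

/* Sketch: check remaining room in the linear buffer, reset the
 * transport header to the current data pointer, then copy the linear
 * payload into an accumulation buffer. */
static int demo_accumulate(struct sk_buff *skb, void *buf,
			   unsigned int room, unsigned int needed)
{
	if (skb->data + needed > skb_end_pointer(skb))
		return -ENOSPC;			/* was: ... > skb->end */

	skb_reset_transport_header(skb);	/* was: skb->h.raw = skb->data */

	if (skb->len > room)
		return -ENOSPC;
	/* was: memcpy(buf, skb->data, skb->len) */
	skb_copy_from_linear_data(skb, buf, skb->len);
	return 0;
}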
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index da7e10230cf8..0c4e59b906cd 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -406,7 +406,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
406 | skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb); | 406 | skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb); |
407 | 407 | ||
408 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; | 408 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; |
409 | skb->mac.raw = skb->data; | 409 | skb_reset_mac_header(skb); |
410 | skb_pull(skb, IPOIB_ENCAP_LEN); | 410 | skb_pull(skb, IPOIB_ENCAP_LEN); |
411 | 411 | ||
412 | dev->last_rx = jiffies; | 412 | dev->last_rx = jiffies; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index c17e777cdc47..1bdb9101911a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -216,7 +216,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
216 | if (wc->slid != priv->local_lid || | 216 | if (wc->slid != priv->local_lid || |
217 | wc->src_qp != priv->qp->qp_num) { | 217 | wc->src_qp != priv->qp->qp_num) { |
218 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; | 218 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; |
219 | skb->mac.raw = skb->data; | 219 | skb_reset_mac_header(skb); |
220 | skb_pull(skb, IPOIB_ENCAP_LEN); | 220 | skb_pull(skb, IPOIB_ENCAP_LEN); |
221 | 221 | ||
222 | dev->last_rx = jiffies; | 222 | dev->last_rx = jiffies; |
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c index e3e5c1399076..ee2b0b9f8f46 100644 --- a/drivers/isdn/act2000/module.c +++ b/drivers/isdn/act2000/module.c | |||
@@ -442,7 +442,7 @@ act2000_sendbuf(act2000_card *card, int channel, int ack, struct sk_buff *skb) | |||
442 | return 0; | 442 | return 0; |
443 | } | 443 | } |
444 | skb_reserve(xmit_skb, 19); | 444 | skb_reserve(xmit_skb, 19); |
445 | memcpy(skb_put(xmit_skb, len), skb->data, len); | 445 | skb_copy_from_linear_data(skb, skb_put(xmit_skb, len), len); |
446 | } else { | 446 | } else { |
447 | xmit_skb = skb_clone(skb, GFP_ATOMIC); | 447 | xmit_skb = skb_clone(skb, GFP_ATOMIC); |
448 | if (!xmit_skb) { | 448 | if (!xmit_skb) { |
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 2baef349c12d..c8e1c357cec8 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c | |||
@@ -652,7 +652,7 @@ static int write_modem(struct cardstate *cs) | |||
652 | * transmit data | 652 | * transmit data |
653 | */ | 653 | */ |
654 | count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); | 654 | count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); |
655 | memcpy(ucs->bulk_out_buffer, bcs->tx_skb->data, count); | 655 | skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count); |
656 | skb_pull(bcs->tx_skb, count); | 656 | skb_pull(bcs->tx_skb, count); |
657 | atomic_set(&ucs->busy, 1); | 657 | atomic_set(&ucs->busy, 1); |
658 | gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count); | 658 | gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count); |
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c index 1e2d38e3d68c..428872b653e9 100644 --- a/drivers/isdn/hardware/avm/b1dma.c +++ b/drivers/isdn/hardware/avm/b1dma.c | |||
@@ -404,7 +404,8 @@ static void b1dma_dispatch_tx(avmcard *card) | |||
404 | printk(KERN_DEBUG "tx: put 0x%x len=%d\n", | 404 | printk(KERN_DEBUG "tx: put 0x%x len=%d\n", |
405 | skb->data[2], txlen); | 405 | skb->data[2], txlen); |
406 | #endif | 406 | #endif |
407 | memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2); | 407 | skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf, |
408 | skb->len - 2); | ||
408 | } | 409 | } |
409 | txlen = (txlen + 3) & ~3; | 410 | txlen = (txlen + 3) & ~3; |
410 | 411 | ||
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c index 6f5efa8d78cb..d58f927e766a 100644 --- a/drivers/isdn/hardware/avm/c4.c +++ b/drivers/isdn/hardware/avm/c4.c | |||
@@ -457,7 +457,8 @@ static void c4_dispatch_tx(avmcard *card) | |||
457 | printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n", | 457 | printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n", |
458 | card->name, skb->data[2], txlen); | 458 | card->name, skb->data[2], txlen); |
459 | #endif | 459 | #endif |
460 | memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2); | 460 | skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf, |
461 | skb->len - 2); | ||
461 | } | 462 | } |
462 | txlen = (txlen + 3) & ~3; | 463 | txlen = (txlen + 3) & ~3; |
463 | 464 | ||
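Both AVM DMA drivers copy the payload minus a two-byte command prefix into their DMA buffer, so the memcpy from skb->data + 2 becomes skb_copy_from_linear_data_offset(). A minimal sketch of that call, following the (skb, offset, to, len) argument order used in the hunks; dmabuf is a placeholder.

#include <linux/skbuff.h>

/* Sketch: copy the skb's linear data, skipping a 2-byte command
 * prefix, into a driver-owned DMA bounce buffer. */
static void demo_fill_dma(void *dmabuf, struct sk_buff *skb)
{
	/* was: memcpy(dmabuf, skb->data + 2, skb->len - 2); */
	skb_copy_from_linear_data_offset(skb, 2, dmabuf, skb->len - 2);
}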
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c index ae377e812775..1642dca988a1 100644 --- a/drivers/isdn/hisax/elsa_ser.c +++ b/drivers/isdn/hisax/elsa_ser.c | |||
@@ -254,14 +254,16 @@ write_modem(struct BCState *bcs) { | |||
254 | count = len; | 254 | count = len; |
255 | if (count > MAX_MODEM_BUF - fp) { | 255 | if (count > MAX_MODEM_BUF - fp) { |
256 | count = MAX_MODEM_BUF - fp; | 256 | count = MAX_MODEM_BUF - fp; |
257 | memcpy(cs->hw.elsa.transbuf + fp, bcs->tx_skb->data, count); | 257 | skb_copy_from_linear_data(bcs->tx_skb, |
258 | cs->hw.elsa.transbuf + fp, count); | ||
258 | skb_pull(bcs->tx_skb, count); | 259 | skb_pull(bcs->tx_skb, count); |
259 | cs->hw.elsa.transcnt += count; | 260 | cs->hw.elsa.transcnt += count; |
260 | ret = count; | 261 | ret = count; |
261 | count = len - count; | 262 | count = len - count; |
262 | fp = 0; | 263 | fp = 0; |
263 | } | 264 | } |
264 | memcpy((cs->hw.elsa.transbuf + fp), bcs->tx_skb->data, count); | 265 | skb_copy_from_linear_data(bcs->tx_skb, |
266 | cs->hw.elsa.transbuf + fp, count); | ||
265 | skb_pull(bcs->tx_skb, count); | 267 | skb_pull(bcs->tx_skb, count); |
266 | cs->hw.elsa.transcnt += count; | 268 | cs->hw.elsa.transcnt += count; |
267 | ret += count; | 269 | ret += count; |
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c index cd3b5ad53491..3446f249d675 100644 --- a/drivers/isdn/hisax/isdnl2.c +++ b/drivers/isdn/hisax/isdnl2.c | |||
@@ -1293,7 +1293,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) | |||
1293 | oskb = skb; | 1293 | oskb = skb; |
1294 | skb = alloc_skb(oskb->len + i, GFP_ATOMIC); | 1294 | skb = alloc_skb(oskb->len + i, GFP_ATOMIC); |
1295 | memcpy(skb_put(skb, i), header, i); | 1295 | memcpy(skb_put(skb, i), header, i); |
1296 | memcpy(skb_put(skb, oskb->len), oskb->data, oskb->len); | 1296 | skb_copy_from_linear_data(oskb, |
1297 | skb_put(skb, oskb->len), oskb->len); | ||
1297 | dev_kfree_skb(oskb); | 1298 | dev_kfree_skb(oskb); |
1298 | } | 1299 | } |
1299 | st->l2.l2l1(st, PH_PULL | INDICATION, skb); | 1300 | st->l2.l2l1(st, PH_PULL | INDICATION, skb); |
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c index b2ae4ec1e49e..f85450146bdc 100644 --- a/drivers/isdn/hysdn/hycapi.c +++ b/drivers/isdn/hysdn/hycapi.c | |||
@@ -398,8 +398,9 @@ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) | |||
398 | _len = CAPIMSG_LEN(skb->data); | 398 | _len = CAPIMSG_LEN(skb->data); |
399 | if (_len > 22) { | 399 | if (_len > 22) { |
400 | _len2 = _len - 22; | 400 | _len2 = _len - 22; |
401 | memcpy(msghead, skb->data, 22); | 401 | skb_copy_from_linear_data(skb, msghead, 22); |
402 | memcpy(skb->data + _len2, msghead, 22); | 402 | skb_copy_to_linear_data_offset(skb, _len2, |
403 | msghead, 22); | ||
403 | skb_pull(skb, _len2); | 404 | skb_pull(skb, _len2); |
404 | CAPIMSG_SETLEN(skb->data, 22); | 405 | CAPIMSG_SETLEN(skb->data, 22); |
405 | retval = capilib_data_b3_req(&cinfo->ncci_head, | 406 | retval = capilib_data_b3_req(&cinfo->ncci_head, |
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c index 557d96c78a62..cfa8fa5e44ab 100644 --- a/drivers/isdn/hysdn/hysdn_net.c +++ b/drivers/isdn/hysdn/hysdn_net.c | |||
@@ -214,8 +214,6 @@ hysdn_rx_netpkt(hysdn_card * card, unsigned char *buf, unsigned short len) | |||
214 | lp->stats.rx_dropped++; | 214 | lp->stats.rx_dropped++; |
215 | return; | 215 | return; |
216 | } | 216 | } |
217 | skb->dev = &lp->netdev; | ||
218 | |||
219 | /* copy the data */ | 217 | /* copy the data */ |
220 | memcpy(skb_put(skb, len), buf, len); | 218 | memcpy(skb_put(skb, len), buf, len); |
221 | 219 | ||
diff --git a/drivers/isdn/hysdn/hysdn_sched.c b/drivers/isdn/hysdn/hysdn_sched.c index b7b5aa4748a0..81db4a190d41 100644 --- a/drivers/isdn/hysdn/hysdn_sched.c +++ b/drivers/isdn/hysdn/hysdn_sched.c | |||
@@ -113,7 +113,8 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf, | |||
113 | (skb = hysdn_tx_netget(card)) != NULL) | 113 | (skb = hysdn_tx_netget(card)) != NULL) |
114 | { | 114 | { |
115 | if (skb->len <= maxlen) { | 115 | if (skb->len <= maxlen) { |
116 | memcpy(buf, skb->data, skb->len); /* copy the packet to the buffer */ | 116 | /* copy the packet to the buffer */ |
117 | skb_copy_from_linear_data(skb, buf, skb->len); | ||
117 | *len = skb->len; | 118 | *len = skb->len; |
118 | *chan = CHAN_NDIS_DATA; | 119 | *chan = CHAN_NDIS_DATA; |
119 | card->net_tx_busy = 1; /* we are busy sending network data */ | 120 | card->net_tx_busy = 1; /* we are busy sending network data */ |
@@ -126,7 +127,7 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf, | |||
126 | ((skb = hycapi_tx_capiget(card)) != NULL) ) | 127 | ((skb = hycapi_tx_capiget(card)) != NULL) ) |
127 | { | 128 | { |
128 | if (skb->len <= maxlen) { | 129 | if (skb->len <= maxlen) { |
129 | memcpy(buf, skb->data, skb->len); | 130 | skb_copy_from_linear_data(skb, buf, skb->len); |
130 | *len = skb->len; | 131 | *len = skb->len; |
131 | *chan = CHAN_CAPI; | 132 | *chan = CHAN_CAPI; |
132 | hycapi_tx_capiack(card); | 133 | hycapi_tx_capiack(card); |
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 9c926e41b114..c97330b19877 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c | |||
@@ -829,7 +829,7 @@ isdn_readbchan(int di, int channel, u_char * buf, u_char * fp, int len, wait_que | |||
829 | dflag = 0; | 829 | dflag = 0; |
830 | } | 830 | } |
831 | count_put = count_pull; | 831 | count_put = count_pull; |
832 | memcpy(cp, skb->data, count_put); | 832 | skb_copy_from_linear_data(skb, cp, count_put); |
833 | cp += count_put; | 833 | cp += count_put; |
834 | len -= count_put; | 834 | len -= count_put; |
835 | #ifdef CONFIG_ISDN_AUDIO | 835 | #ifdef CONFIG_ISDN_AUDIO |
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 838b3734e2b6..aa83277aba74 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c | |||
@@ -872,7 +872,8 @@ typedef struct { | |||
872 | static void | 872 | static void |
873 | isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp) | 873 | isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp) |
874 | { | 874 | { |
875 | u_char *p = skb->nh.raw; /* hopefully, this was set correctly */ | 875 | /* hopefully, this was set correctly */ |
876 | const u_char *p = skb_network_header(skb); | ||
876 | unsigned short proto = ntohs(skb->protocol); | 877 | unsigned short proto = ntohs(skb->protocol); |
877 | int data_ofs; | 878 | int data_ofs; |
878 | ip_ports *ipp; | 879 | ip_ports *ipp; |
@@ -880,7 +881,7 @@ isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp) | |||
880 | 881 | ||
881 | addinfo[0] = '\0'; | 882 | addinfo[0] = '\0'; |
882 | /* This check stolen from 2.1.72 dev_queue_xmit_nit() */ | 883 | /* This check stolen from 2.1.72 dev_queue_xmit_nit() */ |
883 | if (skb->nh.raw < skb->data || skb->nh.raw >= skb->tail) { | 884 | if (p < skb->data || skb->network_header >= skb->tail) { |
884 | /* fall back to old isdn_net_log_packet method() */ | 885 | /* fall back to old isdn_net_log_packet method() */ |
885 | char * buf = skb->data; | 886 | char * buf = skb->data; |
886 | 887 | ||
@@ -1121,7 +1122,7 @@ isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev) | |||
1121 | if (!skb) | 1122 | if (!skb) |
1122 | return; | 1123 | return; |
1123 | if (lp->p_encap == ISDN_NET_ENCAP_ETHER) { | 1124 | if (lp->p_encap == ISDN_NET_ENCAP_ETHER) { |
1124 | int pullsize = (ulong)skb->nh.raw - (ulong)skb->data - ETH_HLEN; | 1125 | const int pullsize = skb_network_offset(skb) - ETH_HLEN; |
1125 | if (pullsize > 0) { | 1126 | if (pullsize > 0) { |
1126 | printk(KERN_DEBUG "isdn_net: Pull junk %d\n", pullsize); | 1127 | printk(KERN_DEBUG "isdn_net: Pull junk %d\n", pullsize); |
1127 | skb_pull(skb, pullsize); | 1128 | skb_pull(skb, pullsize); |
@@ -1366,7 +1367,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
1366 | struct ethhdr *eth; | 1367 | struct ethhdr *eth; |
1367 | unsigned char *rawp; | 1368 | unsigned char *rawp; |
1368 | 1369 | ||
1369 | skb->mac.raw = skb->data; | 1370 | skb_reset_mac_header(skb); |
1370 | skb_pull(skb, ETH_HLEN); | 1371 | skb_pull(skb, ETH_HLEN); |
1371 | eth = eth_hdr(skb); | 1372 | eth = eth_hdr(skb); |
1372 | 1373 | ||
@@ -1786,7 +1787,7 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb) | |||
1786 | } | 1787 | } |
1787 | skb->dev = ndev; | 1788 | skb->dev = ndev; |
1788 | skb->pkt_type = PACKET_HOST; | 1789 | skb->pkt_type = PACKET_HOST; |
1789 | skb->mac.raw = skb->data; | 1790 | skb_reset_mac_header(skb); |
1790 | #ifdef ISDN_DEBUG_NET_DUMP | 1791 | #ifdef ISDN_DEBUG_NET_DUMP |
1791 | isdn_dumppkt("R:", skb->data, skb->len, 40); | 1792 | isdn_dumppkt("R:", skb->data, skb->len, 40); |
1792 | #endif | 1793 | #endif |
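Besides the mac-header reset, isdn_net now reads the network header through skb_network_header() and derives the Ethernet pull size from skb_network_offset(). A short sketch of the offset calculation, assuming ETH_HLEN from <linux/if_ether.h>; the function name and message are illustrative.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>

/* Sketch: drop any padding sitting between the start of the buffer
 * and the Ethernet header that precedes the network header. */
static void demo_adjust_hdr(struct sk_buff *skb)
{
	/* was: (ulong)skb->nh.raw - (ulong)skb->data - ETH_HLEN */
	const int pullsize = skb_network_offset(skb) - ETH_HLEN;

	if (pullsize > 0) {
		printk(KERN_DEBUG "demo: pulling %d bytes of junk\n", pullsize);
		skb_pull(skb, pullsize);
	}
}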
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 1b2df80c3bce..387392cb3d68 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c | |||
@@ -1100,7 +1100,8 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff | |||
1100 | goto drop_packet; | 1100 | goto drop_packet; |
1101 | } | 1101 | } |
1102 | skb_put(skb, skb_old->len + 128); | 1102 | skb_put(skb, skb_old->len + 128); |
1103 | memcpy(skb->data, skb_old->data, skb_old->len); | 1103 | skb_copy_from_linear_data(skb_old, skb->data, |
1104 | skb_old->len); | ||
1104 | if (net_dev->local->ppp_slot < 0) { | 1105 | if (net_dev->local->ppp_slot < 0) { |
1105 | printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n", | 1106 | printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n", |
1106 | __FUNCTION__, net_dev->local->ppp_slot); | 1107 | __FUNCTION__, net_dev->local->ppp_slot); |
@@ -1167,7 +1168,7 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff | |||
1167 | mlp->huptimer = 0; | 1168 | mlp->huptimer = 0; |
1168 | #endif /* CONFIG_IPPP_FILTER */ | 1169 | #endif /* CONFIG_IPPP_FILTER */ |
1169 | skb->dev = dev; | 1170 | skb->dev = dev; |
1170 | skb->mac.raw = skb->data; | 1171 | skb_reset_mac_header(skb); |
1171 | netif_rx(skb); | 1172 | netif_rx(skb); |
1172 | /* net_dev->local->stats.rx_packets++; done in isdn_net.c */ | 1173 | /* net_dev->local->stats.rx_packets++; done in isdn_net.c */ |
1173 | return; | 1174 | return; |
@@ -1902,7 +1903,9 @@ void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp, | |||
1902 | while( from != to ) { | 1903 | while( from != to ) { |
1903 | unsigned int len = from->len - MP_HEADER_LEN; | 1904 | unsigned int len = from->len - MP_HEADER_LEN; |
1904 | 1905 | ||
1905 | memcpy(skb_put(skb,len), from->data+MP_HEADER_LEN, len); | 1906 | skb_copy_from_linear_data_offset(from, MP_HEADER_LEN, |
1907 | skb_put(skb,len), | ||
1908 | len); | ||
1906 | frag = from->next; | 1909 | frag = from->next; |
1907 | isdn_ppp_mp_free_skb(mp, from); | 1910 | isdn_ppp_mp_free_skb(mp, from); |
1908 | from = frag; | 1911 | from = frag; |
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index e3add27dd0e1..e93ad59f60bf 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c | |||
@@ -415,7 +415,8 @@ isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card * card) | |||
415 | spin_lock_irqsave(&card->isdnloop_lock, flags); | 415 | spin_lock_irqsave(&card->isdnloop_lock, flags); |
416 | nskb = dev_alloc_skb(skb->len); | 416 | nskb = dev_alloc_skb(skb->len); |
417 | if (nskb) { | 417 | if (nskb) { |
418 | memcpy(skb_put(nskb, len), skb->data, len); | 418 | skb_copy_from_linear_data(skb, |
419 | skb_put(nskb, len), len); | ||
419 | skb_queue_tail(&card->bqueue[channel], nskb); | 420 | skb_queue_tail(&card->bqueue[channel], nskb); |
420 | dev_kfree_skb(skb); | 421 | dev_kfree_skb(skb); |
421 | } else | 422 | } else |
diff --git a/drivers/isdn/pcbit/capi.c b/drivers/isdn/pcbit/capi.c index 47c59e95898d..7b55e151f1b0 100644 --- a/drivers/isdn/pcbit/capi.c +++ b/drivers/isdn/pcbit/capi.c | |||
@@ -429,8 +429,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan, | |||
429 | if (!(info->data.setup.CallingPN = kmalloc(len - count + 1, GFP_ATOMIC))) | 429 | if (!(info->data.setup.CallingPN = kmalloc(len - count + 1, GFP_ATOMIC))) |
430 | return -1; | 430 | return -1; |
431 | 431 | ||
432 | memcpy(info->data.setup.CallingPN, skb->data + count + 1, | 432 | skb_copy_from_linear_data_offset(skb, count + 1, |
433 | len - count); | 433 | info->data.setup.CallingPN, |
434 | len - count); | ||
434 | info->data.setup.CallingPN[len - count] = 0; | 435 | info->data.setup.CallingPN[len - count] = 0; |
435 | 436 | ||
436 | } | 437 | } |
@@ -457,8 +458,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan, | |||
457 | if (!(info->data.setup.CalledPN = kmalloc(len - count + 1, GFP_ATOMIC))) | 458 | if (!(info->data.setup.CalledPN = kmalloc(len - count + 1, GFP_ATOMIC))) |
458 | return -1; | 459 | return -1; |
459 | 460 | ||
460 | memcpy(info->data.setup.CalledPN, skb->data + count + 1, | 461 | skb_copy_from_linear_data_offset(skb, count + 1, |
461 | len - count); | 462 | info->data.setup.CalledPN, |
463 | len - count); | ||
462 | info->data.setup.CalledPN[len - count] = 0; | 464 | info->data.setup.CalledPN[len - count] = 0; |
463 | 465 | ||
464 | } | 466 | } |
@@ -539,7 +541,7 @@ int capi_decode_conn_actv_ind(struct pcbit_chan * chan, struct sk_buff *skb) | |||
539 | 541 | ||
540 | #ifdef DEBUG | 542 | #ifdef DEBUG |
541 | if (len > 1 && len < 31) { | 543 | if (len > 1 && len < 31) { |
542 | memcpy(str, skb->data + 2, len - 1); | 544 | skb_copy_from_linear_data_offset(skb, 2, str, len - 1); |
543 | str[len] = 0; | 545 | str[len] = 0; |
544 | printk(KERN_DEBUG "Connected Party Number: %s\n", str); | 546 | printk(KERN_DEBUG "Connected Party Number: %s\n", str); |
545 | } | 547 | } |
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c index e85b4c7c36f7..cab26f301eab 100644 --- a/drivers/kvm/mmu.c +++ b/drivers/kvm/mmu.c | |||
@@ -1171,6 +1171,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes) | |||
1171 | * and zap two pdes instead of one. | 1171 | * and zap two pdes instead of one. |
1172 | */ | 1172 | */ |
1173 | if (level == PT32_ROOT_LEVEL) { | 1173 | if (level == PT32_ROOT_LEVEL) { |
1174 | page_offset &= ~7; /* kill rounding error */ | ||
1174 | page_offset <<= 1; | 1175 | page_offset <<= 1; |
1175 | npte = 2; | 1176 | npte = 2; |
1176 | } | 1177 | } |
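The KVM hunk masks page_offset to an 8-byte boundary before doubling it: a pair of 4-byte guest pdes is shadowed by 8-byte entries, and without the mask a misaligned guest write would land mid-entry after the shift. A tiny standalone sketch of the arithmetic; this reading of the one-line change is inferred from the surrounding comment, not stated in the hunk.

/* Sketch: map a byte offset inside a guest 32-bit page table onto the
 * matching offset inside the 64-bit shadow table.  Example: offset
 * 0x806 would double to 0x100c (mid-entry) without the mask. */
static unsigned int demo_shadow_offset(unsigned int page_offset)
{
	page_offset &= ~7u;		/* kill rounding error: snap to an entry pair */
	return page_offset << 1;	/* 4-byte guest pdes -> 8-byte shadow pdes */
}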
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index 76e9c36597eb..6a5ab409c4e7 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c | |||
@@ -174,7 +174,7 @@ static unsigned short dvb_net_eth_type_trans(struct sk_buff *skb, | |||
174 | struct ethhdr *eth; | 174 | struct ethhdr *eth; |
175 | unsigned char *rawp; | 175 | unsigned char *rawp; |
176 | 176 | ||
177 | skb->mac.raw=skb->data; | 177 | skb_reset_mac_header(skb); |
178 | skb_pull(skb,dev->hard_header_len); | 178 | skb_pull(skb,dev->hard_header_len); |
179 | eth = eth_hdr(skb); | 179 | eth = eth_hdr(skb); |
180 | 180 | ||
@@ -600,6 +600,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len ) | |||
600 | /* Check CRC32, we've got it in our skb already. */ | 600 | /* Check CRC32, we've got it in our skb already. */ |
601 | unsigned short ulen = htons(priv->ule_sndu_len); | 601 | unsigned short ulen = htons(priv->ule_sndu_len); |
602 | unsigned short utype = htons(priv->ule_sndu_type); | 602 | unsigned short utype = htons(priv->ule_sndu_type); |
603 | const u8 *tail; | ||
603 | struct kvec iov[3] = { | 604 | struct kvec iov[3] = { |
604 | { &ulen, sizeof ulen }, | 605 | { &ulen, sizeof ulen }, |
605 | { &utype, sizeof utype }, | 606 | { &utype, sizeof utype }, |
@@ -613,10 +614,11 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len ) | |||
613 | } | 614 | } |
614 | 615 | ||
615 | ule_crc = iov_crc32(ule_crc, iov, 3); | 616 | ule_crc = iov_crc32(ule_crc, iov, 3); |
616 | expected_crc = *((u8 *)priv->ule_skb->tail - 4) << 24 | | 617 | tail = skb_tail_pointer(priv->ule_skb); |
617 | *((u8 *)priv->ule_skb->tail - 3) << 16 | | 618 | expected_crc = *(tail - 4) << 24 | |
618 | *((u8 *)priv->ule_skb->tail - 2) << 8 | | 619 | *(tail - 3) << 16 | |
619 | *((u8 *)priv->ule_skb->tail - 1); | 620 | *(tail - 2) << 8 | |
621 | *(tail - 1); | ||
620 | if (ule_crc != expected_crc) { | 622 | if (ule_crc != expected_crc) { |
621 | printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n", | 623 | printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n", |
622 | priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0); | 624 | priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0); |
@@ -695,7 +697,9 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len ) | |||
695 | } | 697 | } |
696 | else | 698 | else |
697 | { | 699 | { |
698 | memcpy(dest_addr, priv->ule_skb->data, ETH_ALEN); | 700 | skb_copy_from_linear_data(priv->ule_skb, |
701 | dest_addr, | ||
702 | ETH_ALEN); | ||
699 | skb_pull(priv->ule_skb, ETH_ALEN); | 703 | skb_pull(priv->ule_skb, ETH_ALEN); |
700 | } | 704 | } |
701 | } | 705 | } |
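dvb_net reads the trailing CRC32 through skb_tail_pointer() instead of dereferencing skb->tail, and copies the destination MAC with skb_copy_from_linear_data(). A sketch of the CRC extraction, assuming the last four bytes of the linear area hold a big-endian CRC as in the hunk.

#include <linux/skbuff.h>
#include <linux/types.h>

/* Sketch: rebuild the big-endian CRC32 stored in the last four bytes
 * of the skb, using the tail-pointer accessor. */
static u32 demo_trailing_crc(const struct sk_buff *skb)
{
	const u8 *tail = skb_tail_pointer(skb);	/* was: (u8 *)skb->tail */

	return *(tail - 4) << 24 |
	       *(tail - 3) << 16 |
	       *(tail - 2) << 8  |
	       *(tail - 1);
}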
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index b691292ff599..7dd34bd28efc 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c | |||
@@ -714,6 +714,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) | |||
714 | LANSendRequest_t *pSendReq; | 714 | LANSendRequest_t *pSendReq; |
715 | SGETransaction32_t *pTrans; | 715 | SGETransaction32_t *pTrans; |
716 | SGESimple64_t *pSimple; | 716 | SGESimple64_t *pSimple; |
717 | const unsigned char *mac; | ||
717 | dma_addr_t dma; | 718 | dma_addr_t dma; |
718 | unsigned long flags; | 719 | unsigned long flags; |
719 | int ctx; | 720 | int ctx; |
@@ -753,7 +754,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) | |||
753 | /* Set the mac.raw pointer, since this apparently isn't getting | 754 | /* Set the mac.raw pointer, since this apparently isn't getting |
754 | * done before we get the skb. Pull the data pointer past the mac data. | 755 | * done before we get the skb. Pull the data pointer past the mac data. |
755 | */ | 756 | */ |
756 | skb->mac.raw = skb->data; | 757 | skb_reset_mac_header(skb); |
757 | skb_pull(skb, 12); | 758 | skb_pull(skb, 12); |
758 | 759 | ||
759 | dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len, | 760 | dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len, |
@@ -784,6 +785,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) | |||
784 | // IOC_AND_NETDEV_NAMES_s_s(dev), | 785 | // IOC_AND_NETDEV_NAMES_s_s(dev), |
785 | // ctx, skb, skb->data)); | 786 | // ctx, skb, skb->data)); |
786 | 787 | ||
788 | mac = skb_mac_header(skb); | ||
787 | #ifdef QLOGIC_NAA_WORKAROUND | 789 | #ifdef QLOGIC_NAA_WORKAROUND |
788 | { | 790 | { |
789 | struct NAA_Hosed *nh; | 791 | struct NAA_Hosed *nh; |
@@ -793,12 +795,12 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) | |||
793 | drops. */ | 795 | drops. */ |
794 | read_lock_irq(&bad_naa_lock); | 796 | read_lock_irq(&bad_naa_lock); |
795 | for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) { | 797 | for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) { |
796 | if ((nh->ieee[0] == skb->mac.raw[0]) && | 798 | if ((nh->ieee[0] == mac[0]) && |
797 | (nh->ieee[1] == skb->mac.raw[1]) && | 799 | (nh->ieee[1] == mac[1]) && |
798 | (nh->ieee[2] == skb->mac.raw[2]) && | 800 | (nh->ieee[2] == mac[2]) && |
799 | (nh->ieee[3] == skb->mac.raw[3]) && | 801 | (nh->ieee[3] == mac[3]) && |
800 | (nh->ieee[4] == skb->mac.raw[4]) && | 802 | (nh->ieee[4] == mac[4]) && |
801 | (nh->ieee[5] == skb->mac.raw[5])) { | 803 | (nh->ieee[5] == mac[5])) { |
802 | cur_naa = nh->NAA; | 804 | cur_naa = nh->NAA; |
803 | dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value " | 805 | dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value " |
804 | "= %04x.\n", cur_naa)); | 806 | "= %04x.\n", cur_naa)); |
@@ -810,12 +812,12 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) | |||
810 | #endif | 812 | #endif |
811 | 813 | ||
812 | pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | | 814 | pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | |
813 | (skb->mac.raw[0] << 8) | | 815 | (mac[0] << 8) | |
814 | (skb->mac.raw[1] << 0)); | 816 | (mac[1] << 0)); |
815 | pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) | | 817 | pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) | |
816 | (skb->mac.raw[3] << 16) | | 818 | (mac[3] << 16) | |
817 | (skb->mac.raw[4] << 8) | | 819 | (mac[4] << 8) | |
818 | (skb->mac.raw[5] << 0)); | 820 | (mac[5] << 0)); |
819 | 821 | ||
820 | pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2]; | 822 | pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2]; |
821 | 823 | ||
@@ -930,7 +932,7 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg) | |||
930 | pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, | 932 | pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, |
931 | priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); | 933 | priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); |
932 | 934 | ||
933 | memcpy(skb_put(skb, len), old_skb->data, len); | 935 | skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); |
934 | 936 | ||
935 | pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, | 937 | pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, |
936 | priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); | 938 | priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); |
@@ -1091,7 +1093,7 @@ mpt_lan_receive_post_reply(struct net_device *dev, | |||
1091 | priv->RcvCtl[ctx].dma, | 1093 | priv->RcvCtl[ctx].dma, |
1092 | priv->RcvCtl[ctx].len, | 1094 | priv->RcvCtl[ctx].len, |
1093 | PCI_DMA_FROMDEVICE); | 1095 | PCI_DMA_FROMDEVICE); |
1094 | memcpy(skb_put(skb, l), old_skb->data, l); | 1096 | skb_copy_from_linear_data(old_skb, skb_put(skb, l), l); |
1095 | 1097 | ||
1096 | pci_dma_sync_single_for_device(mpt_dev->pcidev, | 1098 | pci_dma_sync_single_for_device(mpt_dev->pcidev, |
1097 | priv->RcvCtl[ctx].dma, | 1099 | priv->RcvCtl[ctx].dma, |
@@ -1120,7 +1122,7 @@ mpt_lan_receive_post_reply(struct net_device *dev, | |||
1120 | priv->RcvCtl[ctx].len, | 1122 | priv->RcvCtl[ctx].len, |
1121 | PCI_DMA_FROMDEVICE); | 1123 | PCI_DMA_FROMDEVICE); |
1122 | 1124 | ||
1123 | memcpy(skb_put(skb, len), old_skb->data, len); | 1125 | skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); |
1124 | 1126 | ||
1125 | pci_dma_sync_single_for_device(mpt_dev->pcidev, | 1127 | pci_dma_sync_single_for_device(mpt_dev->pcidev, |
1126 | priv->RcvCtl[ctx].dma, | 1128 | priv->RcvCtl[ctx].dma, |
@@ -1549,7 +1551,7 @@ mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
1549 | struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data; | 1551 | struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data; |
1550 | struct fcllc *fcllc; | 1552 | struct fcllc *fcllc; |
1551 | 1553 | ||
1552 | skb->mac.raw = skb->data; | 1554 | skb_reset_mac_header(skb); |
1553 | skb_pull(skb, sizeof(struct mpt_lan_ohdr)); | 1555 | skb_pull(skb, sizeof(struct mpt_lan_ohdr)); |
1554 | 1556 | ||
1555 | if (fch->dtype == htons(0xffff)) { | 1557 | if (fch->dtype == htons(0xffff)) { |
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c index 4db2055cee31..001af7f7ddda 100644 --- a/drivers/mtd/maps/sun_uflash.c +++ b/drivers/mtd/maps/sun_uflash.c | |||
@@ -39,7 +39,7 @@ MODULE_VERSION("2.0"); | |||
39 | 39 | ||
40 | static LIST_HEAD(device_list); | 40 | static LIST_HEAD(device_list); |
41 | struct uflash_dev { | 41 | struct uflash_dev { |
42 | char *name; /* device name */ | 42 | const char *name; /* device name */ |
43 | struct map_info map; /* mtd map info */ | 43 | struct map_info map; /* mtd map info */ |
44 | struct mtd_info *mtd; /* mtd info */ | 44 | struct mtd_info *mtd; /* mtd info */ |
45 | }; | 45 | }; |
@@ -80,7 +80,7 @@ int uflash_devinit(struct linux_ebus_device *edev, struct device_node *dp) | |||
80 | 80 | ||
81 | up->name = of_get_property(dp, "model", NULL); | 81 | up->name = of_get_property(dp, "model", NULL); |
82 | if (up->name && 0 < strlen(up->name)) | 82 | if (up->name && 0 < strlen(up->name)) |
83 | up->map.name = up->name; | 83 | up->map.name = (char *)up->name; |
84 | 84 | ||
85 | up->map.phys = res->start; | 85 | up->map.phys = res->start; |
86 | 86 | ||
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c index 06e33786078d..4bee99ba7dbb 100644 --- a/drivers/net/3c501.c +++ b/drivers/net/3c501.c | |||
@@ -735,7 +735,6 @@ static void el_receive(struct net_device *dev) | |||
735 | else | 735 | else |
736 | { | 736 | { |
737 | skb_reserve(skb,2); /* Force 16 byte alignment */ | 737 | skb_reserve(skb,2); /* Force 16 byte alignment */ |
738 | skb->dev = dev; | ||
739 | /* | 738 | /* |
740 | * The read increments through the bytes. The interrupt | 739 | * The read increments through the bytes. The interrupt |
741 | * handler will fix the pointer when it returns to | 740 | * handler will fix the pointer when it returns to |
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c index 702bfb2a5e99..e985a85a5623 100644 --- a/drivers/net/3c505.c +++ b/drivers/net/3c505.c | |||
@@ -615,7 +615,6 @@ static void receive_packet(struct net_device *dev, int len) | |||
615 | if (test_and_set_bit(0, (void *) &adapter->dmaing)) | 615 | if (test_and_set_bit(0, (void *) &adapter->dmaing)) |
616 | printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction); | 616 | printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction); |
617 | 617 | ||
618 | skb->dev = dev; | ||
619 | adapter->current_dma.direction = 0; | 618 | adapter->current_dma.direction = 0; |
620 | adapter->current_dma.length = rlen; | 619 | adapter->current_dma.length = rlen; |
621 | adapter->current_dma.skb = skb; | 620 | adapter->current_dma.skb = skb; |
@@ -1026,7 +1025,7 @@ static int send_packet(struct net_device *dev, struct sk_buff *skb) | |||
1026 | adapter->current_dma.start_time = jiffies; | 1025 | adapter->current_dma.start_time = jiffies; |
1027 | 1026 | ||
1028 | if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) { | 1027 | if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) { |
1029 | memcpy(adapter->dma_buffer, skb->data, nlen); | 1028 | skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen); |
1030 | memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len); | 1029 | memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len); |
1031 | target = isa_virt_to_bus(adapter->dma_buffer); | 1030 | target = isa_virt_to_bus(adapter->dma_buffer); |
1032 | } | 1031 | } |
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c index 54e1d5aebed3..eed4299dc426 100644 --- a/drivers/net/3c507.c +++ b/drivers/net/3c507.c | |||
@@ -873,7 +873,6 @@ static void el16_rx(struct net_device *dev) | |||
873 | } | 873 | } |
874 | 874 | ||
875 | skb_reserve(skb,2); | 875 | skb_reserve(skb,2); |
876 | skb->dev = dev; | ||
877 | 876 | ||
878 | /* 'skb->data' points to the start of sk_buff data area. */ | 877 | /* 'skb->data' points to the start of sk_buff data area. */ |
879 | memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len); | 878 | memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len); |
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index f791bf026e51..c7511c4d3b68 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c | |||
@@ -1091,7 +1091,6 @@ el3_rx(struct net_device *dev) | |||
1091 | printk("Receiving packet size %d status %4.4x.\n", | 1091 | printk("Receiving packet size %d status %4.4x.\n", |
1092 | pkt_len, rx_status); | 1092 | pkt_len, rx_status); |
1093 | if (skb != NULL) { | 1093 | if (skb != NULL) { |
1094 | skb->dev = dev; | ||
1095 | skb_reserve(skb, 2); /* Align IP on 16 byte */ | 1094 | skb_reserve(skb, 2); /* Align IP on 16 byte */ |
1096 | 1095 | ||
1097 | /* 'skb->data' points to the start of sk_buff data area. */ | 1096 | /* 'skb->data' points to the start of sk_buff data area. */ |
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index c307ce66145c..290166d5e7d1 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c | |||
@@ -1292,7 +1292,6 @@ static int corkscrew_rx(struct net_device *dev) | |||
1292 | printk("Receiving packet size %d status %4.4x.\n", | 1292 | printk("Receiving packet size %d status %4.4x.\n", |
1293 | pkt_len, rx_status); | 1293 | pkt_len, rx_status); |
1294 | if (skb != NULL) { | 1294 | if (skb != NULL) { |
1295 | skb->dev = dev; | ||
1296 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 1295 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
1297 | /* 'skb_put()' points to the start of sk_buff data area. */ | 1296 | /* 'skb_put()' points to the start of sk_buff data area. */ |
1298 | insl(ioaddr + RX_FIFO, | 1297 | insl(ioaddr + RX_FIFO, |
@@ -1363,7 +1362,6 @@ static int boomerang_rx(struct net_device *dev) | |||
1363 | copying to a properly sized skbuff. */ | 1362 | copying to a properly sized skbuff. */ |
1364 | if (pkt_len < rx_copybreak | 1363 | if (pkt_len < rx_copybreak |
1365 | && (skb = dev_alloc_skb(pkt_len + 4)) != 0) { | 1364 | && (skb = dev_alloc_skb(pkt_len + 4)) != 0) { |
1366 | skb->dev = dev; | ||
1367 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 1365 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
1368 | /* 'skb_put()' points to the start of sk_buff data area. */ | 1366 | /* 'skb_put()' points to the start of sk_buff data area. */ |
1369 | memcpy(skb_put(skb, pkt_len), | 1367 | memcpy(skb_put(skb, pkt_len), |
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index 17d61eb0a7e5..da1a22c13865 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c | |||
@@ -988,7 +988,6 @@ static void elmc_rcv_int(struct net_device *dev) | |||
988 | rbd->status = 0; | 988 | rbd->status = 0; |
989 | skb = (struct sk_buff *) dev_alloc_skb(totlen + 2); | 989 | skb = (struct sk_buff *) dev_alloc_skb(totlen + 2); |
990 | if (skb != NULL) { | 990 | if (skb != NULL) { |
991 | skb->dev = dev; | ||
992 | skb_reserve(skb, 2); /* 16 byte alignment */ | 991 | skb_reserve(skb, 2); /* 16 byte alignment */ |
993 | skb_put(skb,totlen); | 992 | skb_put(skb,totlen); |
994 | eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0); | 993 | eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0); |
@@ -1146,7 +1145,7 @@ static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
1146 | 1145 | ||
1147 | if (len != skb->len) | 1146 | if (len != skb->len) |
1148 | memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); | 1147 | memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); |
1149 | memcpy((char *) p->xmit_cbuffs[p->xmit_count], (char *) (skb->data), skb->len); | 1148 | skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len); |
1150 | 1149 | ||
1151 | #if (NUM_XMIT_BUFFS == 1) | 1150 | #if (NUM_XMIT_BUFFS == 1) |
1152 | #ifdef NO_NOPCOMMANDS | 1151 | #ifdef NO_NOPCOMMANDS |
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index 6c7437e60bd2..c7b571be20e0 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c | |||
@@ -1189,7 +1189,6 @@ static void mc32_rx_ring(struct net_device *dev) | |||
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | skb->protocol=eth_type_trans(skb,dev); | 1191 | skb->protocol=eth_type_trans(skb,dev); |
1192 | skb->dev=dev; | ||
1193 | dev->last_rx = jiffies; | 1192 | dev->last_rx = jiffies; |
1194 | lp->net_stats.rx_packets++; | 1193 | lp->net_stats.rx_packets++; |
1195 | lp->net_stats.rx_bytes += length; | 1194 | lp->net_stats.rx_bytes += length; |
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index b406ecfa7268..80924f76dee8 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -2414,7 +2414,6 @@ static int vortex_rx(struct net_device *dev) | |||
2414 | printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n", | 2414 | printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n", |
2415 | pkt_len, rx_status); | 2415 | pkt_len, rx_status); |
2416 | if (skb != NULL) { | 2416 | if (skb != NULL) { |
2417 | skb->dev = dev; | ||
2418 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 2417 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
2419 | /* 'skb_put()' points to the start of sk_buff data area. */ | 2418 | /* 'skb_put()' points to the start of sk_buff data area. */ |
2420 | if (vp->bus_master && | 2419 | if (vp->bus_master && |
@@ -2491,7 +2490,6 @@ boomerang_rx(struct net_device *dev) | |||
2491 | /* Check if the packet is long enough to just accept without | 2490 | /* Check if the packet is long enough to just accept without |
2492 | copying to a properly sized skbuff. */ | 2491 | copying to a properly sized skbuff. */ |
2493 | if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { | 2492 | if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { |
2494 | skb->dev = dev; | ||
2495 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 2493 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
2496 | pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | 2494 | pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); |
2497 | /* 'skb_put()' points to the start of sk_buff data area. */ | 2495 | /* 'skb_put()' points to the start of sk_buff data area. */ |
diff --git a/drivers/net/7990.c b/drivers/net/7990.c index 1b3d11ed6cff..d396f996af57 100644 --- a/drivers/net/7990.c +++ b/drivers/net/7990.c | |||
@@ -331,7 +331,6 @@ static int lance_rx (struct net_device *dev) | |||
331 | return 0; | 331 | return 0; |
332 | } | 332 | } |
333 | 333 | ||
334 | skb->dev = dev; | ||
335 | skb_reserve (skb, 2); /* 16 byte align */ | 334 | skb_reserve (skb, 2); /* 16 byte align */ |
336 | skb_put (skb, len); /* make room */ | 335 | skb_put (skb, len); /* make room */ |
337 | eth_copy_and_sum(skb, | 336 | eth_copy_and_sum(skb, |
@@ -568,7 +567,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
568 | 567 | ||
569 | if (skb->len < ETH_ZLEN) | 568 | if (skb->len < ETH_ZLEN) |
570 | memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); | 569 | memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); |
571 | memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen); | 570 | skb_copy_from_linear_data(skb, &ib->tx_buf[entry][0], skblen); |
572 | 571 | ||
573 | /* Now, give the packet to the lance */ | 572 | /* Now, give the packet to the lance */ |
574 | ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); | 573 | ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); |
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 12c8453f44bc..e8c9f27817b0 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c | |||
@@ -573,7 +573,6 @@ rx_status_loop: | |||
573 | } | 573 | } |
574 | 574 | ||
575 | skb_reserve(new_skb, RX_OFFSET); | 575 | skb_reserve(new_skb, RX_OFFSET); |
576 | new_skb->dev = dev; | ||
577 | 576 | ||
578 | pci_unmap_single(cp->pdev, mapping, | 577 | pci_unmap_single(cp->pdev, mapping, |
579 | buflen, PCI_DMA_FROMDEVICE); | 578 | buflen, PCI_DMA_FROMDEVICE); |
@@ -807,7 +806,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
807 | if (mss) | 806 | if (mss) |
808 | flags |= LargeSend | ((mss & MSSMask) << MSSShift); | 807 | flags |= LargeSend | ((mss & MSSMask) << MSSShift); |
809 | else if (skb->ip_summed == CHECKSUM_PARTIAL) { | 808 | else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
810 | const struct iphdr *ip = skb->nh.iph; | 809 | const struct iphdr *ip = ip_hdr(skb); |
811 | if (ip->protocol == IPPROTO_TCP) | 810 | if (ip->protocol == IPPROTO_TCP) |
812 | flags |= IPCS | TCPCS; | 811 | flags |= IPCS | TCPCS; |
813 | else if (ip->protocol == IPPROTO_UDP) | 812 | else if (ip->protocol == IPPROTO_UDP) |
@@ -826,7 +825,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
826 | u32 first_len, first_eor; | 825 | u32 first_len, first_eor; |
827 | dma_addr_t first_mapping; | 826 | dma_addr_t first_mapping; |
828 | int frag, first_entry = entry; | 827 | int frag, first_entry = entry; |
829 | const struct iphdr *ip = skb->nh.iph; | 828 | const struct iphdr *ip = ip_hdr(skb); |
830 | 829 | ||
831 | /* We must give this initial chunk to the device last. | 830 | /* We must give this initial chunk to the device last. |
832 | * Otherwise we could race with the device. | 831 | * Otherwise we could race with the device. |
@@ -1082,7 +1081,6 @@ static int cp_refill_rx (struct cp_private *cp) | |||
1082 | if (!skb) | 1081 | if (!skb) |
1083 | goto err_out; | 1082 | goto err_out; |
1084 | 1083 | ||
1085 | skb->dev = cp->dev; | ||
1086 | skb_reserve(skb, RX_OFFSET); | 1084 | skb_reserve(skb, RX_OFFSET); |
1087 | 1085 | ||
1088 | mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz, | 1086 | mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz, |
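8139cp now fetches the IP header with ip_hdr() rather than skb->nh.iph when choosing checksum-offload flags. A condensed sketch of that decision, assuming CHECKSUM_PARTIAL semantics of this era; the DEMO_* flag bits are placeholders, not the driver's real descriptor bits.

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/in.h>

#define DEMO_IPCS	0x1		/* placeholder flag bits */
#define DEMO_TCPCS	0x2
#define DEMO_UDPCS	0x4

/* Sketch: pick hardware checksum flags for a CHECKSUM_PARTIAL packet. */
static u32 demo_csum_flags(const struct sk_buff *skb)
{
	const struct iphdr *ip = ip_hdr(skb);	/* was: skb->nh.iph */
	u32 flags = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (ip->protocol == IPPROTO_TCP)
			flags |= DEMO_IPCS | DEMO_TCPCS;
		else if (ip->protocol == IPPROTO_UDP)
			flags |= DEMO_IPCS | DEMO_UDPCS;
	}
	return flags;
}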
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 99304b2aa86e..a844b1fe2dc4 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -1904,10 +1904,10 @@ static __inline__ void wrap_copy(struct sk_buff *skb, const unsigned char *ring, | |||
1904 | u32 left = RX_BUF_LEN - offset; | 1904 | u32 left = RX_BUF_LEN - offset; |
1905 | 1905 | ||
1906 | if (size > left) { | 1906 | if (size > left) { |
1907 | memcpy(skb->data, ring + offset, left); | 1907 | skb_copy_to_linear_data(skb, ring + offset, left); |
1908 | memcpy(skb->data+left, ring, size - left); | 1908 | skb_copy_to_linear_data_offset(skb, left, ring, size - left); |
1909 | } else | 1909 | } else |
1910 | memcpy(skb->data, ring + offset, size); | 1910 | skb_copy_to_linear_data(skb, ring + offset, size); |
1911 | } | 1911 | } |
1912 | #endif | 1912 | #endif |
1913 | 1913 | ||
@@ -2013,7 +2013,6 @@ no_early_rx: | |||
2013 | 2013 | ||
2014 | skb = dev_alloc_skb (pkt_size + 2); | 2014 | skb = dev_alloc_skb (pkt_size + 2); |
2015 | if (likely(skb)) { | 2015 | if (likely(skb)) { |
2016 | skb->dev = dev; | ||
2017 | skb_reserve (skb, 2); /* 16 byte align the IP fields. */ | 2016 | skb_reserve (skb, 2); /* 16 byte align the IP fields. */ |
2018 | #if RX_BUF_IDX == 3 | 2017 | #if RX_BUF_IDX == 3 |
2019 | wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); | 2018 | wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); |
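wrap_copy() fills the skb from the receive ring, so its memcpy()s into skb->data become skb_copy_to_linear_data() and skb_copy_to_linear_data_offset(). A sketch of the wrapped copy, assuming a ring of DEMO_RX_BUF_LEN bytes; the constant is illustrative, not the driver's RX_BUF_LEN.

#include <linux/skbuff.h>

#define DEMO_RX_BUF_LEN	65536		/* illustrative ring size */

/* Sketch: copy 'size' bytes starting at 'offset' out of a circular
 * receive ring into the skb's linear area, handling wrap-around. */
static void demo_wrap_copy(struct sk_buff *skb, const unsigned char *ring,
			   u32 offset, unsigned int size)
{
	u32 left = DEMO_RX_BUF_LEN - offset;

	if (size > left) {
		skb_copy_to_linear_data(skb, ring + offset, left);
		skb_copy_to_linear_data_offset(skb, left, ring, size - left);
	} else {
		skb_copy_to_linear_data(skb, ring + offset, size);
	}
}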
diff --git a/drivers/net/82596.c b/drivers/net/82596.c index 640d7ca2ebcf..3ff1155459a3 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c | |||
@@ -830,7 +830,6 @@ memory_squeeze: | |||
830 | lp->stats.rx_dropped++; | 830 | lp->stats.rx_dropped++; |
831 | } | 831 | } |
832 | else { | 832 | else { |
833 | skb->dev = dev; | ||
834 | if (!rx_in_place) { | 833 | if (!rx_in_place) { |
835 | /* 16 byte align the data fields */ | 834 | /* 16 byte align the data fields */ |
836 | skb_reserve(skb, 2); | 835 | skb_reserve(skb, 2); |
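The removed skb->dev = dev; here (and in most of the receive paths below) is redundant on the assumption that eth_type_trans(), called before netif_rx(), sets skb->dev itself. A minimal sketch of the resulting hand-off:

#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch of the common RX completion after this series; the skb is
 * assumed to be fully built (reserved, put, data copied) already. */
static void rx_handoff_sketch(struct net_device *dev, struct sk_buff *skb)
{
        skb->protocol = eth_type_trans(skb, dev); /* assumed to set skb->dev */
        netif_rx(skb);
        dev->last_rx = jiffies;
}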
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index c3f9f599f134..a3d46ea37126 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -2263,6 +2263,7 @@ config GIANFAR | |||
2263 | tristate "Gianfar Ethernet" | 2263 | tristate "Gianfar Ethernet" |
2264 | depends on 85xx || 83xx || PPC_86xx | 2264 | depends on 85xx || 83xx || PPC_86xx |
2265 | select PHYLIB | 2265 | select PHYLIB |
2266 | select CRC32 | ||
2266 | help | 2267 | help |
2267 | This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, | 2268 | This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, |
2268 | and MPC86xx family of chips, and the FEC on the 8540. | 2269 | and MPC86xx family of chips, and the FEC on the 8540. |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 33af833667da..58527322a39d 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -206,7 +206,7 @@ obj-$(CONFIG_TR) += tokenring/ | |||
206 | obj-$(CONFIG_WAN) += wan/ | 206 | obj-$(CONFIG_WAN) += wan/ |
207 | obj-$(CONFIG_ARCNET) += arcnet/ | 207 | obj-$(CONFIG_ARCNET) += arcnet/ |
208 | obj-$(CONFIG_NET_PCMCIA) += pcmcia/ | 208 | obj-$(CONFIG_NET_PCMCIA) += pcmcia/ |
209 | obj-$(CONFIG_NET_RADIO) += wireless/ | 209 | obj-y += wireless/ |
210 | obj-$(CONFIG_NET_TULIP) += tulip/ | 210 | obj-$(CONFIG_NET_TULIP) += tulip/ |
211 | obj-$(CONFIG_HAMRADIO) += hamradio/ | 211 | obj-$(CONFIG_HAMRADIO) += hamradio/ |
212 | obj-$(CONFIG_IRDA) += irda/ | 212 | obj-$(CONFIG_IRDA) += irda/ |
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index d76548e75350..1226cbba0450 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c | |||
@@ -320,7 +320,6 @@ static int lance_rx (struct net_device *dev) | |||
320 | return 0; | 320 | return 0; |
321 | } | 321 | } |
322 | 322 | ||
323 | skb->dev = dev; | ||
324 | skb_reserve (skb, 2); /* 16 byte align */ | 323 | skb_reserve (skb, 2); /* 16 byte align */ |
325 | skb_put (skb, len); /* make room */ | 324 | skb_put (skb, len); /* make room */ |
326 | eth_copy_and_sum(skb, | 325 | eth_copy_and_sum(skb, |
@@ -599,7 +598,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
599 | ib->btx_ring [entry].length = (-len) | 0xf000; | 598 | ib->btx_ring [entry].length = (-len) | 0xf000; |
600 | ib->btx_ring [entry].misc = 0; | 599 | ib->btx_ring [entry].misc = 0; |
601 | 600 | ||
602 | memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen); | 601 | skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); |
603 | 602 | ||
604 | /* Clear the slack of the packet, do I need this? */ | 603 | /* Clear the slack of the packet, do I need this? */ |
605 | if (len != skblen) | 604 | if (len != skblen) |
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c index 7138e0e025bc..7122b7ba8d61 100644 --- a/drivers/net/acenic.c +++ b/drivers/net/acenic.c | |||
@@ -2027,7 +2027,6 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) | |||
2027 | */ | 2027 | */ |
2028 | csum = retdesc->tcp_udp_csum; | 2028 | csum = retdesc->tcp_udp_csum; |
2029 | 2029 | ||
2030 | skb->dev = dev; | ||
2031 | skb->protocol = eth_type_trans(skb, dev); | 2030 | skb->protocol = eth_type_trans(skb, dev); |
2032 | 2031 | ||
2033 | /* | 2032 | /* |
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index 962c954c2d56..675fe918421b 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c | |||
@@ -798,9 +798,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget) | |||
798 | pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], | 798 | pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], |
799 | lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); | 799 | lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); |
800 | skb_put(skb, pkt_len); | 800 | skb_put(skb, pkt_len); |
801 | skb->dev = dev; | ||
802 | lp->rx_skbuff[rx_index] = new_skb; | 801 | lp->rx_skbuff[rx_index] = new_skb; |
803 | new_skb->dev = dev; | ||
804 | lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, | 802 | lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, |
805 | new_skb->data, | 803 | new_skb->data, |
806 | lp->rx_buff_len-2, | 804 | lp->rx_buff_len-2, |
@@ -926,9 +924,7 @@ static int amd8111e_rx(struct net_device *dev) | |||
926 | pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], | 924 | pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], |
927 | lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); | 925 | lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); |
928 | skb_put(skb, pkt_len); | 926 | skb_put(skb, pkt_len); |
929 | skb->dev = dev; | ||
930 | lp->rx_skbuff[rx_index] = new_skb; | 927 | lp->rx_skbuff[rx_index] = new_skb; |
931 | new_skb->dev = dev; | ||
932 | lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, | 928 | lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, |
933 | new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE); | 929 | new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE); |
934 | 930 | ||
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index dba5e5165452..da6ffa8cd81e 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c | |||
@@ -853,9 +853,9 @@ static void cops_rx(struct net_device *dev) | |||
853 | return; | 853 | return; |
854 | } | 854 | } |
855 | 855 | ||
856 | skb->mac.raw = skb->data; /* Point to entire packet. */ | 856 | skb_reset_mac_header(skb); /* Point to entire packet. */ |
857 | skb_pull(skb,3); | 857 | skb_pull(skb,3); |
858 | skb->h.raw = skb->data; /* Point to data (Skip header). */ | 858 | skb_reset_transport_header(skb); /* Point to data (Skip header). */ |
859 | 859 | ||
860 | /* Update the counters. */ | 860 | /* Update the counters. */ |
861 | lp->stats.rx_packets++; | 861 | lp->stats.rx_packets++; |
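cops_rx() above, and the appletalk/arcnet hunks that follow, replace raw stores into skb->mac.raw and skb->h.raw with skb_reset_mac_header() and skb_reset_transport_header(); each reset helper is assumed to record the current skb->data position as that header. A sketch of the LLAP sequence in those terms:

#include <linux/skbuff.h>

/* Sketch: mark the whole frame as the MAC header, strip the 3-byte
 * LLAP header, then mark the remainder as the transport (DDP) header. */
static void llap_mark_headers_sketch(struct sk_buff *skb)
{
        skb_reset_mac_header(skb);       /* was: skb->mac.raw = skb->data */
        skb_pull(skb, 3);                /* skip the LLAP header */
        skb_reset_transport_header(skb); /* was: skb->h.raw = skb->data */
}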
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 2ea44ce49810..6a6cbd331a16 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c | |||
@@ -770,13 +770,13 @@ static int sendup_buffer (struct net_device *dev) | |||
770 | skb->data[0] = dnode; | 770 | skb->data[0] = dnode; |
771 | skb->data[1] = snode; | 771 | skb->data[1] = snode; |
772 | skb->data[2] = llaptype; | 772 | skb->data[2] = llaptype; |
773 | skb->mac.raw = skb->data; /* save pointer to llap header */ | 773 | skb_reset_mac_header(skb); /* save pointer to llap header */ |
774 | skb_pull(skb,3); | 774 | skb_pull(skb,3); |
775 | 775 | ||
776 | /* copy ddp(s,e)hdr + contents */ | 776 | /* copy ddp(s,e)hdr + contents */ |
777 | memcpy(skb->data,(void*)ltdmabuf,len); | 777 | skb_copy_to_linear_data(skb, ltdmabuf, len); |
778 | 778 | ||
779 | skb->h.raw = skb->data; | 779 | skb_reset_transport_header(skb); |
780 | 780 | ||
781 | stats->rx_packets++; | 781 | stats->rx_packets++; |
782 | stats->rx_bytes+=skb->len; | 782 | stats->rx_bytes+=skb->len; |
@@ -917,13 +917,14 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev) | |||
917 | 917 | ||
918 | int i; | 918 | int i; |
919 | struct lt_sendlap cbuf; | 919 | struct lt_sendlap cbuf; |
920 | unsigned char *hdr; | ||
920 | 921 | ||
921 | cbuf.command = LT_SENDLAP; | 922 | cbuf.command = LT_SENDLAP; |
922 | cbuf.dnode = skb->data[0]; | 923 | cbuf.dnode = skb->data[0]; |
923 | cbuf.laptype = skb->data[2]; | 924 | cbuf.laptype = skb->data[2]; |
924 | skb_pull(skb,3); /* skip past LLAP header */ | 925 | skb_pull(skb,3); /* skip past LLAP header */ |
925 | cbuf.length = skb->len; /* this is host order */ | 926 | cbuf.length = skb->len; /* this is host order */ |
926 | skb->h.raw=skb->data; | 927 | skb_reset_transport_header(skb); |
927 | 928 | ||
928 | if(debug & DEBUG_UPPER) { | 929 | if(debug & DEBUG_UPPER) { |
929 | printk("command "); | 930 | printk("command "); |
@@ -932,11 +933,13 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev) | |||
932 | printk("\n"); | 933 | printk("\n"); |
933 | } | 934 | } |
934 | 935 | ||
935 | do_write(dev,&cbuf,sizeof(cbuf),skb->h.raw,skb->len); | 936 | hdr = skb_transport_header(skb); |
937 | do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len); | ||
936 | 938 | ||
937 | if(debug & DEBUG_UPPER) { | 939 | if(debug & DEBUG_UPPER) { |
938 | printk("sent %d ddp bytes\n",skb->len); | 940 | printk("sent %d ddp bytes\n",skb->len); |
939 | for(i=0;i<skb->len;i++) printk("%02x ",skb->h.raw[i]); | 941 | for (i = 0; i < skb->len; i++) |
942 | printk("%02x ", hdr[i]); | ||
940 | printk("\n"); | 943 | printk("\n"); |
941 | } | 944 | } |
942 | 945 | ||
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c index 6318814a11a8..e0a18e7c73cb 100644 --- a/drivers/net/arcnet/arc-rawmode.c +++ b/drivers/net/arcnet/arc-rawmode.c | |||
@@ -110,7 +110,7 @@ static void rx(struct net_device *dev, int bufnum, | |||
110 | 110 | ||
111 | pkt = (struct archdr *) skb->data; | 111 | pkt = (struct archdr *) skb->data; |
112 | 112 | ||
113 | skb->mac.raw = skb->data; | 113 | skb_reset_mac_header(skb); |
114 | skb_pull(skb, ARC_HDR_SIZE); | 114 | skb_pull(skb, ARC_HDR_SIZE); |
115 | 115 | ||
116 | /* up to sizeof(pkt->soft) has already been copied from the card */ | 116 | /* up to sizeof(pkt->soft) has already been copied from the card */ |
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 83004fdab0a4..681e20b8466f 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c | |||
@@ -519,9 +519,12 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev, | |||
519 | * real header when we do rebuild_header. | 519 | * real header when we do rebuild_header. |
520 | */ | 520 | */ |
521 | *(uint16_t *) skb_push(skb, 2) = type; | 521 | *(uint16_t *) skb_push(skb, 2) = type; |
522 | if (skb->nh.raw - skb->mac.raw != 2) | 522 | /* |
523 | * XXX: Why not use skb->mac_len? | ||
524 | */ | ||
525 | if (skb->network_header - skb->mac_header != 2) | ||
523 | BUGMSG(D_NORMAL, "arcnet_header: Yikes! diff (%d) is not 2!\n", | 526 | BUGMSG(D_NORMAL, "arcnet_header: Yikes! diff (%d) is not 2!\n", |
524 | (int)(skb->nh.raw - skb->mac.raw)); | 527 | (int)(skb->network_header - skb->mac_header)); |
525 | return -2; /* return error -- can't transmit yet! */ | 528 | return -2; /* return error -- can't transmit yet! */ |
526 | } | 529 | } |
527 | else { | 530 | else { |
@@ -554,11 +557,13 @@ static int arcnet_rebuild_header(struct sk_buff *skb) | |||
554 | unsigned short type; | 557 | unsigned short type; |
555 | uint8_t daddr=0; | 558 | uint8_t daddr=0; |
556 | struct ArcProto *proto; | 559 | struct ArcProto *proto; |
557 | 560 | /* | |
558 | if (skb->nh.raw - skb->mac.raw != 2) { | 561 | * XXX: Why not use skb->mac_len? |
562 | */ | ||
563 | if (skb->network_header - skb->mac_header != 2) { | ||
559 | BUGMSG(D_NORMAL, | 564 | BUGMSG(D_NORMAL, |
560 | "rebuild_header: shouldn't be here! (hdrsize=%d)\n", | 565 | "rebuild_header: shouldn't be here! (hdrsize=%d)\n", |
561 | (int)(skb->nh.raw - skb->mac.raw)); | 566 | (int)(skb->network_header - skb->mac_header)); |
562 | return 0; | 567 | return 0; |
563 | } | 568 | } |
564 | type = *(uint16_t *) skb_pull(skb, 2); | 569 | type = *(uint16_t *) skb_pull(skb, 2); |
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index 66485585ab39..cc4610db6395 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c | |||
@@ -122,10 +122,8 @@ static void rx(struct net_device *dev, int bufnum, | |||
122 | } | 122 | } |
123 | skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); | 123 | skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); |
124 | skb->dev = dev; | 124 | skb->dev = dev; |
125 | 125 | skb_reset_mac_header(skb); | |
126 | pkt = (struct archdr *) skb->data; | 126 | pkt = (struct archdr *)skb_mac_header(skb); |
127 | |||
128 | skb->mac.raw = skb->data; | ||
129 | skb_pull(skb, ARC_HDR_SIZE); | 127 | skb_pull(skb, ARC_HDR_SIZE); |
130 | 128 | ||
131 | /* up to sizeof(pkt->soft) has already been copied from the card */ | 129 | /* up to sizeof(pkt->soft) has already been copied from the card */ |
@@ -270,13 +268,13 @@ static int ack_tx(struct net_device *dev, int acked) | |||
270 | skb_put(ackskb, length + ARC_HDR_SIZE ); | 268 | skb_put(ackskb, length + ARC_HDR_SIZE ); |
271 | ackskb->dev = dev; | 269 | ackskb->dev = dev; |
272 | 270 | ||
273 | ackpkt = (struct archdr *) ackskb->data; | 271 | skb_reset_mac_header(ackskb); |
274 | 272 | ackpkt = (struct archdr *)skb_mac_header(ackskb); | |
275 | ackskb->mac.raw = ackskb->data; | ||
276 | /* skb_pull(ackskb, ARC_HDR_SIZE); */ | 273 | /* skb_pull(ackskb, ARC_HDR_SIZE); */ |
277 | 274 | ||
278 | 275 | ||
279 | memcpy(ackpkt, lp->outgoing.skb->data, ARC_HDR_SIZE+sizeof(struct arc_cap)); | 276 | skb_copy_from_linear_data(lp->outgoing.skb, ackpkt, |
277 | ARC_HDR_SIZE + sizeof(struct arc_cap)); | ||
280 | ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */ | 278 | ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */ |
281 | ackpkt->soft.cap.mes.ack=acked; | 279 | ackpkt->soft.cap.mes.ack=acked; |
282 | 280 | ||
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index 6d6c69f036ef..2de8877ece29 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c | |||
@@ -94,7 +94,7 @@ static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev) | |||
94 | int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; | 94 | int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; |
95 | 95 | ||
96 | /* Pull off the arcnet header. */ | 96 | /* Pull off the arcnet header. */ |
97 | skb->mac.raw = skb->data; | 97 | skb_reset_mac_header(skb); |
98 | skb_pull(skb, hdr_size); | 98 | skb_pull(skb, hdr_size); |
99 | 99 | ||
100 | if (pkt->hard.dest == 0) | 100 | if (pkt->hard.dest == 0) |
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index bee34226abfa..460a095000c2 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c | |||
@@ -96,7 +96,7 @@ static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev) | |||
96 | int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; | 96 | int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; |
97 | 97 | ||
98 | /* Pull off the arcnet header. */ | 98 | /* Pull off the arcnet header. */ |
99 | skb->mac.raw = skb->data; | 99 | skb_reset_mac_header(skb); |
100 | skb_pull(skb, hdr_size); | 100 | skb_pull(skb, hdr_size); |
101 | 101 | ||
102 | if (pkt->hard.dest == 0) | 102 | if (pkt->hard.dest == 0) |
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index 9dfc09b181c1..a0e68e718531 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c | |||
@@ -743,7 +743,6 @@ static int ariadne_rx(struct net_device *dev) | |||
743 | } | 743 | } |
744 | 744 | ||
745 | 745 | ||
746 | skb->dev = dev; | ||
747 | skb_reserve(skb,2); /* 16 byte align */ | 746 | skb_reserve(skb,2); /* 16 byte align */ |
748 | skb_put(skb,pkt_len); /* Make room */ | 747 | skb_put(skb,pkt_len); /* Make room */ |
749 | eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0); | 748 | eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0); |
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index ddd12d44ff22..8f0d7ce503c9 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c | |||
@@ -526,7 +526,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv) | |||
526 | skb = dev_alloc_skb(len + 2); | 526 | skb = dev_alloc_skb(len + 2); |
527 | 527 | ||
528 | if (skb) { | 528 | if (skb) { |
529 | skb->dev = dev; | ||
530 | skb_reserve(skb, 2); | 529 | skb_reserve(skb, 2); |
531 | 530 | ||
532 | am_readbuffer(dev, pktaddr, skb_put(skb, len), len); | 531 | am_readbuffer(dev, pktaddr, skb_put(skb, len), len); |
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 1621b8fe35cf..152fa7a042b8 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c | |||
@@ -858,7 +858,6 @@ static void at91ether_rx(struct net_device *dev) | |||
858 | skb_reserve(skb, 2); | 858 | skb_reserve(skb, 2); |
859 | memcpy(skb_put(skb, pktlen), p_recv, pktlen); | 859 | memcpy(skb_put(skb, pktlen), p_recv, pktlen); |
860 | 860 | ||
861 | skb->dev = dev; | ||
862 | skb->protocol = eth_type_trans(skb, dev); | 861 | skb->protocol = eth_type_trans(skb, dev); |
863 | dev->last_rx = jiffies; | 862 | dev->last_rx = jiffies; |
864 | lp->stats.rx_bytes += pktlen; | 863 | lp->stats.rx_bytes += pktlen; |
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index dd698b033a62..2438c5bff237 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c | |||
@@ -255,7 +255,6 @@ static int ep93xx_rx(struct net_device *dev, int *budget) | |||
255 | 255 | ||
256 | skb = dev_alloc_skb(length + 2); | 256 | skb = dev_alloc_skb(length + 2); |
257 | if (likely(skb != NULL)) { | 257 | if (likely(skb != NULL)) { |
258 | skb->dev = dev; | ||
259 | skb_reserve(skb, 2); | 258 | skb_reserve(skb, 2); |
260 | dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr, | 259 | dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr, |
261 | length, DMA_FROM_DEVICE); | 260 | length, DMA_FROM_DEVICE); |
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c index a2921882eba8..f075cebe84ad 100644 --- a/drivers/net/arm/ether1.c +++ b/drivers/net/arm/ether1.c | |||
@@ -875,7 +875,6 @@ ether1_recv_done (struct net_device *dev) | |||
875 | skb = dev_alloc_skb (length + 2); | 875 | skb = dev_alloc_skb (length + 2); |
876 | 876 | ||
877 | if (skb) { | 877 | if (skb) { |
878 | skb->dev = dev; | ||
879 | skb_reserve (skb, 2); | 878 | skb_reserve (skb, 2); |
880 | 879 | ||
881 | ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length); | 880 | ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length); |
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c index 841178343a07..32da2eb9bcee 100644 --- a/drivers/net/arm/ether3.c +++ b/drivers/net/arm/ether3.c | |||
@@ -661,7 +661,6 @@ if (next_ptr < RX_START || next_ptr >= RX_END) { | |||
661 | if (skb) { | 661 | if (skb) { |
662 | unsigned char *buf; | 662 | unsigned char *buf; |
663 | 663 | ||
664 | skb->dev = dev; | ||
665 | skb_reserve(skb, 2); | 664 | skb_reserve(skb, 2); |
666 | buf = skb_put(skb, length); | 665 | buf = skb_put(skb, length); |
667 | ether3_readbuffer(dev, buf + 12, length - 12); | 666 | ether3_readbuffer(dev, buf + 12, length - 12); |
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index 56ae8babd919..bed8e0ebaf19 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c | |||
@@ -768,7 +768,6 @@ net_rx(struct net_device *dev) | |||
768 | lp->stats.rx_dropped++; | 768 | lp->stats.rx_dropped++; |
769 | break; | 769 | break; |
770 | } | 770 | } |
771 | skb->dev = dev; | ||
772 | skb_reserve(skb,2); | 771 | skb_reserve(skb,2); |
773 | 772 | ||
774 | insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); | 773 | insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); |
diff --git a/drivers/net/atari_bionet.c b/drivers/net/atari_bionet.c index 4e3bf6a1f22c..3d87bd2b4194 100644 --- a/drivers/net/atari_bionet.c +++ b/drivers/net/atari_bionet.c | |||
@@ -453,7 +453,8 @@ bionet_send_packet(struct sk_buff *skb, struct net_device *dev) { | |||
453 | stdma_lock(bionet_intr, NULL); | 453 | stdma_lock(bionet_intr, NULL); |
454 | local_irq_restore(flags); | 454 | local_irq_restore(flags); |
455 | if( !STRAM_ADDR(buf+length-1) ) { | 455 | if( !STRAM_ADDR(buf+length-1) ) { |
456 | memcpy(nic_packet->buffer, skb->data, length); | 456 | skb_copy_from_linear_data(skb, nic_packet->buffer, |
457 | length); | ||
457 | buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer; | 458 | buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer; |
458 | } | 459 | } |
459 | 460 | ||
@@ -544,13 +545,13 @@ bionet_poll_rx(struct net_device *dev) { | |||
544 | break; | 545 | break; |
545 | } | 546 | } |
546 | 547 | ||
547 | skb->dev = dev; | ||
548 | skb_reserve( skb, 2 ); /* 16 Byte align */ | 548 | skb_reserve( skb, 2 ); /* 16 Byte align */ |
549 | skb_put( skb, pkt_len ); /* make room */ | 549 | skb_put( skb, pkt_len ); /* make room */ |
550 | 550 | ||
551 | /* 'skb->data' points to the start of sk_buff data area. | 551 | /* 'skb->data' points to the start of sk_buff data area. |
552 | */ | 552 | */ |
553 | memcpy(skb->data, nic_packet->buffer, pkt_len); | 553 | skb_copy_to_linear_data(skb, nic_packet->buffer, |
554 | pkt_len); | ||
554 | skb->protocol = eth_type_trans( skb, dev ); | 555 | skb->protocol = eth_type_trans( skb, dev ); |
555 | netif_rx(skb); | 556 | netif_rx(skb); |
556 | dev->last_rx = jiffies; | 557 | dev->last_rx = jiffies; |
diff --git a/drivers/net/atari_pamsnet.c b/drivers/net/atari_pamsnet.c index 3b5436149286..54714409a09b 100644 --- a/drivers/net/atari_pamsnet.c +++ b/drivers/net/atari_pamsnet.c | |||
@@ -717,7 +717,8 @@ pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev) { | |||
717 | 717 | ||
718 | local_irq_restore(flags); | 718 | local_irq_restore(flags); |
719 | if( !STRAM_ADDR(buf+length-1) ) { | 719 | if( !STRAM_ADDR(buf+length-1) ) { |
720 | memcpy(nic_packet->buffer, skb->data, length); | 720 | skb_copy_from_linear_data(skb, nic_packet->buffer, |
721 | length); | ||
721 | buf = (unsigned long)phys_nic_packet; | 722 | buf = (unsigned long)phys_nic_packet; |
722 | } | 723 | } |
723 | 724 | ||
@@ -792,7 +793,8 @@ pamsnet_poll_rx(struct net_device *dev) { | |||
792 | 793 | ||
793 | /* 'skb->data' points to the start of sk_buff data area. | 794 | /* 'skb->data' points to the start of sk_buff data area. |
794 | */ | 795 | */ |
795 | memcpy(skb->data, nic_packet->buffer, pkt_len); | 796 | skb_copy_to_linear_data(skb, nic_packet->buffer, |
797 | pkt_len); | ||
796 | netif_rx(skb); | 798 | netif_rx(skb); |
797 | dev->last_rx = jiffies; | 799 | dev->last_rx = jiffies; |
798 | lp->stats.rx_packets++; | 800 | lp->stats.rx_packets++; |
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index 7e37ac86a69a..dfa8b9ba4c80 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c | |||
@@ -1047,7 +1047,6 @@ static int lance_rx( struct net_device *dev ) | |||
1047 | pkt_len ); | 1047 | pkt_len ); |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | skb->dev = dev; | ||
1051 | skb_reserve( skb, 2 ); /* 16 byte align */ | 1050 | skb_reserve( skb, 2 ); /* 16 byte align */ |
1052 | skb_put( skb, pkt_len ); /* Make room */ | 1051 | skb_put( skb, pkt_len ); /* Make room */ |
1053 | lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len ); | 1052 | lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len ); |
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c index 8606eac5bec8..4b1d4d153ecf 100644 --- a/drivers/net/atl1/atl1_main.c +++ b/drivers/net/atl1/atl1_main.c | |||
@@ -408,7 +408,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter, | |||
408 | static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) | 408 | static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) |
409 | { | 409 | { |
410 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | 410 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; |
411 | struct net_device *netdev = adapter->netdev; | ||
412 | struct pci_dev *pdev = adapter->pdev; | 411 | struct pci_dev *pdev = adapter->pdev; |
413 | struct page *page; | 412 | struct page *page; |
414 | unsigned long offset; | 413 | unsigned long offset; |
@@ -444,7 +443,6 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) | |||
444 | * the 14 byte MAC header is removed | 443 | * the 14 byte MAC header is removed |
445 | */ | 444 | */ |
446 | skb_reserve(skb, NET_IP_ALIGN); | 445 | skb_reserve(skb, NET_IP_ALIGN); |
447 | skb->dev = netdev; | ||
448 | 446 | ||
449 | buffer_info->alloced = 1; | 447 | buffer_info->alloced = 1; |
450 | buffer_info->skb = skb; | 448 | buffer_info->skb = skb; |
@@ -1296,19 +1294,21 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1296 | } | 1294 | } |
1297 | 1295 | ||
1298 | if (skb->protocol == ntohs(ETH_P_IP)) { | 1296 | if (skb->protocol == ntohs(ETH_P_IP)) { |
1299 | skb->nh.iph->tot_len = 0; | 1297 | struct iphdr *iph = ip_hdr(skb); |
1300 | skb->nh.iph->check = 0; | 1298 | |
1301 | skb->h.th->check = | 1299 | iph->tot_len = 0; |
1302 | ~csum_tcpudp_magic(skb->nh.iph->saddr, | 1300 | iph->check = 0; |
1303 | skb->nh.iph->daddr, 0, | 1301 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
1304 | IPPROTO_TCP, 0); | 1302 | iph->daddr, 0, |
1305 | ipofst = skb->nh.raw - skb->data; | 1303 | IPPROTO_TCP, |
1304 | 0); | ||
1305 | ipofst = skb_network_offset(skb); | ||
1306 | if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ | 1306 | if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ |
1307 | tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; | 1307 | tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; |
1308 | 1308 | ||
1309 | tso->tsopl |= (skb->nh.iph->ihl & | 1309 | tso->tsopl |= (iph->ihl & |
1310 | CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; | 1310 | CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; |
1311 | tso->tsopl |= ((skb->h.th->doff << 2) & | 1311 | tso->tsopl |= (tcp_hdrlen(skb) & |
1312 | TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; | 1312 | TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; |
1313 | tso->tsopl |= (skb_shinfo(skb)->gso_size & | 1313 | tso->tsopl |= (skb_shinfo(skb)->gso_size & |
1314 | TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; | 1314 | TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; |
@@ -1327,8 +1327,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1327 | u8 css, cso; | 1327 | u8 css, cso; |
1328 | 1328 | ||
1329 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 1329 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
1330 | cso = skb->h.raw - skb->data; | 1330 | cso = skb_transport_offset(skb); |
1331 | css = (skb->h.raw + skb->csum_offset) - skb->data; | 1331 | css = cso + skb->csum_offset; |
1332 | if (unlikely(cso & 0x1)) { | 1332 | if (unlikely(cso & 0x1)) { |
1333 | printk(KERN_DEBUG "%s: payload offset != even number\n", | 1333 | printk(KERN_DEBUG "%s: payload offset != even number\n", |
1334 | atl1_driver_name); | 1334 | atl1_driver_name); |
@@ -1370,8 +1370,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, | |||
1370 | 1370 | ||
1371 | if (tcp_seg) { | 1371 | if (tcp_seg) { |
1372 | /* TSO/GSO */ | 1372 | /* TSO/GSO */ |
1373 | proto_hdr_len = | 1373 | proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
1374 | ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | ||
1375 | buffer_info->length = proto_hdr_len; | 1374 | buffer_info->length = proto_hdr_len; |
1376 | page = virt_to_page(skb->data); | 1375 | page = virt_to_page(skb->data); |
1377 | offset = (unsigned long)skb->data & ~PAGE_MASK; | 1376 | offset = (unsigned long)skb->data & ~PAGE_MASK; |
@@ -1563,8 +1562,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1563 | mss = skb_shinfo(skb)->gso_size; | 1562 | mss = skb_shinfo(skb)->gso_size; |
1564 | if (mss) { | 1563 | if (mss) { |
1565 | if (skb->protocol == htons(ETH_P_IP)) { | 1564 | if (skb->protocol == htons(ETH_P_IP)) { |
1566 | proto_hdr_len = ((skb->h.raw - skb->data) + | 1565 | proto_hdr_len = (skb_transport_offset(skb) + |
1567 | (skb->h.th->doff << 2)); | 1566 | tcp_hdrlen(skb)); |
1568 | if (unlikely(proto_hdr_len > len)) { | 1567 | if (unlikely(proto_hdr_len > len)) { |
1569 | dev_kfree_skb_any(skb); | 1568 | dev_kfree_skb_any(skb); |
1570 | return NETDEV_TX_OK; | 1569 | return NETDEV_TX_OK; |
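Two conversions recur in the atl1 hunks: raw pointer arithmetic such as skb->h.raw - skb->data becomes skb_transport_offset()/skb_network_offset(), and the TSO pseudo-header is seeded through ip_hdr()/tcp_hdr(). A hedged sketch of the checksum-offload offset computation under those assumptions:

#include <linux/skbuff.h>

/* Sketch: derive the two byte offsets a NIC typically wants for TX
 * checksum offload -- where checksumming starts (the transport header)
 * and where the result must be stored. */
static int tx_csum_offsets_sketch(const struct sk_buff *skb,
                                  unsigned int *cso, unsigned int *css)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return -1;

        *cso = skb_transport_offset(skb); /* was: skb->h.raw - skb->data */
        *css = *cso + skb->csum_offset;   /* checksum store offset */
        return 0;
}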
diff --git a/drivers/net/atp.c b/drivers/net/atp.c index 2d306fcb7f36..18aba838c1ff 100644 --- a/drivers/net/atp.c +++ b/drivers/net/atp.c | |||
@@ -793,7 +793,6 @@ static void net_rx(struct net_device *dev) | |||
793 | lp->stats.rx_dropped++; | 793 | lp->stats.rx_dropped++; |
794 | goto done; | 794 | goto done; |
795 | } | 795 | } |
796 | skb->dev = dev; | ||
797 | 796 | ||
798 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 797 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
799 | read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); | 798 | read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); |
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 69ae229b680e..d10fb80e9a63 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c | |||
@@ -1125,7 +1125,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev) | |||
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | pDB = aup->tx_db_inuse[aup->tx_head]; | 1127 | pDB = aup->tx_db_inuse[aup->tx_head]; |
1128 | memcpy((void *)pDB->vaddr, skb->data, skb->len); | 1128 | skb_copy_from_linear_data(skb, pDB->vaddr, skb->len); |
1129 | if (skb->len < ETH_ZLEN) { | 1129 | if (skb->len < ETH_ZLEN) { |
1130 | for (i=skb->len; i<ETH_ZLEN; i++) { | 1130 | for (i=skb->len; i<ETH_ZLEN; i++) { |
1131 | ((char *)pDB->vaddr)[i] = 0; | 1131 | ((char *)pDB->vaddr)[i] = 0; |
@@ -1205,7 +1205,6 @@ static int au1000_rx(struct net_device *dev) | |||
1205 | aup->stats.rx_dropped++; | 1205 | aup->stats.rx_dropped++; |
1206 | continue; | 1206 | continue; |
1207 | } | 1207 | } |
1208 | skb->dev = dev; | ||
1209 | skb_reserve(skb, 2); /* 16 byte IP header align */ | 1208 | skb_reserve(skb, 2); /* 16 byte IP header align */ |
1210 | eth_copy_and_sum(skb, | 1209 | eth_copy_and_sum(skb, |
1211 | (unsigned char *)pDB->vaddr, frmlen, 0); | 1210 | (unsigned char *)pDB->vaddr, frmlen, 0); |
diff --git a/drivers/net/b44.c b/drivers/net/b44.c index d742bfe24471..879a2fff474e 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c | |||
@@ -825,12 +825,11 @@ static int b44_rx(struct b44 *bp, int budget) | |||
825 | if (copy_skb == NULL) | 825 | if (copy_skb == NULL) |
826 | goto drop_it_no_recycle; | 826 | goto drop_it_no_recycle; |
827 | 827 | ||
828 | copy_skb->dev = bp->dev; | ||
829 | skb_reserve(copy_skb, 2); | 828 | skb_reserve(copy_skb, 2); |
830 | skb_put(copy_skb, len); | 829 | skb_put(copy_skb, len); |
831 | /* DMA sync done above, copy just the actual packet */ | 830 | /* DMA sync done above, copy just the actual packet */ |
832 | memcpy(copy_skb->data, skb->data+bp->rx_offset, len); | 831 | skb_copy_from_linear_data_offset(skb, bp->rx_offset, |
833 | 832 | copy_skb->data, len); | |
834 | skb = copy_skb; | 833 | skb = copy_skb; |
835 | } | 834 | } |
836 | skb->ip_summed = CHECKSUM_NONE; | 835 | skb->ip_summed = CHECKSUM_NONE; |
@@ -1007,7 +1006,8 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1007 | goto err_out; | 1006 | goto err_out; |
1008 | } | 1007 | } |
1009 | 1008 | ||
1010 | memcpy(skb_put(bounce_skb, len), skb->data, skb->len); | 1009 | skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), |
1010 | skb->len); | ||
1011 | dev_kfree_skb_any(skb); | 1011 | dev_kfree_skb_any(skb); |
1012 | skb = bounce_skb; | 1012 | skb = bounce_skb; |
1013 | } | 1013 | } |
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index c143304dcff5..4612725965df 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c | |||
@@ -715,7 +715,6 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) | |||
715 | if (skb != NULL) { | 715 | if (skb != NULL) { |
716 | nb -= ETHERCRC; | 716 | nb -= ETHERCRC; |
717 | skb_put(skb, nb); | 717 | skb_put(skb, nb); |
718 | skb->dev = dev; | ||
719 | skb->protocol = eth_type_trans(skb, dev); | 718 | skb->protocol = eth_type_trans(skb, dev); |
720 | netif_rx(skb); | 719 | netif_rx(skb); |
721 | dev->last_rx = jiffies; | 720 | dev->last_rx = jiffies; |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 0b7aded8dcfd..f98a2205a090 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -54,8 +54,8 @@ | |||
54 | 54 | ||
55 | #define DRV_MODULE_NAME "bnx2" | 55 | #define DRV_MODULE_NAME "bnx2" |
56 | #define PFX DRV_MODULE_NAME ": " | 56 | #define PFX DRV_MODULE_NAME ": " |
57 | #define DRV_MODULE_VERSION "1.5.7" | 57 | #define DRV_MODULE_VERSION "1.5.8" |
58 | #define DRV_MODULE_RELDATE "March 29, 2007" | 58 | #define DRV_MODULE_RELDATE "April 24, 2007" |
59 | 59 | ||
60 | #define RUN_AT(x) (jiffies + (x)) | 60 | #define RUN_AT(x) (jiffies + (x)) |
61 | 61 | ||
@@ -1884,10 +1884,8 @@ bnx2_rx_int(struct bnx2 *bp, int budget) | |||
1884 | goto reuse_rx; | 1884 | goto reuse_rx; |
1885 | 1885 | ||
1886 | /* aligned copy */ | 1886 | /* aligned copy */ |
1887 | memcpy(new_skb->data, | 1887 | skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2, |
1888 | skb->data + bp->rx_offset - 2, | 1888 | new_skb->data, len + 2); |
1889 | len + 2); | ||
1890 | |||
1891 | skb_reserve(new_skb, 2); | 1889 | skb_reserve(new_skb, 2); |
1892 | skb_put(new_skb, len); | 1890 | skb_put(new_skb, len); |
1893 | 1891 | ||
@@ -3421,6 +3419,9 @@ bnx2_init_chip(struct bnx2 *bp) | |||
3421 | val = REG_RD(bp, BNX2_MQ_CONFIG); | 3419 | val = REG_RD(bp, BNX2_MQ_CONFIG); |
3422 | val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; | 3420 | val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; |
3423 | val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; | 3421 | val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; |
3422 | if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1) | ||
3423 | val |= BNX2_MQ_CONFIG_HALT_DIS; | ||
3424 | |||
3424 | REG_WR(bp, BNX2_MQ_CONFIG, val); | 3425 | REG_WR(bp, BNX2_MQ_CONFIG, val); |
3425 | 3426 | ||
3426 | val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); | 3427 | val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); |
@@ -4510,6 +4511,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4510 | if ((mss = skb_shinfo(skb)->gso_size) && | 4511 | if ((mss = skb_shinfo(skb)->gso_size) && |
4511 | (skb->len > (bp->dev->mtu + ETH_HLEN))) { | 4512 | (skb->len > (bp->dev->mtu + ETH_HLEN))) { |
4512 | u32 tcp_opt_len, ip_tcp_len; | 4513 | u32 tcp_opt_len, ip_tcp_len; |
4514 | struct iphdr *iph; | ||
4513 | 4515 | ||
4514 | if (skb_header_cloned(skb) && | 4516 | if (skb_header_cloned(skb) && |
4515 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 4517 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { |
@@ -4517,25 +4519,23 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4517 | return NETDEV_TX_OK; | 4519 | return NETDEV_TX_OK; |
4518 | } | 4520 | } |
4519 | 4521 | ||
4520 | tcp_opt_len = ((skb->h.th->doff - 5) * 4); | ||
4521 | vlan_tag_flags |= TX_BD_FLAGS_SW_LSO; | 4522 | vlan_tag_flags |= TX_BD_FLAGS_SW_LSO; |
4522 | 4523 | ||
4523 | tcp_opt_len = 0; | 4524 | tcp_opt_len = 0; |
4524 | if (skb->h.th->doff > 5) { | 4525 | if (tcp_hdr(skb)->doff > 5) |
4525 | tcp_opt_len = (skb->h.th->doff - 5) << 2; | 4526 | tcp_opt_len = tcp_optlen(skb); |
4526 | } | 4527 | |
4527 | ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr); | 4528 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); |
4528 | 4529 | ||
4529 | skb->nh.iph->check = 0; | 4530 | iph = ip_hdr(skb); |
4530 | skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); | 4531 | iph->check = 0; |
4531 | skb->h.th->check = | 4532 | iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); |
4532 | ~csum_tcpudp_magic(skb->nh.iph->saddr, | 4533 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
4533 | skb->nh.iph->daddr, | 4534 | iph->daddr, 0, |
4534 | 0, IPPROTO_TCP, 0); | 4535 | IPPROTO_TCP, 0); |
4535 | 4536 | if (tcp_opt_len || (iph->ihl > 5)) { | |
4536 | if (tcp_opt_len || (skb->nh.iph->ihl > 5)) { | 4537 | vlan_tag_flags |= ((iph->ihl - 5) + |
4537 | vlan_tag_flags |= ((skb->nh.iph->ihl - 5) + | 4538 | (tcp_opt_len >> 2)) << 8; |
4538 | (tcp_opt_len >> 2)) << 8; | ||
4539 | } | 4539 | } |
4540 | } | 4540 | } |
4541 | else | 4541 | else |
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index ccbdf81c6599..878eee58f12a 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h | |||
@@ -6518,6 +6518,7 @@ struct bnx2 { | |||
6518 | #define CHIP_ID_5708_B0 0x57081000 | 6518 | #define CHIP_ID_5708_B0 0x57081000 |
6519 | #define CHIP_ID_5708_B1 0x57081010 | 6519 | #define CHIP_ID_5708_B1 0x57081010 |
6520 | #define CHIP_ID_5709_A0 0x57090000 | 6520 | #define CHIP_ID_5709_A0 0x57090000 |
6521 | #define CHIP_ID_5709_A1 0x57090010 | ||
6521 | 6522 | ||
6522 | #define CHIP_BOND_ID(bp) (((bp)->chip_id) & 0xf) | 6523 | #define CHIP_BOND_ID(bp) (((bp)->chip_id) & 0xf) |
6523 | 6524 | ||
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 3fb354d9c515..7e03f41ae2c2 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -884,8 +884,8 @@ static int ad_lacpdu_send(struct port *port) | |||
884 | } | 884 | } |
885 | 885 | ||
886 | skb->dev = slave->dev; | 886 | skb->dev = slave->dev; |
887 | skb->mac.raw = skb->data; | 887 | skb_reset_mac_header(skb); |
888 | skb->nh.raw = skb->data + ETH_HLEN; | 888 | skb->network_header = skb->mac_header + ETH_HLEN; |
889 | skb->protocol = PKT_TYPE_LACPDU; | 889 | skb->protocol = PKT_TYPE_LACPDU; |
890 | skb->priority = TC_PRIO_CONTROL; | 890 | skb->priority = TC_PRIO_CONTROL; |
891 | 891 | ||
@@ -928,8 +928,8 @@ static int ad_marker_send(struct port *port, struct marker *marker) | |||
928 | skb_reserve(skb, 16); | 928 | skb_reserve(skb, 16); |
929 | 929 | ||
930 | skb->dev = slave->dev; | 930 | skb->dev = slave->dev; |
931 | skb->mac.raw = skb->data; | 931 | skb_reset_mac_header(skb); |
932 | skb->nh.raw = skb->data + ETH_HLEN; | 932 | skb->network_header = skb->mac_header + ETH_HLEN; |
933 | skb->protocol = PKT_TYPE_LACPDU; | 933 | skb->protocol = PKT_TYPE_LACPDU; |
934 | 934 | ||
935 | marker_header = (struct marker_header *)skb_put(skb, length); | 935 | marker_header = (struct marker_header *)skb_put(skb, length); |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 217a2eedee0a..92c3b6f6a8e7 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -104,10 +104,15 @@ struct arp_pkt { | |||
104 | }; | 104 | }; |
105 | #pragma pack() | 105 | #pragma pack() |
106 | 106 | ||
107 | static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) | ||
108 | { | ||
109 | return (struct arp_pkt *)skb_network_header(skb); | ||
110 | } | ||
111 | |||
107 | /* Forward declaration */ | 112 | /* Forward declaration */ |
108 | static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); | 113 | static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); |
109 | 114 | ||
110 | static inline u8 _simple_hash(u8 *hash_start, int hash_size) | 115 | static inline u8 _simple_hash(const u8 *hash_start, int hash_size) |
111 | { | 116 | { |
112 | int i; | 117 | int i; |
113 | u8 hash = 0; | 118 | u8 hash = 0; |
@@ -613,7 +618,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, u32 src_ip) | |||
613 | static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond) | 618 | static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond) |
614 | { | 619 | { |
615 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); | 620 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); |
616 | struct arp_pkt *arp = (struct arp_pkt *)skb->nh.raw; | 621 | struct arp_pkt *arp = arp_pkt(skb); |
617 | struct slave *assigned_slave; | 622 | struct slave *assigned_slave; |
618 | struct rlb_client_info *client_info; | 623 | struct rlb_client_info *client_info; |
619 | u32 hash_index = 0; | 624 | u32 hash_index = 0; |
@@ -701,7 +706,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon | |||
701 | */ | 706 | */ |
702 | static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) | 707 | static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) |
703 | { | 708 | { |
704 | struct arp_pkt *arp = (struct arp_pkt *)skb->nh.raw; | 709 | struct arp_pkt *arp = arp_pkt(skb); |
705 | struct slave *tx_slave = NULL; | 710 | struct slave *tx_slave = NULL; |
706 | 711 | ||
707 | if (arp->op_code == __constant_htons(ARPOP_REPLY)) { | 712 | if (arp->op_code == __constant_htons(ARPOP_REPLY)) { |
@@ -890,8 +895,8 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) | |||
890 | data = skb_put(skb, size); | 895 | data = skb_put(skb, size); |
891 | memcpy(data, &pkt, size); | 896 | memcpy(data, &pkt, size); |
892 | 897 | ||
893 | skb->mac.raw = data; | 898 | skb_reset_mac_header(skb); |
894 | skb->nh.raw = data + ETH_HLEN; | 899 | skb->network_header = skb->mac_header + ETH_HLEN; |
895 | skb->protocol = pkt.type; | 900 | skb->protocol = pkt.type; |
896 | skb->priority = TC_PRIO_CONTROL; | 901 | skb->priority = TC_PRIO_CONTROL; |
897 | skb->dev = slave->dev; | 902 | skb->dev = slave->dev; |
@@ -1263,10 +1268,10 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1263 | int hash_size = 0; | 1268 | int hash_size = 0; |
1264 | int do_tx_balance = 1; | 1269 | int do_tx_balance = 1; |
1265 | u32 hash_index = 0; | 1270 | u32 hash_index = 0; |
1266 | u8 *hash_start = NULL; | 1271 | const u8 *hash_start = NULL; |
1267 | int res = 1; | 1272 | int res = 1; |
1268 | 1273 | ||
1269 | skb->mac.raw = (unsigned char *)skb->data; | 1274 | skb_reset_mac_header(skb); |
1270 | eth_data = eth_hdr(skb); | 1275 | eth_data = eth_hdr(skb); |
1271 | 1276 | ||
1272 | /* make sure that the curr_active_slave and the slaves list do | 1277 | /* make sure that the curr_active_slave and the slaves list do |
@@ -1280,15 +1285,18 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1280 | } | 1285 | } |
1281 | 1286 | ||
1282 | switch (ntohs(skb->protocol)) { | 1287 | switch (ntohs(skb->protocol)) { |
1283 | case ETH_P_IP: | 1288 | case ETH_P_IP: { |
1289 | const struct iphdr *iph = ip_hdr(skb); | ||
1290 | |||
1284 | if ((memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) || | 1291 | if ((memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) || |
1285 | (skb->nh.iph->daddr == ip_bcast) || | 1292 | (iph->daddr == ip_bcast) || |
1286 | (skb->nh.iph->protocol == IPPROTO_IGMP)) { | 1293 | (iph->protocol == IPPROTO_IGMP)) { |
1287 | do_tx_balance = 0; | 1294 | do_tx_balance = 0; |
1288 | break; | 1295 | break; |
1289 | } | 1296 | } |
1290 | hash_start = (char*)&(skb->nh.iph->daddr); | 1297 | hash_start = (char *)&(iph->daddr); |
1291 | hash_size = sizeof(skb->nh.iph->daddr); | 1298 | hash_size = sizeof(iph->daddr); |
1299 | } | ||
1292 | break; | 1300 | break; |
1293 | case ETH_P_IPV6: | 1301 | case ETH_P_IPV6: |
1294 | if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) { | 1302 | if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) { |
@@ -1296,8 +1304,8 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1296 | break; | 1304 | break; |
1297 | } | 1305 | } |
1298 | 1306 | ||
1299 | hash_start = (char*)&(skb->nh.ipv6h->daddr); | 1307 | hash_start = (char *)&(ipv6_hdr(skb)->daddr); |
1300 | hash_size = sizeof(skb->nh.ipv6h->daddr); | 1308 | hash_size = sizeof(ipv6_hdr(skb)->daddr); |
1301 | break; | 1309 | break; |
1302 | case ETH_P_IPX: | 1310 | case ETH_P_IPX: |
1303 | if (ipx_hdr(skb)->ipx_checksum != | 1311 | if (ipx_hdr(skb)->ipx_checksum != |
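The new arp_pkt() inline above shows the accessor idiom this series prefers: define one typed wrapper over skb_network_header() instead of casting skb->nh.raw at every call site. The same idiom for a made-up header type (my_proto_hdr is purely illustrative, not a kernel structure):

#include <linux/skbuff.h>
#include <linux/types.h>

struct my_proto_hdr {                /* hypothetical, for illustration only */
        __be16 op_code;
        __be32 src_ip;
        __be32 dst_ip;
} __attribute__((packed));

static inline struct my_proto_hdr *my_proto_hdr_of(const struct sk_buff *skb)
{
        /* was: (struct my_proto_hdr *)skb->nh.raw */
        return (struct my_proto_hdr *)skb_network_header(skb);
}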
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e4724d874e7c..cea3783c92c5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -2524,7 +2524,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack | |||
2524 | (2 * sizeof(u32))))) | 2524 | (2 * sizeof(u32))))) |
2525 | goto out_unlock; | 2525 | goto out_unlock; |
2526 | 2526 | ||
2527 | arp = skb->nh.arph; | 2527 | arp = arp_hdr(skb); |
2528 | if (arp->ar_hln != dev->addr_len || | 2528 | if (arp->ar_hln != dev->addr_len || |
2529 | skb->pkt_type == PACKET_OTHERHOST || | 2529 | skb->pkt_type == PACKET_OTHERHOST || |
2530 | skb->pkt_type == PACKET_LOOPBACK || | 2530 | skb->pkt_type == PACKET_LOOPBACK || |
@@ -3476,7 +3476,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, | |||
3476 | struct net_device *bond_dev, int count) | 3476 | struct net_device *bond_dev, int count) |
3477 | { | 3477 | { |
3478 | struct ethhdr *data = (struct ethhdr *)skb->data; | 3478 | struct ethhdr *data = (struct ethhdr *)skb->data; |
3479 | struct iphdr *iph = skb->nh.iph; | 3479 | struct iphdr *iph = ip_hdr(skb); |
3480 | u16 *layer4hdr = (u16 *)((u32 *)iph + iph->ihl); | 3480 | u16 *layer4hdr = (u16 *)((u32 *)iph + iph->ihl); |
3481 | int layer4_xor = 0; | 3481 | int layer4_xor = 0; |
3482 | 3482 | ||
@@ -3640,9 +3640,8 @@ static struct net_device_stats *bond_get_stats(struct net_device *bond_dev) | |||
3640 | read_lock_bh(&bond->lock); | 3640 | read_lock_bh(&bond->lock); |
3641 | 3641 | ||
3642 | bond_for_each_slave(bond, slave, i) { | 3642 | bond_for_each_slave(bond, slave, i) { |
3643 | if (slave->dev->get_stats) { | 3643 | sstats = slave->dev->get_stats(slave->dev); |
3644 | sstats = slave->dev->get_stats(slave->dev); | 3644 | if (sstats) { |
3645 | |||
3646 | stats->rx_packets += sstats->rx_packets; | 3645 | stats->rx_packets += sstats->rx_packets; |
3647 | stats->rx_bytes += sstats->rx_bytes; | 3646 | stats->rx_bytes += sstats->rx_bytes; |
3648 | stats->rx_errors += sstats->rx_errors; | 3647 | stats->rx_errors += sstats->rx_errors; |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index c8126484c2be..4aec747d9e43 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -1995,7 +1995,6 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, | |||
1995 | return -1; | 1995 | return -1; |
1996 | 1996 | ||
1997 | *skbref = skb; | 1997 | *skbref = skb; |
1998 | skb->dev = cp->dev; | ||
1999 | skb_reserve(skb, swivel); | 1998 | skb_reserve(skb, swivel); |
2000 | 1999 | ||
2001 | p = skb->data; | 2000 | p = skb->data; |
@@ -2822,10 +2821,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, | |||
2822 | 2821 | ||
2823 | ctrl = 0; | 2822 | ctrl = 0; |
2824 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2823 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2825 | u64 csum_start_off, csum_stuff_off; | 2824 | const u64 csum_start_off = skb_transport_offset(skb); |
2826 | 2825 | const u64 csum_stuff_off = csum_start_off + skb->csum_offset; | |
2827 | csum_start_off = (u64) (skb->h.raw - skb->data); | ||
2828 | csum_stuff_off = csum_start_off + skb->csum_offset; | ||
2829 | 2826 | ||
2830 | ctrl = TX_DESC_CSUM_EN | | 2827 | ctrl = TX_DESC_CSUM_EN | |
2831 | CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | | 2828 | CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | |
@@ -2849,8 +2846,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, | |||
2849 | ctrl | TX_DESC_SOF, 0); | 2846 | ctrl | TX_DESC_SOF, 0); |
2850 | entry = TX_DESC_NEXT(ring, entry); | 2847 | entry = TX_DESC_NEXT(ring, entry); |
2851 | 2848 | ||
2852 | memcpy(tx_tiny_buf(cp, ring, entry), skb->data + | 2849 | skb_copy_from_linear_data_offset(skb, len - tabort, |
2853 | len - tabort, tabort); | 2850 | tx_tiny_buf(cp, ring, entry), tabort); |
2854 | mapping = tx_tiny_map(cp, ring, entry, tentry); | 2851 | mapping = tx_tiny_map(cp, ring, entry, tentry); |
2855 | cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, | 2852 | cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, |
2856 | (nr_frags == 0)); | 2853 | (nr_frags == 0)); |
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 326d4a665123..e4f874a70fe5 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c | |||
@@ -1062,7 +1062,7 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev, | |||
1062 | pci_unmap_addr(ce, dma_addr), | 1062 | pci_unmap_addr(ce, dma_addr), |
1063 | pci_unmap_len(ce, dma_len), | 1063 | pci_unmap_len(ce, dma_len), |
1064 | PCI_DMA_FROMDEVICE); | 1064 | PCI_DMA_FROMDEVICE); |
1065 | memcpy(skb->data, ce->skb->data, len); | 1065 | skb_copy_from_linear_data(ce->skb, skb->data, len); |
1066 | pci_dma_sync_single_for_device(pdev, | 1066 | pci_dma_sync_single_for_device(pdev, |
1067 | pci_unmap_addr(ce, dma_addr), | 1067 | pci_unmap_addr(ce, dma_addr), |
1068 | pci_unmap_len(ce, dma_len), | 1068 | pci_unmap_len(ce, dma_len), |
@@ -1379,12 +1379,11 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) | |||
1379 | } | 1379 | } |
1380 | __skb_pull(skb, sizeof(*p)); | 1380 | __skb_pull(skb, sizeof(*p)); |
1381 | 1381 | ||
1382 | skb->dev = adapter->port[p->iff].dev; | ||
1383 | skb->dev->last_rx = jiffies; | 1382 | skb->dev->last_rx = jiffies; |
1384 | st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); | 1383 | st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); |
1385 | st->rx_packets++; | 1384 | st->rx_packets++; |
1386 | 1385 | ||
1387 | skb->protocol = eth_type_trans(skb, skb->dev); | 1386 | skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); |
1388 | if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && | 1387 | if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && |
1389 | skb->protocol == htons(ETH_P_IP) && | 1388 | skb->protocol == htons(ETH_P_IP) && |
1390 | (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { | 1389 | (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { |
@@ -1866,14 +1865,14 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1866 | 1865 | ||
1867 | ++st->tx_tso; | 1866 | ++st->tx_tso; |
1868 | 1867 | ||
1869 | eth_type = skb->nh.raw - skb->data == ETH_HLEN ? | 1868 | eth_type = skb_network_offset(skb) == ETH_HLEN ? |
1870 | CPL_ETH_II : CPL_ETH_II_VLAN; | 1869 | CPL_ETH_II : CPL_ETH_II_VLAN; |
1871 | 1870 | ||
1872 | hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); | 1871 | hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); |
1873 | hdr->opcode = CPL_TX_PKT_LSO; | 1872 | hdr->opcode = CPL_TX_PKT_LSO; |
1874 | hdr->ip_csum_dis = hdr->l4_csum_dis = 0; | 1873 | hdr->ip_csum_dis = hdr->l4_csum_dis = 0; |
1875 | hdr->ip_hdr_words = skb->nh.iph->ihl; | 1874 | hdr->ip_hdr_words = ip_hdr(skb)->ihl; |
1876 | hdr->tcp_hdr_words = skb->h.th->doff; | 1875 | hdr->tcp_hdr_words = tcp_hdr(skb)->doff; |
1877 | hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, | 1876 | hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, |
1878 | skb_shinfo(skb)->gso_size)); | 1877 | skb_shinfo(skb)->gso_size)); |
1879 | hdr->len = htonl(skb->len - sizeof(*hdr)); | 1878 | hdr->len = htonl(skb->len - sizeof(*hdr)); |
@@ -1913,7 +1912,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1913 | 1912 | ||
1914 | if (!(adapter->flags & UDP_CSUM_CAPABLE) && | 1913 | if (!(adapter->flags & UDP_CSUM_CAPABLE) && |
1915 | skb->ip_summed == CHECKSUM_PARTIAL && | 1914 | skb->ip_summed == CHECKSUM_PARTIAL && |
1916 | skb->nh.iph->protocol == IPPROTO_UDP) { | 1915 | ip_hdr(skb)->protocol == IPPROTO_UDP) { |
1917 | if (unlikely(skb_checksum_help(skb))) { | 1916 | if (unlikely(skb_checksum_help(skb))) { |
1918 | pr_debug("%s: unable to do udp checksum\n", dev->name); | 1917 | pr_debug("%s: unable to do udp checksum\n", dev->name); |
1919 | dev_kfree_skb_any(skb); | 1918 | dev_kfree_skb_any(skb); |
@@ -1926,7 +1925,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1926 | */ | 1925 | */ |
1927 | if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { | 1926 | if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { |
1928 | if (skb->protocol == htons(ETH_P_ARP) && | 1927 | if (skb->protocol == htons(ETH_P_ARP) && |
1929 | skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) { | 1928 | arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) { |
1930 | adapter->sge->espibug_skb[dev->if_port] = skb; | 1929 | adapter->sge->espibug_skb[dev->if_port] = skb; |
1931 | /* We want to re-use this skb later. We | 1930 | /* We want to re-use this skb later. We |
1932 | * simply bump the reference count and it | 1931 | * simply bump the reference count and it |
@@ -2096,10 +2095,14 @@ static void espibug_workaround_t204(unsigned long data) | |||
2096 | 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 | 2095 | 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 |
2097 | }; | 2096 | }; |
2098 | 2097 | ||
2099 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), | 2098 | skb_copy_to_linear_data_offset(skb, |
2100 | ch_mac_addr, ETH_ALEN); | 2099 | sizeof(struct cpl_tx_pkt), |
2101 | memcpy(skb->data + skb->len - 10, | 2100 | ch_mac_addr, |
2102 | ch_mac_addr, ETH_ALEN); | 2101 | ETH_ALEN); |
2102 | skb_copy_to_linear_data_offset(skb, | ||
2103 | skb->len - 10, | ||
2104 | ch_mac_addr, | ||
2105 | ETH_ALEN); | ||
2103 | skb->cb[0] = 0xff; | 2106 | skb->cb[0] = 0xff; |
2104 | } | 2107 | } |
2105 | 2108 | ||
@@ -2126,10 +2129,14 @@ static void espibug_workaround(unsigned long data) | |||
2126 | if (!skb->cb[0]) { | 2129 | if (!skb->cb[0]) { |
2127 | u8 ch_mac_addr[ETH_ALEN] = | 2130 | u8 ch_mac_addr[ETH_ALEN] = |
2128 | {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; | 2131 | {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; |
2129 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), | 2132 | skb_copy_to_linear_data_offset(skb, |
2130 | ch_mac_addr, ETH_ALEN); | 2133 | sizeof(struct cpl_tx_pkt), |
2131 | memcpy(skb->data + skb->len - 10, ch_mac_addr, | 2134 | ch_mac_addr, |
2132 | ETH_ALEN); | 2135 | ETH_ALEN); |
2136 | skb_copy_to_linear_data_offset(skb, | ||
2137 | skb->len - 10, | ||
2138 | ch_mac_addr, | ||
2139 | ETH_ALEN); | ||
2133 | skb->cb[0] = 0xff; | 2140 | skb->cb[0] = 0xff; |
2134 | } | 2141 | } |
2135 | 2142 | ||
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 8eb571276000..5bdf5ca85a65 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c | |||
@@ -1348,7 +1348,8 @@ e100_rx(struct net_device *dev) | |||
1348 | 1348 | ||
1349 | #ifdef ETHDEBUG | 1349 | #ifdef ETHDEBUG |
1350 | printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n", | 1350 | printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n", |
1351 | skb->head, skb->data, skb->tail, skb->end); | 1351 | skb->head, skb->data, skb_tail_pointer(skb), |
1352 | skb_end_pointer(skb)); | ||
1352 | printk("copying packet to 0x%x.\n", skb_data_ptr); | 1353 | printk("copying packet to 0x%x.\n", skb_data_ptr); |
1353 | #endif | 1354 | #endif |
1354 | 1355 | ||
@@ -1375,7 +1376,6 @@ e100_rx(struct net_device *dev) | |||
1375 | myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data)); | 1376 | myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data)); |
1376 | } | 1377 | } |
1377 | 1378 | ||
1378 | skb->dev = dev; | ||
1379 | skb->protocol = eth_type_trans(skb, dev); | 1379 | skb->protocol = eth_type_trans(skb, dev); |
1380 | 1380 | ||
1381 | /* Send the packet to the upper layers */ | 1381 | /* Send the packet to the upper layers */ |
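The debug printk above now reads the buffer bounds through skb_tail_pointer() and skb_end_pointer() instead of skb->tail and skb->end, presumably so the code keeps working whether those fields are stored as pointers or as offsets from skb->head. A minimal sketch of such a dump:

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Sketch: print the skb layout without depending on how tail/end are
 * represented internally. */
static void dump_skb_layout_sketch(const struct sk_buff *skb)
{
        printk(KERN_DEBUG "head=%p data=%p tail=%p end=%p\n",
               skb->head, skb->data,
               skb_tail_pointer(skb), skb_end_pointer(skb));
}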
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 4612f71a7106..9774bb1b3e80 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c | |||
@@ -1004,7 +1004,6 @@ skip_this_frame: | |||
1004 | return; | 1004 | return; |
1005 | } | 1005 | } |
1006 | skb_reserve(skb, 2); /* longword align L3 header */ | 1006 | skb_reserve(skb, 2); /* longword align L3 header */ |
1007 | skb->dev = dev; | ||
1008 | 1007 | ||
1009 | if (bp + length > lp->end_dma_buff) { | 1008 | if (bp + length > lp->end_dma_buff) { |
1010 | int semi_cnt = lp->end_dma_buff - bp; | 1009 | int semi_cnt = lp->end_dma_buff - bp; |
@@ -1702,7 +1701,6 @@ net_rx(struct net_device *dev) | |||
1702 | return; | 1701 | return; |
1703 | } | 1702 | } |
1704 | skb_reserve(skb, 2); /* longword align L3 header */ | 1703 | skb_reserve(skb, 2); /* longword align L3 header */ |
1705 | skb->dev = dev; | ||
1706 | 1704 | ||
1707 | readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1); | 1705 | readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1); |
1708 | if (length & 1) | 1706 | if (length & 1) |
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h index e14862b43d17..483a594210a7 100644 --- a/drivers/net/cxgb3/cxgb3_defs.h +++ b/drivers/net/cxgb3/cxgb3_defs.h | |||
@@ -67,7 +67,10 @@ static inline union listen_entry *stid2entry(const struct tid_info *t, | |||
67 | static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t, | 67 | static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t, |
68 | unsigned int tid) | 68 | unsigned int tid) |
69 | { | 69 | { |
70 | return tid < t->ntids ? &(t->tid_tab[tid]) : NULL; | 70 | struct t3c_tid_entry *t3c_tid = tid < t->ntids ? |
71 | &(t->tid_tab[tid]) : NULL; | ||
72 | |||
73 | return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL; | ||
71 | } | 74 | } |
72 | 75 | ||
73 | /* | 76 | /* |
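lookup_tid() now returns NULL when the entry has no client bound, not just when the tid is out of range; that is why the cxgb3_offload.c handlers below gain an explicit t3c_tid check before dereferencing. A sketch of the assumed call-site pattern (the CPL_RET_BUF_DONE fallback and the exact handler signature are inferred from the surrounding code, not verified):

#include "cxgb3_defs.h"

static int handle_cpl_sketch(struct t3cdev *dev, struct sk_buff *skb,
                             const struct tid_info *t, unsigned int tid,
                             unsigned int opcode)
{
        struct t3c_tid_entry *t3c_tid = lookup_tid(t, tid);

        /* The lookup may fail for an out-of-range tid or for an entry
         * whose client was already cleared, so test before using it. */
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[opcode])
                return t3c_tid->client->handlers[opcode](dev, skb, t3c_tid->ctx);

        return CPL_RET_BUF_DONE; /* assumed "drop and free" fallback */
}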
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 48649244673e..ebcf35e4cf5b 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c | |||
@@ -508,6 +508,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid) | |||
508 | 508 | ||
509 | spin_lock_bh(&td->tid_release_lock); | 509 | spin_lock_bh(&td->tid_release_lock); |
510 | p->ctx = (void *)td->tid_release_list; | 510 | p->ctx = (void *)td->tid_release_list; |
511 | p->client = NULL; | ||
511 | td->tid_release_list = p; | 512 | td->tid_release_list = p; |
512 | if (!p->ctx) | 513 | if (!p->ctx) |
513 | schedule_work(&td->tid_release_task); | 514 | schedule_work(&td->tid_release_task); |
@@ -623,7 +624,8 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb) | |||
623 | struct t3c_tid_entry *t3c_tid; | 624 | struct t3c_tid_entry *t3c_tid; |
624 | 625 | ||
625 | t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); | 626 | t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); |
626 | if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers && | 627 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client && |
628 | t3c_tid->client->handlers && | ||
627 | t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) { | 629 | t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) { |
628 | return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb, | 630 | return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb, |
629 | t3c_tid-> | 631 | t3c_tid-> |
@@ -642,7 +644,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb) | |||
642 | struct t3c_tid_entry *t3c_tid; | 644 | struct t3c_tid_entry *t3c_tid; |
643 | 645 | ||
644 | t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); | 646 | t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); |
645 | if (t3c_tid->ctx && t3c_tid->client->handlers && | 647 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && |
646 | t3c_tid->client->handlers[p->opcode]) { | 648 | t3c_tid->client->handlers[p->opcode]) { |
647 | return t3c_tid->client->handlers[p->opcode] (dev, skb, | 649 | return t3c_tid->client->handlers[p->opcode] (dev, skb, |
648 | t3c_tid->ctx); | 650 | t3c_tid->ctx); |
@@ -660,7 +662,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb) | |||
660 | struct t3c_tid_entry *t3c_tid; | 662 | struct t3c_tid_entry *t3c_tid; |
661 | 663 | ||
662 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); | 664 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); |
663 | if (t3c_tid->ctx && t3c_tid->client->handlers && | 665 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && |
664 | t3c_tid->client->handlers[p->opcode]) { | 666 | t3c_tid->client->handlers[p->opcode]) { |
665 | return t3c_tid->client->handlers[p->opcode] | 667 | return t3c_tid->client->handlers[p->opcode] |
666 | (dev, skb, t3c_tid->ctx); | 668 | (dev, skb, t3c_tid->ctx); |
@@ -689,6 +691,28 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb) | |||
689 | } | 691 | } |
690 | } | 692 | } |
691 | 693 | ||
694 | /* | ||
695 | * Returns an sk_buff for a reply CPL message of size len. If the input | ||
696 | * sk_buff has no other users it is trimmed and reused, otherwise a new buffer | ||
697 | * is allocated. The input skb must be of size at least len. Note that this | ||
698 | * operation does not destroy the original skb data even if it decides to reuse | ||
699 | * the buffer. | ||
700 | */ | ||
701 | static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len, | ||
702 | int gfp) | ||
703 | { | ||
704 | if (likely(!skb_cloned(skb))) { | ||
705 | BUG_ON(skb->len < len); | ||
706 | __skb_trim(skb, len); | ||
707 | skb_get(skb); | ||
708 | } else { | ||
709 | skb = alloc_skb(len, gfp); | ||
710 | if (skb) | ||
711 | __skb_put(skb, len); | ||
712 | } | ||
713 | return skb; | ||
714 | } | ||
715 | |||
692 | static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb) | 716 | static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb) |
693 | { | 717 | { |
694 | union opcode_tid *p = cplhdr(skb); | 718 | union opcode_tid *p = cplhdr(skb); |
@@ -696,30 +720,39 @@ static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb) | |||
696 | struct t3c_tid_entry *t3c_tid; | 720 | struct t3c_tid_entry *t3c_tid; |
697 | 721 | ||
698 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); | 722 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); |
699 | if (t3c_tid->ctx && t3c_tid->client->handlers && | 723 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && |
700 | t3c_tid->client->handlers[p->opcode]) { | 724 | t3c_tid->client->handlers[p->opcode]) { |
701 | return t3c_tid->client->handlers[p->opcode] | 725 | return t3c_tid->client->handlers[p->opcode] |
702 | (dev, skb, t3c_tid->ctx); | 726 | (dev, skb, t3c_tid->ctx); |
703 | } else { | 727 | } else { |
704 | struct cpl_abort_req_rss *req = cplhdr(skb); | 728 | struct cpl_abort_req_rss *req = cplhdr(skb); |
705 | struct cpl_abort_rpl *rpl; | 729 | struct cpl_abort_rpl *rpl; |
730 | struct sk_buff *reply_skb; | ||
731 | unsigned int tid = GET_TID(req); | ||
732 | u8 cmd = req->status; | ||
733 | |||
734 | if (req->status == CPL_ERR_RTX_NEG_ADVICE || | ||
735 | req->status == CPL_ERR_PERSIST_NEG_ADVICE) | ||
736 | goto out; | ||
706 | 737 | ||
707 | struct sk_buff *skb = | 738 | reply_skb = cxgb3_get_cpl_reply_skb(skb, |
708 | alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC); | 739 | sizeof(struct |
709 | if (!skb) { | 740 | cpl_abort_rpl), |
741 | GFP_ATOMIC); | ||
742 | |||
743 | if (!reply_skb) { | ||
710 | printk("do_abort_req_rss: couldn't get skb!\n"); | 744 | printk("do_abort_req_rss: couldn't get skb!\n"); |
711 | goto out; | 745 | goto out; |
712 | } | 746 | } |
713 | skb->priority = CPL_PRIORITY_DATA; | 747 | reply_skb->priority = CPL_PRIORITY_DATA; |
714 | __skb_put(skb, sizeof(struct cpl_abort_rpl)); | 748 | __skb_put(reply_skb, sizeof(struct cpl_abort_rpl)); |
715 | rpl = cplhdr(skb); | 749 | rpl = cplhdr(reply_skb); |
716 | rpl->wr.wr_hi = | 750 | rpl->wr.wr_hi = |
717 | htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); | 751 | htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); |
718 | rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req))); | 752 | rpl->wr.wr_lo = htonl(V_WR_TID(tid)); |
719 | OPCODE_TID(rpl) = | 753 | OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); |
720 | htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req))); | 754 | rpl->cmd = cmd; |
721 | rpl->cmd = req->status; | 755 | cxgb3_ofld_send(dev, reply_skb); |
722 | cxgb3_ofld_send(dev, skb); | ||
723 | out: | 756 | out: |
724 | return CPL_RET_BUF_DONE; | 757 | return CPL_RET_BUF_DONE; |
725 | } | 758 | } |
@@ -732,7 +765,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb) | |||
732 | struct t3c_tid_entry *t3c_tid; | 765 | struct t3c_tid_entry *t3c_tid; |
733 | 766 | ||
734 | t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); | 767 | t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); |
735 | if (t3c_tid->ctx && t3c_tid->client->handlers && | 768 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && |
736 | t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { | 769 | t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { |
737 | return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] | 770 | return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] |
738 | (dev, skb, t3c_tid->ctx); | 771 | (dev, skb, t3c_tid->ctx); |
@@ -750,7 +783,7 @@ static int do_trace(struct t3cdev *dev, struct sk_buff *skb) | |||
750 | skb->protocol = htons(0xffff); | 783 | skb->protocol = htons(0xffff); |
751 | skb->dev = dev->lldev; | 784 | skb->dev = dev->lldev; |
752 | skb_pull(skb, sizeof(*p)); | 785 | skb_pull(skb, sizeof(*p)); |
753 | skb->mac.raw = skb->data; | 786 | skb_reset_mac_header(skb); |
754 | netif_receive_skb(skb); | 787 | netif_receive_skb(skb); |
755 | return 0; | 788 | return 0; |
756 | } | 789 | } |
@@ -762,7 +795,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb) | |||
762 | struct t3c_tid_entry *t3c_tid; | 795 | struct t3c_tid_entry *t3c_tid; |
763 | 796 | ||
764 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); | 797 | t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); |
765 | if (t3c_tid->ctx && t3c_tid->client->handlers && | 798 | if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && |
766 | t3c_tid->client->handlers[opcode]) { | 799 | t3c_tid->client->handlers[opcode]) { |
767 | return t3c_tid->client->handlers[opcode] (dev, skb, | 800 | return t3c_tid->client->handlers[opcode] (dev, skb, |
768 | t3c_tid->ctx); | 801 | t3c_tid->ctx); |
@@ -961,7 +994,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) | |||
961 | for (tid = 0; tid < ti->ntids; tid++) { | 994 | for (tid = 0; tid < ti->ntids; tid++) { |
962 | te = lookup_tid(ti, tid); | 995 | te = lookup_tid(ti, tid); |
963 | BUG_ON(!te); | 996 | BUG_ON(!te); |
964 | if (te->ctx && te->client && te->client->redirect) { | 997 | if (te && te->ctx && te->client && te->client->redirect) { |
965 | update_tcb = te->client->redirect(te->ctx, old, new, e); | 998 | update_tcb = te->client->redirect(te->ctx, old, new, e); |
966 | if (update_tcb) { | 999 | if (update_tcb) { |
967 | l2t_hold(L2DATA(tdev), e); | 1000 | l2t_hold(L2DATA(tdev), e); |
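Two related changes run through cxgb3_offload.c: every lookup_tid()/lookup_atid()/lookup_stid() result is now checked for NULL before dereferencing (lookup_tid() can return NULL for entries without a registered client after the cxgb3_defs.h change above, and queued tid releases clear p->client for the same reason), and do_abort_req_rss() now ignores negative-advice statuses and reuses the request sk_buff for the reply when it can. A condensed restatement of the reuse-or-allocate idea, assuming only the standard skbuff primitives (skb_cloned, __skb_trim, skb_get, alloc_skb); the helper added in the patch also asserts the length and sets the reply length explicitly:

	/* Reuse 'req' as a reply buffer of 'len' bytes when nobody else holds a
	 * clone of it, otherwise fall back to a fresh allocation.  skb_get()
	 * takes an extra reference so the caller's own free of 'req' remains
	 * valid after the reply has been handed to the hardware. */
	static struct sk_buff *reply_skb_get(struct sk_buff *req, size_t len,
					     gfp_t gfp)
	{
		if (!skb_cloned(req)) {
			__skb_trim(req, len);	/* shrink to the reply size */
			return skb_get(req);	/* bump refcount and reuse   */
		}
		return alloc_skb(len, gfp);	/* may return NULL */
	}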
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 027ab2c3825c..3666586a4831 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c | |||
@@ -661,7 +661,7 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp) | |||
661 | 661 | ||
662 | if (skb) { | 662 | if (skb) { |
663 | __skb_put(skb, IMMED_PKT_SIZE); | 663 | __skb_put(skb, IMMED_PKT_SIZE); |
664 | memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE); | 664 | skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE); |
665 | } | 665 | } |
666 | return skb; | 666 | return skb; |
667 | } | 667 | } |
@@ -897,11 +897,11 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
897 | d->flit[2] = 0; | 897 | d->flit[2] = 0; |
898 | cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); | 898 | cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); |
899 | hdr->cntrl = htonl(cntrl); | 899 | hdr->cntrl = htonl(cntrl); |
900 | eth_type = skb->nh.raw - skb->data == ETH_HLEN ? | 900 | eth_type = skb_network_offset(skb) == ETH_HLEN ? |
901 | CPL_ETH_II : CPL_ETH_II_VLAN; | 901 | CPL_ETH_II : CPL_ETH_II_VLAN; |
902 | tso_info |= V_LSO_ETH_TYPE(eth_type) | | 902 | tso_info |= V_LSO_ETH_TYPE(eth_type) | |
903 | V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) | | 903 | V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) | |
904 | V_LSO_TCPHDR_WORDS(skb->h.th->doff); | 904 | V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff); |
905 | hdr->lso_info = htonl(tso_info); | 905 | hdr->lso_info = htonl(tso_info); |
906 | flits = 3; | 906 | flits = 3; |
907 | } else { | 907 | } else { |
@@ -913,7 +913,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
913 | if (skb->len <= WR_LEN - sizeof(*cpl)) { | 913 | if (skb->len <= WR_LEN - sizeof(*cpl)) { |
914 | q->sdesc[pidx].skb = NULL; | 914 | q->sdesc[pidx].skb = NULL; |
915 | if (!skb->data_len) | 915 | if (!skb->data_len) |
916 | memcpy(&d->flit[2], skb->data, skb->len); | 916 | skb_copy_from_linear_data(skb, &d->flit[2], |
917 | skb->len); | ||
917 | else | 918 | else |
918 | skb_copy_bits(skb, 0, &d->flit[2], skb->len); | 919 | skb_copy_bits(skb, 0, &d->flit[2], skb->len); |
919 | 920 | ||
@@ -1319,16 +1320,19 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | |||
1319 | /* Only TX_DATA builds SGLs */ | 1320 | /* Only TX_DATA builds SGLs */ |
1320 | 1321 | ||
1321 | from = (struct work_request_hdr *)skb->data; | 1322 | from = (struct work_request_hdr *)skb->data; |
1322 | memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from)); | 1323 | memcpy(&d->flit[1], &from[1], |
1324 | skb_transport_offset(skb) - sizeof(*from)); | ||
1323 | 1325 | ||
1324 | flits = (skb->h.raw - skb->data) / 8; | 1326 | flits = skb_transport_offset(skb) / 8; |
1325 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1327 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
1326 | sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw, | 1328 | sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), |
1329 | skb->tail - skb->transport_header, | ||
1327 | adap->pdev); | 1330 | adap->pdev); |
1328 | if (need_skb_unmap()) { | 1331 | if (need_skb_unmap()) { |
1329 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); | 1332 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); |
1330 | skb->destructor = deferred_unmap_destructor; | 1333 | skb->destructor = deferred_unmap_destructor; |
1331 | ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw; | 1334 | ((struct unmap_info *)skb->cb)->len = (skb->tail - |
1335 | skb->transport_header); | ||
1332 | } | 1336 | } |
1333 | 1337 | ||
1334 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, | 1338 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, |
@@ -1349,8 +1353,8 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) | |||
1349 | if (skb->len <= WR_LEN && cnt == 0) | 1353 | if (skb->len <= WR_LEN && cnt == 0) |
1350 | return 1; /* packet fits as immediate data */ | 1354 | return 1; /* packet fits as immediate data */ |
1351 | 1355 | ||
1352 | flits = (skb->h.raw - skb->data) / 8; /* headers */ | 1356 | flits = skb_transport_offset(skb) / 8; /* headers */ |
1353 | if (skb->tail != skb->h.raw) | 1357 | if (skb->tail != skb->transport_header) |
1354 | cnt++; | 1358 | cnt++; |
1355 | return flits_to_desc(flits + sgl_len(cnt)); | 1359 | return flits_to_desc(flits + sgl_len(cnt)); |
1356 | } | 1360 | } |
@@ -1620,7 +1624,9 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, | |||
1620 | unsigned int gather_idx) | 1624 | unsigned int gather_idx) |
1621 | { | 1625 | { |
1622 | rq->offload_pkts++; | 1626 | rq->offload_pkts++; |
1623 | skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data; | 1627 | skb_reset_mac_header(skb); |
1628 | skb_reset_network_header(skb); | ||
1629 | skb_reset_transport_header(skb); | ||
1624 | 1630 | ||
1625 | if (rq->polling) { | 1631 | if (rq->polling) { |
1626 | rx_gather[gather_idx++] = skb; | 1632 | rx_gather[gather_idx++] = skb; |
@@ -1684,9 +1690,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, | |||
1684 | struct port_info *pi; | 1690 | struct port_info *pi; |
1685 | 1691 | ||
1686 | skb_pull(skb, sizeof(*p) + pad); | 1692 | skb_pull(skb, sizeof(*p) + pad); |
1687 | skb->dev = adap->port[p->iff]; | ||
1688 | skb->dev->last_rx = jiffies; | 1693 | skb->dev->last_rx = jiffies; |
1689 | skb->protocol = eth_type_trans(skb, skb->dev); | 1694 | skb->protocol = eth_type_trans(skb, adap->port[p->iff]); |
1690 | pi = netdev_priv(skb->dev); | 1695 | pi = netdev_priv(skb->dev); |
1691 | if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff && | 1696 | if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff && |
1692 | !p->fragment) { | 1697 | !p->fragment) { |
@@ -1717,11 +1722,11 @@ static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p, | |||
1717 | { | 1722 | { |
1718 | skb->len = len; | 1723 | skb->len = len; |
1719 | if (len <= SKB_DATA_SIZE) { | 1724 | if (len <= SKB_DATA_SIZE) { |
1720 | memcpy(skb->data, p->va, len); | 1725 | skb_copy_to_linear_data(skb, p->va, len); |
1721 | skb->tail += len; | 1726 | skb->tail += len; |
1722 | put_page(p->frag.page); | 1727 | put_page(p->frag.page); |
1723 | } else { | 1728 | } else { |
1724 | memcpy(skb->data, p->va, SKB_DATA_SIZE); | 1729 | skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE); |
1725 | skb_shinfo(skb)->frags[0].page = p->frag.page; | 1730 | skb_shinfo(skb)->frags[0].page = p->frag.page; |
1726 | skb_shinfo(skb)->frags[0].page_offset = | 1731 | skb_shinfo(skb)->frags[0].page_offset = |
1727 | p->frag.page_offset + SKB_DATA_SIZE; | 1732 | p->frag.page_offset + SKB_DATA_SIZE; |
@@ -1767,7 +1772,7 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, | |||
1767 | __skb_put(skb, len); | 1772 | __skb_put(skb, len); |
1768 | pci_dma_sync_single_for_cpu(adap->pdev, mapping, len, | 1773 | pci_dma_sync_single_for_cpu(adap->pdev, mapping, len, |
1769 | PCI_DMA_FROMDEVICE); | 1774 | PCI_DMA_FROMDEVICE); |
1770 | memcpy(skb->data, sd->t.skb->data, len); | 1775 | skb_copy_from_linear_data(sd->t.skb, skb->data, len); |
1771 | pci_dma_sync_single_for_device(adap->pdev, mapping, len, | 1776 | pci_dma_sync_single_for_device(adap->pdev, mapping, len, |
1772 | PCI_DMA_FROMDEVICE); | 1777 | PCI_DMA_FROMDEVICE); |
1773 | } else if (!drop_thres) | 1778 | } else if (!drop_thres) |
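The memcpy() calls on the linear part of an skb in sge.c are replaced by skb_copy_to_linear_data()/skb_copy_from_linear_data() (plus the _offset variants, which add an offset into skb->data). These are thin wrappers that keep the copy direction explicit and make it clear that only the linear, non-paged area is touched; roughly (a sketch of their effect, not the exact kernel source):

	/* skb_copy_to_linear_data(skb, from, len)   ~ */
	memcpy(skb->data, from, len);

	/* skb_copy_from_linear_data(skb, to, len)   ~ */
	memcpy(to, skb->data, len);

	/* skb_copy_from_linear_data_offset(skb, off, to, len)   ~ */
	memcpy(to, skb->data + off, len);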
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index d83f075ef2d7..fb485d0a43d8 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c | |||
@@ -1523,19 +1523,25 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx) | |||
1523 | */ | 1523 | */ |
1524 | int t3_phy_intr_handler(struct adapter *adapter) | 1524 | int t3_phy_intr_handler(struct adapter *adapter) |
1525 | { | 1525 | { |
1526 | static const int intr_gpio_bits[] = { 8, 0x20 }; | 1526 | u32 mask, gpi = adapter_info(adapter)->gpio_intr; |
1527 | |||
1528 | u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE); | 1527 | u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE); |
1529 | 1528 | ||
1530 | for_each_port(adapter, i) { | 1529 | for_each_port(adapter, i) { |
1531 | if (cause & intr_gpio_bits[i]) { | 1530 | struct port_info *p = adap2pinfo(adapter, i); |
1532 | struct cphy *phy = &adap2pinfo(adapter, i)->phy; | 1531 | |
1533 | int phy_cause = phy->ops->intr_handler(phy); | 1532 | mask = gpi - (gpi & (gpi - 1)); |
1533 | gpi -= mask; | ||
1534 | |||
1535 | if (!(p->port_type->caps & SUPPORTED_IRQ)) | ||
1536 | continue; | ||
1537 | |||
1538 | if (cause & mask) { | ||
1539 | int phy_cause = p->phy.ops->intr_handler(&p->phy); | ||
1534 | 1540 | ||
1535 | if (phy_cause & cphy_cause_link_change) | 1541 | if (phy_cause & cphy_cause_link_change) |
1536 | t3_link_changed(adapter, i); | 1542 | t3_link_changed(adapter, i); |
1537 | if (phy_cause & cphy_cause_fifo_error) | 1543 | if (phy_cause & cphy_cause_fifo_error) |
1538 | phy->fifo_errors++; | 1544 | p->phy.fifo_errors++; |
1539 | } | 1545 | } |
1540 | } | 1546 | } |
1541 | 1547 | ||
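t3_phy_intr_handler() now derives each port's interrupt bit from the adapter's gpio_intr mask instead of a fixed intr_gpio_bits[] table. The expression gpi - (gpi & (gpi - 1)) isolates the lowest set bit of gpi, and gpi -= mask then consumes it, so successive ports pick up successive GPIO bits. A small standalone illustration of the bit trick (example values only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int gpi = 0x28;	/* example: bits 3 and 5 set */

		while (gpi) {
			unsigned int mask = gpi - (gpi & (gpi - 1)); /* lowest set bit */
			gpi -= mask;				     /* consume it      */
			printf("mask = 0x%x\n", mask);		     /* 0x8, then 0x20  */
		}
		return 0;
	}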
diff --git a/drivers/net/de600.c b/drivers/net/de600.c index e547ce14eefe..dae97b860daa 100644 --- a/drivers/net/de600.c +++ b/drivers/net/de600.c | |||
@@ -359,7 +359,6 @@ static void de600_rx_intr(struct net_device *dev) | |||
359 | } | 359 | } |
360 | /* else */ | 360 | /* else */ |
361 | 361 | ||
362 | skb->dev = dev; | ||
363 | skb_reserve(skb,2); /* Align */ | 362 | skb_reserve(skb,2); /* Align */ |
364 | 363 | ||
365 | /* 'skb->data' points to the start of sk_buff data area. */ | 364 | /* 'skb->data' points to the start of sk_buff data area. */ |
diff --git a/drivers/net/de620.c b/drivers/net/de620.c index b6ad0cb50552..dc4892426174 100644 --- a/drivers/net/de620.c +++ b/drivers/net/de620.c | |||
@@ -697,7 +697,6 @@ static int de620_rx_intr(struct net_device *dev) | |||
697 | } | 697 | } |
698 | else { /* Yep! Go get it! */ | 698 | else { /* Yep! Go get it! */ |
699 | skb_reserve(skb,2); /* Align */ | 699 | skb_reserve(skb,2); /* Align */ |
700 | skb->dev = dev; | ||
701 | /* skb->data points to the start of sk_buff data area */ | 700 | /* skb->data points to the start of sk_buff data area */ |
702 | buffer = skb_put(skb,size); | 701 | buffer = skb_put(skb,size); |
703 | /* copy the packet into the buffer */ | 702 | /* copy the packet into the buffer */ |
diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 9f7e1db8ce62..95d854e2295c 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c | |||
@@ -616,7 +616,6 @@ static int lance_rx(struct net_device *dev) | |||
616 | } | 616 | } |
617 | lp->stats.rx_bytes += len; | 617 | lp->stats.rx_bytes += len; |
618 | 618 | ||
619 | skb->dev = dev; | ||
620 | skb_reserve(skb, 2); /* 16 byte align */ | 619 | skb_reserve(skb, 2); /* 16 byte align */ |
621 | skb_put(skb, len); /* make room */ | 620 | skb_put(skb, len); /* make room */ |
622 | 621 | ||
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c index 07d2731c1aa8..571d82f8008c 100644 --- a/drivers/net/defxx.c +++ b/drivers/net/defxx.c | |||
@@ -3091,13 +3091,13 @@ static void dfx_rcv_queue_process( | |||
3091 | { | 3091 | { |
3092 | /* Receive buffer allocated, pass receive packet up */ | 3092 | /* Receive buffer allocated, pass receive packet up */ |
3093 | 3093 | ||
3094 | memcpy(skb->data, p_buff + RCV_BUFF_K_PADDING, pkt_len+3); | 3094 | skb_copy_to_linear_data(skb, |
3095 | p_buff + RCV_BUFF_K_PADDING, | ||
3096 | pkt_len + 3); | ||
3095 | } | 3097 | } |
3096 | 3098 | ||
3097 | skb_reserve(skb,3); /* adjust data field so that it points to FC byte */ | 3099 | skb_reserve(skb,3); /* adjust data field so that it points to FC byte */ |
3098 | skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */ | 3100 | skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */ |
3099 | skb->dev = bp->dev; /* pass up device pointer */ | ||
3100 | |||
3101 | skb->protocol = fddi_type_trans(skb, bp->dev); | 3101 | skb->protocol = fddi_type_trans(skb, bp->dev); |
3102 | bp->rcv_total_bytes += skb->len; | 3102 | bp->rcv_total_bytes += skb->len; |
3103 | netif_rx(skb); | 3103 | netif_rx(skb); |
diff --git a/drivers/net/depca.c b/drivers/net/depca.c index 5113eef755b9..183497020bfc 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c | |||
@@ -1044,7 +1044,6 @@ static int depca_rx(struct net_device *dev) | |||
1044 | unsigned char *buf; | 1044 | unsigned char *buf; |
1045 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1045 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1046 | buf = skb_put(skb, pkt_len); | 1046 | buf = skb_put(skb, pkt_len); |
1047 | skb->dev = dev; | ||
1048 | if (entry < lp->rx_old) { /* Wrapped buffer */ | 1047 | if (entry < lp->rx_old) { /* Wrapped buffer */ |
1049 | len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ; | 1048 | len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ; |
1050 | memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len); | 1049 | memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len); |
@@ -1491,8 +1490,9 @@ static void __init depca_platform_probe (void) | |||
1491 | depca_io_ports[i].device = pldev; | 1490 | depca_io_ports[i].device = pldev; |
1492 | 1491 | ||
1493 | if (platform_device_add(pldev)) { | 1492 | if (platform_device_add(pldev)) { |
1494 | platform_device_put(pldev); | ||
1495 | depca_io_ports[i].device = NULL; | 1493 | depca_io_ports[i].device = NULL; |
1494 | pldev->dev.platform_data = NULL; | ||
1495 | platform_device_put(pldev); | ||
1496 | continue; | 1496 | continue; |
1497 | } | 1497 | } |
1498 | 1498 | ||
diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c index a79520295fd0..df62c0232f36 100644 --- a/drivers/net/dgrs.c +++ b/drivers/net/dgrs.c | |||
@@ -503,7 +503,6 @@ dgrs_rcv_frame( | |||
503 | /* discarding the frame */ | 503 | /* discarding the frame */ |
504 | goto out; | 504 | goto out; |
505 | } | 505 | } |
506 | skb->dev = devN; | ||
507 | skb_reserve(skb, 2); /* Align IP header */ | 506 | skb_reserve(skb, 2); /* Align IP header */ |
508 | 507 | ||
509 | again: | 508 | again: |
@@ -742,7 +741,7 @@ static int dgrs_start_xmit(struct sk_buff *skb, struct net_device *devN) | |||
742 | } | 741 | } |
743 | 742 | ||
744 | amt = min_t(unsigned int, len, rbdp->size - count); | 743 | amt = min_t(unsigned int, len, rbdp->size - count); |
745 | memcpy( (char *) S2H(rbdp->buf) + count, skb->data + i, amt); | 744 | skb_copy_from_linear_data_offset(skb, i, S2H(rbdp->buf) + count, amt); |
746 | i += amt; | 745 | i += amt; |
747 | count += amt; | 746 | count += amt; |
748 | len -= amt; | 747 | len -= amt; |
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index 9d446a0fe0bf..74ec64a1625d 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c | |||
@@ -504,7 +504,6 @@ rio_timer (unsigned long data) | |||
504 | break; | 504 | break; |
505 | } | 505 | } |
506 | np->rx_skbuff[entry] = skb; | 506 | np->rx_skbuff[entry] = skb; |
507 | skb->dev = dev; | ||
508 | /* 16 byte align the IP header */ | 507 | /* 16 byte align the IP header */ |
509 | skb_reserve (skb, 2); | 508 | skb_reserve (skb, 2); |
510 | np->rx_ring[entry].fraginfo = | 509 | np->rx_ring[entry].fraginfo = |
@@ -575,7 +574,6 @@ alloc_list (struct net_device *dev) | |||
575 | dev->name); | 574 | dev->name); |
576 | break; | 575 | break; |
577 | } | 576 | } |
578 | skb->dev = dev; /* Mark as being used by this device. */ | ||
579 | skb_reserve (skb, 2); /* 16 byte align the IP header. */ | 577 | skb_reserve (skb, 2); /* 16 byte align the IP header. */ |
580 | /* Rubicon now supports 40 bits of addressing space. */ | 578 | /* Rubicon now supports 40 bits of addressing space. */ |
581 | np->rx_ring[i].fraginfo = | 579 | np->rx_ring[i].fraginfo = |
@@ -866,7 +864,6 @@ receive_packet (struct net_device *dev) | |||
866 | DMA_48BIT_MASK, | 864 | DMA_48BIT_MASK, |
867 | np->rx_buf_sz, | 865 | np->rx_buf_sz, |
868 | PCI_DMA_FROMDEVICE); | 866 | PCI_DMA_FROMDEVICE); |
869 | skb->dev = dev; | ||
870 | /* 16 byte align the IP header */ | 867 | /* 16 byte align the IP header */ |
871 | skb_reserve (skb, 2); | 868 | skb_reserve (skb, 2); |
872 | eth_copy_and_sum (skb, | 869 | eth_copy_and_sum (skb, |
@@ -910,7 +907,6 @@ receive_packet (struct net_device *dev) | |||
910 | break; | 907 | break; |
911 | } | 908 | } |
912 | np->rx_skbuff[entry] = skb; | 909 | np->rx_skbuff[entry] = skb; |
913 | skb->dev = dev; | ||
914 | /* 16 byte align the IP header */ | 910 | /* 16 byte align the IP header */ |
915 | skb_reserve (skb, 2); | 911 | skb_reserve (skb, 2); |
916 | np->rx_ring[entry].fraginfo = | 912 | np->rx_ring[entry].fraginfo = |
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 615d2b14efa7..8cc1174e7f64 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -954,7 +954,6 @@ dm9000_rx(struct net_device *dev) | |||
954 | /* Move data from DM9000 */ | 954 | /* Move data from DM9000 */ |
955 | if (GoodPacket | 955 | if (GoodPacket |
956 | && ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) { | 956 | && ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) { |
957 | skb->dev = dev; | ||
958 | skb_reserve(skb, 2); | 957 | skb_reserve(skb, 2); |
959 | rdptr = (u8 *) skb_put(skb, RxLen - 4); | 958 | rdptr = (u8 *) skb_put(skb, RxLen - 4); |
960 | 959 | ||
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 0cefef5e3f06..4d0e0aea72bf 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -1769,7 +1769,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) | |||
1769 | 1769 | ||
1770 | /* Align, init, and map the RFD. */ | 1770 | /* Align, init, and map the RFD. */ |
1771 | skb_reserve(rx->skb, NET_IP_ALIGN); | 1771 | skb_reserve(rx->skb, NET_IP_ALIGN); |
1772 | memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd)); | 1772 | skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); |
1773 | rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, | 1773 | rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, |
1774 | RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); | 1774 | RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); |
1775 | 1775 | ||
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 1d08e937af82..48e2ade704d3 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -2887,33 +2887,30 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2887 | return err; | 2887 | return err; |
2888 | } | 2888 | } |
2889 | 2889 | ||
2890 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 2890 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
2891 | mss = skb_shinfo(skb)->gso_size; | 2891 | mss = skb_shinfo(skb)->gso_size; |
2892 | if (skb->protocol == htons(ETH_P_IP)) { | 2892 | if (skb->protocol == htons(ETH_P_IP)) { |
2893 | skb->nh.iph->tot_len = 0; | 2893 | struct iphdr *iph = ip_hdr(skb); |
2894 | skb->nh.iph->check = 0; | 2894 | iph->tot_len = 0; |
2895 | skb->h.th->check = | 2895 | iph->check = 0; |
2896 | ~csum_tcpudp_magic(skb->nh.iph->saddr, | 2896 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
2897 | skb->nh.iph->daddr, | 2897 | iph->daddr, 0, |
2898 | 0, | 2898 | IPPROTO_TCP, |
2899 | IPPROTO_TCP, | 2899 | 0); |
2900 | 0); | ||
2901 | cmd_length = E1000_TXD_CMD_IP; | 2900 | cmd_length = E1000_TXD_CMD_IP; |
2902 | ipcse = skb->h.raw - skb->data - 1; | 2901 | ipcse = skb_transport_offset(skb) - 1; |
2903 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 2902 | } else if (skb->protocol == htons(ETH_P_IPV6)) { |
2904 | skb->nh.ipv6h->payload_len = 0; | 2903 | ipv6_hdr(skb)->payload_len = 0; |
2905 | skb->h.th->check = | 2904 | tcp_hdr(skb)->check = |
2906 | ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, | 2905 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
2907 | &skb->nh.ipv6h->daddr, | 2906 | &ipv6_hdr(skb)->daddr, |
2908 | 0, | 2907 | 0, IPPROTO_TCP, 0); |
2909 | IPPROTO_TCP, | ||
2910 | 0); | ||
2911 | ipcse = 0; | 2908 | ipcse = 0; |
2912 | } | 2909 | } |
2913 | ipcss = skb->nh.raw - skb->data; | 2910 | ipcss = skb_network_offset(skb); |
2914 | ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; | 2911 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; |
2915 | tucss = skb->h.raw - skb->data; | 2912 | tucss = skb_transport_offset(skb); |
2916 | tucso = (void *)&(skb->h.th->check) - (void *)skb->data; | 2913 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; |
2917 | tucse = 0; | 2914 | tucse = 0; |
2918 | 2915 | ||
2919 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | | 2916 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | |
@@ -2954,7 +2951,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2954 | uint8_t css; | 2951 | uint8_t css; |
2955 | 2952 | ||
2956 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 2953 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
2957 | css = skb->h.raw - skb->data; | 2954 | css = skb_transport_offset(skb); |
2958 | 2955 | ||
2959 | i = tx_ring->next_to_use; | 2956 | i = tx_ring->next_to_use; |
2960 | buffer_info = &tx_ring->buffer_info[i]; | 2957 | buffer_info = &tx_ring->buffer_info[i]; |
@@ -2962,7 +2959,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2962 | 2959 | ||
2963 | context_desc->lower_setup.ip_config = 0; | 2960 | context_desc->lower_setup.ip_config = 0; |
2964 | context_desc->upper_setup.tcp_fields.tucss = css; | 2961 | context_desc->upper_setup.tcp_fields.tucss = css; |
2965 | context_desc->upper_setup.tcp_fields.tucso = css + skb->csum; | 2962 | context_desc->upper_setup.tcp_fields.tucso = |
2963 | css + skb->csum_offset; | ||
2966 | context_desc->upper_setup.tcp_fields.tucse = 0; | 2964 | context_desc->upper_setup.tcp_fields.tucse = 0; |
2967 | context_desc->tcp_seg_setup.data = 0; | 2965 | context_desc->tcp_seg_setup.data = 0; |
2968 | context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); | 2966 | context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); |
@@ -3296,7 +3294,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3296 | /* TSO Workaround for 82571/2/3 Controllers -- if skb->data | 3294 | /* TSO Workaround for 82571/2/3 Controllers -- if skb->data |
3297 | * points to just header, pull a few bytes of payload from | 3295 | * points to just header, pull a few bytes of payload from |
3298 | * frags into skb->data */ | 3296 | * frags into skb->data */ |
3299 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 3297 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
3300 | if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { | 3298 | if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { |
3301 | switch (adapter->hw.mac_type) { | 3299 | switch (adapter->hw.mac_type) { |
3302 | unsigned int pull_size; | 3300 | unsigned int pull_size; |
@@ -3307,7 +3305,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3307 | * NOTE: this is a TSO only workaround | 3305 | * NOTE: this is a TSO only workaround |
3308 | * if end byte alignment not correct move us | 3306 | * if end byte alignment not correct move us |
3309 | * into the next dword */ | 3307 | * into the next dword */ |
3310 | if ((unsigned long)(skb->tail - 1) & 4) | 3308 | if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) |
3311 | break; | 3309 | break; |
3312 | /* fall through */ | 3310 | /* fall through */ |
3313 | case e1000_82571: | 3311 | case e1000_82571: |
@@ -3796,7 +3794,7 @@ e1000_intr_msi(int irq, void *data) | |||
3796 | 3794 | ||
3797 | for (i = 0; i < E1000_MAX_INTR; i++) | 3795 | for (i = 0; i < E1000_MAX_INTR; i++) |
3798 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & | 3796 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & |
3799 | e1000_clean_tx_irq(adapter, adapter->tx_ring))) | 3797 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) |
3800 | break; | 3798 | break; |
3801 | 3799 | ||
3802 | if (likely(adapter->itr_setting & 3)) | 3800 | if (likely(adapter->itr_setting & 3)) |
@@ -3899,7 +3897,7 @@ e1000_intr(int irq, void *data) | |||
3899 | 3897 | ||
3900 | for (i = 0; i < E1000_MAX_INTR; i++) | 3898 | for (i = 0; i < E1000_MAX_INTR; i++) |
3901 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & | 3899 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & |
3902 | e1000_clean_tx_irq(adapter, adapter->tx_ring))) | 3900 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) |
3903 | break; | 3901 | break; |
3904 | 3902 | ||
3905 | if (likely(adapter->itr_setting & 3)) | 3903 | if (likely(adapter->itr_setting & 3)) |
@@ -3949,7 +3947,7 @@ e1000_clean(struct net_device *poll_dev, int *budget) | |||
3949 | poll_dev->quota -= work_done; | 3947 | poll_dev->quota -= work_done; |
3950 | 3948 | ||
3951 | /* If no Tx and not enough Rx work done, exit the polling mode */ | 3949 | /* If no Tx and not enough Rx work done, exit the polling mode */ |
3952 | if ((tx_cleaned && (work_done < work_to_do)) || | 3950 | if ((!tx_cleaned && (work_done == 0)) || |
3953 | !netif_running(poll_dev)) { | 3951 | !netif_running(poll_dev)) { |
3954 | quit_polling: | 3952 | quit_polling: |
3955 | if (likely(adapter->itr_setting & 3)) | 3953 | if (likely(adapter->itr_setting & 3)) |
@@ -3979,7 +3977,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3979 | #ifdef CONFIG_E1000_NAPI | 3977 | #ifdef CONFIG_E1000_NAPI |
3980 | unsigned int count = 0; | 3978 | unsigned int count = 0; |
3981 | #endif | 3979 | #endif |
3982 | boolean_t cleaned = TRUE; | 3980 | boolean_t cleaned = FALSE; |
3983 | unsigned int total_tx_bytes=0, total_tx_packets=0; | 3981 | unsigned int total_tx_bytes=0, total_tx_packets=0; |
3984 | 3982 | ||
3985 | i = tx_ring->next_to_clean; | 3983 | i = tx_ring->next_to_clean; |
@@ -4013,10 +4011,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
4013 | #ifdef CONFIG_E1000_NAPI | 4011 | #ifdef CONFIG_E1000_NAPI |
4014 | #define E1000_TX_WEIGHT 64 | 4012 | #define E1000_TX_WEIGHT 64 |
4015 | /* weight of a sort for tx, to avoid endless transmit cleanup */ | 4013 | /* weight of a sort for tx, to avoid endless transmit cleanup */ |
4016 | if (count++ == E1000_TX_WEIGHT) { | 4014 | if (count++ == E1000_TX_WEIGHT) break; |
4017 | cleaned = FALSE; | ||
4018 | break; | ||
4019 | } | ||
4020 | #endif | 4015 | #endif |
4021 | } | 4016 | } |
4022 | 4017 | ||
@@ -4230,9 +4225,12 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4230 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); | 4225 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); |
4231 | if (new_skb) { | 4226 | if (new_skb) { |
4232 | skb_reserve(new_skb, NET_IP_ALIGN); | 4227 | skb_reserve(new_skb, NET_IP_ALIGN); |
4233 | memcpy(new_skb->data - NET_IP_ALIGN, | 4228 | skb_copy_to_linear_data_offset(new_skb, |
4234 | skb->data - NET_IP_ALIGN, | 4229 | -NET_IP_ALIGN, |
4235 | length + NET_IP_ALIGN); | 4230 | (skb->data - |
4231 | NET_IP_ALIGN), | ||
4232 | (length + | ||
4233 | NET_IP_ALIGN)); | ||
4236 | /* save the skb in buffer_info as good */ | 4234 | /* save the skb in buffer_info as good */ |
4237 | buffer_info->skb = skb; | 4235 | buffer_info->skb = skb; |
4238 | skb = new_skb; | 4236 | skb = new_skb; |
@@ -4394,7 +4392,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
4394 | PCI_DMA_FROMDEVICE); | 4392 | PCI_DMA_FROMDEVICE); |
4395 | vaddr = kmap_atomic(ps_page->ps_page[0], | 4393 | vaddr = kmap_atomic(ps_page->ps_page[0], |
4396 | KM_SKB_DATA_SOFTIRQ); | 4394 | KM_SKB_DATA_SOFTIRQ); |
4397 | memcpy(skb->tail, vaddr, l1); | 4395 | memcpy(skb_tail_pointer(skb), vaddr, l1); |
4398 | kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); | 4396 | kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); |
4399 | pci_dma_sync_single_for_device(pdev, | 4397 | pci_dma_sync_single_for_device(pdev, |
4400 | ps_page_dma->ps_page_dma[0], | 4398 | ps_page_dma->ps_page_dma[0], |
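Two conversions dominate the e1000 hunks: header arithmetic previously done on the raw pointers (skb->h.raw, skb->nh.raw) is expressed with skb_network_offset()/skb_transport_offset() plus ip_hdr()/tcp_hdrlen(), and the partial-checksum completion offset comes from skb->csum_offset (offset of the checksum field past the transport header) instead of overloading skb->csum. The TSO header-length computation, spelled out under the assumption of a linear Ethernet/IP/TCP frame:

	/* Bytes of L2+L3+L4 header preceding the TSO payload. */
	static inline unsigned int tso_hdr_len(const struct sk_buff *skb)
	{
		unsigned int l2l3 = skb_transport_offset(skb);	/* skb->data .. TCP header  */
		unsigned int l4   = tcp_hdrlen(skb);		/* tcp_hdr(skb)->doff * 4   */

		/* old form: (skb->h.raw - skb->data) + (skb->h.th->doff << 2) */
		return l2l3 + l4;
	}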
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index b4463094c93a..39654e1e2bed 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c | |||
@@ -1591,7 +1591,6 @@ eepro_rx(struct net_device *dev) | |||
1591 | 1591 | ||
1592 | break; | 1592 | break; |
1593 | } | 1593 | } |
1594 | skb->dev = dev; | ||
1595 | skb_reserve(skb,2); | 1594 | skb_reserve(skb,2); |
1596 | 1595 | ||
1597 | if (lp->version == LAN595) | 1596 | if (lp->version == LAN595) |
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c index e28bb1e38f8d..6c267c38df97 100644 --- a/drivers/net/eepro100.c +++ b/drivers/net/eepro100.c | |||
@@ -1793,7 +1793,6 @@ speedo_rx(struct net_device *dev) | |||
1793 | copying to a properly sized skbuff. */ | 1793 | copying to a properly sized skbuff. */ |
1794 | if (pkt_len < rx_copybreak | 1794 | if (pkt_len < rx_copybreak |
1795 | && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { | 1795 | && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { |
1796 | skb->dev = dev; | ||
1797 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 1796 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
1798 | /* 'skb_put()' points to the start of sk_buff data area. */ | 1797 | /* 'skb_put()' points to the start of sk_buff data area. */ |
1799 | pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry], | 1798 | pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry], |
@@ -1805,8 +1804,9 @@ speedo_rx(struct net_device *dev) | |||
1805 | eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0); | 1804 | eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0); |
1806 | skb_put(skb, pkt_len); | 1805 | skb_put(skb, pkt_len); |
1807 | #else | 1806 | #else |
1808 | memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data, | 1807 | skb_copy_from_linear_data(sp->rx_skbuff[entry], |
1809 | pkt_len); | 1808 | skb_put(skb, pkt_len), |
1809 | pkt_len); | ||
1810 | #endif | 1810 | #endif |
1811 | pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry], | 1811 | pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry], |
1812 | sizeof(struct RxFD) + pkt_len, | 1812 | sizeof(struct RxFD) + pkt_len, |
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 3868b8031266..8aaf5ec0c360 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c | |||
@@ -976,7 +976,6 @@ static void eexp_hw_rx_pio(struct net_device *dev) | |||
976 | lp->stats.rx_dropped++; | 976 | lp->stats.rx_dropped++; |
977 | break; | 977 | break; |
978 | } | 978 | } |
979 | skb->dev = dev; | ||
980 | skb_reserve(skb, 2); | 979 | skb_reserve(skb, 2); |
981 | outw(pbuf+10, ioaddr+READ_PTR); | 980 | outw(pbuf+10, ioaddr+READ_PTR); |
982 | insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1); | 981 | insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1); |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 0e4042bc0a48..58364a0ff378 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -391,8 +391,8 @@ static int ehea_poll(struct net_device *dev, int *budget) | |||
391 | if (!skb) | 391 | if (!skb) |
392 | break; | 392 | break; |
393 | } | 393 | } |
394 | memcpy(skb->data, ((char*)cqe) + 64, | 394 | skb_copy_to_linear_data(skb, ((char*)cqe) + 64, |
395 | cqe->num_bytes_transfered - 4); | 395 | cqe->num_bytes_transfered - 4); |
396 | ehea_fill_skb(dev, skb, cqe); | 396 | ehea_fill_skb(dev, skb, cqe); |
397 | } else if (rq == 2) { /* RQ2 */ | 397 | } else if (rq == 2) { /* RQ2 */ |
398 | skb = get_skb_by_index(skb_arr_rq2, | 398 | skb = get_skb_by_index(skb_arr_rq2, |
@@ -1262,8 +1262,8 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) | |||
1262 | static inline void write_ip_start_end(struct ehea_swqe *swqe, | 1262 | static inline void write_ip_start_end(struct ehea_swqe *swqe, |
1263 | const struct sk_buff *skb) | 1263 | const struct sk_buff *skb) |
1264 | { | 1264 | { |
1265 | swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data)); | 1265 | swqe->ip_start = skb_network_offset(skb); |
1266 | swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1); | 1266 | swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1); |
1267 | } | 1267 | } |
1268 | 1268 | ||
1269 | static inline void write_tcp_offset_end(struct ehea_swqe *swqe, | 1269 | static inline void write_tcp_offset_end(struct ehea_swqe *swqe, |
@@ -1300,13 +1300,13 @@ static void write_swqe2_TSO(struct sk_buff *skb, | |||
1300 | /* copy only eth/ip/tcp headers to immediate data and | 1300 | /* copy only eth/ip/tcp headers to immediate data and |
1301 | * the rest of skb->data to sg1entry | 1301 | * the rest of skb->data to sg1entry |
1302 | */ | 1302 | */ |
1303 | headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4); | 1303 | headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); |
1304 | 1304 | ||
1305 | skb_data_size = skb->len - skb->data_len; | 1305 | skb_data_size = skb->len - skb->data_len; |
1306 | 1306 | ||
1307 | if (skb_data_size >= headersize) { | 1307 | if (skb_data_size >= headersize) { |
1308 | /* copy immediate data */ | 1308 | /* copy immediate data */ |
1309 | memcpy(imm_data, skb->data, headersize); | 1309 | skb_copy_from_linear_data(skb, imm_data, headersize); |
1310 | swqe->immediate_data_length = headersize; | 1310 | swqe->immediate_data_length = headersize; |
1311 | 1311 | ||
1312 | if (skb_data_size > headersize) { | 1312 | if (skb_data_size > headersize) { |
@@ -1337,7 +1337,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, | |||
1337 | */ | 1337 | */ |
1338 | if (skb_data_size >= SWQE2_MAX_IMM) { | 1338 | if (skb_data_size >= SWQE2_MAX_IMM) { |
1339 | /* copy immediate data */ | 1339 | /* copy immediate data */ |
1340 | memcpy(imm_data, skb->data, SWQE2_MAX_IMM); | 1340 | skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM); |
1341 | 1341 | ||
1342 | swqe->immediate_data_length = SWQE2_MAX_IMM; | 1342 | swqe->immediate_data_length = SWQE2_MAX_IMM; |
1343 | 1343 | ||
@@ -1350,7 +1350,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, | |||
1350 | swqe->descriptors++; | 1350 | swqe->descriptors++; |
1351 | } | 1351 | } |
1352 | } else { | 1352 | } else { |
1353 | memcpy(imm_data, skb->data, skb_data_size); | 1353 | skb_copy_from_linear_data(skb, imm_data, skb_data_size); |
1354 | swqe->immediate_data_length = skb_data_size; | 1354 | swqe->immediate_data_length = skb_data_size; |
1355 | } | 1355 | } |
1356 | } | 1356 | } |
@@ -1688,6 +1688,7 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, | |||
1688 | struct ehea_swqe *swqe, u32 lkey) | 1688 | struct ehea_swqe *swqe, u32 lkey) |
1689 | { | 1689 | { |
1690 | if (skb->protocol == htons(ETH_P_IP)) { | 1690 | if (skb->protocol == htons(ETH_P_IP)) { |
1691 | const struct iphdr *iph = ip_hdr(skb); | ||
1691 | /* IPv4 */ | 1692 | /* IPv4 */ |
1692 | swqe->tx_control |= EHEA_SWQE_CRC | 1693 | swqe->tx_control |= EHEA_SWQE_CRC |
1693 | | EHEA_SWQE_IP_CHECKSUM | 1694 | | EHEA_SWQE_IP_CHECKSUM |
@@ -1697,15 +1698,15 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, | |||
1697 | 1698 | ||
1698 | write_ip_start_end(swqe, skb); | 1699 | write_ip_start_end(swqe, skb); |
1699 | 1700 | ||
1700 | if (skb->nh.iph->protocol == IPPROTO_UDP) { | 1701 | if (iph->protocol == IPPROTO_UDP) { |
1701 | if ((skb->nh.iph->frag_off & IP_MF) || | 1702 | if ((iph->frag_off & IP_MF) || |
1702 | (skb->nh.iph->frag_off & IP_OFFSET)) | 1703 | (iph->frag_off & IP_OFFSET)) |
1703 | /* IP fragment, so don't change cs */ | 1704 | /* IP fragment, so don't change cs */ |
1704 | swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; | 1705 | swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; |
1705 | else | 1706 | else |
1706 | write_udp_offset_end(swqe, skb); | 1707 | write_udp_offset_end(swqe, skb); |
1707 | 1708 | ||
1708 | } else if (skb->nh.iph->protocol == IPPROTO_TCP) { | 1709 | } else if (iph->protocol == IPPROTO_TCP) { |
1709 | write_tcp_offset_end(swqe, skb); | 1710 | write_tcp_offset_end(swqe, skb); |
1710 | } | 1711 | } |
1711 | 1712 | ||
@@ -1731,10 +1732,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, | |||
1731 | int i; | 1732 | int i; |
1732 | 1733 | ||
1733 | if (skb->protocol == htons(ETH_P_IP)) { | 1734 | if (skb->protocol == htons(ETH_P_IP)) { |
1735 | const struct iphdr *iph = ip_hdr(skb); | ||
1734 | /* IPv4 */ | 1736 | /* IPv4 */ |
1735 | write_ip_start_end(swqe, skb); | 1737 | write_ip_start_end(swqe, skb); |
1736 | 1738 | ||
1737 | if (skb->nh.iph->protocol == IPPROTO_TCP) { | 1739 | if (iph->protocol == IPPROTO_TCP) { |
1738 | swqe->tx_control |= EHEA_SWQE_CRC | 1740 | swqe->tx_control |= EHEA_SWQE_CRC |
1739 | | EHEA_SWQE_IP_CHECKSUM | 1741 | | EHEA_SWQE_IP_CHECKSUM |
1740 | | EHEA_SWQE_TCP_CHECKSUM | 1742 | | EHEA_SWQE_TCP_CHECKSUM |
@@ -1742,9 +1744,9 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, | |||
1742 | 1744 | ||
1743 | write_tcp_offset_end(swqe, skb); | 1745 | write_tcp_offset_end(swqe, skb); |
1744 | 1746 | ||
1745 | } else if (skb->nh.iph->protocol == IPPROTO_UDP) { | 1747 | } else if (iph->protocol == IPPROTO_UDP) { |
1746 | if ((skb->nh.iph->frag_off & IP_MF) || | 1748 | if ((iph->frag_off & IP_MF) || |
1747 | (skb->nh.iph->frag_off & IP_OFFSET)) | 1749 | (iph->frag_off & IP_OFFSET)) |
1748 | /* IP fragment, so don't change cs */ | 1750 | /* IP fragment, so don't change cs */ |
1749 | swqe->tx_control |= EHEA_SWQE_CRC | 1751 | swqe->tx_control |= EHEA_SWQE_CRC |
1750 | | EHEA_SWQE_IMM_DATA_PRESENT; | 1752 | | EHEA_SWQE_IMM_DATA_PRESENT; |
@@ -1770,10 +1772,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, | |||
1770 | /* copy (immediate) data */ | 1772 | /* copy (immediate) data */ |
1771 | if (nfrags == 0) { | 1773 | if (nfrags == 0) { |
1772 | /* data is in a single piece */ | 1774 | /* data is in a single piece */ |
1773 | memcpy(imm_data, skb->data, skb->len); | 1775 | skb_copy_from_linear_data(skb, imm_data, skb->len); |
1774 | } else { | 1776 | } else { |
1775 | /* first copy data from the skb->data buffer ... */ | 1777 | /* first copy data from the skb->data buffer ... */ |
1776 | memcpy(imm_data, skb->data, skb->len - skb->data_len); | 1778 | skb_copy_from_linear_data(skb, imm_data, |
1779 | skb->len - skb->data_len); | ||
1777 | imm_data += skb->len - skb->data_len; | 1780 | imm_data += skb->len - skb->data_len; |
1778 | 1781 | ||
1779 | /* ... then copy data from the fragments */ | 1782 | /* ... then copy data from the fragments */ |
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 3a6a83d3ee1c..4e3f14c9c717 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c | |||
@@ -934,7 +934,6 @@ static void epic_init_ring(struct net_device *dev) | |||
934 | ep->rx_skbuff[i] = skb; | 934 | ep->rx_skbuff[i] = skb; |
935 | if (skb == NULL) | 935 | if (skb == NULL) |
936 | break; | 936 | break; |
937 | skb->dev = dev; /* Mark as being used by this device. */ | ||
938 | skb_reserve(skb, 2); /* 16 byte align the IP header. */ | 937 | skb_reserve(skb, 2); /* 16 byte align the IP header. */ |
939 | ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, | 938 | ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, |
940 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); | 939 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); |
@@ -1199,7 +1198,6 @@ static int epic_rx(struct net_device *dev, int budget) | |||
1199 | to a minimally-sized skbuff. */ | 1198 | to a minimally-sized skbuff. */ |
1200 | if (pkt_len < rx_copybreak | 1199 | if (pkt_len < rx_copybreak |
1201 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1200 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1202 | skb->dev = dev; | ||
1203 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1201 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1204 | pci_dma_sync_single_for_cpu(ep->pci_dev, | 1202 | pci_dma_sync_single_for_cpu(ep->pci_dev, |
1205 | ep->rx_ring[entry].bufaddr, | 1203 | ep->rx_ring[entry].bufaddr, |
@@ -1236,7 +1234,6 @@ static int epic_rx(struct net_device *dev, int budget) | |||
1236 | skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz); | 1234 | skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz); |
1237 | if (skb == NULL) | 1235 | if (skb == NULL) |
1238 | break; | 1236 | break; |
1239 | skb->dev = dev; /* Mark as being used by this device. */ | ||
1240 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 1237 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
1241 | ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, | 1238 | ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, |
1242 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1239 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); |
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index 93283e386f3a..04abf59e5007 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c | |||
@@ -1175,7 +1175,6 @@ static void eth16i_rx(struct net_device *dev) | |||
1175 | break; | 1175 | break; |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | skb->dev = dev; | ||
1179 | skb_reserve(skb,2); | 1178 | skb_reserve(skb,2); |
1180 | 1179 | ||
1181 | /* | 1180 | /* |
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c index 714ea1176ec7..cb0792c187ba 100644 --- a/drivers/net/ewrk3.c +++ b/drivers/net/ewrk3.c | |||
@@ -993,7 +993,6 @@ static int ewrk3_rx(struct net_device *dev) | |||
993 | 993 | ||
994 | if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 994 | if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
995 | unsigned char *p; | 995 | unsigned char *p; |
996 | skb->dev = dev; | ||
997 | skb_reserve(skb, 2); /* Align to 16 bytes */ | 996 | skb_reserve(skb, 2); /* Align to 16 bytes */ |
998 | p = skb_put(skb, pkt_len); | 997 | p = skb_put(skb, pkt_len); |
999 | 998 | ||
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 38a13f440530..abe9b089c610 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c | |||
@@ -1719,7 +1719,6 @@ static int netdev_rx(struct net_device *dev) | |||
1719 | to a minimally-sized skbuff. */ | 1719 | to a minimally-sized skbuff. */ |
1720 | if (pkt_len < rx_copybreak && | 1720 | if (pkt_len < rx_copybreak && |
1721 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1721 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1722 | skb->dev = dev; | ||
1723 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1722 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1724 | pci_dma_sync_single_for_cpu(np->pci_dev, | 1723 | pci_dma_sync_single_for_cpu(np->pci_dev, |
1725 | np->cur_rx->buffer, | 1724 | np->cur_rx->buffer, |
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 6764281b4531..255b09124e11 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -647,7 +647,6 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { | |||
647 | printk("%s: Memory squeeze, dropping packet.\n", dev->name); | 647 | printk("%s: Memory squeeze, dropping packet.\n", dev->name); |
648 | fep->stats.rx_dropped++; | 648 | fep->stats.rx_dropped++; |
649 | } else { | 649 | } else { |
650 | skb->dev = dev; | ||
651 | skb_put(skb,pkt_len-4); /* Make room */ | 650 | skb_put(skb,pkt_len-4); /* Make room */ |
652 | eth_copy_and_sum(skb, data, pkt_len-4, 0); | 651 | eth_copy_and_sum(skb, data, pkt_len-4, 0); |
653 | skb->protocol=eth_type_trans(skb,dev); | 652 | skb->protocol=eth_type_trans(skb,dev); |
diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c index 77f747a5afa7..e824d5d231af 100644 --- a/drivers/net/fec_8xx/fec_main.c +++ b/drivers/net/fec_8xx/fec_main.c | |||
@@ -551,7 +551,9 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) | |||
551 | skbn = dev_alloc_skb(pkt_len + 2); | 551 | skbn = dev_alloc_skb(pkt_len + 2); |
552 | if (skbn != NULL) { | 552 | if (skbn != NULL) { |
553 | skb_reserve(skbn, 2); /* align IP header */ | 553 | skb_reserve(skbn, 2); /* align IP header */ |
554 | memcpy(skbn->data, skb->data, pkt_len); | 554 | skb_copy_from_linear_data(skb, |
555 | skbn->data, | ||
556 | pkt_len); | ||
555 | /* swap */ | 557 | /* swap */ |
556 | skbt = skb; | 558 | skbt = skb; |
557 | skb = skbn; | 559 | skb = skbn; |
@@ -561,7 +563,6 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) | |||
561 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); | 563 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); |
562 | 564 | ||
563 | if (skbn != NULL) { | 565 | if (skbn != NULL) { |
564 | skb->dev = dev; | ||
565 | skb_put(skb, pkt_len); /* Make room */ | 566 | skb_put(skb, pkt_len); /* Make room */ |
566 | skb->protocol = eth_type_trans(skb, dev); | 567 | skb->protocol = eth_type_trans(skb, dev); |
567 | received++; | 568 | received++; |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index d04214e4e581..7a018027fcc0 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -1385,11 +1385,12 @@ static int nv_alloc_rx(struct net_device *dev) | |||
1385 | while (np->put_rx.orig != less_rx) { | 1385 | while (np->put_rx.orig != less_rx) { |
1386 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); | 1386 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); |
1387 | if (skb) { | 1387 | if (skb) { |
1388 | skb->dev = dev; | ||
1389 | np->put_rx_ctx->skb = skb; | 1388 | np->put_rx_ctx->skb = skb; |
1390 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, | 1389 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, |
1391 | skb->end-skb->data, PCI_DMA_FROMDEVICE); | 1390 | skb->data, |
1392 | np->put_rx_ctx->dma_len = skb->end-skb->data; | 1391 | skb_tailroom(skb), |
1392 | PCI_DMA_FROMDEVICE); | ||
1393 | np->put_rx_ctx->dma_len = skb_tailroom(skb); | ||
1393 | np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); | 1394 | np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); |
1394 | wmb(); | 1395 | wmb(); |
1395 | np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); | 1396 | np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); |
@@ -1416,11 +1417,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev) | |||
1416 | while (np->put_rx.ex != less_rx) { | 1417 | while (np->put_rx.ex != less_rx) { |
1417 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); | 1418 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); |
1418 | if (skb) { | 1419 | if (skb) { |
1419 | skb->dev = dev; | ||
1420 | np->put_rx_ctx->skb = skb; | 1420 | np->put_rx_ctx->skb = skb; |
1421 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, | 1421 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, |
1422 | skb->end-skb->data, PCI_DMA_FROMDEVICE); | 1422 | skb->data, |
1423 | np->put_rx_ctx->dma_len = skb->end-skb->data; | 1423 | skb_tailroom(skb), |
1424 | PCI_DMA_FROMDEVICE); | ||
1425 | np->put_rx_ctx->dma_len = skb_tailroom(skb); | ||
1424 | np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32; | 1426 | np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32; |
1425 | np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF; | 1427 | np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF; |
1426 | wmb(); | 1428 | wmb(); |
@@ -1604,8 +1606,9 @@ static void nv_drain_rx(struct net_device *dev) | |||
1604 | wmb(); | 1606 | wmb(); |
1605 | if (np->rx_skb[i].skb) { | 1607 | if (np->rx_skb[i].skb) { |
1606 | pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, | 1608 | pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, |
1607 | np->rx_skb[i].skb->end-np->rx_skb[i].skb->data, | 1609 | (skb_end_pointer(np->rx_skb[i].skb) - |
1608 | PCI_DMA_FROMDEVICE); | 1610 | np->rx_skb[i].skb->data), |
1611 | PCI_DMA_FROMDEVICE); | ||
1609 | dev_kfree_skb(np->rx_skb[i].skb); | 1612 | dev_kfree_skb(np->rx_skb[i].skb); |
1610 | np->rx_skb[i].skb = NULL; | 1613 | np->rx_skb[i].skb = NULL; |
1611 | } | 1614 | } |
@@ -4376,11 +4379,12 @@ static int nv_loopback_test(struct net_device *dev) | |||
4376 | ret = 0; | 4379 | ret = 0; |
4377 | goto out; | 4380 | goto out; |
4378 | } | 4381 | } |
4382 | test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, | ||
4383 | skb_tailroom(tx_skb), | ||
4384 | PCI_DMA_FROMDEVICE); | ||
4379 | pkt_data = skb_put(tx_skb, pkt_len); | 4385 | pkt_data = skb_put(tx_skb, pkt_len); |
4380 | for (i = 0; i < pkt_len; i++) | 4386 | for (i = 0; i < pkt_len; i++) |
4381 | pkt_data[i] = (u8)(i & 0xff); | 4387 | pkt_data[i] = (u8)(i & 0xff); |
4382 | test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, | ||
4383 | tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE); | ||
4384 | 4388 | ||
4385 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 4389 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
4386 | np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); | 4390 | np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); |
@@ -4437,7 +4441,7 @@ static int nv_loopback_test(struct net_device *dev) | |||
4437 | } | 4441 | } |
4438 | 4442 | ||
4439 | pci_unmap_page(np->pci_dev, test_dma_addr, | 4443 | pci_unmap_page(np->pci_dev, test_dma_addr, |
4440 | tx_skb->end-tx_skb->data, | 4444 | (skb_end_pointer(tx_skb) - tx_skb->data), |
4441 | PCI_DMA_TODEVICE); | 4445 | PCI_DMA_TODEVICE); |
4442 | dev_kfree_skb_any(tx_skb); | 4446 | dev_kfree_skb_any(tx_skb); |
4443 | out: | 4447 | out: |
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 4a05c14bf7ec..e2ddd617493a 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -160,7 +160,8 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) | |||
160 | skbn = dev_alloc_skb(pkt_len + 2); | 160 | skbn = dev_alloc_skb(pkt_len + 2); |
161 | if (skbn != NULL) { | 161 | if (skbn != NULL) { |
162 | skb_reserve(skbn, 2); /* align IP header */ | 162 | skb_reserve(skbn, 2); /* align IP header */ |
163 | memcpy(skbn->data, skb->data, pkt_len); | 163 | skb_copy_from_linear_data(skb, |
164 | skbn->data, pkt_len); | ||
164 | /* swap */ | 165 | /* swap */ |
165 | skbt = skb; | 166 | skbt = skb; |
166 | skb = skbn; | 167 | skb = skbn; |
@@ -170,7 +171,6 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) | |||
170 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); | 171 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); |
171 | 172 | ||
172 | if (skbn != NULL) { | 173 | if (skbn != NULL) { |
173 | skb->dev = dev; | ||
174 | skb_put(skb, pkt_len); /* Make room */ | 174 | skb_put(skb, pkt_len); /* Make room */ |
175 | skb->protocol = eth_type_trans(skb, dev); | 175 | skb->protocol = eth_type_trans(skb, dev); |
176 | received++; | 176 | received++; |
@@ -294,7 +294,8 @@ static int fs_enet_rx_non_napi(struct net_device *dev) | |||
294 | skbn = dev_alloc_skb(pkt_len + 2); | 294 | skbn = dev_alloc_skb(pkt_len + 2); |
295 | if (skbn != NULL) { | 295 | if (skbn != NULL) { |
296 | skb_reserve(skbn, 2); /* align IP header */ | 296 | skb_reserve(skbn, 2); /* align IP header */ |
297 | memcpy(skbn->data, skb->data, pkt_len); | 297 | skb_copy_from_linear_data(skb, |
298 | skbn->data, pkt_len); | ||
298 | /* swap */ | 299 | /* swap */ |
299 | skbt = skb; | 300 | skbt = skb; |
300 | skb = skbn; | 301 | skb = skbn; |
@@ -304,7 +305,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev) | |||
304 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); | 305 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); |
305 | 306 | ||
306 | if (skbn != NULL) { | 307 | if (skbn != NULL) { |
307 | skb->dev = dev; | ||
308 | skb_put(skb, pkt_len); /* Make room */ | 308 | skb_put(skb, pkt_len); /* Make room */ |
309 | skb->protocol = eth_type_trans(skb, dev); | 309 | skb->protocol = eth_type_trans(skb, dev); |
310 | received++; | 310 | received++; |
@@ -516,7 +516,6 @@ void fs_init_bds(struct net_device *dev) | |||
516 | break; | 516 | break; |
517 | } | 517 | } |
518 | fep->rx_skbuff[i] = skb; | 518 | fep->rx_skbuff[i] = skb; |
519 | skb->dev = dev; | ||
520 | CBDW_BUFADDR(bdp, | 519 | CBDW_BUFADDR(bdp, |
521 | dma_map_single(fep->dev, skb->data, | 520 | dma_map_single(fep->dev, skb->data, |
522 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | 521 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index d981d4c41dd3..b666a0cc0642 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -942,18 +942,18 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) | |||
942 | 942 | ||
943 | /* Tell the controller what the protocol is */ | 943 | /* Tell the controller what the protocol is */ |
944 | /* And provide the already calculated phcs */ | 944 | /* And provide the already calculated phcs */ |
945 | if (skb->nh.iph->protocol == IPPROTO_UDP) { | 945 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
946 | flags |= TXFCB_UDP; | 946 | flags |= TXFCB_UDP; |
947 | fcb->phcs = skb->h.uh->check; | 947 | fcb->phcs = udp_hdr(skb)->check; |
948 | } else | 948 | } else |
949 | fcb->phcs = skb->h.th->check; | 949 | fcb->phcs = tcp_hdr(skb)->check; |
950 | 950 | ||
951 | /* l3os is the distance between the start of the | 951 | /* l3os is the distance between the start of the |
952 | * frame (skb->data) and the start of the IP hdr. | 952 | * frame (skb->data) and the start of the IP hdr. |
953 | * l4os is the distance between the start of the | 953 | * l4os is the distance between the start of the |
954 | * l3 hdr and the l4 hdr */ | 954 | * l3 hdr and the l4 hdr */ |
955 | fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN); | 955 | fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); |
956 | fcb->l4os = (u16)(skb->h.raw - skb->nh.raw); | 956 | fcb->l4os = skb_network_header_len(skb); |
957 | 957 | ||
958 | fcb->flags = flags; | 958 | fcb->flags = flags; |
959 | } | 959 | } |
@@ -1295,8 +1295,6 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) | |||
1295 | */ | 1295 | */ |
1296 | skb_reserve(skb, alignamount); | 1296 | skb_reserve(skb, alignamount); |
1297 | 1297 | ||
1298 | skb->dev = dev; | ||
1299 | |||
1300 | bdp->bufPtr = dma_map_single(NULL, skb->data, | 1298 | bdp->bufPtr = dma_map_single(NULL, skb->data, |
1301 | priv->rx_buffer_size, DMA_FROM_DEVICE); | 1299 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
1302 | 1300 | ||
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index c3c0d67fc383..2521b111b3a5 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c | |||
@@ -1568,7 +1568,6 @@ static int hamachi_rx(struct net_device *dev) | |||
1568 | printk(KERN_ERR "%s: rx_copybreak non-zero " | 1568 | printk(KERN_ERR "%s: rx_copybreak non-zero " |
1569 | "not good with RX_CHECKSUM\n", dev->name); | 1569 | "not good with RX_CHECKSUM\n", dev->name); |
1570 | #endif | 1570 | #endif |
1571 | skb->dev = dev; | ||
1572 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1571 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1573 | pci_dma_sync_single_for_cpu(hmp->pci_dev, | 1572 | pci_dma_sync_single_for_cpu(hmp->pci_dev, |
1574 | hmp->rx_ring[entry].addr, | 1573 | hmp->rx_ring[entry].addr, |
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index 59214e74b9cf..30baf6ecfc63 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c | |||
@@ -75,12 +75,14 @@ | |||
75 | #include <linux/ioport.h> | 75 | #include <linux/ioport.h> |
76 | #include <linux/string.h> | 76 | #include <linux/string.h> |
77 | #include <linux/init.h> | 77 | #include <linux/init.h> |
78 | #include <asm/uaccess.h> | ||
79 | #include <asm/io.h> | ||
80 | #include <linux/hdlcdrv.h> | 78 | #include <linux/hdlcdrv.h> |
81 | #include <linux/baycom.h> | 79 | #include <linux/baycom.h> |
82 | #include <linux/jiffies.h> | 80 | #include <linux/jiffies.h> |
83 | 81 | ||
82 | #include <asm/uaccess.h> | ||
83 | #include <asm/io.h> | ||
84 | #include <asm/irq.h> | ||
85 | |||
84 | /* --------------------------------------------------------------------- */ | 86 | /* --------------------------------------------------------------------- */ |
85 | 87 | ||
86 | #define BAYCOM_DEBUG | 88 | #define BAYCOM_DEBUG |
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index d2542697e298..656f2789c9ba 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c | |||
@@ -282,7 +282,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) | |||
282 | } | 282 | } |
283 | 283 | ||
284 | skb->protocol = ax25_type_trans(skb, dev); | 284 | skb->protocol = ax25_type_trans(skb, dev); |
285 | skb->nh.raw = skb->data; | 285 | skb_reset_network_header(skb); |
286 | dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); | 286 | dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); |
287 | bpq->stats.tx_packets++; | 287 | bpq->stats.tx_packets++; |
288 | bpq->stats.tx_bytes+=skb->len; | 288 | bpq->stats.tx_bytes+=skb->len; |
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 0fbb414b5a4d..3be8c5047599 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c | |||
@@ -930,7 +930,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
930 | 930 | ||
931 | /* Transfer data to DMA buffer */ | 931 | /* Transfer data to DMA buffer */ |
932 | i = priv->tx_head; | 932 | i = priv->tx_head; |
933 | memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1); | 933 | skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1); |
934 | priv->tx_len[i] = skb->len - 1; | 934 | priv->tx_len[i] = skb->len - 1; |
935 | 935 | ||
936 | /* Clear interrupts while we touch our circular buffers */ | 936 | /* Clear interrupts while we touch our circular buffers */ |
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index f5a17ad9d3d6..b33adc6a340b 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c | |||
@@ -317,7 +317,9 @@ void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s) | |||
317 | dev_kfree_skb_irq(skb); | 317 | dev_kfree_skb_irq(skb); |
318 | break; | 318 | break; |
319 | } | 319 | } |
320 | memcpy(s->hdlctx.buffer, skb->data+1, pkt_len); | 320 | skb_copy_from_linear_data_offset(skb, 1, |
321 | s->hdlctx.buffer, | ||
322 | pkt_len); | ||
321 | dev_kfree_skb_irq(skb); | 323 | dev_kfree_skb_irq(skb); |
322 | s->hdlctx.bp = s->hdlctx.buffer; | 324 | s->hdlctx.bp = s->hdlctx.buffer; |
323 | append_crc_ccitt(s->hdlctx.buffer, pkt_len); | 325 | append_crc_ccitt(s->hdlctx.buffer, pkt_len); |
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index ee3ea4fa729f..467559debfd6 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c | |||
@@ -638,7 +638,9 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp) | |||
638 | dev_kfree_skb_any(skb); | 638 | dev_kfree_skb_any(skb); |
639 | break; | 639 | break; |
640 | } | 640 | } |
641 | memcpy(yp->tx_buf, skb->data + 1, yp->tx_len); | 641 | skb_copy_from_linear_data_offset(skb, 1, |
642 | yp->tx_buf, | ||
643 | yp->tx_len); | ||
642 | dev_kfree_skb_any(skb); | 644 | dev_kfree_skb_any(skb); |
643 | yp->tx_count = 0; | 645 | yp->tx_count = 0; |
644 | yp->tx_crcl = 0x21; | 646 | yp->tx_crcl = 0x21; |
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index 7dc5185aa2c0..8118a6750b61 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c | |||
@@ -1816,7 +1816,6 @@ static void hp100_rx(struct net_device *dev) | |||
1816 | u_char *ptr; | 1816 | u_char *ptr; |
1817 | 1817 | ||
1818 | skb_reserve(skb,2); | 1818 | skb_reserve(skb,2); |
1819 | skb->dev = dev; | ||
1820 | 1819 | ||
1821 | /* ptr to start of the sk_buff data area */ | 1820 | /* ptr to start of the sk_buff data area */ |
1822 | skb_put(skb, pkt_len); | 1821 | skb_put(skb, pkt_len); |
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c index dd8ad8746825..3d82d46f4998 100644 --- a/drivers/net/ibm_emac/ibm_emac_core.c +++ b/drivers/net/ibm_emac/ibm_emac_core.c | |||
@@ -1338,7 +1338,7 @@ static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot) | |||
1338 | dev_kfree_skb(dev->rx_sg_skb); | 1338 | dev_kfree_skb(dev->rx_sg_skb); |
1339 | dev->rx_sg_skb = NULL; | 1339 | dev->rx_sg_skb = NULL; |
1340 | } else { | 1340 | } else { |
1341 | cacheable_memcpy(dev->rx_sg_skb->tail, | 1341 | cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb), |
1342 | dev->rx_skb[slot]->data, len); | 1342 | dev->rx_skb[slot]->data, len); |
1343 | skb_put(dev->rx_sg_skb, len); | 1343 | skb_put(dev->rx_sg_skb, len); |
1344 | emac_recycle_rx_skb(dev, slot, len); | 1344 | emac_recycle_rx_skb(dev, slot, len); |
@@ -1398,7 +1398,6 @@ static int emac_poll_rx(void *param, int budget) | |||
1398 | 1398 | ||
1399 | skb_put(skb, len); | 1399 | skb_put(skb, len); |
1400 | push_packet: | 1400 | push_packet: |
1401 | skb->dev = dev->ndev; | ||
1402 | skb->protocol = eth_type_trans(skb, dev->ndev); | 1401 | skb->protocol = eth_type_trans(skb, dev->ndev); |
1403 | emac_rx_csum(dev, skb, ctrl); | 1402 | emac_rx_csum(dev, skb, ctrl); |
1404 | 1403 | ||
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c index 3f946c811511..fe85d6fcba33 100644 --- a/drivers/net/ibmlana.c +++ b/drivers/net/ibmlana.c | |||
@@ -601,7 +601,6 @@ static void irqrx_handler(struct net_device *dev) | |||
601 | 601 | ||
602 | /* set up skb fields */ | 602 | /* set up skb fields */ |
603 | 603 | ||
604 | skb->dev = dev; | ||
605 | skb->protocol = eth_type_trans(skb, dev); | 604 | skb->protocol = eth_type_trans(skb, dev); |
606 | skb->ip_summed = CHECKSUM_NONE; | 605 | skb->ip_summed = CHECKSUM_NONE; |
607 | 606 | ||
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 458db0538a9a..0573fcfcb2c4 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -798,7 +798,6 @@ static int ibmveth_poll(struct net_device *netdev, int *budget) | |||
798 | 798 | ||
799 | skb_reserve(skb, offset); | 799 | skb_reserve(skb, offset); |
800 | skb_put(skb, length); | 800 | skb_put(skb, length); |
801 | skb->dev = netdev; | ||
802 | skb->protocol = eth_type_trans(skb, netdev); | 801 | skb->protocol = eth_type_trans(skb, netdev); |
803 | 802 | ||
804 | netif_receive_skb(skb); /* send it up */ | 803 | netif_receive_skb(skb); /* send it up */ |
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index 4ad780719a84..f749e07c6425 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c | |||
@@ -633,8 +633,6 @@ static inline void ioc3_rx(struct ioc3_private *ip) | |||
633 | 633 | ||
634 | ip->rx_skbs[rx_entry] = NULL; /* Poison */ | 634 | ip->rx_skbs[rx_entry] = NULL; /* Poison */ |
635 | 635 | ||
636 | new_skb->dev = priv_netdev(ip); | ||
637 | |||
638 | /* Because we reserve afterwards. */ | 636 | /* Because we reserve afterwards. */ |
639 | skb_put(new_skb, (1664 + RX_OFFSET)); | 637 | skb_put(new_skb, (1664 + RX_OFFSET)); |
640 | rxb = (struct ioc3_erxbuf *) new_skb->data; | 638 | rxb = (struct ioc3_erxbuf *) new_skb->data; |
@@ -940,7 +938,6 @@ static void ioc3_alloc_rings(struct net_device *dev) | |||
940 | } | 938 | } |
941 | 939 | ||
942 | ip->rx_skbs[i] = skb; | 940 | ip->rx_skbs[i] = skb; |
943 | skb->dev = dev; | ||
944 | 941 | ||
945 | /* Because we reserve afterwards. */ | 942 | /* Because we reserve afterwards. */ |
946 | skb_put(skb, (1664 + RX_OFFSET)); | 943 | skb_put(skb, (1664 + RX_OFFSET)); |
@@ -1396,9 +1393,9 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1396 | * manually. | 1393 | * manually. |
1397 | */ | 1394 | */ |
1398 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1395 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1399 | int proto = ntohs(skb->nh.iph->protocol); | 1396 | const struct iphdr *ih = ip_hdr(skb); |
1397 | const int proto = ntohs(ih->protocol); | ||
1400 | unsigned int csoff; | 1398 | unsigned int csoff; |
1401 | struct iphdr *ih = skb->nh.iph; | ||
1402 | uint32_t csum, ehsum; | 1399 | uint32_t csum, ehsum; |
1403 | uint16_t *eh; | 1400 | uint16_t *eh; |
1404 | 1401 | ||
@@ -1425,11 +1422,11 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1425 | csoff = ETH_HLEN + (ih->ihl << 2); | 1422 | csoff = ETH_HLEN + (ih->ihl << 2); |
1426 | if (proto == IPPROTO_UDP) { | 1423 | if (proto == IPPROTO_UDP) { |
1427 | csoff += offsetof(struct udphdr, check); | 1424 | csoff += offsetof(struct udphdr, check); |
1428 | skb->h.uh->check = csum; | 1425 | udp_hdr(skb)->check = csum; |
1429 | } | 1426 | } |
1430 | if (proto == IPPROTO_TCP) { | 1427 | if (proto == IPPROTO_TCP) { |
1431 | csoff += offsetof(struct tcphdr, check); | 1428 | csoff += offsetof(struct tcphdr, check); |
1432 | skb->h.th->check = csum; | 1429 | tcp_hdr(skb)->check = csum; |
1433 | } | 1430 | } |
1434 | 1431 | ||
1435 | w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); | 1432 | w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); |
@@ -1446,7 +1443,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1446 | 1443 | ||
1447 | if (len <= 104) { | 1444 | if (len <= 104) { |
1448 | /* Short packet, let's copy it directly into the ring. */ | 1445 | /* Short packet, let's copy it directly into the ring. */ |
1449 | memcpy(desc->data, skb->data, skb->len); | 1446 | skb_copy_from_linear_data(skb, desc->data, skb->len); |
1450 | if (len < ETH_ZLEN) { | 1447 | if (len < ETH_ZLEN) { |
1451 | /* Very short packet, pad with zeros at the end. */ | 1448 | /* Very short packet, pad with zeros at the end. */ |
1452 | memset(desc->data + len, 0, ETH_ZLEN - len); | 1449 | memset(desc->data + len, 0, ETH_ZLEN - len); |
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index cebf8c374bc5..f9c889c0dd07 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c | |||
@@ -1472,9 +1472,8 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1472 | 1472 | ||
1473 | self->stats.tx_bytes += skb->len; | 1473 | self->stats.tx_bytes += skb->len; |
1474 | 1474 | ||
1475 | memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, | 1475 | skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, |
1476 | skb->len); | 1476 | skb->len); |
1477 | |||
1478 | self->tx_fifo.len++; | 1477 | self->tx_fifo.len++; |
1479 | self->tx_fifo.free++; | 1478 | self->tx_fifo.free++; |
1480 | 1479 | ||
@@ -1924,7 +1923,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) | |||
1924 | 1923 | ||
1925 | /* Copy frame without CRC, CRC is removed by hardware*/ | 1924 | /* Copy frame without CRC, CRC is removed by hardware*/ |
1926 | skb_put(skb, len); | 1925 | skb_put(skb, len); |
1927 | memcpy(skb->data, self->rx_buff.data, len); | 1926 | skb_copy_to_linear_data(skb, self->rx_buff.data, len); |
1928 | 1927 | ||
1929 | /* Move to next frame */ | 1928 | /* Move to next frame */ |
1930 | self->rx_buff.data += len; | 1929 | self->rx_buff.data += len; |
@@ -1932,7 +1931,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) | |||
1932 | self->stats.rx_packets++; | 1931 | self->stats.rx_packets++; |
1933 | 1932 | ||
1934 | skb->dev = self->netdev; | 1933 | skb->dev = self->netdev; |
1935 | skb->mac.raw = skb->data; | 1934 | skb_reset_mac_header(skb); |
1936 | skb->protocol = htons(ETH_P_IRDA); | 1935 | skb->protocol = htons(ETH_P_IRDA); |
1937 | netif_rx(skb); | 1936 | netif_rx(skb); |
1938 | self->netdev->last_rx = jiffies; | 1937 | self->netdev->last_rx = jiffies; |
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index 37914dc5b90e..4dbdfaaf37bf 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c | |||
@@ -526,7 +526,7 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) | |||
526 | 526 | ||
527 | if (aup->speed == 4000000) { | 527 | if (aup->speed == 4000000) { |
528 | /* FIR */ | 528 | /* FIR */ |
529 | memcpy((void *)pDB->vaddr, skb->data, skb->len); | 529 | skb_copy_from_linear_data(skb, pDB->vaddr, skb->len); |
530 | ptxd->count_0 = skb->len & 0xff; | 530 | ptxd->count_0 = skb->len & 0xff; |
531 | ptxd->count_1 = (skb->len >> 8) & 0xff; | 531 | ptxd->count_1 = (skb->len >> 8) & 0xff; |
532 | 532 | ||
@@ -604,9 +604,9 @@ static int au1k_irda_rx(struct net_device *dev) | |||
604 | skb_put(skb, count); | 604 | skb_put(skb, count); |
605 | else | 605 | else |
606 | skb_put(skb, count-2); | 606 | skb_put(skb, count-2); |
607 | memcpy(skb->data, (void *)pDB->vaddr, count-2); | 607 | skb_copy_to_linear_data(skb, pDB->vaddr, count - 2); |
608 | skb->dev = dev; | 608 | skb->dev = dev; |
609 | skb->mac.raw = skb->data; | 609 | skb_reset_mac_header(skb); |
610 | skb->protocol = htons(ETH_P_IRDA); | 610 | skb->protocol = htons(ETH_P_IRDA); |
611 | netif_rx(skb); | 611 | netif_rx(skb); |
612 | prxd->count_0 = 0; | 612 | prxd->count_0 = 0; |
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 11af0ae7510e..3ca47bf6dfec 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c | |||
@@ -1119,7 +1119,7 @@ dumpbufs(skb->data,skb->len,'>'); | |||
1119 | else | 1119 | else |
1120 | { | 1120 | { |
1121 | len = skb->len; | 1121 | len = skb->len; |
1122 | memcpy (self->tx_bufs[self->txs], skb->data, len); | 1122 | skb_copy_from_linear_data(skb, self->tx_bufs[self->txs], len); |
1123 | } | 1123 | } |
1124 | self->ring->tx[self->txs].len = len & 0x0fff; | 1124 | self->ring->tx[self->txs].len = len & 0x0fff; |
1125 | 1125 | ||
@@ -1282,11 +1282,11 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); | |||
1282 | skb_reserve (skb, 1); | 1282 | skb_reserve (skb, 1); |
1283 | 1283 | ||
1284 | skb_put (skb, len); | 1284 | skb_put (skb, len); |
1285 | memcpy (skb->data, self->rx_bufs[self->rxs], len); | 1285 | skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs], |
1286 | 1286 | len); | |
1287 | self->stats.rx_packets++; | 1287 | self->stats.rx_packets++; |
1288 | skb->dev = self->netdev; | 1288 | skb->dev = self->netdev; |
1289 | skb->mac.raw = skb->data; | 1289 | skb_reset_mac_header(skb); |
1290 | skb->protocol = htons (ETH_P_IRDA); | 1290 | skb->protocol = htons (ETH_P_IRDA); |
1291 | } | 1291 | } |
1292 | else | 1292 | else |
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 1d510bdc9b84..0ac240ca905b 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c | |||
@@ -441,7 +441,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
441 | goto drop; | 441 | goto drop; |
442 | } | 442 | } |
443 | 443 | ||
444 | memcpy(self->tx_buff + self->header_length, skb->data, skb->len); | 444 | skb_copy_from_linear_data(skb, self->tx_buff + self->header_length, skb->len); |
445 | 445 | ||
446 | /* Change setting for next frame */ | 446 | /* Change setting for next frame */ |
447 | if (self->capability & IUC_STIR421X) { | 447 | if (self->capability & IUC_STIR421X) { |
@@ -902,7 +902,7 @@ static void irda_usb_receive(struct urb *urb) | |||
902 | 902 | ||
903 | if(docopy) { | 903 | if(docopy) { |
904 | /* Copy packet, so we can recycle the original */ | 904 | /* Copy packet, so we can recycle the original */ |
905 | memcpy(newskb->data, skb->data, urb->actual_length); | 905 | skb_copy_from_linear_data(skb, newskb->data, urb->actual_length); |
906 | /* Deliver this new skb */ | 906 | /* Deliver this new skb */ |
907 | dataskb = newskb; | 907 | dataskb = newskb; |
908 | /* And hook the old skb to the URB | 908 | /* And hook the old skb to the URB |
@@ -921,7 +921,7 @@ static void irda_usb_receive(struct urb *urb) | |||
921 | 921 | ||
922 | /* Ask the networking layer to queue the packet for the IrDA stack */ | 922 | /* Ask the networking layer to queue the packet for the IrDA stack */ |
923 | dataskb->dev = self->netdev; | 923 | dataskb->dev = self->netdev; |
924 | dataskb->mac.raw = dataskb->data; | 924 | skb_reset_mac_header(dataskb); |
925 | dataskb->protocol = htons(ETH_P_IRDA); | 925 | dataskb->protocol = htons(ETH_P_IRDA); |
926 | len = dataskb->len; | 926 | len = dataskb->len; |
927 | netif_rx(dataskb); | 927 | netif_rx(dataskb); |
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index f0c61f3b2a82..0de867288a47 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c | |||
@@ -200,14 +200,14 @@ static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs) | |||
200 | /* Setup a communication between mcs7780 and agilent chip. */ | 200 | /* Setup a communication between mcs7780 and agilent chip. */ |
201 | static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs) | 201 | static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs) |
202 | { | 202 | { |
203 | IRDA_WARNING("This transceiver type is not supported yet."); | 203 | IRDA_WARNING("This transceiver type is not supported yet.\n"); |
204 | return 1; | 204 | return 1; |
205 | } | 205 | } |
206 | 206 | ||
207 | /* Setup a communication between mcs7780 and sharp chip. */ | 207 | /* Setup a communication between mcs7780 and sharp chip. */ |
208 | static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs) | 208 | static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs) |
209 | { | 209 | { |
210 | IRDA_WARNING("This transceiver type is not supported yet."); | 210 | IRDA_WARNING("This transceiver type is not supported yet.\n"); |
211 | return 1; | 211 | return 1; |
212 | } | 212 | } |
213 | 213 | ||
@@ -279,7 +279,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) | |||
279 | break; | 279 | break; |
280 | 280 | ||
281 | default: | 281 | default: |
282 | IRDA_WARNING("Unknown transceiver type: %d", | 282 | IRDA_WARNING("Unknown transceiver type: %d\n", |
283 | mcs->transceiver_type); | 283 | mcs->transceiver_type); |
284 | ret = 1; | 284 | ret = 1; |
285 | } | 285 | } |
@@ -318,7 +318,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) | |||
318 | return ret; | 318 | return ret; |
319 | 319 | ||
320 | error: | 320 | error: |
321 | IRDA_ERROR("%s", msg); | 321 | IRDA_ERROR("%s\n", msg); |
322 | return ret; | 322 | return ret; |
323 | } | 323 | } |
324 | 324 | ||
@@ -353,7 +353,7 @@ static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf) | |||
353 | buf[0] = len & 0xff; | 353 | buf[0] = len & 0xff; |
354 | buf[1] = (len >> 8) & 0xff; | 354 | buf[1] = (len >> 8) & 0xff; |
355 | /* copy the data into the tx buffer. */ | 355 | /* copy the data into the tx buffer. */ |
356 | memcpy(buf+2, skb->data, skb->len); | 356 | skb_copy_from_linear_data(skb, buf + 2, skb->len); |
357 | /* put the fcs in the last four bytes in little endian order. */ | 357 | /* put the fcs in the last four bytes in little endian order. */ |
358 | buf[len - 4] = fcs & 0xff; | 358 | buf[len - 4] = fcs & 0xff; |
359 | buf[len - 3] = (fcs >> 8) & 0xff; | 359 | buf[len - 3] = (fcs >> 8) & 0xff; |
@@ -377,7 +377,7 @@ static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf) | |||
377 | buf[0] = len & 0xff; | 377 | buf[0] = len & 0xff; |
378 | buf[1] = (len >> 8) & 0xff; | 378 | buf[1] = (len >> 8) & 0xff; |
379 | /* copy the data */ | 379 | /* copy the data */ |
380 | memcpy(buf+2, skb->data, skb->len); | 380 | skb_copy_from_linear_data(skb, buf + 2, skb->len); |
381 | /* put the fcs in last two bytes in little endian order. */ | 381 | /* put the fcs in last two bytes in little endian order. */ |
382 | buf[len - 2] = fcs & 0xff; | 382 | buf[len - 2] = fcs & 0xff; |
383 | buf[len - 1] = (fcs >> 8) & 0xff; | 383 | buf[len - 1] = (fcs >> 8) & 0xff; |
@@ -426,9 +426,9 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len) | |||
426 | } | 426 | } |
427 | 427 | ||
428 | skb_reserve(skb, 1); | 428 | skb_reserve(skb, 1); |
429 | memcpy(skb->data, buf, new_len); | 429 | skb_copy_to_linear_data(skb, buf, new_len); |
430 | skb_put(skb, new_len); | 430 | skb_put(skb, new_len); |
431 | skb->mac.raw = skb->data; | 431 | skb_reset_mac_header(skb); |
432 | skb->protocol = htons(ETH_P_IRDA); | 432 | skb->protocol = htons(ETH_P_IRDA); |
433 | skb->dev = mcs->netdev; | 433 | skb->dev = mcs->netdev; |
434 | 434 | ||
@@ -479,9 +479,9 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) | |||
479 | } | 479 | } |
480 | 480 | ||
481 | skb_reserve(skb, 1); | 481 | skb_reserve(skb, 1); |
482 | memcpy(skb->data, buf, new_len); | 482 | skb_copy_to_linear_data(skb, buf, new_len); |
483 | skb_put(skb, new_len); | 483 | skb_put(skb, new_len); |
484 | skb->mac.raw = skb->data; | 484 | skb_reset_mac_header(skb); |
485 | skb->protocol = htons(ETH_P_IRDA); | 485 | skb->protocol = htons(ETH_P_IRDA); |
486 | skb->dev = mcs->netdev; | 486 | skb->dev = mcs->netdev; |
487 | 487 | ||
@@ -587,7 +587,7 @@ static int mcs_speed_change(struct mcs_cb *mcs) | |||
587 | } while(cnt++ < 100 && (rval & MCS_IRINTX)); | 587 | } while(cnt++ < 100 && (rval & MCS_IRINTX)); |
588 | 588 | ||
589 | if(cnt >= 100) { | 589 | if(cnt >= 100) { |
590 | IRDA_ERROR("unable to change speed"); | 590 | IRDA_ERROR("unable to change speed\n"); |
591 | ret = -EIO; | 591 | ret = -EIO; |
592 | goto error; | 592 | goto error; |
593 | } | 593 | } |
@@ -638,7 +638,7 @@ static int mcs_speed_change(struct mcs_cb *mcs) | |||
638 | 638 | ||
639 | default: | 639 | default: |
640 | ret = 1; | 640 | ret = 1; |
641 | IRDA_WARNING("Unknown transceiver type: %d", | 641 | IRDA_WARNING("Unknown transceiver type: %d\n", |
642 | mcs->transceiver_type); | 642 | mcs->transceiver_type); |
643 | } | 643 | } |
644 | if (unlikely(ret)) | 644 | if (unlikely(ret)) |
@@ -733,7 +733,7 @@ static int mcs_net_open(struct net_device *netdev) | |||
733 | sprintf(hwname, "usb#%d", mcs->usbdev->devnum); | 733 | sprintf(hwname, "usb#%d", mcs->usbdev->devnum); |
734 | mcs->irlap = irlap_open(netdev, &mcs->qos, hwname); | 734 | mcs->irlap = irlap_open(netdev, &mcs->qos, hwname); |
735 | if (!mcs->irlap) { | 735 | if (!mcs->irlap) { |
736 | IRDA_ERROR("mcs7780: irlap_open failed"); | 736 | IRDA_ERROR("mcs7780: irlap_open failed\n"); |
737 | goto error2; | 737 | goto error2; |
738 | } | 738 | } |
739 | 739 | ||
@@ -862,7 +862,7 @@ static int mcs_hard_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
862 | mcs->out_buf, wraplen, mcs_send_irq, mcs); | 862 | mcs->out_buf, wraplen, mcs_send_irq, mcs); |
863 | 863 | ||
864 | if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) { | 864 | if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) { |
865 | IRDA_ERROR("failed tx_urb: %d", ret); | 865 | IRDA_ERROR("failed tx_urb: %d\n", ret); |
866 | switch (ret) { | 866 | switch (ret) { |
867 | case -ENODEV: | 867 | case -ENODEV: |
868 | case -EPIPE: | 868 | case -EPIPE: |
@@ -897,7 +897,7 @@ static int mcs_probe(struct usb_interface *intf, | |||
897 | if (!ndev) | 897 | if (!ndev) |
898 | goto error1; | 898 | goto error1; |
899 | 899 | ||
900 | IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.", udev->devnum); | 900 | IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum); |
901 | 901 | ||
902 | /* what is it really for? */ | 902 | /* what is it really for? */ |
903 | SET_MODULE_OWNER(ndev); | 903 | SET_MODULE_OWNER(ndev); |
@@ -905,7 +905,7 @@ static int mcs_probe(struct usb_interface *intf, | |||
905 | 905 | ||
906 | ret = usb_reset_configuration(udev); | 906 | ret = usb_reset_configuration(udev); |
907 | if (ret != 0) { | 907 | if (ret != 0) { |
908 | IRDA_ERROR("mcs7780: usb reset configuration failed"); | 908 | IRDA_ERROR("mcs7780: usb reset configuration failed\n"); |
909 | goto error2; | 909 | goto error2; |
910 | } | 910 | } |
911 | 911 | ||
@@ -950,7 +950,7 @@ static int mcs_probe(struct usb_interface *intf, | |||
950 | if (ret != 0) | 950 | if (ret != 0) |
951 | goto error2; | 951 | goto error2; |
952 | 952 | ||
953 | IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s", | 953 | IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s\n", |
954 | ndev->name); | 954 | ndev->name); |
955 | 955 | ||
956 | mcs->transceiver_type = transceiver_type; | 956 | mcs->transceiver_type = transceiver_type; |
@@ -981,7 +981,7 @@ static void mcs_disconnect(struct usb_interface *intf) | |||
981 | free_netdev(mcs->netdev); | 981 | free_netdev(mcs->netdev); |
982 | 982 | ||
983 | usb_set_intfdata(intf, NULL); | 983 | usb_set_intfdata(intf, NULL); |
984 | IRDA_DEBUG(0, "MCS7780 now disconnected."); | 984 | IRDA_DEBUG(0, "MCS7780 now disconnected.\n"); |
985 | } | 985 | } |
986 | 986 | ||
987 | /* Module insertion */ | 987 | /* Module insertion */ |
@@ -992,7 +992,7 @@ static int __init mcs_init(void) | |||
992 | /* register this driver with the USB subsystem */ | 992 | /* register this driver with the USB subsystem */ |
993 | result = usb_register(&mcs_driver); | 993 | result = usb_register(&mcs_driver); |
994 | if (result) | 994 | if (result) |
995 | IRDA_ERROR("usb_register failed. Error number %d", result); | 995 | IRDA_ERROR("usb_register failed. Error number %d\n", result); |
996 | 996 | ||
997 | return result; | 997 | return result; |
998 | } | 998 | } |
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index 29b5ccd29d0b..d96c89751a71 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c | |||
@@ -1466,9 +1466,8 @@ static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev) | |||
1466 | 1466 | ||
1467 | self->stats.tx_bytes += skb->len; | 1467 | self->stats.tx_bytes += skb->len; |
1468 | 1468 | ||
1469 | memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, | 1469 | skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, |
1470 | skb->len); | 1470 | skb->len); |
1471 | |||
1472 | self->tx_fifo.len++; | 1471 | self->tx_fifo.len++; |
1473 | self->tx_fifo.free++; | 1472 | self->tx_fifo.free++; |
1474 | 1473 | ||
@@ -1869,10 +1868,14 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) | |||
1869 | /* Copy frame without CRC */ | 1868 | /* Copy frame without CRC */ |
1870 | if (self->io.speed < 4000000) { | 1869 | if (self->io.speed < 4000000) { |
1871 | skb_put(skb, len-2); | 1870 | skb_put(skb, len-2); |
1872 | memcpy(skb->data, self->rx_buff.data, len-2); | 1871 | skb_copy_to_linear_data(skb, |
1872 | self->rx_buff.data, | ||
1873 | len - 2); | ||
1873 | } else { | 1874 | } else { |
1874 | skb_put(skb, len-4); | 1875 | skb_put(skb, len-4); |
1875 | memcpy(skb->data, self->rx_buff.data, len-4); | 1876 | skb_copy_to_linear_data(skb, |
1877 | self->rx_buff.data, | ||
1878 | len - 4); | ||
1876 | } | 1879 | } |
1877 | 1880 | ||
1878 | /* Move to next frame */ | 1881 | /* Move to next frame */ |
@@ -1881,7 +1884,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) | |||
1881 | self->stats.rx_packets++; | 1884 | self->stats.rx_packets++; |
1882 | 1885 | ||
1883 | skb->dev = self->netdev; | 1886 | skb->dev = self->netdev; |
1884 | skb->mac.raw = skb->data; | 1887 | skb_reset_mac_header(skb); |
1885 | skb->protocol = htons(ETH_P_IRDA); | 1888 | skb->protocol = htons(ETH_P_IRDA); |
1886 | netif_rx(skb); | 1889 | netif_rx(skb); |
1887 | self->netdev->last_rx = jiffies; | 1890 | self->netdev->last_rx = jiffies; |
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 2272156af31e..fb196fd91855 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c | |||
@@ -386,12 +386,12 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in | |||
386 | 386 | ||
387 | /* Align IP header to 20 bytes */ | 387 | /* Align IP header to 20 bytes */ |
388 | skb_reserve(skb, 1); | 388 | skb_reserve(skb, 1); |
389 | memcpy(skb->data, si->dma_rx_buff, len); | 389 | skb_copy_to_linear_data(skb, si->dma_rx_buff, len); |
390 | skb_put(skb, len); | 390 | skb_put(skb, len); |
391 | 391 | ||
392 | /* Feed it to IrLAP */ | 392 | /* Feed it to IrLAP */ |
393 | skb->dev = dev; | 393 | skb->dev = dev; |
394 | skb->mac.raw = skb->data; | 394 | skb_reset_mac_header(skb); |
395 | skb->protocol = htons(ETH_P_IRDA); | 395 | skb->protocol = htons(ETH_P_IRDA); |
396 | netif_rx(skb); | 396 | netif_rx(skb); |
397 | 397 | ||
@@ -484,7 +484,7 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) | |||
484 | unsigned long mtt = irda_get_mtt(skb); | 484 | unsigned long mtt = irda_get_mtt(skb); |
485 | 485 | ||
486 | si->dma_tx_buff_len = skb->len; | 486 | si->dma_tx_buff_len = skb->len; |
487 | memcpy(si->dma_tx_buff, skb->data, skb->len); | 487 | skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len); |
488 | 488 | ||
489 | if (mtt) | 489 | if (mtt) |
490 | while ((unsigned)(OSCR - si->last_oscr)/4 < mtt) | 490 | while ((unsigned)(OSCR - si->last_oscr)/4 < mtt) |
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 937372d00398..056639f72bec 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c | |||
@@ -504,7 +504,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev | |||
504 | 504 | ||
505 | skb_put(skb, len); | 505 | skb_put(skb, len); |
506 | skb->dev = dev; | 506 | skb->dev = dev; |
507 | skb->mac.raw = skb->data; | 507 | skb_reset_mac_header(skb); |
508 | skb->protocol = htons(ETH_P_IRDA); | 508 | skb->protocol = htons(ETH_P_IRDA); |
509 | si->stats.rx_packets++; | 509 | si->stats.rx_packets++; |
510 | si->stats.rx_bytes += len; | 510 | si->stats.rx_bytes += len; |
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 31c623381ea8..198bf3bfa70f 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c | |||
@@ -315,6 +315,7 @@ static struct smsc_chip __initdata lpc_chips_flat[] = | |||
315 | { | 315 | { |
316 | /* Base address 0x2E or 0x4E */ | 316 | /* Base address 0x2E or 0x4E */ |
317 | { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 }, | 317 | { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 }, |
318 | { "47N227", KEY55_1|FIR|SERx4, 0x7a, 0x00 }, | ||
318 | { "47N267", KEY55_1|FIR|SERx4, 0x5e, 0x00 }, | 319 | { "47N267", KEY55_1|FIR|SERx4, 0x5e, 0x00 }, |
319 | { NULL } | 320 | { NULL } |
320 | }; | 321 | }; |
@@ -1161,7 +1162,7 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev) | |||
1161 | self->new_speed = speed; | 1162 | self->new_speed = speed; |
1162 | } | 1163 | } |
1163 | 1164 | ||
1164 | memcpy(self->tx_buff.head, skb->data, skb->len); | 1165 | skb_copy_from_linear_data(skb, self->tx_buff.head, skb->len); |
1165 | 1166 | ||
1166 | self->tx_buff.len = skb->len; | 1167 | self->tx_buff.len = skb->len; |
1167 | self->tx_buff.data = self->tx_buff.head; | 1168 | self->tx_buff.data = self->tx_buff.head; |
@@ -1412,7 +1413,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self) | |||
1412 | self->stats.rx_bytes += len; | 1413 | self->stats.rx_bytes += len; |
1413 | 1414 | ||
1414 | skb->dev = self->netdev; | 1415 | skb->dev = self->netdev; |
1415 | skb->mac.raw = skb->data; | 1416 | skb_reset_mac_header(skb); |
1416 | skb->protocol = htons(ETH_P_IRDA); | 1417 | skb->protocol = htons(ETH_P_IRDA); |
1417 | netif_rx(skb); | 1418 | netif_rx(skb); |
1418 | } | 1419 | } |
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 20d306fea4cb..755aa444a4dd 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c | |||
@@ -52,7 +52,6 @@ | |||
52 | #include <linux/kthread.h> | 52 | #include <linux/kthread.h> |
53 | #include <linux/freezer.h> | 53 | #include <linux/freezer.h> |
54 | #include <net/irda/irda.h> | 54 | #include <net/irda/irda.h> |
55 | #include <net/irda/irlap.h> | ||
56 | #include <net/irda/irda_device.h> | 55 | #include <net/irda/irda_device.h> |
57 | #include <net/irda/wrapper.h> | 56 | #include <net/irda/wrapper.h> |
58 | #include <net/irda/crc.h> | 57 | #include <net/irda/crc.h> |
@@ -349,7 +348,7 @@ static void fir_eof(struct stir_cb *stir) | |||
349 | } | 348 | } |
350 | skb_reserve(nskb, 1); | 349 | skb_reserve(nskb, 1); |
351 | skb = nskb; | 350 | skb = nskb; |
352 | memcpy(nskb->data, rx_buff->data, len); | 351 | skb_copy_to_linear_data(nskb, rx_buff->data, len); |
353 | } else { | 352 | } else { |
354 | nskb = dev_alloc_skb(rx_buff->truesize); | 353 | nskb = dev_alloc_skb(rx_buff->truesize); |
355 | if (unlikely(!nskb)) { | 354 | if (unlikely(!nskb)) { |
@@ -364,7 +363,7 @@ static void fir_eof(struct stir_cb *stir) | |||
364 | 363 | ||
365 | skb_put(skb, len); | 364 | skb_put(skb, len); |
366 | 365 | ||
367 | skb->mac.raw = skb->data; | 366 | skb_reset_mac_header(skb); |
368 | skb->protocol = htons(ETH_P_IRDA); | 367 | skb->protocol = htons(ETH_P_IRDA); |
369 | skb->dev = stir->netdev; | 368 | skb->dev = stir->netdev; |
370 | 369 | ||
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index c3ed9b3067e5..ff5358574d0a 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c | |||
@@ -925,8 +925,8 @@ static int via_ircc_hard_xmit_fir(struct sk_buff *skb, | |||
925 | 925 | ||
926 | self->tx_fifo.tail += skb->len; | 926 | self->tx_fifo.tail += skb->len; |
927 | self->stats.tx_bytes += skb->len; | 927 | self->stats.tx_bytes += skb->len; |
928 | memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, | 928 | skb_copy_from_linear_data(skb, |
929 | skb->len); | 929 | self->tx_fifo.queue[self->tx_fifo.free].start, skb->len); |
930 | self->tx_fifo.len++; | 930 | self->tx_fifo.len++; |
931 | self->tx_fifo.free++; | 931 | self->tx_fifo.free++; |
932 | //F01 if (self->tx_fifo.len == 1) { | 932 | //F01 if (self->tx_fifo.len == 1) { |
@@ -1125,7 +1125,7 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self, | |||
1125 | self->stats.rx_bytes += len; | 1125 | self->stats.rx_bytes += len; |
1126 | self->stats.rx_packets++; | 1126 | self->stats.rx_packets++; |
1127 | skb->dev = self->netdev; | 1127 | skb->dev = self->netdev; |
1128 | skb->mac.raw = skb->data; | 1128 | skb_reset_mac_header(skb); |
1129 | skb->protocol = htons(ETH_P_IRDA); | 1129 | skb->protocol = htons(ETH_P_IRDA); |
1130 | netif_rx(skb); | 1130 | netif_rx(skb); |
1131 | return TRUE; | 1131 | return TRUE; |
@@ -1189,7 +1189,7 @@ F01_E */ | |||
1189 | skb_reserve(skb, 1); | 1189 | skb_reserve(skb, 1); |
1190 | skb_put(skb, len - 4); | 1190 | skb_put(skb, len - 4); |
1191 | 1191 | ||
1192 | memcpy(skb->data, self->rx_buff.data, len - 4); | 1192 | skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); |
1193 | IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__, | 1193 | IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__, |
1194 | len - 4, self->rx_buff.data); | 1194 | len - 4, self->rx_buff.data); |
1195 | 1195 | ||
@@ -1198,7 +1198,7 @@ F01_E */ | |||
1198 | self->stats.rx_bytes += len; | 1198 | self->stats.rx_bytes += len; |
1199 | self->stats.rx_packets++; | 1199 | self->stats.rx_packets++; |
1200 | skb->dev = self->netdev; | 1200 | skb->dev = self->netdev; |
1201 | skb->mac.raw = skb->data; | 1201 | skb_reset_mac_header(skb); |
1202 | skb->protocol = htons(ETH_P_IRDA); | 1202 | skb->protocol = htons(ETH_P_IRDA); |
1203 | netif_rx(skb); | 1203 | netif_rx(skb); |
1204 | 1204 | ||
@@ -1234,7 +1234,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase) | |||
1234 | } | 1234 | } |
1235 | skb_reserve(skb, 1); | 1235 | skb_reserve(skb, 1); |
1236 | skb_put(skb, len - 4 + 1); | 1236 | skb_put(skb, len - 4 + 1); |
1237 | memcpy(skb->data, self->rx_buff.data, len - 4 + 1); | 1237 | skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1); |
1238 | st_fifo->tail++; | 1238 | st_fifo->tail++; |
1239 | st_fifo->len++; | 1239 | st_fifo->len++; |
1240 | if (st_fifo->tail > MAX_RX_WINDOW) | 1240 | if (st_fifo->tail > MAX_RX_WINDOW) |
@@ -1244,7 +1244,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase) | |||
1244 | self->stats.rx_bytes += len; | 1244 | self->stats.rx_bytes += len; |
1245 | self->stats.rx_packets++; | 1245 | self->stats.rx_packets++; |
1246 | skb->dev = self->netdev; | 1246 | skb->dev = self->netdev; |
1247 | skb->mac.raw = skb->data; | 1247 | skb_reset_mac_header(skb); |
1248 | skb->protocol = htons(ETH_P_IRDA); | 1248 | skb->protocol = htons(ETH_P_IRDA); |
1249 | netif_rx(skb); | 1249 | netif_rx(skb); |
1250 | if (st_fifo->len < (MAX_RX_WINDOW + 2)) { | 1250 | if (st_fifo->len < (MAX_RX_WINDOW + 2)) { |
@@ -1303,7 +1303,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase) | |||
1303 | } | 1303 | } |
1304 | skb_reserve(skb, 1); | 1304 | skb_reserve(skb, 1); |
1305 | skb_put(skb, len - 4); | 1305 | skb_put(skb, len - 4); |
1306 | memcpy(skb->data, self->rx_buff.data, len - 4); | 1306 | skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); |
1307 | 1307 | ||
1308 | IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__, | 1308 | IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__, |
1309 | len - 4, st_fifo->head); | 1309 | len - 4, st_fifo->head); |
@@ -1313,7 +1313,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase) | |||
1313 | self->stats.rx_bytes += len; | 1313 | self->stats.rx_bytes += len; |
1314 | self->stats.rx_packets++; | 1314 | self->stats.rx_packets++; |
1315 | skb->dev = self->netdev; | 1315 | skb->dev = self->netdev; |
1316 | skb->mac.raw = skb->data; | 1316 | skb_reset_mac_header(skb); |
1317 | skb->protocol = htons(ETH_P_IRDA); | 1317 | skb->protocol = htons(ETH_P_IRDA); |
1318 | netif_rx(skb); | 1318 | netif_rx(skb); |
1319 | } //while | 1319 | } //while |
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index 3457e9d8b667..c4be973867a6 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c | |||
@@ -595,7 +595,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd) | |||
595 | rd->skb = NULL; | 595 | rd->skb = NULL; |
596 | skb->dev = ndev; | 596 | skb->dev = ndev; |
597 | memcpy(skb_put(skb,len), rd->buf, len); | 597 | memcpy(skb_put(skb,len), rd->buf, len); |
598 | skb->mac.raw = skb->data; | 598 | skb_reset_mac_header(skb); |
599 | if (in_interrupt()) | 599 | if (in_interrupt()) |
600 | netif_rx(skb); | 600 | netif_rx(skb); |
601 | else | 601 | else |
@@ -993,7 +993,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
993 | goto drop; | 993 | goto drop; |
994 | } | 994 | } |
995 | else | 995 | else |
996 | memcpy(rd->buf, skb->data, len); | 996 | skb_copy_from_linear_data(skb, rd->buf, len); |
997 | } | 997 | } |
998 | 998 | ||
999 | rd->skb = skb; /* remember skb for tx-complete stats */ | 999 | rd->skb = skb; /* remember skb for tx-complete stats */ |
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index 4212657fa4f9..5182e800cc18 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c | |||
@@ -529,7 +529,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev) | |||
529 | /* Decide if we should use PIO or DMA transfer */ | 529 | /* Decide if we should use PIO or DMA transfer */ |
530 | if (self->io.speed > PIO_MAX_SPEED) { | 530 | if (self->io.speed > PIO_MAX_SPEED) { |
531 | self->tx_buff.data = self->tx_buff.head; | 531 | self->tx_buff.data = self->tx_buff.head; |
532 | memcpy(self->tx_buff.data, skb->data, skb->len); | 532 | skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len); |
533 | self->tx_buff.len = skb->len; | 533 | self->tx_buff.len = skb->len; |
534 | 534 | ||
535 | mtt = irda_get_mtt(skb); | 535 | mtt = irda_get_mtt(skb); |
@@ -908,10 +908,14 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self) | |||
908 | /* Copy frame without CRC */ | 908 | /* Copy frame without CRC */ |
909 | if (self->io.speed < 4000000) { | 909 | if (self->io.speed < 4000000) { |
910 | skb_put(skb, len-2); | 910 | skb_put(skb, len-2); |
911 | memcpy(skb->data, self->rx_buff.data, len-2); | 911 | skb_copy_to_linear_data(skb, |
912 | self->rx_buff.data, | ||
913 | len - 2); | ||
912 | } else { | 914 | } else { |
913 | skb_put(skb, len-4); | 915 | skb_put(skb, len-4); |
914 | memcpy(skb->data, self->rx_buff.data, len-4); | 916 | skb_copy_to_linear_data(skb, |
917 | self->rx_buff.data, | ||
918 | len - 4); | ||
915 | } | 919 | } |
916 | 920 | ||
917 | /* Move to next frame */ | 921 | /* Move to next frame */ |
@@ -919,7 +923,7 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self) | |||
919 | self->stats.rx_packets++; | 923 | self->stats.rx_packets++; |
920 | 924 | ||
921 | skb->dev = self->netdev; | 925 | skb->dev = self->netdev; |
922 | skb->mac.raw = skb->data; | 926 | skb_reset_mac_header(skb); |
923 | skb->protocol = htons(ETH_P_IRDA); | 927 | skb->protocol = htons(ETH_P_IRDA); |
924 | netif_rx(skb); | 928 | netif_rx(skb); |
925 | self->netdev->last_rx = jiffies; | 929 | self->netdev->last_rx = jiffies; |
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index 0e9ba3c3faf7..347d50cd77d4 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c | |||
@@ -1540,7 +1540,6 @@ static void veth_receive(struct veth_lpar_connection *cnx, | |||
1540 | } | 1540 | } |
1541 | 1541 | ||
1542 | skb_put(skb, length); | 1542 | skb_put(skb, length); |
1543 | skb->dev = dev; | ||
1544 | skb->protocol = eth_type_trans(skb, dev); | 1543 | skb->protocol = eth_type_trans(skb, dev); |
1545 | skb->ip_summed = CHECKSUM_NONE; | 1544 | skb->ip_summed = CHECKSUM_NONE; |
1546 | netif_rx(skb); /* send it up */ | 1545 | netif_rx(skb); /* send it up */ |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index afc2ec72529e..dfde80e54aef 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -1182,24 +1182,27 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1182 | 1182 | ||
1183 | if (likely(skb_is_gso(skb))) { | 1183 | if (likely(skb_is_gso(skb))) { |
1184 | struct ixgb_buffer *buffer_info; | 1184 | struct ixgb_buffer *buffer_info; |
1185 | struct iphdr *iph; | ||
1186 | |||
1185 | if (skb_header_cloned(skb)) { | 1187 | if (skb_header_cloned(skb)) { |
1186 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 1188 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
1187 | if (err) | 1189 | if (err) |
1188 | return err; | 1190 | return err; |
1189 | } | 1191 | } |
1190 | 1192 | ||
1191 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 1193 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
1192 | mss = skb_shinfo(skb)->gso_size; | 1194 | mss = skb_shinfo(skb)->gso_size; |
1193 | skb->nh.iph->tot_len = 0; | 1195 | iph = ip_hdr(skb); |
1194 | skb->nh.iph->check = 0; | 1196 | iph->tot_len = 0; |
1195 | skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr, | 1197 | iph->check = 0; |
1196 | skb->nh.iph->daddr, | 1198 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
1197 | 0, IPPROTO_TCP, 0); | 1199 | iph->daddr, 0, |
1198 | ipcss = skb->nh.raw - skb->data; | 1200 | IPPROTO_TCP, 0); |
1199 | ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; | 1201 | ipcss = skb_network_offset(skb); |
1200 | ipcse = skb->h.raw - skb->data - 1; | 1202 | ipcso = (void *)&(iph->check) - (void *)skb->data; |
1201 | tucss = skb->h.raw - skb->data; | 1203 | ipcse = skb_transport_offset(skb) - 1; |
1202 | tucso = (void *)&(skb->h.th->check) - (void *)skb->data; | 1204 | tucss = skb_transport_offset(skb); |
1205 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | ||
1203 | tucse = 0; | 1206 | tucse = 0; |
1204 | 1207 | ||
1205 | i = adapter->tx_ring.next_to_use; | 1208 | i = adapter->tx_ring.next_to_use; |
@@ -1243,7 +1246,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1243 | 1246 | ||
1244 | if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 1247 | if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
1245 | struct ixgb_buffer *buffer_info; | 1248 | struct ixgb_buffer *buffer_info; |
1246 | css = skb->h.raw - skb->data; | 1249 | css = skb_transport_offset(skb); |
1247 | cso = css + skb->csum_offset; | 1250 | cso = css + skb->csum_offset; |
1248 | 1251 | ||
1249 | i = adapter->tx_ring.next_to_use; | 1252 | i = adapter->tx_ring.next_to_use; |
@@ -2014,9 +2017,12 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) | |||
2014 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); | 2017 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); |
2015 | if (new_skb) { | 2018 | if (new_skb) { |
2016 | skb_reserve(new_skb, NET_IP_ALIGN); | 2019 | skb_reserve(new_skb, NET_IP_ALIGN); |
2017 | memcpy(new_skb->data - NET_IP_ALIGN, | 2020 | skb_copy_to_linear_data_offset(new_skb, |
2018 | skb->data - NET_IP_ALIGN, | 2021 | -NET_IP_ALIGN, |
2019 | length + NET_IP_ALIGN); | 2022 | (skb->data - |
2023 | NET_IP_ALIGN), | ||
2024 | (length + | ||
2025 | NET_IP_ALIGN)); | ||
2020 | /* save the skb in buffer_info as good */ | 2026 | /* save the skb in buffer_info as good */ |
2021 | buffer_info->skb = skb; | 2027 | buffer_info->skb = skb; |
2022 | skb = new_skb; | 2028 | skb = new_skb; |
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index a4eccb11d677..6683afc02aaa 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c | |||
@@ -110,11 +110,10 @@ static int ixpdev_rx(struct net_device *dev, int *budget) | |||
110 | 110 | ||
111 | skb = dev_alloc_skb(desc->pkt_length + 2); | 111 | skb = dev_alloc_skb(desc->pkt_length + 2); |
112 | if (likely(skb != NULL)) { | 112 | if (likely(skb != NULL)) { |
113 | skb->dev = nds[desc->channel]; | ||
114 | skb_reserve(skb, 2); | 113 | skb_reserve(skb, 2); |
115 | eth_copy_and_sum(skb, buf, desc->pkt_length, 0); | 114 | eth_copy_and_sum(skb, buf, desc->pkt_length, 0); |
116 | skb_put(skb, desc->pkt_length); | 115 | skb_put(skb, desc->pkt_length); |
117 | skb->protocol = eth_type_trans(skb, skb->dev); | 116 | skb->protocol = eth_type_trans(skb, nds[desc->channel]); |
118 | 117 | ||
119 | skb->dev->last_rx = jiffies; | 118 | skb->dev->last_rx = jiffies; |
120 | 119 | ||
diff --git a/drivers/net/lance.c b/drivers/net/lance.c index a3843320dbe1..0fe96c85828b 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c | |||
@@ -988,7 +988,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
988 | if (lance_debug > 5) | 988 | if (lance_debug > 5) |
989 | printk("%s: bouncing a high-memory packet (%#x).\n", | 989 | printk("%s: bouncing a high-memory packet (%#x).\n", |
990 | dev->name, (u32)isa_virt_to_bus(skb->data)); | 990 | dev->name, (u32)isa_virt_to_bus(skb->data)); |
991 | memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len); | 991 | skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len); |
992 | lp->tx_ring[entry].base = | 992 | lp->tx_ring[entry].base = |
993 | ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; | 993 | ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; |
994 | dev_kfree_skb(skb); | 994 | dev_kfree_skb(skb); |
@@ -1184,7 +1184,6 @@ lance_rx(struct net_device *dev) | |||
1184 | } | 1184 | } |
1185 | break; | 1185 | break; |
1186 | } | 1186 | } |
1187 | skb->dev = dev; | ||
1188 | skb_reserve(skb,2); /* 16 byte align */ | 1187 | skb_reserve(skb,2); /* 16 byte align */ |
1189 | skb_put(skb,pkt_len); /* Make room */ | 1188 | skb_put(skb,pkt_len); /* Make room */ |
1190 | eth_copy_and_sum(skb, | 1189 | eth_copy_and_sum(skb, |
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c index 452863d5d498..0edcd125fd61 100644 --- a/drivers/net/lasi_82596.c +++ b/drivers/net/lasi_82596.c | |||
@@ -801,7 +801,6 @@ memory_squeeze: | |||
801 | lp->stats.rx_dropped++; | 801 | lp->stats.rx_dropped++; |
802 | } | 802 | } |
803 | else { | 803 | else { |
804 | skb->dev = dev; | ||
805 | if (!rx_in_place) { | 804 | if (!rx_in_place) { |
806 | /* 16 byte align the data fields */ | 805 | /* 16 byte align the data fields */ |
807 | dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE); | 806 | dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE); |
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c index e726c06b8dc6..5c86e737f954 100644 --- a/drivers/net/lib8390.c +++ b/drivers/net/lib8390.c | |||
@@ -722,7 +722,6 @@ static void ei_receive(struct net_device *dev) | |||
722 | else | 722 | else |
723 | { | 723 | { |
724 | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ | 724 | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ |
725 | skb->dev = dev; | ||
726 | skb_put(skb, pkt_len); /* Make room */ | 725 | skb_put(skb, pkt_len); /* Make room */ |
727 | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); | 726 | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); |
728 | skb->protocol=eth_type_trans(skb,dev); | 727 | skb->protocol=eth_type_trans(skb,dev); |
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 2b739fd584f1..6ba6ed2b480a 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
@@ -75,8 +75,9 @@ static DEFINE_PER_CPU(struct pcpu_lstats, pcpu_lstats); | |||
75 | #ifdef LOOPBACK_TSO | 75 | #ifdef LOOPBACK_TSO |
76 | static void emulate_large_send_offload(struct sk_buff *skb) | 76 | static void emulate_large_send_offload(struct sk_buff *skb) |
77 | { | 77 | { |
78 | struct iphdr *iph = skb->nh.iph; | 78 | struct iphdr *iph = ip_hdr(skb); |
79 | struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4)); | 79 | struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) + |
80 | (iph->ihl * 4)); | ||
80 | unsigned int doffset = (iph->ihl + th->doff) * 4; | 81 | unsigned int doffset = (iph->ihl + th->doff) * 4; |
81 | unsigned int mtu = skb_shinfo(skb)->gso_size + doffset; | 82 | unsigned int mtu = skb_shinfo(skb)->gso_size + doffset; |
82 | unsigned int offset = 0; | 83 | unsigned int offset = 0; |
@@ -90,10 +91,11 @@ static void emulate_large_send_offload(struct sk_buff *skb) | |||
90 | if (!nskb) | 91 | if (!nskb) |
91 | break; | 92 | break; |
92 | skb_reserve(nskb, 32); | 93 | skb_reserve(nskb, 32); |
93 | nskb->mac.raw = nskb->data - 14; | 94 | skb_set_mac_header(nskb, -ETH_HLEN); |
94 | nskb->nh.raw = nskb->data; | 95 | skb_reset_network_header(nskb); |
95 | iph = nskb->nh.iph; | 96 | iph = ip_hdr(nskb); |
96 | memcpy(nskb->data, skb->nh.raw, doffset); | 97 | skb_copy_to_linear_data(nskb, skb_network_header(skb), |
98 | doffset); | ||
97 | if (skb_copy_bits(skb, | 99 | if (skb_copy_bits(skb, |
98 | doffset + offset, | 100 | doffset + offset, |
99 | nskb->data + doffset, | 101 | nskb->data + doffset, |
@@ -108,7 +110,7 @@ static void emulate_large_send_offload(struct sk_buff *skb) | |||
108 | memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); | 110 | memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); |
109 | nskb->pkt_type = skb->pkt_type; | 111 | nskb->pkt_type = skb->pkt_type; |
110 | 112 | ||
111 | th = (struct tcphdr*)(nskb->nh.raw + iph->ihl*4); | 113 | th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4); |
112 | iph->tot_len = htons(frag_size + doffset); | 114 | iph->tot_len = htons(frag_size + doffset); |
113 | iph->id = htons(id); | 115 | iph->id = htons(id); |
114 | iph->check = 0; | 116 | iph->check = 0; |
@@ -137,7 +139,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) | |||
137 | skb_orphan(skb); | 139 | skb_orphan(skb); |
138 | 140 | ||
139 | skb->protocol = eth_type_trans(skb,dev); | 141 | skb->protocol = eth_type_trans(skb,dev); |
140 | skb->dev = dev; | ||
141 | #ifndef LOOPBACK_MUST_CHECKSUM | 142 | #ifndef LOOPBACK_MUST_CHECKSUM |
142 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 143 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
143 | #endif | 144 | #endif |
@@ -145,7 +146,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) | |||
145 | #ifdef LOOPBACK_TSO | 146 | #ifdef LOOPBACK_TSO |
146 | if (skb_is_gso(skb)) { | 147 | if (skb_is_gso(skb)) { |
147 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | 148 | BUG_ON(skb->protocol != htons(ETH_P_IP)); |
148 | BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); | 149 | BUG_ON(ip_hdr(skb)->protocol != IPPROTO_TCP); |
149 | 150 | ||
150 | emulate_large_send_offload(skb); | 151 | emulate_large_send_offload(skb); |
151 | return 0; | 152 | return 0; |
@@ -163,11 +164,9 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) | |||
163 | return 0; | 164 | return 0; |
164 | } | 165 | } |
165 | 166 | ||
166 | static struct net_device_stats loopback_stats; | ||
167 | |||
168 | static struct net_device_stats *get_stats(struct net_device *dev) | 167 | static struct net_device_stats *get_stats(struct net_device *dev) |
169 | { | 168 | { |
170 | struct net_device_stats *stats = &loopback_stats; | 169 | struct net_device_stats *stats = &dev->stats; |
171 | unsigned long bytes = 0; | 170 | unsigned long bytes = 0; |
172 | unsigned long packets = 0; | 171 | unsigned long packets = 0; |
173 | int i; | 172 | int i; |
@@ -207,7 +206,6 @@ static const struct ethtool_ops loopback_ethtool_ops = { | |||
207 | struct net_device loopback_dev = { | 206 | struct net_device loopback_dev = { |
208 | .name = "lo", | 207 | .name = "lo", |
209 | .get_stats = &get_stats, | 208 | .get_stats = &get_stats, |
210 | .priv = &loopback_stats, | ||
211 | .mtu = (16 * 1024) + 20 + 20 + 12, | 209 | .mtu = (16 * 1024) + 20 + 20 + 12, |
212 | .hard_start_xmit = loopback_xmit, | 210 | .hard_start_xmit = loopback_xmit, |
213 | .hard_header = eth_header, | 211 | .hard_header = eth_header, |
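The loopback.c hunks replace direct pokes at skb->mac.raw and skb->nh.raw with the header accessor API (skb_set_mac_header(), skb_reset_network_header(), skb_network_header(), ip_hdr()), which keeps working whether sk_buff stores header positions as pointers or as offsets. A small sketch of the per-segment header bookkeeping done in the emulated TSO path; example_mark_segment_headers() is an illustrative name:

    #include <linux/skbuff.h>
    #include <linux/ip.h>
    #include <linux/if_ether.h>

    static struct iphdr *example_mark_segment_headers(struct sk_buff *nskb)
    {
            skb_reset_network_header(nskb);        /* network header starts at nskb->data */
            skb_set_mac_header(nskb, -ETH_HLEN);   /* MAC header sits ETH_HLEN bytes before it */
            return ip_hdr(nskb);                   /* typed view of that network header */
    }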
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c index 177c502f7385..5fc18da1873d 100644 --- a/drivers/net/lp486e.c +++ b/drivers/net/lp486e.c | |||
@@ -676,7 +676,6 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp, | |||
676 | return 1; | 676 | return 1; |
677 | } | 677 | } |
678 | 678 | ||
679 | skb->dev = dev; | ||
680 | memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len); | 679 | memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len); |
681 | 680 | ||
682 | skb->protocol = eth_type_trans(skb,dev); | 681 | skb->protocol = eth_type_trans(skb,dev); |
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index e960138011c0..90e695d53266 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c | |||
@@ -530,7 +530,6 @@ net_rx(struct net_device *dev) | |||
530 | return; | 530 | return; |
531 | } | 531 | } |
532 | skb_put(skb, length); | 532 | skb_put(skb, length); |
533 | skb->dev = dev; | ||
534 | 533 | ||
535 | memcpy_fromio(skb->data, dev->mem_start + PP_RxFrame, length); | 534 | memcpy_fromio(skb->data, dev->mem_start + PP_RxFrame, length); |
536 | 535 | ||
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 2e9571bf0736..0e04f7ac3f2e 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -357,7 +357,6 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, | |||
357 | } | 357 | } |
358 | 358 | ||
359 | skb_reserve(skb, RX_OFFSET); | 359 | skb_reserve(skb, RX_OFFSET); |
360 | skb->dev = bp->dev; | ||
361 | skb->ip_summed = CHECKSUM_NONE; | 360 | skb->ip_summed = CHECKSUM_NONE; |
362 | skb_put(skb, len); | 361 | skb_put(skb, len); |
363 | 362 | ||
@@ -368,9 +367,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, | |||
368 | BUG_ON(frag != last_frag); | 367 | BUG_ON(frag != last_frag); |
369 | frag_len = len - offset; | 368 | frag_len = len - offset; |
370 | } | 369 | } |
371 | memcpy(skb->data + offset, | 370 | skb_copy_to_linear_data_offset(skb, offset, |
372 | bp->rx_buffers + (RX_BUFFER_SIZE * frag), | 371 | (bp->rx_buffers + |
373 | frag_len); | 372 | (RX_BUFFER_SIZE * frag)), |
373 | frag_len); | ||
374 | offset += RX_BUFFER_SIZE; | 374 | offset += RX_BUFFER_SIZE; |
375 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); | 375 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); |
376 | wmb(); | 376 | wmb(); |
@@ -576,7 +576,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
576 | int i; | 576 | int i; |
577 | dev_dbg(&bp->pdev->dev, | 577 | dev_dbg(&bp->pdev->dev, |
578 | "start_xmit: len %u head %p data %p tail %p end %p\n", | 578 | "start_xmit: len %u head %p data %p tail %p end %p\n", |
579 | skb->len, skb->head, skb->data, skb->tail, skb->end); | 579 | skb->len, skb->head, skb->data, |
580 | skb_tail_pointer(skb), skb_end_pointer(skb)); | ||
580 | dev_dbg(&bp->pdev->dev, | 581 | dev_dbg(&bp->pdev->dev, |
581 | "data:"); | 582 | "data:"); |
582 | for (i = 0; i < 16; i++) | 583 | for (i = 0; i < 16; i++) |
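The macb.c debug output switches from reading skb->tail and skb->end directly to skb_tail_pointer() and skb_end_pointer(); on configurations where sk_buff keeps these as offsets rather than pointers, only the accessors yield printable addresses. A sketch with a hypothetical example_dump_skb_layout() name:

    #include <linux/kernel.h>
    #include <linux/skbuff.h>

    static void example_dump_skb_layout(const struct sk_buff *skb)
    {
            pr_debug("len %u head %p data %p tail %p end %p\n",
                     skb->len, skb->head, skb->data,
                     skb_tail_pointer(skb), skb_end_pointer(skb));
    }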
diff --git a/drivers/net/mace.c b/drivers/net/mace.c index 9ec24f0d5d68..b3bd62394958 100644 --- a/drivers/net/mace.c +++ b/drivers/net/mace.c | |||
@@ -939,7 +939,6 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) | |||
939 | else /* Ethernet header; mace includes FCS */ | 939 | else /* Ethernet header; mace includes FCS */ |
940 | nb -= 8; | 940 | nb -= 8; |
941 | skb_put(skb, nb); | 941 | skb_put(skb, nb); |
942 | skb->dev = dev; | ||
943 | skb->protocol = eth_type_trans(skb, dev); | 942 | skb->protocol = eth_type_trans(skb, dev); |
944 | mp->stats.rx_bytes += skb->len; | 943 | mp->stats.rx_bytes += skb->len; |
945 | netif_rx(skb); | 944 | netif_rx(skb); |
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c index 5d541e873041..27911c07558d 100644 --- a/drivers/net/macmace.c +++ b/drivers/net/macmace.c | |||
@@ -420,8 +420,7 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | |||
420 | mp->stats.tx_bytes += skb->len; | 420 | mp->stats.tx_bytes += skb->len; |
421 | 421 | ||
422 | /* We need to copy into our xmit buffer to take care of alignment and caching issues */ | 422 | /* We need to copy into our xmit buffer to take care of alignment and caching issues */ |
423 | 423 | skb_copy_from_linear_data(skb, mp->tx_ring, skb->len); | |
424 | memcpy((void *) mp->tx_ring, skb->data, skb->len); | ||
425 | 424 | ||
426 | /* load the Tx DMA and fire it off */ | 425 | /* load the Tx DMA and fire it off */ |
427 | 426 | ||
@@ -621,7 +620,6 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) | |||
621 | skb_reserve(skb,2); | 620 | skb_reserve(skb,2); |
622 | memcpy(skb_put(skb, mf->len), mf->data, mf->len); | 621 | memcpy(skb_put(skb, mf->len), mf->data, mf->len); |
623 | 622 | ||
624 | skb->dev = dev; | ||
625 | skb->protocol = eth_type_trans(skb, dev); | 623 | skb->protocol = eth_type_trans(skb, dev); |
626 | netif_rx(skb); | 624 | netif_rx(skb); |
627 | dev->last_rx = jiffies; | 625 | dev->last_rx = jiffies; |
diff --git a/drivers/net/meth.c b/drivers/net/meth.c index 7e69ca6edd91..0343ea12b299 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c | |||
@@ -421,7 +421,6 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) | |||
421 | /* Write metadata, and then pass to the receive level */ | 421 | /* Write metadata, and then pass to the receive level */ |
422 | skb_put(skb_c, len); | 422 | skb_put(skb_c, len); |
423 | priv->rx_skbs[priv->rx_write] = skb; | 423 | priv->rx_skbs[priv->rx_write] = skb; |
424 | skb_c->dev = dev; | ||
425 | skb_c->protocol = eth_type_trans(skb_c, dev); | 424 | skb_c->protocol = eth_type_trans(skb_c, dev); |
426 | dev->last_rx = jiffies; | 425 | dev->last_rx = jiffies; |
427 | priv->stats.rx_packets++; | 426 | priv->stats.rx_packets++; |
@@ -609,7 +608,7 @@ static void meth_tx_short_prepare(struct meth_private *priv, | |||
609 | 608 | ||
610 | desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16); | 609 | desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16); |
611 | /* maybe I should set whole thing to 0 first... */ | 610 | /* maybe I should set whole thing to 0 first... */ |
612 | memcpy(desc->data.dt + (120 - len), skb->data, skb->len); | 611 | skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len); |
613 | if (skb->len < len) | 612 | if (skb->len < len) |
614 | memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len); | 613 | memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len); |
615 | } | 614 | } |
@@ -627,8 +626,8 @@ static void meth_tx_1page_prepare(struct meth_private *priv, | |||
627 | 626 | ||
628 | /* unaligned part */ | 627 | /* unaligned part */ |
629 | if (unaligned_len) { | 628 | if (unaligned_len) { |
630 | memcpy(desc->data.dt + (120 - unaligned_len), | 629 | skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), |
631 | skb->data, unaligned_len); | 630 | unaligned_len); |
632 | desc->header.raw |= (128 - unaligned_len) << 16; | 631 | desc->header.raw |= (128 - unaligned_len) << 16; |
633 | } | 632 | } |
634 | 633 | ||
@@ -653,8 +652,8 @@ static void meth_tx_2page_prepare(struct meth_private *priv, | |||
653 | desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1); | 652 | desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1); |
654 | /* unaligned part */ | 653 | /* unaligned part */ |
655 | if (unaligned_len){ | 654 | if (unaligned_len){ |
656 | memcpy(desc->data.dt + (120 - unaligned_len), | 655 | skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), |
657 | skb->data, unaligned_len); | 656 | unaligned_len); |
658 | desc->header.raw |= (128 - unaligned_len) << 16; | 657 | desc->header.raw |= (128 - unaligned_len) << 16; |
659 | } | 658 | } |
660 | 659 | ||
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c index f42b9e201937..403f63afd201 100644 --- a/drivers/net/mipsnet.c +++ b/drivers/net/mipsnet.c | |||
@@ -101,7 +101,6 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) | |||
101 | if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len)) | 101 | if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len)) |
102 | return -EFAULT; | 102 | return -EFAULT; |
103 | 103 | ||
104 | skb->dev = dev; | ||
105 | skb->protocol = eth_type_trans(skb, dev); | 104 | skb->protocol = eth_type_trans(skb, dev); |
106 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 105 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
107 | 106 | ||
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 8015a7c5b0c9..ab15ecd4b3d6 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -434,7 +434,6 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | |||
434 | * received packet | 434 | * received packet |
435 | */ | 435 | */ |
436 | skb_put(skb, pkt_info.byte_cnt - 4); | 436 | skb_put(skb, pkt_info.byte_cnt - 4); |
437 | skb->dev = dev; | ||
438 | 437 | ||
439 | if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) { | 438 | if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) { |
440 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 439 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -1162,15 +1161,15 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, | |||
1162 | 1161 | ||
1163 | cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | | 1162 | cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | |
1164 | ETH_GEN_IP_V_4_CHECKSUM | | 1163 | ETH_GEN_IP_V_4_CHECKSUM | |
1165 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; | 1164 | ip_hdr(skb)->ihl << ETH_TX_IHL_SHIFT; |
1166 | 1165 | ||
1167 | switch (skb->nh.iph->protocol) { | 1166 | switch (ip_hdr(skb)->protocol) { |
1168 | case IPPROTO_UDP: | 1167 | case IPPROTO_UDP: |
1169 | cmd_sts |= ETH_UDP_FRAME; | 1168 | cmd_sts |= ETH_UDP_FRAME; |
1170 | desc->l4i_chk = skb->h.uh->check; | 1169 | desc->l4i_chk = udp_hdr(skb)->check; |
1171 | break; | 1170 | break; |
1172 | case IPPROTO_TCP: | 1171 | case IPPROTO_TCP: |
1173 | desc->l4i_chk = skb->h.th->check; | 1172 | desc->l4i_chk = tcp_hdr(skb)->check; |
1174 | break; | 1173 | break; |
1175 | default: | 1174 | default: |
1176 | BUG(); | 1175 | BUG(); |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index f8efe0e70a6b..16e3c4315e82 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -879,7 +879,7 @@ myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va, | |||
879 | * skb_pull() (for ether_pad and eth_type_trans()) requires | 879 | * skb_pull() (for ether_pad and eth_type_trans()) requires |
880 | * the beginning of the packet in skb_headlen(), move it | 880 | * the beginning of the packet in skb_headlen(), move it |
881 | * manually */ | 881 | * manually */ |
882 | memcpy(skb->data, va, hlen); | 882 | skb_copy_to_linear_data(skb, va, hlen); |
883 | skb_shinfo(skb)->frags[0].page_offset += hlen; | 883 | skb_shinfo(skb)->frags[0].page_offset += hlen; |
884 | skb_shinfo(skb)->frags[0].size -= hlen; | 884 | skb_shinfo(skb)->frags[0].size -= hlen; |
885 | skb->data_len -= hlen; | 885 | skb->data_len -= hlen; |
@@ -1020,7 +1020,6 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, | |||
1020 | skb_shinfo(skb)->nr_frags = 0; | 1020 | skb_shinfo(skb)->nr_frags = 0; |
1021 | } | 1021 | } |
1022 | skb->protocol = eth_type_trans(skb, dev); | 1022 | skb->protocol = eth_type_trans(skb, dev); |
1023 | skb->dev = dev; | ||
1024 | 1023 | ||
1025 | if (mgp->csum_flag) { | 1024 | if (mgp->csum_flag) { |
1026 | if ((skb->protocol == htons(ETH_P_IP)) || | 1025 | if ((skb->protocol == htons(ETH_P_IP)) || |
@@ -2030,7 +2029,7 @@ again: | |||
2030 | odd_flag = 0; | 2029 | odd_flag = 0; |
2031 | flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); | 2030 | flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); |
2032 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 2031 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
2033 | cksum_offset = (skb->h.raw - skb->data); | 2032 | cksum_offset = skb_transport_offset(skb); |
2034 | pseudo_hdr_offset = cksum_offset + skb->csum_offset; | 2033 | pseudo_hdr_offset = cksum_offset + skb->csum_offset; |
2035 | /* If the headers are excessively large, then we must | 2034 | /* If the headers are excessively large, then we must |
2036 | * fall back to a software checksum */ | 2035 | * fall back to a software checksum */ |
@@ -2055,7 +2054,7 @@ again: | |||
2055 | * send loop that we are still in the | 2054 | * send loop that we are still in the |
2056 | * header portion of the TSO packet. | 2055 | * header portion of the TSO packet. |
2057 | * TSO header must be at most 134 bytes long */ | 2056 | * TSO header must be at most 134 bytes long */ |
2058 | cum_len = -((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 2057 | cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb)); |
2059 | 2058 | ||
2060 | /* for TSO, pseudo_hdr_offset holds mss. | 2059 | /* for TSO, pseudo_hdr_offset holds mss. |
2061 | * The firmware figures out where to put | 2060 | * The firmware figures out where to put |
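The myri10ge TX path stops open-coding header offsets (skb->h.raw - skb->data, skb->h.th->doff << 2) and uses skb_transport_offset() and tcp_hdrlen() instead. A sketch of the two quantities it needs, assuming the stack has already set the transport header; the example_*() names are illustrative:

    #include <linux/skbuff.h>
    #include <linux/tcp.h>

    /* offset of the L4 header from the start of the frame (checksum start) */
    static int example_csum_start(const struct sk_buff *skb)
    {
            return skb_transport_offset(skb);
    }

    /* bytes of protocol headers a TSO engine must not segment */
    static unsigned int example_tso_header_bytes(const struct sk_buff *skb)
    {
            return skb_transport_offset(skb) + tcp_hdrlen(skb);
    }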
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index ee26ef52289f..13444da93273 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c | |||
@@ -368,7 +368,7 @@ static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
368 | struct ethhdr *eth; | 368 | struct ethhdr *eth; |
369 | unsigned char *rawp; | 369 | unsigned char *rawp; |
370 | 370 | ||
371 | skb->mac.raw = (((unsigned char *)skb->data) + MYRI_PAD_LEN); | 371 | skb_set_mac_header(skb, MYRI_PAD_LEN); |
372 | skb_pull(skb, dev->hard_header_len); | 372 | skb_pull(skb, dev->hard_header_len); |
373 | eth = eth_hdr(skb); | 373 | eth = eth_hdr(skb); |
374 | 374 | ||
@@ -502,7 +502,7 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) | |||
502 | copy_skb->dev = dev; | 502 | copy_skb->dev = dev; |
503 | DRX(("resv_and_put ")); | 503 | DRX(("resv_and_put ")); |
504 | skb_put(copy_skb, len); | 504 | skb_put(copy_skb, len); |
505 | memcpy(copy_skb->data, skb->data, len); | 505 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
506 | 506 | ||
507 | /* Reuse original ring buffer. */ | 507 | /* Reuse original ring buffer. */ |
508 | DRX(("reuse ")); | 508 | DRX(("reuse ")); |
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 349b96a3ec4c..a8d7ff2c96ac 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -2289,7 +2289,6 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) | |||
2289 | * without copying to a minimally-sized skbuff. */ | 2289 | * without copying to a minimally-sized skbuff. */ |
2290 | if (pkt_len < rx_copybreak | 2290 | if (pkt_len < rx_copybreak |
2291 | && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) { | 2291 | && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) { |
2292 | skb->dev = dev; | ||
2293 | /* 16 byte align the IP header */ | 2292 | /* 16 byte align the IP header */ |
2294 | skb_reserve(skb, RX_OFFSET); | 2293 | skb_reserve(skb, RX_OFFSET); |
2295 | pci_dma_sync_single_for_cpu(np->pci_dev, | 2294 | pci_dma_sync_single_for_cpu(np->pci_dev, |
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c index a53644f6a29b..2b8da0a54998 100644 --- a/drivers/net/netx-eth.c +++ b/drivers/net/netx-eth.c | |||
@@ -168,7 +168,6 @@ static void netx_eth_receive(struct net_device *ndev) | |||
168 | FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno)); | 168 | FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno)); |
169 | 169 | ||
170 | ndev->last_rx = jiffies; | 170 | ndev->last_rx = jiffies; |
171 | skb->dev = ndev; | ||
172 | skb->protocol = eth_type_trans(skb, ndev); | 171 | skb->protocol = eth_type_trans(skb, ndev); |
173 | netif_rx(skb); | 172 | netif_rx(skb); |
174 | priv->stats.rx_packets++; | 173 | priv->stats.rx_packets++; |
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 6537574a9cda..0fba8f190762 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c | |||
@@ -35,6 +35,8 @@ | |||
35 | #include "netxen_nic_hw.h" | 35 | #include "netxen_nic_hw.h" |
36 | #include "netxen_nic_phan_reg.h" | 36 | #include "netxen_nic_phan_reg.h" |
37 | 37 | ||
38 | #include <net/ip.h> | ||
39 | |||
38 | /* PCI Windowing for DDR regions. */ | 40 | /* PCI Windowing for DDR regions. */ |
39 | 41 | ||
40 | #define ADDR_IN_RANGE(addr, low, high) \ | 42 | #define ADDR_IN_RANGE(addr, low, high) \ |
@@ -371,22 +373,21 @@ void netxen_tso_check(struct netxen_adapter *adapter, | |||
371 | struct cmd_desc_type0 *desc, struct sk_buff *skb) | 373 | struct cmd_desc_type0 *desc, struct sk_buff *skb) |
372 | { | 374 | { |
373 | if (desc->mss) { | 375 | if (desc->mss) { |
374 | desc->total_hdr_length = sizeof(struct ethhdr) + | 376 | desc->total_hdr_length = (sizeof(struct ethhdr) + |
375 | ((skb->nh.iph)->ihl * sizeof(u32)) + | 377 | ip_hdrlen(skb) + tcp_hdrlen(skb)); |
376 | ((skb->h.th)->doff * sizeof(u32)); | ||
377 | netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); | 378 | netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); |
378 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | 379 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
379 | if (skb->nh.iph->protocol == IPPROTO_TCP) { | 380 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) { |
380 | netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT); | 381 | netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT); |
381 | } else if (skb->nh.iph->protocol == IPPROTO_UDP) { | 382 | } else if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
382 | netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT); | 383 | netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT); |
383 | } else { | 384 | } else { |
384 | return; | 385 | return; |
385 | } | 386 | } |
386 | } | 387 | } |
387 | adapter->stats.xmitcsummed++; | 388 | adapter->stats.xmitcsummed++; |
388 | desc->tcp_hdr_offset = skb->h.raw - skb->data; | 389 | desc->tcp_hdr_offset = skb_transport_offset(skb); |
389 | desc->ip_hdr_offset = skb->nh.raw - skb->data; | 390 | desc->ip_hdr_offset = skb_network_offset(skb); |
390 | } | 391 | } |
391 | 392 | ||
392 | int netxen_is_flash_supported(struct netxen_adapter *adapter) | 393 | int netxen_is_flash_supported(struct netxen_adapter *adapter) |
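netxen_tso_check() now derives the LSO header length from ip_hdrlen() and tcp_hdrlen() rather than multiplying ihl and doff by sizeof(u32) by hand, which is why the file gains an include of net/ip.h. A sketch of the same arithmetic; example_lso_total_hdr_len() is an illustrative name:

    #include <linux/if_ether.h>
    #include <linux/tcp.h>
    #include <net/ip.h>

    static unsigned int example_lso_total_hdr_len(const struct sk_buff *skb)
    {
            /* Ethernet + IP + TCP header bytes; ip_hdrlen() is ihl * 4
             * and tcp_hdrlen() is doff * 4 */
            return sizeof(struct ethhdr) + ip_hdrlen(skb) + tcp_hdrlen(skb);
    }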
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index eff965dc5fff..5cd40562da7c 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -1129,7 +1129,6 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, | |||
1129 | port->stats.csummed++; | 1129 | port->stats.csummed++; |
1130 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1130 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1131 | } | 1131 | } |
1132 | skb->dev = netdev; | ||
1133 | if (desc_ctx == RCV_DESC_LRO_CTXID) { | 1132 | if (desc_ctx == RCV_DESC_LRO_CTXID) { |
1134 | /* True length was only available on the last pkt */ | 1133 | /* True length was only available on the last pkt */ |
1135 | skb_put(skb, buffer->lro_length); | 1134 | skb_put(skb, buffer->lro_length); |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 7d2525e76abb..ab25c225a07e 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -41,6 +41,7 @@ | |||
41 | 41 | ||
42 | #include <linux/dma-mapping.h> | 42 | #include <linux/dma-mapping.h> |
43 | #include <linux/vmalloc.h> | 43 | #include <linux/vmalloc.h> |
44 | #include <net/ip.h> | ||
44 | 45 | ||
45 | MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); | 46 | MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); |
46 | MODULE_LICENSE("GPL"); | 47 | MODULE_LICENSE("GPL"); |
@@ -778,9 +779,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
778 | if (skb_shinfo(skb)->gso_size > 0) { | 779 | if (skb_shinfo(skb)->gso_size > 0) { |
779 | 780 | ||
780 | no_of_desc++; | 781 | no_of_desc++; |
781 | if (((skb->nh.iph)->ihl * sizeof(u32)) + | 782 | if ((ip_hdrlen(skb) + tcp_hdrlen(skb) + |
782 | ((skb->h.th)->doff * sizeof(u32)) + | 783 | sizeof(struct ethhdr)) > |
783 | sizeof(struct ethhdr) > | ||
784 | (sizeof(struct cmd_desc_type0) - 2)) { | 784 | (sizeof(struct cmd_desc_type0) - 2)) { |
785 | no_of_desc++; | 785 | no_of_desc++; |
786 | } | 786 | } |
@@ -920,8 +920,10 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
920 | /* copy the next 64 bytes - should be enough except | 920 | /* copy the next 64 bytes - should be enough except |
921 | * for pathological case | 921 | * for pathological case |
922 | */ | 922 | */ |
923 | memcpy((void *)hwdesc, (void *)(skb->data) + | 923 | skb_copy_from_linear_data_offset(skb, first_hdr_len, |
924 | first_hdr_len, hdr_len - first_hdr_len); | 924 | hwdesc, |
925 | (hdr_len - | ||
926 | first_hdr_len)); | ||

925 | producer = get_next_index(producer, max_tx_desc_count); | 927 | producer = get_next_index(producer, max_tx_desc_count); |
926 | } | 928 | } |
927 | } | 929 | } |
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c index 8be0d030d6f4..3d5b4232f65f 100644 --- a/drivers/net/ni5010.c +++ b/drivers/net/ni5010.c | |||
@@ -562,7 +562,6 @@ static void ni5010_rx(struct net_device *dev) | |||
562 | return; | 562 | return; |
563 | } | 563 | } |
564 | 564 | ||
565 | skb->dev = dev; | ||
566 | skb_reserve(skb, 2); | 565 | skb_reserve(skb, 2); |
567 | 566 | ||
568 | /* Read packet into buffer */ | 567 | /* Read packet into buffer */ |
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index a6f4b24b0176..8dbd6d1900b5 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c | |||
@@ -934,7 +934,6 @@ static void ni52_rcv_int(struct net_device *dev) | |||
934 | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); | 934 | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); |
935 | if(skb != NULL) | 935 | if(skb != NULL) |
936 | { | 936 | { |
937 | skb->dev = dev; | ||
938 | skb_reserve(skb,2); | 937 | skb_reserve(skb,2); |
939 | skb_put(skb,totlen); | 938 | skb_put(skb,totlen); |
940 | eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0); | 939 | eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0); |
@@ -1183,7 +1182,7 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
1183 | else | 1182 | else |
1184 | #endif | 1183 | #endif |
1185 | { | 1184 | { |
1186 | memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); | 1185 | skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len); |
1187 | len = skb->len; | 1186 | len = skb->len; |
1188 | if (len < ETH_ZLEN) { | 1187 | if (len < ETH_ZLEN) { |
1189 | len = ETH_ZLEN; | 1188 | len = ETH_ZLEN; |
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c index 1578f4d98498..3818edf0ac18 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c | |||
@@ -610,7 +610,6 @@ static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type) | |||
610 | printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); | 610 | printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); |
611 | return NULL; | 611 | return NULL; |
612 | } | 612 | } |
613 | skb->dev = dev; | ||
614 | skb_reserve(skb,2+16); | 613 | skb_reserve(skb,2+16); |
615 | skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */ | 614 | skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */ |
616 | ptr = skb->data; | 615 | ptr = skb->data; |
@@ -1094,7 +1093,6 @@ static void ni65_recv_intr(struct net_device *dev,int csr0) | |||
1094 | if(skb) | 1093 | if(skb) |
1095 | { | 1094 | { |
1096 | skb_reserve(skb,2); | 1095 | skb_reserve(skb,2); |
1097 | skb->dev = dev; | ||
1098 | #ifdef RCV_VIA_SKB | 1096 | #ifdef RCV_VIA_SKB |
1099 | if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { | 1097 | if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { |
1100 | skb_put(skb,len); | 1098 | skb_put(skb,len); |
@@ -1178,8 +1176,9 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
1178 | if( (unsigned long) (skb->data + skb->len) > 0x1000000) { | 1176 | if( (unsigned long) (skb->data + skb->len) > 0x1000000) { |
1179 | #endif | 1177 | #endif |
1180 | 1178 | ||
1181 | memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data, | 1179 | skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum], |
1182 | (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len); | 1180 | skb->len > T_BUF_SIZE ? T_BUF_SIZE : |
1181 | skb->len); | ||
1183 | if (len > skb->len) | 1182 | if (len > skb->len) |
1184 | memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); | 1183 | memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); |
1185 | dev_kfree_skb (skb); | 1184 | dev_kfree_skb (skb); |
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index 9ec6e9e54f47..6a32338623f1 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c | |||
@@ -607,7 +607,6 @@ static inline int rx_refill(struct net_device *ndev, gfp_t gfp) | |||
607 | res &= 0xf; | 607 | res &= 0xf; |
608 | skb_reserve(skb, res); | 608 | skb_reserve(skb, res); |
609 | 609 | ||
610 | skb->dev = ndev; | ||
611 | if (gfp != GFP_ATOMIC) | 610 | if (gfp != GFP_ATOMIC) |
612 | spin_lock_irqsave(&dev->rx_info.lock, flags); | 611 | spin_lock_irqsave(&dev->rx_info.lock, flags); |
613 | res = ns83820_add_rx_skb(dev, skb); | 612 | res = ns83820_add_rx_skb(dev, skb); |
@@ -1157,9 +1156,9 @@ again: | |||
1157 | extsts = 0; | 1156 | extsts = 0; |
1158 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1157 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1159 | extsts |= EXTSTS_IPPKT; | 1158 | extsts |= EXTSTS_IPPKT; |
1160 | if (IPPROTO_TCP == skb->nh.iph->protocol) | 1159 | if (IPPROTO_TCP == ip_hdr(skb)->protocol) |
1161 | extsts |= EXTSTS_TCPPKT; | 1160 | extsts |= EXTSTS_TCPPKT; |
1162 | else if (IPPROTO_UDP == skb->nh.iph->protocol) | 1161 | else if (IPPROTO_UDP == ip_hdr(skb)->protocol) |
1163 | extsts |= EXTSTS_UDPPKT; | 1162 | extsts |= EXTSTS_UDPPKT; |
1164 | } | 1163 | } |
1165 | 1164 | ||
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index d670ac74824f..76fe9dd8e841 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c | |||
@@ -334,8 +334,6 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev) | |||
334 | break; | 334 | break; |
335 | } | 335 | } |
336 | 336 | ||
337 | skb->dev = dev; | ||
338 | |||
339 | dma = pci_map_single(mac->dma_pdev, skb->data, skb->len, | 337 | dma = pci_map_single(mac->dma_pdev, skb->data, skb->len, |
340 | PCI_DMA_FROMDEVICE); | 338 | PCI_DMA_FROMDEVICE); |
341 | 339 | ||
@@ -731,16 +729,18 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
731 | dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD; | 729 | dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD; |
732 | 730 | ||
733 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 731 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
734 | switch (skb->nh.iph->protocol) { | 732 | const unsigned char *nh = skb_network_header(skb); |
733 | |||
734 | switch (ip_hdr(skb)->protocol) { | ||
735 | case IPPROTO_TCP: | 735 | case IPPROTO_TCP: |
736 | dflags |= XCT_MACTX_CSUM_TCP; | 736 | dflags |= XCT_MACTX_CSUM_TCP; |
737 | dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); | 737 | dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); |
738 | dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); | 738 | dflags |= XCT_MACTX_IPO(nh - skb->data); |
739 | break; | 739 | break; |
740 | case IPPROTO_UDP: | 740 | case IPPROTO_UDP: |
741 | dflags |= XCT_MACTX_CSUM_UDP; | 741 | dflags |= XCT_MACTX_CSUM_UDP; |
742 | dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); | 742 | dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); |
743 | dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); | 743 | dflags |= XCT_MACTX_IPO(nh - skb->data); |
744 | break; | 744 | break; |
745 | } | 745 | } |
746 | } | 746 | } |
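pasemi_mac_start_tx() now computes its checksum-offload fields from skb_network_header_len(), the distance from the network header to the transport header (i.e. the IP header size), and from the network header's offset into the frame. A sketch of those two descriptor inputs; example_csum_offsets() is an illustrative name, and skb_network_offset() is equivalent to the nh - skb->data expression in the hunk:

    #include <linux/skbuff.h>

    static void example_csum_offsets(const struct sk_buff *skb,
                                     unsigned int *iph_words,
                                     unsigned int *ip_offset)
    {
            *iph_words = skb_network_header_len(skb) >> 2;  /* IP header, in 32-bit words */
            *ip_offset = skb_network_offset(skb);           /* frame start to IP header, bytes */
    }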
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index 6ca4e4fa6b88..df8998b4f37e 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c | |||
@@ -1344,7 +1344,7 @@ static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
1344 | 1344 | ||
1345 | tp->tx_info[entry].skb = skb; | 1345 | tp->tx_info[entry].skb = skb; |
1346 | /* tp->tx_info[entry].mapping = 0; */ | 1346 | /* tp->tx_info[entry].mapping = 0; */ |
1347 | memcpy (tp->tx_buf[entry], skb->data, skb->len); | 1347 | skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len); |
1348 | 1348 | ||
1349 | /* Note: the chip doesn't have auto-pad! */ | 1349 | /* Note: the chip doesn't have auto-pad! */ |
1350 | NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)), | 1350 | NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)), |
@@ -1565,7 +1565,6 @@ static void netdrv_rx_interrupt (struct net_device *dev, | |||
1565 | 1565 | ||
1566 | skb = dev_alloc_skb (pkt_size + 2); | 1566 | skb = dev_alloc_skb (pkt_size + 2); |
1567 | if (skb) { | 1567 | if (skb) { |
1568 | skb->dev = dev; | ||
1569 | skb_reserve (skb, 2); /* 16 byte align the IP fields. */ | 1568 | skb_reserve (skb, 2); /* 16 byte align the IP fields. */ |
1570 | 1569 | ||
1571 | eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); | 1570 | eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); |
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index c7bd9c1c7f31..2b395ee21f75 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c | |||
@@ -1056,7 +1056,6 @@ static int el3_rx(struct net_device *dev, int worklimit) | |||
1056 | DEBUG(3, " Receiving packet size %d status %4.4x.\n", | 1056 | DEBUG(3, " Receiving packet size %d status %4.4x.\n", |
1057 | pkt_len, rx_status); | 1057 | pkt_len, rx_status); |
1058 | if (skb != NULL) { | 1058 | if (skb != NULL) { |
1059 | skb->dev = dev; | ||
1060 | skb_reserve(skb, 2); | 1059 | skb_reserve(skb, 2); |
1061 | insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), | 1060 | insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), |
1062 | ((pkt_len+3)>>2)); | 1061 | ((pkt_len+3)>>2)); |
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index 461e8274ef69..143ae2ff309e 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c | |||
@@ -883,7 +883,6 @@ static int el3_rx(struct net_device *dev) | |||
883 | DEBUG(3, " Receiving packet size %d status %4.4x.\n", | 883 | DEBUG(3, " Receiving packet size %d status %4.4x.\n", |
884 | pkt_len, rx_status); | 884 | pkt_len, rx_status); |
885 | if (skb != NULL) { | 885 | if (skb != NULL) { |
886 | skb->dev = dev; | ||
887 | skb_reserve(skb, 2); | 886 | skb_reserve(skb, 2); |
888 | insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), | 887 | insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), |
889 | (pkt_len+3)>>2); | 888 | (pkt_len+3)>>2); |
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 6139048f8117..808fae1577e0 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c | |||
@@ -1136,7 +1136,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1136 | ei_block_output(dev, length, skb->data, output_page); | 1136 | ei_block_output(dev, length, skb->data, output_page); |
1137 | else { | 1137 | else { |
1138 | memset(packet, 0, ETH_ZLEN); | 1138 | memset(packet, 0, ETH_ZLEN); |
1139 | memcpy(packet, skb->data, skb->len); | 1139 | skb_copy_from_linear_data(skb, packet, skb->len); |
1140 | ei_block_output(dev, length, packet, output_page); | 1140 | ei_block_output(dev, length, packet, output_page); |
1141 | } | 1141 | } |
1142 | 1142 | ||
@@ -1496,7 +1496,6 @@ static void ei_receive(struct net_device *dev) | |||
1496 | else | 1496 | else |
1497 | { | 1497 | { |
1498 | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ | 1498 | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ |
1499 | skb->dev = dev; | ||
1500 | skb_put(skb, pkt_len); /* Make room */ | 1499 | skb_put(skb, pkt_len); /* Make room */ |
1501 | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); | 1500 | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); |
1502 | skb->protocol=eth_type_trans(skb,dev); | 1501 | skb->protocol=eth_type_trans(skb,dev); |
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 0d7de617e535..3f93d4933235 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -999,7 +999,6 @@ static void fjn_rx(struct net_device *dev) | |||
999 | lp->stats.rx_dropped++; | 999 | lp->stats.rx_dropped++; |
1000 | break; | 1000 | break; |
1001 | } | 1001 | } |
1002 | skb->dev = dev; | ||
1003 | 1002 | ||
1004 | skb_reserve(skb, 2); | 1003 | skb_reserve(skb, 2); |
1005 | insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), | 1004 | insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), |
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index 3b707747a811..73da611fd536 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c | |||
@@ -1182,12 +1182,10 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt) | |||
1182 | skb = dev_alloc_skb(pkt_len+2); | 1182 | skb = dev_alloc_skb(pkt_len+2); |
1183 | 1183 | ||
1184 | if (skb != NULL) { | 1184 | if (skb != NULL) { |
1185 | skb->dev = dev; | ||
1186 | |||
1187 | skb_reserve(skb, 2); | 1185 | skb_reserve(skb, 2); |
1188 | insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); | 1186 | insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); |
1189 | if (pkt_len & 1) | 1187 | if (pkt_len & 1) |
1190 | *(skb->tail-1) = inb(ioaddr + AM2150_RCV); | 1188 | *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV); |
1191 | skb->protocol = eth_type_trans(skb, dev); | 1189 | skb->protocol = eth_type_trans(skb, dev); |
1192 | 1190 | ||
1193 | netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ | 1191 | netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ |
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 2561f76033ea..7912dbd14251 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -1669,7 +1669,6 @@ static void smc_rx(struct net_device *dev) | |||
1669 | (packet_length+1)>>1); | 1669 | (packet_length+1)>>1); |
1670 | skb->protocol = eth_type_trans(skb, dev); | 1670 | skb->protocol = eth_type_trans(skb, dev); |
1671 | 1671 | ||
1672 | skb->dev = dev; | ||
1673 | netif_rx(skb); | 1672 | netif_rx(skb); |
1674 | dev->last_rx = jiffies; | 1673 | dev->last_rx = jiffies; |
1675 | smc->stats.rx_packets++; | 1674 | smc->stats.rx_packets++; |
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 5879e7c36988..809ec440b8eb 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -1226,7 +1226,6 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1226 | (pktlen+1)>>1); | 1226 | (pktlen+1)>>1); |
1227 | } | 1227 | } |
1228 | skb->protocol = eth_type_trans(skb, dev); | 1228 | skb->protocol = eth_type_trans(skb, dev); |
1229 | skb->dev = dev; | ||
1230 | netif_rx(skb); | 1229 | netif_rx(skb); |
1231 | dev->last_rx = jiffies; | 1230 | dev->last_rx = jiffies; |
1232 | lp->stats.rx_packets++; | 1231 | lp->stats.rx_packets++; |
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 4d94ba7899bf..0791360a6a66 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -1206,7 +1206,6 @@ static void pcnet32_rx_entry(struct net_device *dev, | |||
1206 | PCI_DMA_FROMDEVICE); | 1206 | PCI_DMA_FROMDEVICE); |
1207 | skb_put(skb, pkt_len); | 1207 | skb_put(skb, pkt_len); |
1208 | lp->rx_skbuff[entry] = newskb; | 1208 | lp->rx_skbuff[entry] = newskb; |
1209 | newskb->dev = dev; | ||
1210 | lp->rx_dma_addr[entry] = | 1209 | lp->rx_dma_addr[entry] = |
1211 | pci_map_single(lp->pci_dev, | 1210 | pci_map_single(lp->pci_dev, |
1212 | newskb->data, | 1211 | newskb->data, |
diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 6bb085f54437..8754cf3356b0 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c | |||
@@ -546,7 +546,7 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
546 | struct ethhdr *eth; | 546 | struct ethhdr *eth; |
547 | unsigned char *rawp; | 547 | unsigned char *rawp; |
548 | 548 | ||
549 | skb->mac.raw=skb->data; | 549 | skb_reset_mac_header(skb); |
550 | skb_pull(skb,dev->hard_header_len); | 550 | skb_pull(skb,dev->hard_header_len); |
551 | eth = eth_hdr(skb); | 551 | eth = eth_hdr(skb); |
552 | 552 | ||
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 933e2f3c77aa..caabbc408c34 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c | |||
@@ -802,9 +802,9 @@ process_input_packet(struct asyncppp *ap) | |||
802 | 802 | ||
803 | /* check for address/control and protocol compression */ | 803 | /* check for address/control and protocol compression */ |
804 | p = skb->data; | 804 | p = skb->data; |
805 | if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) { | 805 | if (p[0] == PPP_ALLSTATIONS) { |
806 | /* chop off address/control */ | 806 | /* chop off address/control */ |
807 | if (skb->len < 3) | 807 | if (p[1] != PPP_UI || skb->len < 3) |
808 | goto err; | 808 | goto err; |
809 | p = skb_pull(skb, 2); | 809 | p = skb_pull(skb, 2); |
810 | } | 810 | } |
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index ef58e4128782..6d596ca50cfd 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -88,8 +88,6 @@ struct ppp_file { | |||
88 | #define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp) | 88 | #define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp) |
89 | #define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) | 89 | #define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) |
90 | 90 | ||
91 | #define ROUNDUP(n, x) (((n) + (x) - 1) / (x)) | ||
92 | |||
93 | /* | 91 | /* |
94 | * Data structure describing one ppp unit. | 92 | * Data structure describing one ppp unit. |
95 | * A ppp unit corresponds to a ppp network interface device | 93 | * A ppp unit corresponds to a ppp network interface device |
@@ -1297,7 +1295,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1297 | */ | 1295 | */ |
1298 | fragsize = len; | 1296 | fragsize = len; |
1299 | if (nfree > 1) | 1297 | if (nfree > 1) |
1300 | fragsize = ROUNDUP(fragsize, nfree); | 1298 | fragsize = DIV_ROUND_UP(fragsize, nfree); |
1301 | /* nbigger channels get fragsize bytes, the rest get fragsize-1, | 1299 | /* nbigger channels get fragsize bytes, the rest get fragsize-1, |
1302 | except if nbigger==0, then they all get fragsize. */ | 1300 | except if nbigger==0, then they all get fragsize. */ |
1303 | nbigger = len % nfree; | 1301 | nbigger = len % nfree; |
@@ -1685,7 +1683,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) | |||
1685 | skb_pull_rcsum(skb, 2); | 1683 | skb_pull_rcsum(skb, 2); |
1686 | skb->dev = ppp->dev; | 1684 | skb->dev = ppp->dev; |
1687 | skb->protocol = htons(npindex_to_ethertype[npi]); | 1685 | skb->protocol = htons(npindex_to_ethertype[npi]); |
1688 | skb->mac.raw = skb->data; | 1686 | skb_reset_mac_header(skb); |
1689 | netif_rx(skb); | 1687 | netif_rx(skb); |
1690 | ppp->dev->last_rx = jiffies; | 1688 | ppp->dev->last_rx = jiffies; |
1691 | } | 1689 | } |
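ppp_generic.c drops its private ROUNDUP() macro in favour of the generic DIV_ROUND_UP() from linux/kernel.h, which performs the same ceiling division. A usage sketch for the multilink fragment sizing above; example_fragsize() is an illustrative name:

    #include <linux/kernel.h>

    static unsigned int example_fragsize(unsigned int len, unsigned int nfree)
    {
            /* split len bytes roughly evenly across nfree free channels */
            return nfree > 1 ? DIV_ROUND_UP(len, nfree) : len;
    }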
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index b6f0e9a25e26..5918fab38349 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c | |||
@@ -594,7 +594,8 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) | |||
594 | return NULL; | 594 | return NULL; |
595 | } | 595 | } |
596 | skb_reserve(npkt,2); | 596 | skb_reserve(npkt,2); |
597 | memcpy(skb_put(npkt,skb->len), skb->data, skb->len); | 597 | skb_copy_from_linear_data(skb, |
598 | skb_put(npkt, skb->len), skb->len); | ||
598 | kfree_skb(skb); | 599 | kfree_skb(skb); |
599 | skb = npkt; | 600 | skb = npkt; |
600 | } | 601 | } |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index ebfa2967cd68..6f98834e6ace 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -207,7 +207,7 @@ static inline struct pppox_sock *get_item(unsigned long sid, | |||
207 | 207 | ||
208 | static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) | 208 | static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) |
209 | { | 209 | { |
210 | struct net_device *dev = NULL; | 210 | struct net_device *dev; |
211 | int ifindex; | 211 | int ifindex; |
212 | 212 | ||
213 | dev = dev_get_by_name(sp->sa_addr.pppoe.dev); | 213 | dev = dev_get_by_name(sp->sa_addr.pppoe.dev); |
@@ -218,20 +218,6 @@ static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) | |||
218 | return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); | 218 | return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); |
219 | } | 219 | } |
220 | 220 | ||
221 | static inline int set_item(struct pppox_sock *po) | ||
222 | { | ||
223 | int i; | ||
224 | |||
225 | if (!po) | ||
226 | return -EINVAL; | ||
227 | |||
228 | write_lock_bh(&pppoe_hash_lock); | ||
229 | i = __set_item(po); | ||
230 | write_unlock_bh(&pppoe_hash_lock); | ||
231 | |||
232 | return i; | ||
233 | } | ||
234 | |||
235 | static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex) | 221 | static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex) |
236 | { | 222 | { |
237 | struct pppox_sock *ret; | 223 | struct pppox_sock *ret; |
@@ -255,54 +241,53 @@ static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int | |||
255 | static void pppoe_flush_dev(struct net_device *dev) | 241 | static void pppoe_flush_dev(struct net_device *dev) |
256 | { | 242 | { |
257 | int hash; | 243 | int hash; |
258 | |||
259 | BUG_ON(dev == NULL); | 244 | BUG_ON(dev == NULL); |
260 | 245 | ||
261 | read_lock_bh(&pppoe_hash_lock); | 246 | write_lock_bh(&pppoe_hash_lock); |
262 | for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) { | 247 | for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) { |
263 | struct pppox_sock *po = item_hash_table[hash]; | 248 | struct pppox_sock *po = item_hash_table[hash]; |
264 | 249 | ||
265 | while (po != NULL) { | 250 | while (po != NULL) { |
266 | if (po->pppoe_dev == dev) { | 251 | struct sock *sk = sk_pppox(po); |
267 | struct sock *sk = sk_pppox(po); | 252 | if (po->pppoe_dev != dev) { |
268 | 253 | po = po->next; | |
269 | sock_hold(sk); | 254 | continue; |
270 | po->pppoe_dev = NULL; | 255 | } |
256 | po->pppoe_dev = NULL; | ||
257 | dev_put(dev); | ||
271 | 258 | ||
272 | /* We hold a reference to SK, now drop the | ||
273 | * hash table lock so that we may attempt | ||
274 | * to lock the socket (which can sleep). | ||
275 | */ | ||
276 | read_unlock_bh(&pppoe_hash_lock); | ||
277 | 259 | ||
278 | lock_sock(sk); | 260 | /* We always grab the socket lock, followed by the |
261 | * pppoe_hash_lock, in that order. Since we should | ||
262 | * hold the sock lock while doing any unbinding, | ||
263 | * we need to release the lock we're holding. | ||
264 | * Hold a reference to the sock so it doesn't disappear | ||
265 | * as we're jumping between locks. | ||
266 | */ | ||
279 | 267 | ||
280 | if (sk->sk_state & | 268 | sock_hold(sk); |
281 | (PPPOX_CONNECTED | PPPOX_BOUND)) { | ||
282 | pppox_unbind_sock(sk); | ||
283 | dev_put(dev); | ||
284 | sk->sk_state = PPPOX_ZOMBIE; | ||
285 | sk->sk_state_change(sk); | ||
286 | } | ||
287 | 269 | ||
288 | release_sock(sk); | 270 | write_unlock_bh(&pppoe_hash_lock); |
271 | lock_sock(sk); | ||
289 | 272 | ||
290 | sock_put(sk); | 273 | if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { |
274 | pppox_unbind_sock(sk); | ||
275 | sk->sk_state = PPPOX_ZOMBIE; | ||
276 | sk->sk_state_change(sk); | ||
277 | } | ||
291 | 278 | ||
292 | read_lock_bh(&pppoe_hash_lock); | 279 | release_sock(sk); |
280 | sock_put(sk); | ||
293 | 281 | ||
294 | /* Now restart from the beginning of this | 282 | /* Restart scan at the beginning of this hash chain. |
295 | * hash chain. We always NULL out pppoe_dev | 283 | * While the lock was dropped the chain contents may |
296 | * so we are guaranteed to make forward | 284 | * have changed. |
297 | * progress. | 285 | */ |
298 | */ | 286 | write_lock_bh(&pppoe_hash_lock); |
299 | po = item_hash_table[hash]; | 287 | po = item_hash_table[hash]; |
300 | continue; | ||
301 | } | ||
302 | po = po->next; | ||
303 | } | 288 | } |
304 | } | 289 | } |
305 | read_unlock_bh(&pppoe_hash_lock); | 290 | write_unlock_bh(&pppoe_hash_lock); |
306 | } | 291 | } |
307 | 292 | ||
308 | static int pppoe_device_event(struct notifier_block *this, | 293 | static int pppoe_device_event(struct notifier_block *this, |
@@ -344,10 +329,10 @@ static struct notifier_block pppoe_notifier = { | |||
344 | static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) | 329 | static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) |
345 | { | 330 | { |
346 | struct pppox_sock *po = pppox_sk(sk); | 331 | struct pppox_sock *po = pppox_sk(sk); |
347 | struct pppox_sock *relay_po = NULL; | 332 | struct pppox_sock *relay_po; |
348 | 333 | ||
349 | if (sk->sk_state & PPPOX_BOUND) { | 334 | if (sk->sk_state & PPPOX_BOUND) { |
350 | struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw; | 335 | struct pppoe_hdr *ph = pppoe_hdr(skb); |
351 | int len = ntohs(ph->length); | 336 | int len = ntohs(ph->length); |
352 | skb_pull_rcsum(skb, sizeof(struct pppoe_hdr)); | 337 | skb_pull_rcsum(skb, sizeof(struct pppoe_hdr)); |
353 | if (pskb_trim_rcsum(skb, len)) | 338 | if (pskb_trim_rcsum(skb, len)) |
@@ -401,7 +386,7 @@ static int pppoe_rcv(struct sk_buff *skb, | |||
401 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) | 386 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) |
402 | goto out; | 387 | goto out; |
403 | 388 | ||
404 | ph = (struct pppoe_hdr *) skb->nh.raw; | 389 | ph = pppoe_hdr(skb); |
405 | 390 | ||
406 | po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); | 391 | po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); |
407 | if (po != NULL) | 392 | if (po != NULL) |
@@ -433,7 +418,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb, | |||
433 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) | 418 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) |
434 | goto out; | 419 | goto out; |
435 | 420 | ||
436 | ph = (struct pppoe_hdr *) skb->nh.raw; | 421 | ph = pppoe_hdr(skb); |
437 | if (ph->code != PADT_CODE) | 422 | if (ph->code != PADT_CODE) |
438 | goto abort; | 423 | goto abort; |
439 | 424 | ||
@@ -514,36 +499,49 @@ static int pppoe_release(struct socket *sock) | |||
514 | { | 499 | { |
515 | struct sock *sk = sock->sk; | 500 | struct sock *sk = sock->sk; |
516 | struct pppox_sock *po; | 501 | struct pppox_sock *po; |
517 | int error = 0; | ||
518 | 502 | ||
519 | if (!sk) | 503 | if (!sk) |
520 | return 0; | 504 | return 0; |
521 | 505 | ||
522 | if (sock_flag(sk, SOCK_DEAD)) | 506 | lock_sock(sk); |
507 | if (sock_flag(sk, SOCK_DEAD)){ | ||
508 | release_sock(sk); | ||
523 | return -EBADF; | 509 | return -EBADF; |
510 | } | ||
524 | 511 | ||
525 | pppox_unbind_sock(sk); | 512 | pppox_unbind_sock(sk); |
526 | 513 | ||
527 | /* Signal the death of the socket. */ | 514 | /* Signal the death of the socket. */ |
528 | sk->sk_state = PPPOX_DEAD; | 515 | sk->sk_state = PPPOX_DEAD; |
529 | 516 | ||
517 | |||
518 | /* Write lock on hash lock protects the entire "po" struct from | ||
519 | * concurrent updates via pppoe_flush_dev. The "po" struct should | ||
520 | * be considered part of the hash table contents, thus protected | ||
521 | * by the hash table lock */ | ||
522 | write_lock_bh(&pppoe_hash_lock); | ||
523 | |||
530 | po = pppox_sk(sk); | 524 | po = pppox_sk(sk); |
531 | if (po->pppoe_pa.sid) { | 525 | if (po->pppoe_pa.sid) { |
532 | delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); | 526 | __delete_item(po->pppoe_pa.sid, |
527 | po->pppoe_pa.remote, po->pppoe_ifindex); | ||
533 | } | 528 | } |
534 | 529 | ||
535 | if (po->pppoe_dev) | 530 | if (po->pppoe_dev) { |
536 | dev_put(po->pppoe_dev); | 531 | dev_put(po->pppoe_dev); |
532 | po->pppoe_dev = NULL; | ||
533 | } | ||
537 | 534 | ||
538 | po->pppoe_dev = NULL; | 535 | write_unlock_bh(&pppoe_hash_lock); |
539 | 536 | ||
540 | sock_orphan(sk); | 537 | sock_orphan(sk); |
541 | sock->sk = NULL; | 538 | sock->sk = NULL; |
542 | 539 | ||
543 | skb_queue_purge(&sk->sk_receive_queue); | 540 | skb_queue_purge(&sk->sk_receive_queue); |
541 | release_sock(sk); | ||
544 | sock_put(sk); | 542 | sock_put(sk); |
545 | 543 | ||
546 | return error; | 544 | return 0; |
547 | } | 545 | } |
548 | 546 | ||
549 | 547 | ||
@@ -599,14 +597,18 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
599 | po->pppoe_dev = dev; | 597 | po->pppoe_dev = dev; |
600 | po->pppoe_ifindex = dev->ifindex; | 598 | po->pppoe_ifindex = dev->ifindex; |
601 | 599 | ||
602 | if (!(dev->flags & IFF_UP)) | 600 | write_lock_bh(&pppoe_hash_lock); |
601 | if (!(dev->flags & IFF_UP)){ | ||
602 | write_unlock_bh(&pppoe_hash_lock); | ||
603 | goto err_put; | 603 | goto err_put; |
604 | } | ||
604 | 605 | ||
605 | memcpy(&po->pppoe_pa, | 606 | memcpy(&po->pppoe_pa, |
606 | &sp->sa_addr.pppoe, | 607 | &sp->sa_addr.pppoe, |
607 | sizeof(struct pppoe_addr)); | 608 | sizeof(struct pppoe_addr)); |
608 | 609 | ||
609 | error = set_item(po); | 610 | error = __set_item(po); |
611 | write_unlock_bh(&pppoe_hash_lock); | ||
610 | if (error < 0) | 612 | if (error < 0) |
611 | goto err_put; | 613 | goto err_put; |
612 | 614 | ||
@@ -762,10 +764,10 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, | |||
762 | static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, | 764 | static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, |
763 | struct msghdr *m, size_t total_len) | 765 | struct msghdr *m, size_t total_len) |
764 | { | 766 | { |
765 | struct sk_buff *skb = NULL; | 767 | struct sk_buff *skb; |
766 | struct sock *sk = sock->sk; | 768 | struct sock *sk = sock->sk; |
767 | struct pppox_sock *po = pppox_sk(sk); | 769 | struct pppox_sock *po = pppox_sk(sk); |
768 | int error = 0; | 770 | int error; |
769 | struct pppoe_hdr hdr; | 771 | struct pppoe_hdr hdr; |
770 | struct pppoe_hdr *ph; | 772 | struct pppoe_hdr *ph; |
771 | struct net_device *dev; | 773 | struct net_device *dev; |
@@ -799,7 +801,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
799 | 801 | ||
800 | /* Reserve space for headers. */ | 802 | /* Reserve space for headers. */ |
801 | skb_reserve(skb, dev->hard_header_len); | 803 | skb_reserve(skb, dev->hard_header_len); |
802 | skb->nh.raw = skb->data; | 804 | skb_reset_network_header(skb); |
803 | 805 | ||
804 | skb->dev = dev; | 806 | skb->dev = dev; |
805 | 807 | ||
@@ -869,7 +871,8 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) | |||
869 | goto abort; | 871 | goto abort; |
870 | 872 | ||
871 | skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr)); | 873 | skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr)); |
872 | memcpy(skb_put(skb2, skb->len), skb->data, skb->len); | 874 | skb_copy_from_linear_data(skb, skb_put(skb2, skb->len), |
875 | skb->len); | ||
873 | } else { | 876 | } else { |
874 | /* Make a clone so as to not disturb the original skb, | 877 | /* Make a clone so as to not disturb the original skb, |
875 | * give dev_queue_xmit something it can free. | 878 | * give dev_queue_xmit something it can free. |
@@ -884,7 +887,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) | |||
884 | memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); | 887 | memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); |
885 | skb2->protocol = __constant_htons(ETH_P_PPP_SES); | 888 | skb2->protocol = __constant_htons(ETH_P_PPP_SES); |
886 | 889 | ||
887 | skb2->nh.raw = skb2->data; | 890 | skb_reset_network_header(skb2); |
888 | 891 | ||
889 | skb2->dev = dev; | 892 | skb2->dev = dev; |
890 | 893 | ||
@@ -929,10 +932,8 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
929 | struct msghdr *m, size_t total_len, int flags) | 932 | struct msghdr *m, size_t total_len, int flags) |
930 | { | 933 | { |
931 | struct sock *sk = sock->sk; | 934 | struct sock *sk = sock->sk; |
932 | struct sk_buff *skb = NULL; | 935 | struct sk_buff *skb; |
933 | int error = 0; | 936 | int error = 0; |
934 | int len; | ||
935 | struct pppoe_hdr *ph = NULL; | ||
936 | 937 | ||
937 | if (sk->sk_state & PPPOX_BOUND) { | 938 | if (sk->sk_state & PPPOX_BOUND) { |
938 | error = -EIO; | 939 | error = -EIO; |
@@ -942,26 +943,21 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
942 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 943 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
943 | flags & MSG_DONTWAIT, &error); | 944 | flags & MSG_DONTWAIT, &error); |
944 | 945 | ||
945 | if (error < 0) { | 946 | if (error < 0) |
946 | goto end; | 947 | goto end; |
947 | } | ||
948 | 948 | ||
949 | m->msg_namelen = 0; | 949 | m->msg_namelen = 0; |
950 | 950 | ||
951 | if (skb) { | 951 | if (skb) { |
952 | error = 0; | 952 | struct pppoe_hdr *ph = pppoe_hdr(skb); |
953 | ph = (struct pppoe_hdr *) skb->nh.raw; | 953 | const int len = ntohs(ph->length); |
954 | len = ntohs(ph->length); | ||
955 | 954 | ||
956 | error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len); | 955 | error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len); |
957 | if (error < 0) | 956 | if (error == 0) |
958 | goto do_skb_free; | 957 | error = len; |
959 | error = len; | ||
960 | } | 958 | } |
961 | 959 | ||
962 | do_skb_free: | 960 | kfree_skb(skb); |
963 | if (skb) | ||
964 | kfree_skb(skb); | ||
965 | end: | 961 | end: |
966 | return error; | 962 | return error; |
967 | } | 963 | } |
@@ -991,7 +987,7 @@ out: | |||
991 | 987 | ||
992 | static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos) | 988 | static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos) |
993 | { | 989 | { |
994 | struct pppox_sock *po = NULL; | 990 | struct pppox_sock *po; |
995 | int i = 0; | 991 | int i = 0; |
996 | 992 | ||
997 | for (; i < PPPOE_HASH_SIZE; i++) { | 993 | for (; i < PPPOE_HASH_SIZE; i++) { |
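
Most of the mechanical churn in this series comes from the sk_buff layout rework: the old nh/h/mac unions are gone and drivers go through accessor helpers instead. As a rough orientation only (not part of this patch set, simplified to the pointer-based layout; the NET_SKBUFF_DATA_USES_OFFSET variant differs), the helpers used in the pppoe hunks above behave approximately like this:

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;	/* was: skb->nh.raw = skb->data */
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;		/* was: skb->mac.raw = skb->data */
}

static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
{
	return (struct iphdr *)skb_network_header(skb);	/* was: skb->nh.iph */
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;	/* was: skb->h.raw - skb->data */
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to, const unsigned int len)
{
	memcpy(to, skb->data, len);	/* makes the linear-data access explicit */
}
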
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c index 9315046b3f55..3f8115db4d54 100644 --- a/drivers/net/pppox.c +++ b/drivers/net/pppox.c | |||
@@ -58,7 +58,7 @@ void pppox_unbind_sock(struct sock *sk) | |||
58 | { | 58 | { |
59 | /* Clear connection to ppp device, if attached. */ | 59 | /* Clear connection to ppp device, if attached. */ |
60 | 60 | ||
61 | if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE)) { | 61 | if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED | PPPOX_ZOMBIE)) { |
62 | ppp_unregister_channel(&pppox_sk(sk)->chan); | 62 | ppp_unregister_channel(&pppox_sk(sk)->chan); |
63 | sk->sk_state = PPPOX_DEAD; | 63 | sk->sk_state = PPPOX_DEAD; |
64 | } | 64 | } |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index a8246eb2f8d9..7b80fb7a9d9b 100755 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -1873,7 +1873,6 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, | |||
1873 | pci_unmap_len(lrg_buf_cb2, maplen), | 1873 | pci_unmap_len(lrg_buf_cb2, maplen), |
1874 | PCI_DMA_FROMDEVICE); | 1874 | PCI_DMA_FROMDEVICE); |
1875 | prefetch(skb->data); | 1875 | prefetch(skb->data); |
1876 | skb->dev = qdev->ndev; | ||
1877 | skb->ip_summed = CHECKSUM_NONE; | 1876 | skb->ip_summed = CHECKSUM_NONE; |
1878 | skb->protocol = eth_type_trans(skb, qdev->ndev); | 1877 | skb->protocol = eth_type_trans(skb, qdev->ndev); |
1879 | 1878 | ||
@@ -1928,7 +1927,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, | |||
1928 | * Copy the ethhdr from first buffer to second. This | 1927 | * Copy the ethhdr from first buffer to second. This |
1929 | * is necessary for 3022 IP completions. | 1928 | * is necessary for 3022 IP completions. |
1930 | */ | 1929 | */ |
1931 | memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size); | 1930 | skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, |
1931 | skb_push(skb2, size), size); | ||
1932 | } else { | 1932 | } else { |
1933 | u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); | 1933 | u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); |
1934 | if (checksum & | 1934 | if (checksum & |
@@ -1946,7 +1946,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, | |||
1946 | skb2->ip_summed = CHECKSUM_UNNECESSARY; | 1946 | skb2->ip_summed = CHECKSUM_UNNECESSARY; |
1947 | } | 1947 | } |
1948 | } | 1948 | } |
1949 | skb2->dev = qdev->ndev; | ||
1950 | skb2->protocol = eth_type_trans(skb2, qdev->ndev); | 1949 | skb2->protocol = eth_type_trans(skb2, qdev->ndev); |
1951 | 1950 | ||
1952 | netif_receive_skb(skb2); | 1951 | netif_receive_skb(skb2); |
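
The other recurring deletion, here and in the drivers below, is the explicit "skb->dev = ndev;" before eth_type_trans(): the helper records the receiving device itself, so the assignment was redundant. An abridged sketch of what eth_type_trans() does on entry (simplified; the real function also handles multicast classification and raw 802.3/802.2 frames):

__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;

	skb->dev = dev;			/* receiving device is recorded here */
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);
	eth = eth_hdr(skb);

	if (ntohs(eth->h_proto) >= 1536)	/* Ethernet II frame */
		return eth->h_proto;

	return htons(ETH_P_802_2);	/* simplified: 802.3/802.2 handling omitted */
}
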
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 6a77b8a92245..45876a854f00 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -2284,7 +2284,7 @@ static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev) | |||
2284 | return LargeSend | ((mss & MSSMask) << MSSShift); | 2284 | return LargeSend | ((mss & MSSMask) << MSSShift); |
2285 | } | 2285 | } |
2286 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2286 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2287 | const struct iphdr *ip = skb->nh.iph; | 2287 | const struct iphdr *ip = ip_hdr(skb); |
2288 | 2288 | ||
2289 | if (ip->protocol == IPPROTO_TCP) | 2289 | if (ip->protocol == IPPROTO_TCP) |
2290 | return IPCS | TCPCS; | 2290 | return IPCS | TCPCS; |
@@ -2586,7 +2586,6 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp, | |||
2586 | pci_action(tp->pci_dev, le64_to_cpu(desc->addr), | 2586 | pci_action(tp->pci_dev, le64_to_cpu(desc->addr), |
2587 | tp->rx_buf_sz, PCI_DMA_FROMDEVICE); | 2587 | tp->rx_buf_sz, PCI_DMA_FROMDEVICE); |
2588 | 2588 | ||
2589 | skb->dev = dev; | ||
2590 | skb_put(skb, pkt_size); | 2589 | skb_put(skb, pkt_size); |
2591 | skb->protocol = eth_type_trans(skb, dev); | 2590 | skb->protocol = eth_type_trans(skb, dev); |
2592 | 2591 | ||
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index b7ff484af3e1..df6b73872fdb 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c | |||
@@ -115,7 +115,6 @@ static int rionet_rx_clean(struct net_device *ndev) | |||
115 | 115 | ||
116 | rnet->rx_skb[i]->data = data; | 116 | rnet->rx_skb[i]->data = data; |
117 | skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE); | 117 | skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE); |
118 | rnet->rx_skb[i]->dev = ndev; | ||
119 | rnet->rx_skb[i]->protocol = | 118 | rnet->rx_skb[i]->protocol = |
120 | eth_type_trans(rnet->rx_skb[i], ndev); | 119 | eth_type_trans(rnet->rx_skb[i], ndev); |
121 | error = netif_rx(rnet->rx_skb[i]); | 120 | error = netif_rx(rnet->rx_skb[i]); |
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c index d81536f90df6..25c73d47daad 100644 --- a/drivers/net/rrunner.c +++ b/drivers/net/rrunner.c | |||
@@ -1029,7 +1029,6 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) | |||
1029 | goto defer; | 1029 | goto defer; |
1030 | } | 1030 | } |
1031 | } | 1031 | } |
1032 | skb->dev = dev; | ||
1033 | skb->protocol = hippi_type_trans(skb, dev); | 1032 | skb->protocol = hippi_type_trans(skb, dev); |
1034 | 1033 | ||
1035 | netif_rx(skb); /* send it up */ | 1034 | netif_rx(skb); /* send it up */ |
@@ -1452,7 +1451,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1452 | } | 1451 | } |
1453 | skb_reserve(new_skb, 8); | 1452 | skb_reserve(new_skb, 8); |
1454 | skb_put(new_skb, len); | 1453 | skb_put(new_skb, len); |
1455 | memcpy(new_skb->data, skb->data, len); | 1454 | skb_copy_from_linear_data(skb, new_skb->data, len); |
1456 | dev_kfree_skb(skb); | 1455 | dev_kfree_skb(skb); |
1457 | skb = new_skb; | 1456 | skb = new_skb; |
1458 | } | 1457 | } |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 46ebf141ee5a..600d3ff347fc 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -2195,7 +2195,7 @@ static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \ | |||
2195 | frag_list->next = NULL; | 2195 | frag_list->next = NULL; |
2196 | tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); | 2196 | tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); |
2197 | frag_list->data = tmp; | 2197 | frag_list->data = tmp; |
2198 | frag_list->tail = tmp; | 2198 | skb_reset_tail_pointer(frag_list); |
2199 | 2199 | ||
2200 | /* Buffer-2 receives L4 data payload */ | 2200 | /* Buffer-2 receives L4 data payload */ |
2201 | ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, | 2201 | ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, |
@@ -2349,7 +2349,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2349 | tmp += ALIGN_SIZE; | 2349 | tmp += ALIGN_SIZE; |
2350 | tmp &= ~ALIGN_SIZE; | 2350 | tmp &= ~ALIGN_SIZE; |
2351 | skb->data = (void *) (unsigned long)tmp; | 2351 | skb->data = (void *) (unsigned long)tmp; |
2352 | skb->tail = (void *) (unsigned long)tmp; | 2352 | skb_reset_tail_pointer(skb); |
2353 | 2353 | ||
2354 | if (!(((struct RxD3*)rxdp)->Buffer0_ptr)) | 2354 | if (!(((struct RxD3*)rxdp)->Buffer0_ptr)) |
2355 | ((struct RxD3*)rxdp)->Buffer0_ptr = | 2355 | ((struct RxD3*)rxdp)->Buffer0_ptr = |
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c index 143958f1ef0a..ad94358ece89 100644 --- a/drivers/net/saa9730.c +++ b/drivers/net/saa9730.c | |||
@@ -688,7 +688,6 @@ static int lan_saa9730_rx(struct net_device *dev) | |||
688 | } else { | 688 | } else { |
689 | lp->stats.rx_bytes += len; | 689 | lp->stats.rx_bytes += len; |
690 | lp->stats.rx_packets++; | 690 | lp->stats.rx_packets++; |
691 | skb->dev = dev; | ||
692 | skb_reserve(skb, 2); /* 16 byte align */ | 691 | skb_reserve(skb, 2); /* 16 byte align */ |
693 | skb_put(skb, len); /* make room */ | 692 | skb_put(skb, len); /* make room */ |
694 | eth_copy_and_sum(skb, | 693 | eth_copy_and_sum(skb, |
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index b9fa4fbb1398..1de3eec1a792 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c | |||
@@ -834,7 +834,7 @@ printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[3 | |||
834 | goto dropped_frame; | 834 | goto dropped_frame; |
835 | } | 835 | } |
836 | skb->dev = dev; | 836 | skb->dev = dev; |
837 | skb->mac.raw = skb->data; | 837 | skb_reset_mac_header(skb); |
838 | skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16]; | 838 | skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16]; |
839 | insw(ioaddr, skb_put(skb, NewDatagramDataSize), | 839 | insw(ioaddr, skb_put(skb, NewDatagramDataSize), |
840 | NewDatagramDataSize / 2); | 840 | NewDatagramDataSize / 2); |
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 103c3174ab54..0a3a379b634c 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -933,9 +933,6 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) | |||
933 | } | 933 | } |
934 | 934 | ||
935 | sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); | 935 | sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); |
936 | |||
937 | /* mark skbuff owned by our device */ | ||
938 | sb_new->dev = d->sbdma_eth->sbm_dev; | ||
939 | } | 936 | } |
940 | else { | 937 | else { |
941 | sb_new = sb; | 938 | sb_new = sb; |
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index c32c21af3fdd..5b7284c955dc 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c | |||
@@ -814,7 +814,6 @@ static void _sc92031_rx_tasklet(struct net_device *dev) | |||
814 | memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size); | 814 | memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size); |
815 | } | 815 | } |
816 | 816 | ||
817 | skb->dev = dev; | ||
818 | skb->protocol = eth_type_trans(skb, dev); | 817 | skb->protocol = eth_type_trans(skb, dev); |
819 | dev->last_rx = jiffies; | 818 | dev->last_rx = jiffies; |
820 | netif_rx(skb); | 819 | netif_rx(skb); |
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index 0d6c95c7aedf..4bce7c4f373c 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c | |||
@@ -550,7 +550,6 @@ static void seeq8005_rx(struct net_device *dev) | |||
550 | lp->stats.rx_dropped++; | 550 | lp->stats.rx_dropped++; |
551 | break; | 551 | break; |
552 | } | 552 | } |
553 | skb->dev = dev; | ||
554 | skb_reserve(skb, 2); /* align data on 16 byte */ | 553 | skb_reserve(skb, 2); /* align data on 16 byte */ |
555 | buf = skb_put(skb,pkt_len); | 554 | buf = skb_put(skb,pkt_len); |
556 | 555 | ||
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index 52ed522a234c..d8c9c5d66d4f 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c | |||
@@ -318,7 +318,6 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp | |||
318 | skb = dev_alloc_skb(len + 2); | 318 | skb = dev_alloc_skb(len + 2); |
319 | 319 | ||
320 | if (skb) { | 320 | if (skb) { |
321 | skb->dev = dev; | ||
322 | skb_reserve(skb, 2); | 321 | skb_reserve(skb, 2); |
323 | skb_put(skb, len); | 322 | skb_put(skb, len); |
324 | 323 | ||
@@ -535,7 +534,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
535 | * entry and the HPC got to the end of the chain before we | 534 | * entry and the HPC got to the end of the chain before we |
536 | * added this new entry and restarted it. | 535 | * added this new entry and restarted it. |
537 | */ | 536 | */ |
538 | memcpy((char *)(long)td->buf_vaddr, skb->data, skblen); | 537 | skb_copy_from_linear_data(skb, (char *)(long)td->buf_vaddr, skblen); |
539 | if (len != skblen) | 538 | if (len != skblen) |
540 | memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen); | 539 | memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen); |
541 | td->tdma.cntinfo = (len & HPCDMA_BCNT) | | 540 | td->tdma.cntinfo = (len & HPCDMA_BCNT) | |
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index 34463ce6f132..bc8de48da313 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
@@ -632,7 +632,6 @@ static int sis190_rx_interrupt(struct net_device *dev, | |||
632 | pci_action(tp->pci_dev, le32_to_cpu(desc->addr), | 632 | pci_action(tp->pci_dev, le32_to_cpu(desc->addr), |
633 | tp->rx_buf_sz, PCI_DMA_FROMDEVICE); | 633 | tp->rx_buf_sz, PCI_DMA_FROMDEVICE); |
634 | 634 | ||
635 | skb->dev = dev; | ||
636 | skb_put(skb, pkt_size); | 635 | skb_put(skb, pkt_size); |
637 | skb->protocol = eth_type_trans(skb, dev); | 636 | skb->protocol = eth_type_trans(skb, dev); |
638 | 637 | ||
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index b3750f284279..dea0126723da 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -1160,7 +1160,6 @@ sis900_init_rx_ring(struct net_device *net_dev) | |||
1160 | buffer */ | 1160 | buffer */ |
1161 | break; | 1161 | break; |
1162 | } | 1162 | } |
1163 | skb->dev = net_dev; | ||
1164 | sis_priv->rx_skbuff[i] = skb; | 1163 | sis_priv->rx_skbuff[i] = skb; |
1165 | sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE; | 1164 | sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE; |
1166 | sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev, | 1165 | sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev, |
@@ -1755,6 +1754,24 @@ static int sis900_rx(struct net_device *net_dev) | |||
1755 | } else { | 1754 | } else { |
1756 | struct sk_buff * skb; | 1755 | struct sk_buff * skb; |
1757 | 1756 | ||
1757 | pci_unmap_single(sis_priv->pci_dev, | ||
1758 | sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE, | ||
1759 | PCI_DMA_FROMDEVICE); | ||
1760 | |||
1761 | /* refill the Rx buffer, what if there is not enought | ||
1762 | * memory for new socket buffer ?? */ | ||
1763 | if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) { | ||
1764 | /* | ||
1765 | * Not enough memory to refill the buffer | ||
1766 | * so we need to recycle the old one so | ||
1767 | * as to avoid creating a memory hole | ||
1768 | * in the rx ring | ||
1769 | */ | ||
1770 | skb = sis_priv->rx_skbuff[entry]; | ||
1771 | sis_priv->stats.rx_dropped++; | ||
1772 | goto refill_rx_ring; | ||
1773 | } | ||
1774 | |||
1758 | /* This situation should never happen, but due to | 1775 | /* This situation should never happen, but due to |
1759 | some unknow bugs, it is possible that | 1776 | some unknow bugs, it is possible that |
1760 | we are working on NULL sk_buff :-( */ | 1777 | we are working on NULL sk_buff :-( */ |
@@ -1768,9 +1785,6 @@ static int sis900_rx(struct net_device *net_dev) | |||
1768 | break; | 1785 | break; |
1769 | } | 1786 | } |
1770 | 1787 | ||
1771 | pci_unmap_single(sis_priv->pci_dev, | ||
1772 | sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE, | ||
1773 | PCI_DMA_FROMDEVICE); | ||
1774 | /* give the socket buffer to upper layers */ | 1788 | /* give the socket buffer to upper layers */ |
1775 | skb = sis_priv->rx_skbuff[entry]; | 1789 | skb = sis_priv->rx_skbuff[entry]; |
1776 | skb_put(skb, rx_size); | 1790 | skb_put(skb, rx_size); |
@@ -1783,33 +1797,13 @@ static int sis900_rx(struct net_device *net_dev) | |||
1783 | net_dev->last_rx = jiffies; | 1797 | net_dev->last_rx = jiffies; |
1784 | sis_priv->stats.rx_bytes += rx_size; | 1798 | sis_priv->stats.rx_bytes += rx_size; |
1785 | sis_priv->stats.rx_packets++; | 1799 | sis_priv->stats.rx_packets++; |
1786 | 1800 | sis_priv->dirty_rx++; | |
1787 | /* refill the Rx buffer, what if there is not enought | 1801 | refill_rx_ring: |
1788 | * memory for new socket buffer ?? */ | ||
1789 | if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) { | ||
1790 | /* not enough memory for skbuff, this makes a | ||
1791 | * "hole" on the buffer ring, it is not clear | ||
1792 | * how the hardware will react to this kind | ||
1793 | * of degenerated buffer */ | ||
1794 | if (netif_msg_rx_status(sis_priv)) | ||
1795 | printk(KERN_INFO "%s: Memory squeeze," | ||
1796 | "deferring packet.\n", | ||
1797 | net_dev->name); | ||
1798 | sis_priv->rx_skbuff[entry] = NULL; | ||
1799 | /* reset buffer descriptor state */ | ||
1800 | sis_priv->rx_ring[entry].cmdsts = 0; | ||
1801 | sis_priv->rx_ring[entry].bufptr = 0; | ||
1802 | sis_priv->stats.rx_dropped++; | ||
1803 | sis_priv->cur_rx++; | ||
1804 | break; | ||
1805 | } | ||
1806 | skb->dev = net_dev; | ||
1807 | sis_priv->rx_skbuff[entry] = skb; | 1802 | sis_priv->rx_skbuff[entry] = skb; |
1808 | sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; | 1803 | sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; |
1809 | sis_priv->rx_ring[entry].bufptr = | 1804 | sis_priv->rx_ring[entry].bufptr = |
1810 | pci_map_single(sis_priv->pci_dev, skb->data, | 1805 | pci_map_single(sis_priv->pci_dev, skb->data, |
1811 | RX_BUF_SIZE, PCI_DMA_FROMDEVICE); | 1806 | RX_BUF_SIZE, PCI_DMA_FROMDEVICE); |
1812 | sis_priv->dirty_rx++; | ||
1813 | } | 1807 | } |
1814 | sis_priv->cur_rx++; | 1808 | sis_priv->cur_rx++; |
1815 | entry = sis_priv->cur_rx % NUM_RX_DESC; | 1809 | entry = sis_priv->cur_rx % NUM_RX_DESC; |
@@ -1836,7 +1830,6 @@ static int sis900_rx(struct net_device *net_dev) | |||
1836 | sis_priv->stats.rx_dropped++; | 1830 | sis_priv->stats.rx_dropped++; |
1837 | break; | 1831 | break; |
1838 | } | 1832 | } |
1839 | skb->dev = net_dev; | ||
1840 | sis_priv->rx_skbuff[entry] = skb; | 1833 | sis_priv->rx_skbuff[entry] = skb; |
1841 | sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; | 1834 | sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; |
1842 | sis_priv->rx_ring[entry].bufptr = | 1835 | sis_priv->rx_ring[entry].bufptr = |
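
The sis900 receive path above is reordered so the replacement buffer is allocated before the filled one is handed to the stack; if the allocation fails, the old skb is recycled rather than leaving an unfilled descriptor in the ring. A condensed sketch of the resulting flow (names as in the hunks above, rx_skb introduced here for clarity; the NULL-skb sanity check and most statistics are omitted):

pci_unmap_single(sis_priv->pci_dev, sis_priv->rx_ring[entry].bufptr,
		 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);

skb = dev_alloc_skb(RX_BUF_SIZE);
if (skb == NULL) {
	/* No memory for a replacement: recycle the old buffer so the
	 * Rx ring never ends up with a hole in it. */
	skb = sis_priv->rx_skbuff[entry];
	sis_priv->stats.rx_dropped++;
	goto refill_rx_ring;
}

/* Hand the filled buffer to the stack ... */
rx_skb = sis_priv->rx_skbuff[entry];
skb_put(rx_skb, rx_size);
rx_skb->protocol = eth_type_trans(rx_skb, net_dev);
netif_rx(rx_skb);
sis_priv->dirty_rx++;

refill_rx_ring:
/* ... and park the new (or recycled) one in the descriptor. */
sis_priv->rx_skbuff[entry] = skb;
sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
sis_priv->rx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev, skb->data,
						 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
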
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c index e94ab256b540..e0a93005e6dc 100644 --- a/drivers/net/sk98lin/skge.c +++ b/drivers/net/sk98lin/skge.c | |||
@@ -1562,10 +1562,10 @@ struct sk_buff *pMessage) /* pointer to send-message */ | |||
1562 | pTxd->pMBuf = pMessage; | 1562 | pTxd->pMBuf = pMessage; |
1563 | 1563 | ||
1564 | if (pMessage->ip_summed == CHECKSUM_PARTIAL) { | 1564 | if (pMessage->ip_summed == CHECKSUM_PARTIAL) { |
1565 | u16 hdrlen = pMessage->h.raw - pMessage->data; | 1565 | u16 hdrlen = skb_transport_offset(pMessage); |
1566 | u16 offset = hdrlen + pMessage->csum_offset; | 1566 | u16 offset = hdrlen + pMessage->csum_offset; |
1567 | 1567 | ||
1568 | if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) && | 1568 | if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) && |
1569 | (pAC->GIni.GIChipRev == 0) && | 1569 | (pAC->GIni.GIChipRev == 0) && |
1570 | (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { | 1570 | (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { |
1571 | pTxd->TBControl = BMU_TCP_CHECK; | 1571 | pTxd->TBControl = BMU_TCP_CHECK; |
@@ -1681,7 +1681,7 @@ struct sk_buff *pMessage) /* pointer to send-message */ | |||
1681 | ** Does the HW need to evaluate checksum for TCP or UDP packets? | 1681 | ** Does the HW need to evaluate checksum for TCP or UDP packets? |
1682 | */ | 1682 | */ |
1683 | if (pMessage->ip_summed == CHECKSUM_PARTIAL) { | 1683 | if (pMessage->ip_summed == CHECKSUM_PARTIAL) { |
1684 | u16 hdrlen = pMessage->h.raw - pMessage->data; | 1684 | u16 hdrlen = skb_transport_offset(pMessage); |
1685 | u16 offset = hdrlen + pMessage->csum_offset; | 1685 | u16 offset = hdrlen + pMessage->csum_offset; |
1686 | 1686 | ||
1687 | Control = BMU_STFWD; | 1687 | Control = BMU_STFWD; |
@@ -1691,7 +1691,7 @@ struct sk_buff *pMessage) /* pointer to send-message */ | |||
1691 | ** opcode for udp is not working in the hardware yet | 1691 | ** opcode for udp is not working in the hardware yet |
1692 | ** (Revision 2.0) | 1692 | ** (Revision 2.0) |
1693 | */ | 1693 | */ |
1694 | if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) && | 1694 | if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) && |
1695 | (pAC->GIni.GIChipRev == 0) && | 1695 | (pAC->GIni.GIChipRev == 0) && |
1696 | (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { | 1696 | (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { |
1697 | Control |= BMU_TCP_CHECK; | 1697 | Control |= BMU_TCP_CHECK; |
@@ -2127,7 +2127,7 @@ rx_start: | |||
2127 | (dma_addr_t) PhysAddr, | 2127 | (dma_addr_t) PhysAddr, |
2128 | FrameLength, | 2128 | FrameLength, |
2129 | PCI_DMA_FROMDEVICE); | 2129 | PCI_DMA_FROMDEVICE); |
2130 | memcpy(pNewMsg->data, pMsg, FrameLength); | 2130 | skb_copy_to_linear_data(pNewMsg, pMsg, FrameLength); |
2131 | 2131 | ||
2132 | pci_dma_sync_single_for_device(pAC->PciDev, | 2132 | pci_dma_sync_single_for_device(pAC->PciDev, |
2133 | (dma_addr_t) PhysAddr, | 2133 | (dma_addr_t) PhysAddr, |
@@ -2193,7 +2193,6 @@ rx_start: | |||
2193 | SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC, | 2193 | SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC, |
2194 | FrameLength, pRxPort->PortIndex); | 2194 | FrameLength, pRxPort->PortIndex); |
2195 | 2195 | ||
2196 | pMsg->dev = pAC->dev[pRxPort->PortIndex]; | ||
2197 | pMsg->protocol = eth_type_trans(pMsg, | 2196 | pMsg->protocol = eth_type_trans(pMsg, |
2198 | pAC->dev[pRxPort->PortIndex]); | 2197 | pAC->dev[pRxPort->PortIndex]); |
2199 | netif_rx(pMsg); | 2198 | netif_rx(pMsg); |
@@ -2246,7 +2245,6 @@ rx_start: | |||
2246 | (IFF_PROMISC | IFF_ALLMULTI)) != 0 || | 2245 | (IFF_PROMISC | IFF_ALLMULTI)) != 0 || |
2247 | (ForRlmt & SK_RLMT_RX_PROTOCOL) == | 2246 | (ForRlmt & SK_RLMT_RX_PROTOCOL) == |
2248 | SK_RLMT_RX_PROTOCOL) { | 2247 | SK_RLMT_RX_PROTOCOL) { |
2249 | pMsg->dev = pAC->dev[pRxPort->PortIndex]; | ||
2250 | pMsg->protocol = eth_type_trans(pMsg, | 2248 | pMsg->protocol = eth_type_trans(pMsg, |
2251 | pAC->dev[pRxPort->PortIndex]); | 2249 | pAC->dev[pRxPort->PortIndex]); |
2252 | netif_rx(pMsg); | 2250 | netif_rx(pMsg); |
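
For the checksum-offload conversions above (and the equivalent ones in skge.c and sky2.c further down), the two values the hardware needs are both offsets from skb->data: where the L4 header starts and where the finished checksum must be written back. Annotated sketch using the names from the hunk above; the descriptor fields the values are programmed into are not shown in this diff and are omitted here:

if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
	/* start of the TCP/UDP header (was: pMessage->h.raw - pMessage->data) */
	u16 hdrlen = skb_transport_offset(pMessage);
	/* position of the 16-bit checksum field inside that header */
	u16 offset = hdrlen + pMessage->csum_offset;

	/* Yukon rev 0 quirk: the UDP checksum opcode does not work,
	 * hence the ipip_hdr() protocol test forcing the TCP opcode. */
	if (ipip_hdr(pMessage)->protocol == IPPROTO_UDP &&
	    pAC->GIni.GIChipRev == 0 &&
	    pAC->GIni.GIChipId == CHIP_ID_YUKON)
		pTxd->TBControl = BMU_TCP_CHECK;
}
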
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index 9733a11c6146..a7ef6c8b7721 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c | |||
@@ -1680,7 +1680,6 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd, | |||
1680 | rxd->rxd_os.skb = NULL; | 1680 | rxd->rxd_os.skb = NULL; |
1681 | skb_trim(skb, len); | 1681 | skb_trim(skb, len); |
1682 | skb->protocol = fddi_type_trans(skb, bp->dev); | 1682 | skb->protocol = fddi_type_trans(skb, bp->dev); |
1683 | skb->dev = bp->dev; /* pass up device pointer */ | ||
1684 | 1683 | ||
1685 | netif_rx(skb); | 1684 | netif_rx(skb); |
1686 | bp->dev->last_rx = jiffies; | 1685 | bp->dev->last_rx = jiffies; |
@@ -1938,7 +1937,7 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc, | |||
1938 | } | 1937 | } |
1939 | skb_reserve(skb, 3); | 1938 | skb_reserve(skb, 3); |
1940 | skb_put(skb, len); | 1939 | skb_put(skb, len); |
1941 | memcpy(skb->data, look_ahead, len); | 1940 | skb_copy_to_linear_data(skb, look_ahead, len); |
1942 | 1941 | ||
1943 | // deliver frame to system | 1942 | // deliver frame to system |
1944 | skb->protocol = fddi_type_trans(skb, smc->os.dev); | 1943 | skb->protocol = fddi_type_trans(skb, smc->os.dev); |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index d476a3cc2e94..f1a0e6c0fbdd 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -2654,12 +2654,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
2654 | td->dma_hi = map >> 32; | 2654 | td->dma_hi = map >> 32; |
2655 | 2655 | ||
2656 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2656 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2657 | int offset = skb->h.raw - skb->data; | 2657 | const int offset = skb_transport_offset(skb); |
2658 | 2658 | ||
2659 | /* This seems backwards, but it is what the sk98lin | 2659 | /* This seems backwards, but it is what the sk98lin |
2660 | * does. Looks like hardware is wrong? | 2660 | * does. Looks like hardware is wrong? |
2661 | */ | 2661 | */ |
2662 | if (skb->h.ipiph->protocol == IPPROTO_UDP | 2662 | if (ipip_hdr(skb)->protocol == IPPROTO_UDP |
2663 | && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) | 2663 | && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) |
2664 | control = BMU_TCP_CHECK; | 2664 | control = BMU_TCP_CHECK; |
2665 | else | 2665 | else |
@@ -2950,7 +2950,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
2950 | pci_dma_sync_single_for_cpu(skge->hw->pdev, | 2950 | pci_dma_sync_single_for_cpu(skge->hw->pdev, |
2951 | pci_unmap_addr(e, mapaddr), | 2951 | pci_unmap_addr(e, mapaddr), |
2952 | len, PCI_DMA_FROMDEVICE); | 2952 | len, PCI_DMA_FROMDEVICE); |
2953 | memcpy(skb->data, e->skb->data, len); | 2953 | skb_copy_from_linear_data(e->skb, skb->data, len); |
2954 | pci_dma_sync_single_for_device(skge->hw->pdev, | 2954 | pci_dma_sync_single_for_device(skge->hw->pdev, |
2955 | pci_unmap_addr(e, mapaddr), | 2955 | pci_unmap_addr(e, mapaddr), |
2956 | len, PCI_DMA_FROMDEVICE); | 2956 | len, PCI_DMA_FROMDEVICE); |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 4a009b7b1777..238c2ca34da6 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/ethtool.h> | 32 | #include <linux/ethtool.h> |
33 | #include <linux/pci.h> | 33 | #include <linux/pci.h> |
34 | #include <linux/ip.h> | 34 | #include <linux/ip.h> |
35 | #include <net/ip.h> | ||
35 | #include <linux/tcp.h> | 36 | #include <linux/tcp.h> |
36 | #include <linux/in.h> | 37 | #include <linux/in.h> |
37 | #include <linux/delay.h> | 38 | #include <linux/delay.h> |
@@ -49,7 +50,7 @@ | |||
49 | #include "sky2.h" | 50 | #include "sky2.h" |
50 | 51 | ||
51 | #define DRV_NAME "sky2" | 52 | #define DRV_NAME "sky2" |
52 | #define DRV_VERSION "1.13" | 53 | #define DRV_VERSION "1.14" |
53 | #define PFX DRV_NAME " " | 54 | #define PFX DRV_NAME " " |
54 | 55 | ||
55 | /* | 56 | /* |
@@ -123,7 +124,10 @@ static const struct pci_device_id sky2_id_table[] = { | |||
123 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ | 124 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ |
124 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ | 125 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ |
125 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ | 126 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ |
127 | #ifdef broken | ||
128 | /* This device causes data corruption problems that are not resolved */ | ||
126 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ | 129 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ |
130 | #endif | ||
127 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ | 131 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ |
128 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ | 132 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ |
129 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ | 133 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ |
@@ -740,12 +744,17 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | |||
740 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { | 744 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { |
741 | sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); | 745 | sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); |
742 | sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); | 746 | sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); |
743 | if (hw->dev[port]->mtu > ETH_DATA_LEN) { | 747 | |
744 | /* set Tx GMAC FIFO Almost Empty Threshold */ | 748 | /* set Tx GMAC FIFO Almost Empty Threshold */ |
745 | sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180); | 749 | sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), |
746 | /* Disable Store & Forward mode for TX */ | 750 | (ECU_JUMBO_WM << 16) | ECU_AE_THR); |
747 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS); | 751 | |
748 | } | 752 | if (hw->dev[port]->mtu > ETH_DATA_LEN) |
753 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | ||
754 | TX_JUMBO_ENA | TX_STFW_DIS); | ||
755 | else | ||
756 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | ||
757 | TX_JUMBO_DIS | TX_STFW_ENA); | ||
749 | } | 758 | } |
750 | 759 | ||
751 | } | 760 | } |
@@ -1278,7 +1287,7 @@ static int sky2_up(struct net_device *dev) | |||
1278 | /* Set almost empty threshold */ | 1287 | /* Set almost empty threshold */ |
1279 | if (hw->chip_id == CHIP_ID_YUKON_EC_U | 1288 | if (hw->chip_id == CHIP_ID_YUKON_EC_U |
1280 | && hw->chip_rev == CHIP_REV_YU_EC_U_A0) | 1289 | && hw->chip_rev == CHIP_REV_YU_EC_U_A0) |
1281 | sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0); | 1290 | sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV); |
1282 | 1291 | ||
1283 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, | 1292 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, |
1284 | TX_RING_SIZE - 1); | 1293 | TX_RING_SIZE - 1); |
@@ -1383,8 +1392,8 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1383 | /* Check for TCP Segmentation Offload */ | 1392 | /* Check for TCP Segmentation Offload */ |
1384 | mss = skb_shinfo(skb)->gso_size; | 1393 | mss = skb_shinfo(skb)->gso_size; |
1385 | if (mss != 0) { | 1394 | if (mss != 0) { |
1386 | mss += ((skb->h.th->doff - 5) * 4); /* TCP options */ | 1395 | mss += tcp_optlen(skb); /* TCP options */ |
1387 | mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); | 1396 | mss += ip_hdrlen(skb) + sizeof(struct tcphdr); |
1388 | mss += ETH_HLEN; | 1397 | mss += ETH_HLEN; |
1389 | 1398 | ||
1390 | if (mss != sky2->tx_last_mss) { | 1399 | if (mss != sky2->tx_last_mss) { |
@@ -1412,14 +1421,14 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1412 | 1421 | ||
1413 | /* Handle TCP checksum offload */ | 1422 | /* Handle TCP checksum offload */ |
1414 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1423 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1415 | unsigned offset = skb->h.raw - skb->data; | 1424 | const unsigned offset = skb_transport_offset(skb); |
1416 | u32 tcpsum; | 1425 | u32 tcpsum; |
1417 | 1426 | ||
1418 | tcpsum = offset << 16; /* sum start */ | 1427 | tcpsum = offset << 16; /* sum start */ |
1419 | tcpsum |= offset + skb->csum_offset; /* sum write */ | 1428 | tcpsum |= offset + skb->csum_offset; /* sum write */ |
1420 | 1429 | ||
1421 | ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; | 1430 | ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; |
1422 | if (skb->nh.iph->protocol == IPPROTO_UDP) | 1431 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) |
1423 | ctrl |= UDPTCP; | 1432 | ctrl |= UDPTCP; |
1424 | 1433 | ||
1425 | if (tcpsum != sky2->tx_tcpsum) { | 1434 | if (tcpsum != sky2->tx_tcpsum) { |
@@ -1584,13 +1593,6 @@ static int sky2_down(struct net_device *dev) | |||
1584 | sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), | 1593 | sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), |
1585 | RB_RST_SET | RB_DIS_OP_MD); | 1594 | RB_RST_SET | RB_DIS_OP_MD); |
1586 | 1595 | ||
1587 | /* WA for dev. #4.209 */ | ||
1588 | if (hw->chip_id == CHIP_ID_YUKON_EC_U | ||
1589 | && (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0)) | ||
1590 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | ||
1591 | sky2->speed != SPEED_1000 ? | ||
1592 | TX_STFW_ENA : TX_STFW_DIS); | ||
1593 | |||
1594 | ctrl = gma_read16(hw, port, GM_GP_CTRL); | 1596 | ctrl = gma_read16(hw, port, GM_GP_CTRL); |
1595 | ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); | 1597 | ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); |
1596 | gma_write16(hw, port, GM_GP_CTRL, ctrl); | 1598 | gma_write16(hw, port, GM_GP_CTRL, ctrl); |
@@ -1890,6 +1892,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
1890 | { | 1892 | { |
1891 | struct sky2_port *sky2 = netdev_priv(dev); | 1893 | struct sky2_port *sky2 = netdev_priv(dev); |
1892 | struct sky2_hw *hw = sky2->hw; | 1894 | struct sky2_hw *hw = sky2->hw; |
1895 | unsigned port = sky2->port; | ||
1893 | int err; | 1896 | int err; |
1894 | u16 ctl, mode; | 1897 | u16 ctl, mode; |
1895 | u32 imask; | 1898 | u32 imask; |
@@ -1897,9 +1900,8 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
1897 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) | 1900 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) |
1898 | return -EINVAL; | 1901 | return -EINVAL; |
1899 | 1902 | ||
1900 | /* TSO on Yukon Ultra and MTU > 1500 not supported */ | 1903 | if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_FE) |
1901 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN) | 1904 | return -EINVAL; |
1902 | dev->features &= ~NETIF_F_TSO; | ||
1903 | 1905 | ||
1904 | if (!netif_running(dev)) { | 1906 | if (!netif_running(dev)) { |
1905 | dev->mtu = new_mtu; | 1907 | dev->mtu = new_mtu; |
@@ -1915,8 +1917,18 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
1915 | 1917 | ||
1916 | synchronize_irq(hw->pdev->irq); | 1918 | synchronize_irq(hw->pdev->irq); |
1917 | 1919 | ||
1918 | ctl = gma_read16(hw, sky2->port, GM_GP_CTRL); | 1920 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { |
1919 | gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); | 1921 | if (new_mtu > ETH_DATA_LEN) { |
1922 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | ||
1923 | TX_JUMBO_ENA | TX_STFW_DIS); | ||
1924 | dev->features &= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM; | ||
1925 | } else | ||
1926 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | ||
1927 | TX_JUMBO_DIS | TX_STFW_ENA); | ||
1928 | } | ||
1929 | |||
1930 | ctl = gma_read16(hw, port, GM_GP_CTRL); | ||
1931 | gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); | ||
1920 | sky2_rx_stop(sky2); | 1932 | sky2_rx_stop(sky2); |
1921 | sky2_rx_clean(sky2); | 1933 | sky2_rx_clean(sky2); |
1922 | 1934 | ||
@@ -1928,9 +1940,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
1928 | if (dev->mtu > ETH_DATA_LEN) | 1940 | if (dev->mtu > ETH_DATA_LEN) |
1929 | mode |= GM_SMOD_JUMBO_ENA; | 1941 | mode |= GM_SMOD_JUMBO_ENA; |
1930 | 1942 | ||
1931 | gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode); | 1943 | gma_write16(hw, port, GM_SERIAL_MODE, mode); |
1932 | 1944 | ||
1933 | sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD); | 1945 | sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); |
1934 | 1946 | ||
1935 | err = sky2_rx_start(sky2); | 1947 | err = sky2_rx_start(sky2); |
1936 | sky2_write32(hw, B0_IMSK, imask); | 1948 | sky2_write32(hw, B0_IMSK, imask); |
@@ -1938,7 +1950,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
1938 | if (err) | 1950 | if (err) |
1939 | dev_close(dev); | 1951 | dev_close(dev); |
1940 | else { | 1952 | else { |
1941 | gma_write16(hw, sky2->port, GM_GP_CTRL, ctl); | 1953 | gma_write16(hw, port, GM_GP_CTRL, ctl); |
1942 | 1954 | ||
1943 | netif_poll_enable(hw->dev[0]); | 1955 | netif_poll_enable(hw->dev[0]); |
1944 | netif_wake_queue(dev); | 1956 | netif_wake_queue(dev); |
@@ -1959,7 +1971,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2, | |||
1959 | skb_reserve(skb, 2); | 1971 | skb_reserve(skb, 2); |
1960 | pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, | 1972 | pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, |
1961 | length, PCI_DMA_FROMDEVICE); | 1973 | length, PCI_DMA_FROMDEVICE); |
1962 | memcpy(skb->data, re->skb->data, length); | 1974 | skb_copy_from_linear_data(re->skb, skb->data, length); |
1963 | skb->ip_summed = re->skb->ip_summed; | 1975 | skb->ip_summed = re->skb->ip_summed; |
1964 | skb->csum = re->skb->csum; | 1976 | skb->csum = re->skb->csum; |
1965 | pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, | 1977 | pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, |
@@ -2340,26 +2352,22 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port) | |||
2340 | } | 2352 | } |
2341 | } | 2353 | } |
2342 | 2354 | ||
2343 | /* This should never happen it is a fatal situation */ | 2355 | /* This should never happen it is a bug. */ |
2344 | static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port, | 2356 | static void sky2_le_error(struct sky2_hw *hw, unsigned port, |
2345 | const char *rxtx, u32 mask) | 2357 | u16 q, unsigned ring_size) |
2346 | { | 2358 | { |
2347 | struct net_device *dev = hw->dev[port]; | 2359 | struct net_device *dev = hw->dev[port]; |
2348 | struct sky2_port *sky2 = netdev_priv(dev); | 2360 | struct sky2_port *sky2 = netdev_priv(dev); |
2349 | u32 imask; | 2361 | unsigned idx; |
2350 | 2362 | const u64 *le = (q == Q_R1 || q == Q_R2) | |
2351 | printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n", | 2363 | ? (u64 *) sky2->rx_le : (u64 *) sky2->tx_le; |
2352 | dev ? dev->name : "<not registered>", rxtx); | ||
2353 | 2364 | ||
2354 | imask = sky2_read32(hw, B0_IMSK); | 2365 | idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); |
2355 | imask &= ~mask; | 2366 | printk(KERN_ERR PFX "%s: descriptor error q=%#x get=%u [%llx] put=%u\n", |
2356 | sky2_write32(hw, B0_IMSK, imask); | 2367 | dev->name, (unsigned) q, idx, (unsigned long long) le[idx], |
2368 | (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX))); | ||
2357 | 2369 | ||
2358 | if (dev) { | 2370 | sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK); |
2359 | spin_lock(&sky2->phy_lock); | ||
2360 | sky2_link_down(sky2); | ||
2361 | spin_unlock(&sky2->phy_lock); | ||
2362 | } | ||
2363 | } | 2371 | } |
2364 | 2372 | ||
2365 | /* If idle then force a fake soft NAPI poll once a second | 2373 | /* If idle then force a fake soft NAPI poll once a second |
@@ -2383,23 +2391,15 @@ static void sky2_idle(unsigned long arg) | |||
2383 | mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout)); | 2391 | mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout)); |
2384 | } | 2392 | } |
2385 | 2393 | ||
2386 | 2394 | /* Hardware/software error handling */ | |
2387 | static int sky2_poll(struct net_device *dev0, int *budget) | 2395 | static void sky2_err_intr(struct sky2_hw *hw, u32 status) |
2388 | { | 2396 | { |
2389 | struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; | 2397 | if (net_ratelimit()) |
2390 | int work_limit = min(dev0->quota, *budget); | 2398 | dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status); |
2391 | int work_done = 0; | ||
2392 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); | ||
2393 | 2399 | ||
2394 | if (status & Y2_IS_HW_ERR) | 2400 | if (status & Y2_IS_HW_ERR) |
2395 | sky2_hw_intr(hw); | 2401 | sky2_hw_intr(hw); |
2396 | 2402 | ||
2397 | if (status & Y2_IS_IRQ_PHY1) | ||
2398 | sky2_phy_intr(hw, 0); | ||
2399 | |||
2400 | if (status & Y2_IS_IRQ_PHY2) | ||
2401 | sky2_phy_intr(hw, 1); | ||
2402 | |||
2403 | if (status & Y2_IS_IRQ_MAC1) | 2403 | if (status & Y2_IS_IRQ_MAC1) |
2404 | sky2_mac_intr(hw, 0); | 2404 | sky2_mac_intr(hw, 0); |
2405 | 2405 | ||
@@ -2407,16 +2407,33 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
2407 | sky2_mac_intr(hw, 1); | 2407 | sky2_mac_intr(hw, 1); |
2408 | 2408 | ||
2409 | if (status & Y2_IS_CHK_RX1) | 2409 | if (status & Y2_IS_CHK_RX1) |
2410 | sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); | 2410 | sky2_le_error(hw, 0, Q_R1, RX_LE_SIZE); |
2411 | 2411 | ||
2412 | if (status & Y2_IS_CHK_RX2) | 2412 | if (status & Y2_IS_CHK_RX2) |
2413 | sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); | 2413 | sky2_le_error(hw, 1, Q_R2, RX_LE_SIZE); |
2414 | 2414 | ||
2415 | if (status & Y2_IS_CHK_TXA1) | 2415 | if (status & Y2_IS_CHK_TXA1) |
2416 | sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); | 2416 | sky2_le_error(hw, 0, Q_XA1, TX_RING_SIZE); |
2417 | 2417 | ||
2418 | if (status & Y2_IS_CHK_TXA2) | 2418 | if (status & Y2_IS_CHK_TXA2) |
2419 | sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); | 2419 | sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE); |
2420 | } | ||
2421 | |||
2422 | static int sky2_poll(struct net_device *dev0, int *budget) | ||
2423 | { | ||
2424 | struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; | ||
2425 | int work_limit = min(dev0->quota, *budget); | ||
2426 | int work_done = 0; | ||
2427 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); | ||
2428 | |||
2429 | if (unlikely(status & Y2_IS_ERROR)) | ||
2430 | sky2_err_intr(hw, status); | ||
2431 | |||
2432 | if (status & Y2_IS_IRQ_PHY1) | ||
2433 | sky2_phy_intr(hw, 0); | ||
2434 | |||
2435 | if (status & Y2_IS_IRQ_PHY2) | ||
2436 | sky2_phy_intr(hw, 1); | ||
2420 | 2437 | ||
2421 | work_done = sky2_status_intr(hw, work_limit); | 2438 | work_done = sky2_status_intr(hw, work_limit); |
2422 | if (work_done < work_limit) { | 2439 | if (work_done < work_limit) { |
@@ -2534,16 +2551,14 @@ static void sky2_reset(struct sky2_hw *hw) | |||
2534 | int i; | 2551 | int i; |
2535 | 2552 | ||
2536 | /* disable ASF */ | 2553 | /* disable ASF */ |
2537 | if (hw->chip_id <= CHIP_ID_YUKON_EC) { | 2554 | if (hw->chip_id == CHIP_ID_YUKON_EX) { |
2538 | if (hw->chip_id == CHIP_ID_YUKON_EX) { | 2555 | status = sky2_read16(hw, HCU_CCSR); |
2539 | status = sky2_read16(hw, HCU_CCSR); | 2556 | status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE | |
2540 | status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE | | 2557 | HCU_CCSR_UC_STATE_MSK); |
2541 | HCU_CCSR_UC_STATE_MSK); | 2558 | sky2_write16(hw, HCU_CCSR, status); |
2542 | sky2_write16(hw, HCU_CCSR, status); | 2559 | } else |
2543 | } else | 2560 | sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); |
2544 | sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); | 2561 | sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); |
2545 | sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); | ||
2546 | } | ||
2547 | 2562 | ||
2548 | /* do a SW reset */ | 2563 | /* do a SW reset */ |
2549 | sky2_write8(hw, B0_CTST, CS_RST_SET); | 2564 | sky2_write8(hw, B0_CTST, CS_RST_SET); |
@@ -3328,6 +3343,36 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
3328 | regs->len - B3_RI_WTO_R1); | 3343 | regs->len - B3_RI_WTO_R1); |
3329 | } | 3344 | } |
3330 | 3345 | ||
3346 | /* In order to do Jumbo packets on these chips, need to turn off the | ||
3347 | * transmit store/forward. Therefore checksum offload won't work. | ||
3348 | */ | ||
3349 | static int no_tx_offload(struct net_device *dev) | ||
3350 | { | ||
3351 | const struct sky2_port *sky2 = netdev_priv(dev); | ||
3352 | const struct sky2_hw *hw = sky2->hw; | ||
3353 | |||
3354 | return dev->mtu > ETH_DATA_LEN && | ||
3355 | (hw->chip_id == CHIP_ID_YUKON_EX | ||
3356 | || hw->chip_id == CHIP_ID_YUKON_EC_U); | ||
3357 | } | ||
3358 | |||
3359 | static int sky2_set_tx_csum(struct net_device *dev, u32 data) | ||
3360 | { | ||
3361 | if (data && no_tx_offload(dev)) | ||
3362 | return -EINVAL; | ||
3363 | |||
3364 | return ethtool_op_set_tx_csum(dev, data); | ||
3365 | } | ||
3366 | |||
3367 | |||
3368 | static int sky2_set_tso(struct net_device *dev, u32 data) | ||
3369 | { | ||
3370 | if (data && no_tx_offload(dev)) | ||
3371 | return -EINVAL; | ||
3372 | |||
3373 | return ethtool_op_set_tso(dev, data); | ||
3374 | } | ||
3375 | |||
3331 | static const struct ethtool_ops sky2_ethtool_ops = { | 3376 | static const struct ethtool_ops sky2_ethtool_ops = { |
3332 | .get_settings = sky2_get_settings, | 3377 | .get_settings = sky2_get_settings, |
3333 | .set_settings = sky2_set_settings, | 3378 | .set_settings = sky2_set_settings, |
@@ -3343,9 +3388,9 @@ static const struct ethtool_ops sky2_ethtool_ops = { | |||
3343 | .get_sg = ethtool_op_get_sg, | 3388 | .get_sg = ethtool_op_get_sg, |
3344 | .set_sg = ethtool_op_set_sg, | 3389 | .set_sg = ethtool_op_set_sg, |
3345 | .get_tx_csum = ethtool_op_get_tx_csum, | 3390 | .get_tx_csum = ethtool_op_get_tx_csum, |
3346 | .set_tx_csum = ethtool_op_set_tx_csum, | 3391 | .set_tx_csum = sky2_set_tx_csum, |
3347 | .get_tso = ethtool_op_get_tso, | 3392 | .get_tso = ethtool_op_get_tso, |
3348 | .set_tso = ethtool_op_set_tso, | 3393 | .set_tso = sky2_set_tso, |
3349 | .get_rx_csum = sky2_get_rx_csum, | 3394 | .get_rx_csum = sky2_get_rx_csum, |
3350 | .set_rx_csum = sky2_set_rx_csum, | 3395 | .set_rx_csum = sky2_set_rx_csum, |
3351 | .get_strings = sky2_get_strings, | 3396 | .get_strings = sky2_get_strings, |
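
On Yukon EC Ultra and Extreme, jumbo frames only work with Tx store-and-forward disabled, which is also why sky2_set_tx_csum()/sky2_set_tso() above refuse offload at jumbo MTU. The decision the patch now applies in both sky2_mac_init() and sky2_change_mtu() boils down to this (sketch, names as in the hunks above and in sky2.h below):

if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
	if (dev->mtu > ETH_DATA_LEN)
		/* jumbo: bypass store-and-forward, so no Tx checksum/TSO */
		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
			     TX_JUMBO_ENA | TX_STFW_DIS);
	else
		/* standard MTU: store-and-forward back on */
		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
			     TX_JUMBO_DIS | TX_STFW_ENA);
}
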
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index ac24bdc42976..5efb5afc45ba 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -288,6 +288,9 @@ enum { | |||
288 | | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1, | 288 | | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1, |
289 | Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 | 289 | Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 |
290 | | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, | 290 | | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, |
291 | Y2_IS_ERROR = Y2_IS_HW_ERR | | ||
292 | Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 | | ||
293 | Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, | ||
291 | }; | 294 | }; |
292 | 295 | ||
293 | /* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ | 296 | /* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ |
@@ -738,6 +741,11 @@ enum { | |||
738 | TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ | 741 | TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ |
739 | TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ | 742 | TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ |
740 | TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ | 743 | TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ |
744 | |||
745 | /* Threshold values for Yukon-EC Ultra and Extreme */ | ||
746 | ECU_AE_THR = 0x0070, /* Almost Empty Threshold */ | ||
747 | ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */ | ||
748 | ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */ | ||
741 | }; | 749 | }; |
742 | 750 | ||
743 | /* Descriptor Poll Timer Registers */ | 751 | /* Descriptor Poll Timer Registers */ |
@@ -1631,6 +1639,9 @@ enum { | |||
1631 | TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */ | 1639 | TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */ |
1632 | TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */ | 1640 | TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */ |
1633 | 1641 | ||
1642 | TX_JUMBO_ENA = 1<<23,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */ | ||
1643 | TX_JUMBO_DIS = 1<<22,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */ | ||
1644 | |||
1634 | GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */ | 1645 | GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */ |
1635 | GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */ | 1646 | GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */ |
1636 | GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */ | 1647 | GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */ |
diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 2f4b1de7a2b4..65bd20fac820 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c | |||
@@ -363,7 +363,7 @@ sl_bump(struct slip *sl) | |||
363 | } | 363 | } |
364 | skb->dev = sl->dev; | 364 | skb->dev = sl->dev; |
365 | memcpy(skb_put(skb,count), sl->rbuff, count); | 365 | memcpy(skb_put(skb,count), sl->rbuff, count); |
366 | skb->mac.raw=skb->data; | 366 | skb_reset_mac_header(skb); |
367 | skb->protocol=htons(ETH_P_IP); | 367 | skb->protocol=htons(ETH_P_IP); |
368 | netif_rx(skb); | 368 | netif_rx(skb); |
369 | sl->dev->last_rx = jiffies; | 369 | sl->dev->last_rx = jiffies; |
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index c95614131980..8a2109a913b6 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c | |||
@@ -502,7 +502,6 @@ static inline void smc911x_rcv(struct net_device *dev) | |||
502 | DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name,); | 502 | DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name,); |
503 | PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64); | 503 | PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64); |
504 | dev->last_rx = jiffies; | 504 | dev->last_rx = jiffies; |
505 | skb->dev = dev; | ||
506 | skb->protocol = eth_type_trans(skb, dev); | 505 | skb->protocol = eth_type_trans(skb, dev); |
507 | netif_rx(skb); | 506 | netif_rx(skb); |
508 | lp->stats.rx_packets++; | 507 | lp->stats.rx_packets++; |
@@ -1307,7 +1306,6 @@ smc911x_rx_dma_irq(int dma, void *data) | |||
1307 | lp->current_rx_skb = NULL; | 1306 | lp->current_rx_skb = NULL; |
1308 | PRINT_PKT(skb->data, skb->len); | 1307 | PRINT_PKT(skb->data, skb->len); |
1309 | dev->last_rx = jiffies; | 1308 | dev->last_rx = jiffies; |
1310 | skb->dev = dev; | ||
1311 | skb->protocol = eth_type_trans(skb, dev); | 1309 | skb->protocol = eth_type_trans(skb, dev); |
1312 | netif_rx(skb); | 1310 | netif_rx(skb); |
1313 | lp->stats.rx_packets++; | 1311 | lp->stats.rx_packets++; |
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c index bd6e84506c29..36c1ebadbf20 100644 --- a/drivers/net/smc9194.c +++ b/drivers/net/smc9194.c | |||
@@ -1262,7 +1262,6 @@ static void smc_rcv(struct net_device *dev) | |||
1262 | 1262 | ||
1263 | skb_reserve( skb, 2 ); /* 16 bit alignment */ | 1263 | skb_reserve( skb, 2 ); /* 16 bit alignment */ |
1264 | 1264 | ||
1265 | skb->dev = dev; | ||
1266 | data = skb_put( skb, packet_length); | 1265 | data = skb_put( skb, packet_length); |
1267 | 1266 | ||
1268 | #ifdef USE_32_BIT | 1267 | #ifdef USE_32_BIT |
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 49f4b7712ebf..01cc3c742c38 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -568,7 +568,6 @@ static inline void smc_rcv(struct net_device *dev) | |||
568 | PRINT_PKT(data, packet_len - 4); | 568 | PRINT_PKT(data, packet_len - 4); |
569 | 569 | ||
570 | dev->last_rx = jiffies; | 570 | dev->last_rx = jiffies; |
571 | skb->dev = dev; | ||
572 | skb->protocol = eth_type_trans(skb, dev); | 571 | skb->protocol = eth_type_trans(skb, dev); |
573 | netif_rx(skb); | 572 | netif_rx(skb); |
574 | lp->stats.rx_packets++; | 573 | lp->stats.rx_packets++; |
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c index ed7aa0a5acca..c6320c719931 100644 --- a/drivers/net/sonic.c +++ b/drivers/net/sonic.c | |||
@@ -85,7 +85,6 @@ static int sonic_open(struct net_device *dev) | |||
85 | dev->name); | 85 | dev->name); |
86 | return -ENOMEM; | 86 | return -ENOMEM; |
87 | } | 87 | } |
88 | skb->dev = dev; | ||
89 | /* align IP header unless DMA requires otherwise */ | 88 | /* align IP header unless DMA requires otherwise */ |
90 | if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) | 89 | if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) |
91 | skb_reserve(skb, 2); | 90 | skb_reserve(skb, 2); |
@@ -451,7 +450,6 @@ static void sonic_rx(struct net_device *dev) | |||
451 | lp->stats.rx_dropped++; | 450 | lp->stats.rx_dropped++; |
452 | break; | 451 | break; |
453 | } | 452 | } |
454 | new_skb->dev = dev; | ||
455 | /* provide 16 byte IP header alignment unless DMA requires otherwise */ | 453 | /* provide 16 byte IP header alignment unless DMA requires otherwise */ |
456 | if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) | 454 | if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) |
457 | skb_reserve(new_skb, 2); | 455 | skb_reserve(new_skb, 2); |
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 3b91af89e4c7..230da14b1b68 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -719,8 +719,8 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
719 | SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; | 719 | SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; |
720 | spin_unlock_irqrestore(&chain->lock, flags); | 720 | spin_unlock_irqrestore(&chain->lock, flags); |
721 | 721 | ||
722 | if (skb->protocol == htons(ETH_P_IP)) | 722 | if (skb->protocol == htons(ETH_P_IP) && skb->ip_summed == CHECKSUM_PARTIAL) |
723 | switch (skb->nh.iph->protocol) { | 723 | switch (ip_hdr(skb)->protocol) { |
724 | case IPPROTO_TCP: | 724 | case IPPROTO_TCP: |
725 | hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; | 725 | hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; |
726 | break; | 726 | break; |
@@ -990,7 +990,6 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, | |||
990 | netdev = card->netdev; | 990 | netdev = card->netdev; |
991 | 991 | ||
992 | skb = descr->skb; | 992 | skb = descr->skb; |
993 | skb->dev = netdev; | ||
994 | skb_put(skb, hwdescr->valid_size); | 993 | skb_put(skb, hwdescr->valid_size); |
995 | 994 | ||
996 | /* the card seems to add 2 bytes of junk in front | 995 | /* the card seems to add 2 bytes of junk in front |
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 8bba2e3da7e1..9d6e454a8f98 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c | |||
@@ -1452,7 +1452,6 @@ static int __netdev_rx(struct net_device *dev, int *quota) | |||
1452 | to a minimally-sized skbuff. */ | 1452 | to a minimally-sized skbuff. */ |
1453 | if (pkt_len < rx_copybreak | 1453 | if (pkt_len < rx_copybreak |
1454 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1454 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1455 | skb->dev = dev; | ||
1456 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1455 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1457 | pci_dma_sync_single_for_cpu(np->pci_dev, | 1456 | pci_dma_sync_single_for_cpu(np->pci_dev, |
1458 | np->rx_info[entry].mapping, | 1457 | np->rx_info[entry].mapping, |
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index 4757aa647c7a..396c3d961f88 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c | |||
@@ -775,7 +775,6 @@ static void sun3_82586_rcv_int(struct net_device *dev) | |||
775 | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); | 775 | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); |
776 | if(skb != NULL) | 776 | if(skb != NULL) |
777 | { | 777 | { |
778 | skb->dev = dev; | ||
779 | skb_reserve(skb,2); | 778 | skb_reserve(skb,2); |
780 | skb_put(skb,totlen); | 779 | skb_put(skb,totlen); |
781 | eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0); | 780 | eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0); |
@@ -1027,7 +1026,7 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
1027 | memset((char *)p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); | 1026 | memset((char *)p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); |
1028 | len = ETH_ZLEN; | 1027 | len = ETH_ZLEN; |
1029 | } | 1028 | } |
1030 | memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); | 1029 | skb_copy_from_linear_data(skb, p->xmit_cbuffs[p->xmit_count], skb->len); |
1031 | 1030 | ||
1032 | #if (NUM_XMIT_BUFFS == 1) | 1031 | #if (NUM_XMIT_BUFFS == 1) |
1033 | # ifdef NO_NOPCOMMANDS | 1032 | # ifdef NO_NOPCOMMANDS |
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c index 7bee45b42a2c..791e081fdc15 100644 --- a/drivers/net/sun3lance.c +++ b/drivers/net/sun3lance.c | |||
@@ -629,7 +629,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) | |||
629 | head->length = (-len) | 0xf000; | 629 | head->length = (-len) | 0xf000; |
630 | head->misc = 0; | 630 | head->misc = 0; |
631 | 631 | ||
632 | memcpy( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); | 632 | skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len); |
633 | if (len != skb->len) | 633 | if (len != skb->len) |
634 | memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); | 634 | memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); |
635 | 635 | ||
@@ -851,10 +851,9 @@ static int lance_rx( struct net_device *dev ) | |||
851 | } | 851 | } |
852 | 852 | ||
853 | 853 | ||
854 | skb->dev = dev; | ||
855 | skb_reserve( skb, 2 ); /* 16 byte align */ | 854 | skb_reserve( skb, 2 ); /* 16 byte align */ |
856 | skb_put( skb, pkt_len ); /* Make room */ | 855 | skb_put( skb, pkt_len ); /* Make room */ |
857 | // memcpy( skb->data, PKTBUF_ADDR(head), pkt_len ); | 856 | // skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len); |
858 | eth_copy_and_sum(skb, | 857 | eth_copy_and_sum(skb, |
859 | PKTBUF_ADDR(head), | 858 | PKTBUF_ADDR(head), |
860 | pkt_len, 0); | 859 | pkt_len, 0); |
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index 18f88853e1e5..2ad8d58dee3b 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c | |||
@@ -855,7 +855,6 @@ static void bigmac_rx(struct bigmac *bp) | |||
855 | drops++; | 855 | drops++; |
856 | goto drop_it; | 856 | goto drop_it; |
857 | } | 857 | } |
858 | copy_skb->dev = bp->dev; | ||
859 | skb_reserve(copy_skb, 2); | 858 | skb_reserve(copy_skb, 2); |
860 | skb_put(copy_skb, len); | 859 | skb_put(copy_skb, len); |
861 | sbus_dma_sync_single_for_cpu(bp->bigmac_sdev, | 860 | sbus_dma_sync_single_for_cpu(bp->bigmac_sdev, |
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index c06ecc8002b9..f51ba31970aa 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c | |||
@@ -1308,7 +1308,6 @@ static void rx_poll(unsigned long data) | |||
1308 | to a minimally-sized skbuff. */ | 1308 | to a minimally-sized skbuff. */ |
1309 | if (pkt_len < rx_copybreak | 1309 | if (pkt_len < rx_copybreak |
1310 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1310 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1311 | skb->dev = dev; | ||
1312 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1311 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1313 | pci_dma_sync_single_for_cpu(np->pci_dev, | 1312 | pci_dma_sync_single_for_cpu(np->pci_dev, |
1314 | desc->frag[0].addr, | 1313 | desc->frag[0].addr, |
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 08ea61db46fe..5da73212ac91 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c | |||
@@ -64,11 +64,9 @@ | |||
64 | #include <asm/uaccess.h> | 64 | #include <asm/uaccess.h> |
65 | #include <asm/irq.h> | 65 | #include <asm/irq.h> |
66 | 66 | ||
67 | #ifdef __sparc__ | 67 | #ifdef CONFIG_SPARC |
68 | #include <asm/idprom.h> | 68 | #include <asm/idprom.h> |
69 | #include <asm/openprom.h> | 69 | #include <asm/prom.h> |
70 | #include <asm/oplib.h> | ||
71 | #include <asm/pbm.h> | ||
72 | #endif | 70 | #endif |
73 | 71 | ||
74 | #ifdef CONFIG_PPC_PMAC | 72 | #ifdef CONFIG_PPC_PMAC |
@@ -845,11 +843,10 @@ static int gem_rx(struct gem *gp, int work_to_do) | |||
845 | goto drop_it; | 843 | goto drop_it; |
846 | } | 844 | } |
847 | 845 | ||
848 | copy_skb->dev = gp->dev; | ||
849 | skb_reserve(copy_skb, 2); | 846 | skb_reserve(copy_skb, 2); |
850 | skb_put(copy_skb, len); | 847 | skb_put(copy_skb, len); |
851 | pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 848 | pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
852 | memcpy(copy_skb->data, skb->data, len); | 849 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
853 | pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 850 | pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
854 | 851 | ||
855 | /* We'll reuse the original ring buffer. */ | 852 | /* We'll reuse the original ring buffer. */ |
@@ -1029,10 +1026,8 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1029 | 1026 | ||
1030 | ctrl = 0; | 1027 | ctrl = 0; |
1031 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1028 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1032 | u64 csum_start_off, csum_stuff_off; | 1029 | const u64 csum_start_off = skb_transport_offset(skb); |
1033 | 1030 | const u64 csum_stuff_off = csum_start_off + skb->csum_offset; | |
1034 | csum_start_off = (u64) (skb->h.raw - skb->data); | ||
1035 | csum_stuff_off = csum_start_off + skb->csum_offset; | ||
1036 | 1031 | ||
1037 | ctrl = (TXDCTRL_CENAB | | 1032 | ctrl = (TXDCTRL_CENAB | |
1038 | (csum_start_off << 15) | | 1033 | (csum_start_off << 15) | |
@@ -2849,7 +2844,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
2849 | return rc; | 2844 | return rc; |
2850 | } | 2845 | } |
2851 | 2846 | ||
2852 | #if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC)) | 2847 | #if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) |
2853 | /* Fetch MAC address from vital product data of PCI ROM. */ | 2848 | /* Fetch MAC address from vital product data of PCI ROM. */ |
2854 | static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) | 2849 | static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) |
2855 | { | 2850 | { |
@@ -2904,36 +2899,19 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) | |||
2904 | 2899 | ||
2905 | static int __devinit gem_get_device_address(struct gem *gp) | 2900 | static int __devinit gem_get_device_address(struct gem *gp) |
2906 | { | 2901 | { |
2907 | #if defined(__sparc__) || defined(CONFIG_PPC_PMAC) | 2902 | #if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) |
2908 | struct net_device *dev = gp->dev; | 2903 | struct net_device *dev = gp->dev; |
2909 | #endif | ||
2910 | |||
2911 | #if defined(__sparc__) | ||
2912 | struct pci_dev *pdev = gp->pdev; | ||
2913 | struct pcidev_cookie *pcp = pdev->sysdata; | ||
2914 | int use_idprom = 1; | ||
2915 | |||
2916 | if (pcp != NULL) { | ||
2917 | unsigned char *addr; | ||
2918 | int len; | ||
2919 | |||
2920 | addr = of_get_property(pcp->prom_node, "local-mac-address", | ||
2921 | &len); | ||
2922 | if (addr && len == 6) { | ||
2923 | use_idprom = 0; | ||
2924 | memcpy(dev->dev_addr, addr, 6); | ||
2925 | } | ||
2926 | } | ||
2927 | if (use_idprom) | ||
2928 | memcpy(dev->dev_addr, idprom->id_ethaddr, 6); | ||
2929 | #elif defined(CONFIG_PPC_PMAC) | ||
2930 | const unsigned char *addr; | 2904 | const unsigned char *addr; |
2931 | 2905 | ||
2932 | addr = get_property(gp->of_node, "local-mac-address", NULL); | 2906 | addr = get_property(gp->of_node, "local-mac-address", NULL); |
2933 | if (addr == NULL) { | 2907 | if (addr == NULL) { |
2908 | #ifdef CONFIG_SPARC | ||
2909 | addr = idprom->id_ethaddr; | ||
2910 | #else | ||
2934 | printk("\n"); | 2911 | printk("\n"); |
2935 | printk(KERN_ERR "%s: can't get mac-address\n", dev->name); | 2912 | printk(KERN_ERR "%s: can't get mac-address\n", dev->name); |
2936 | return -1; | 2913 | return -1; |
2914 | #endif | ||
2937 | } | 2915 | } |
2938 | memcpy(dev->dev_addr, addr, 6); | 2916 | memcpy(dev->dev_addr, addr, 6); |
2939 | #else | 2917 | #else |
@@ -3091,7 +3069,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, | |||
3091 | /* On Apple, we want a reference to the Open Firmware device-tree | 3069 | /* On Apple, we want a reference to the Open Firmware device-tree |
3092 | * node. We use it for clock control. | 3070 | * node. We use it for clock control. |
3093 | */ | 3071 | */ |
3094 | #ifdef CONFIG_PPC_PMAC | 3072 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) |
3095 | gp->of_node = pci_device_to_OF_node(pdev); | 3073 | gp->of_node = pci_device_to_OF_node(pdev); |
3096 | #endif | 3074 | #endif |
3097 | 3075 | ||
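In the sungem transmit path above (and in sunhme further down), the checksum-offload offsets handed to the hardware are now derived from skb_transport_offset() and skb->csum_offset instead of pointer arithmetic on skb->h.raw. A schematic sketch of that computation only; the TXDCTRL_* field packing is device-specific and omitted here:

```c
#include <linux/skbuff.h>
#include <linux/types.h>

/* Illustrative only: the two offsets a checksum-offloading NIC needs.
 * csum_start is where the device starts summing, csum_stuff is where
 * it stores the result. */
static void my_csum_offsets(const struct sk_buff *skb,
			    u32 *csum_start, u32 *csum_stuff)
{
	*csum_start = skb_transport_offset(skb); /* was skb->h.raw - skb->data */
	*csum_stuff = *csum_start + skb->csum_offset;
}
```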
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index a70067c85cc9..58cf87c5751e 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h | |||
@@ -1025,7 +1025,7 @@ struct gem { | |||
1025 | 1025 | ||
1026 | struct pci_dev *pdev; | 1026 | struct pci_dev *pdev; |
1027 | struct net_device *dev; | 1027 | struct net_device *dev; |
1028 | #ifdef CONFIG_PPC_PMAC | 1028 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) |
1029 | struct device_node *of_node; | 1029 | struct device_node *of_node; |
1030 | #endif | 1030 | #endif |
1031 | }; | 1031 | }; |
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index ef671739cfea..51c3fe2108a3 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c | |||
@@ -55,9 +55,6 @@ | |||
55 | 55 | ||
56 | #ifdef CONFIG_PCI | 56 | #ifdef CONFIG_PCI |
57 | #include <linux/pci.h> | 57 | #include <linux/pci.h> |
58 | #ifdef CONFIG_SPARC | ||
59 | #include <asm/pbm.h> | ||
60 | #endif | ||
61 | #endif | 58 | #endif |
62 | 59 | ||
63 | #include "sunhme.h" | 60 | #include "sunhme.h" |
@@ -2058,11 +2055,10 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) | |||
2058 | goto drop_it; | 2055 | goto drop_it; |
2059 | } | 2056 | } |
2060 | 2057 | ||
2061 | copy_skb->dev = dev; | ||
2062 | skb_reserve(copy_skb, 2); | 2058 | skb_reserve(copy_skb, 2); |
2063 | skb_put(copy_skb, len); | 2059 | skb_put(copy_skb, len); |
2064 | hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE); | 2060 | hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE); |
2065 | memcpy(copy_skb->data, skb->data, len); | 2061 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
2066 | hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE); | 2062 | hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE); |
2067 | 2063 | ||
2068 | /* Reuse original ring buffer. */ | 2064 | /* Reuse original ring buffer. */ |
@@ -2270,10 +2266,8 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2270 | 2266 | ||
2271 | tx_flags = TXFLAG_OWN; | 2267 | tx_flags = TXFLAG_OWN; |
2272 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2268 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2273 | u32 csum_start_off, csum_stuff_off; | 2269 | const u32 csum_start_off = skb_transport_offset(skb); |
2274 | 2270 | const u32 csum_stuff_off = csum_start_off + skb->csum_offset; | |
2275 | csum_start_off = (u32) (skb->h.raw - skb->data); | ||
2276 | csum_stuff_off = csum_start_off + skb->csum_offset; | ||
2277 | 2271 | ||
2278 | tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | | 2272 | tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | |
2279 | ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) | | 2273 | ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) | |
@@ -2704,7 +2698,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe | |||
2704 | dev->dev_addr[i] = macaddr[i]; | 2698 | dev->dev_addr[i] = macaddr[i]; |
2705 | macaddr[5]++; | 2699 | macaddr[5]++; |
2706 | } else { | 2700 | } else { |
2707 | unsigned char *addr; | 2701 | const unsigned char *addr; |
2708 | int len; | 2702 | int len; |
2709 | 2703 | ||
2710 | addr = of_get_property(dp, "local-mac-address", &len); | 2704 | addr = of_get_property(dp, "local-mac-address", &len); |
@@ -2986,7 +2980,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
2986 | { | 2980 | { |
2987 | struct quattro *qp = NULL; | 2981 | struct quattro *qp = NULL; |
2988 | #ifdef CONFIG_SPARC | 2982 | #ifdef CONFIG_SPARC |
2989 | struct pcidev_cookie *pcp; | 2983 | struct device_node *dp; |
2990 | #endif | 2984 | #endif |
2991 | struct happy_meal *hp; | 2985 | struct happy_meal *hp; |
2992 | struct net_device *dev; | 2986 | struct net_device *dev; |
@@ -2998,13 +2992,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
2998 | 2992 | ||
2999 | /* Now make sure pci_dev cookie is there. */ | 2993 | /* Now make sure pci_dev cookie is there. */ |
3000 | #ifdef CONFIG_SPARC | 2994 | #ifdef CONFIG_SPARC |
3001 | pcp = pdev->sysdata; | 2995 | dp = pci_device_to_OF_node(pdev); |
3002 | if (pcp == NULL) { | 2996 | strcpy(prom_name, dp->name); |
3003 | printk(KERN_ERR "happymeal(PCI): Some PCI device info missing\n"); | ||
3004 | return -ENODEV; | ||
3005 | } | ||
3006 | |||
3007 | strcpy(prom_name, pcp->prom_node->name); | ||
3008 | #else | 2997 | #else |
3009 | if (is_quattro_p(pdev)) | 2998 | if (is_quattro_p(pdev)) |
3010 | strcpy(prom_name, "SUNW,qfe"); | 2999 | strcpy(prom_name, "SUNW,qfe"); |
@@ -3081,11 +3070,11 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
3081 | macaddr[5]++; | 3070 | macaddr[5]++; |
3082 | } else { | 3071 | } else { |
3083 | #ifdef CONFIG_SPARC | 3072 | #ifdef CONFIG_SPARC |
3084 | unsigned char *addr; | 3073 | const unsigned char *addr; |
3085 | int len; | 3074 | int len; |
3086 | 3075 | ||
3087 | if (qfe_slot != -1 && | 3076 | if (qfe_slot != -1 && |
3088 | (addr = of_get_property(pcp->prom_node, | 3077 | (addr = of_get_property(dp, |
3089 | "local-mac-address", &len)) != NULL | 3078 | "local-mac-address", &len)) != NULL |
3090 | && len == 6) { | 3079 | && len == 6) { |
3091 | memcpy(dev->dev_addr, addr, 6); | 3080 | memcpy(dev->dev_addr, addr, 6); |
@@ -3105,7 +3094,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
3105 | hp->tcvregs = (hpreg_base + 0x7000UL); | 3094 | hp->tcvregs = (hpreg_base + 0x7000UL); |
3106 | 3095 | ||
3107 | #ifdef CONFIG_SPARC | 3096 | #ifdef CONFIG_SPARC |
3108 | hp->hm_revision = of_getintprop_default(pcp->prom_node, "hm-rev", 0xff); | 3097 | hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff); |
3109 | if (hp->hm_revision == 0xff) { | 3098 | if (hp->hm_revision == 0xff) { |
3110 | unsigned char prev; | 3099 | unsigned char prev; |
3111 | 3100 | ||
@@ -3300,7 +3289,7 @@ static int __devinit hme_sbus_probe(struct of_device *dev, const struct of_devic | |||
3300 | { | 3289 | { |
3301 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); | 3290 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); |
3302 | struct device_node *dp = dev->node; | 3291 | struct device_node *dp = dev->node; |
3303 | char *model = of_get_property(dp, "model", NULL); | 3292 | const char *model = of_get_property(dp, "model", NULL); |
3304 | int is_qfe = (match->data != NULL); | 3293 | int is_qfe = (match->data != NULL); |
3305 | 3294 | ||
3306 | if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) | 3295 | if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) |
@@ -3314,7 +3303,7 @@ static int __devexit hme_sbus_remove(struct of_device *dev) | |||
3314 | struct happy_meal *hp = dev_get_drvdata(&dev->dev); | 3303 | struct happy_meal *hp = dev_get_drvdata(&dev->dev); |
3315 | struct net_device *net_dev = hp->dev; | 3304 | struct net_device *net_dev = hp->dev; |
3316 | 3305 | ||
3317 | unregister_netdevice(net_dev); | 3306 | unregister_netdev(net_dev); |
3318 | 3307 | ||
3319 | /* XXX qfe parent interrupt... */ | 3308 | /* XXX qfe parent interrupt... */ |
3320 | 3309 | ||
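The sunhme probe changes above (mirrored in sungem and tg3 elsewhere in this diff) stop dereferencing the sparc-only pcidev_cookie and instead take the device-tree node from pci_device_to_OF_node() and read its "local-mac-address" property, falling back to the IDPROM. A hedged sketch of that lookup; my_get_mac() is a hypothetical helper, not a function added by this commit:

```c
#include <linux/pci.h>
#include <linux/string.h>
#include <asm/idprom.h>
#include <asm/prom.h>

/* Sketch of the OF-based MAC address lookup used across these drivers
 * (sparc build assumed, hence the IDPROM fallback). */
static void my_get_mac(struct pci_dev *pdev, unsigned char *mac)
{
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6)
		memcpy(mac, addr, 6);
	else
		memcpy(mac, idprom->id_ethaddr, 6);  /* IDPROM fallback */
}
```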
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 5b00d79b5573..42722530ab24 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c | |||
@@ -547,7 +547,6 @@ static void lance_rx_dvma(struct net_device *dev) | |||
547 | 547 | ||
548 | lp->stats.rx_bytes += len; | 548 | lp->stats.rx_bytes += len; |
549 | 549 | ||
550 | skb->dev = dev; | ||
551 | skb_reserve(skb, 2); /* 16 byte align */ | 550 | skb_reserve(skb, 2); /* 16 byte align */ |
552 | skb_put(skb, len); /* make room */ | 551 | skb_put(skb, len); /* make room */ |
553 | eth_copy_and_sum(skb, | 552 | eth_copy_and_sum(skb, |
@@ -721,7 +720,6 @@ static void lance_rx_pio(struct net_device *dev) | |||
721 | 720 | ||
722 | lp->stats.rx_bytes += len; | 721 | lp->stats.rx_bytes += len; |
723 | 722 | ||
724 | skb->dev = dev; | ||
725 | skb_reserve (skb, 2); /* 16 byte align */ | 723 | skb_reserve (skb, 2); /* 16 byte align */ |
726 | skb_put(skb, len); /* make room */ | 724 | skb_put(skb, len); /* make room */ |
727 | lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); | 725 | lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); |
@@ -1145,7 +1143,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1145 | struct lance_init_block *ib = lp->init_block_mem; | 1143 | struct lance_init_block *ib = lp->init_block_mem; |
1146 | ib->btx_ring [entry].length = (-len) | 0xf000; | 1144 | ib->btx_ring [entry].length = (-len) | 0xf000; |
1147 | ib->btx_ring [entry].misc = 0; | 1145 | ib->btx_ring [entry].misc = 0; |
1148 | memcpy((char *)&ib->tx_buf [entry][0], skb->data, skblen); | 1146 | skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); |
1149 | if (len != skblen) | 1147 | if (len != skblen) |
1150 | memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); | 1148 | memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); |
1151 | ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); | 1149 | ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); |
@@ -1550,7 +1548,7 @@ static int __exit sunlance_sun4_remove(void) | |||
1550 | struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev); | 1548 | struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev); |
1551 | struct net_device *net_dev = lp->dev; | 1549 | struct net_device *net_dev = lp->dev; |
1552 | 1550 | ||
1553 | unregister_netdevice(net_dev); | 1551 | unregister_netdev(net_dev); |
1554 | 1552 | ||
1555 | lance_free_hwresources(lp); | 1553 | lance_free_hwresources(lp); |
1556 | 1554 | ||
@@ -1590,7 +1588,7 @@ static int __devexit sunlance_sbus_remove(struct of_device *dev) | |||
1590 | struct lance_private *lp = dev_get_drvdata(&dev->dev); | 1588 | struct lance_private *lp = dev_get_drvdata(&dev->dev); |
1591 | struct net_device *net_dev = lp->dev; | 1589 | struct net_device *net_dev = lp->dev; |
1592 | 1590 | ||
1593 | unregister_netdevice(net_dev); | 1591 | unregister_netdev(net_dev); |
1594 | 1592 | ||
1595 | lance_free_hwresources(lp); | 1593 | lance_free_hwresources(lp); |
1596 | 1594 | ||
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index 7874eb1ef043..fa70e0b78af7 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c | |||
@@ -437,7 +437,6 @@ static void qe_rx(struct sunqe *qep) | |||
437 | drops++; | 437 | drops++; |
438 | qep->net_stats.rx_dropped++; | 438 | qep->net_stats.rx_dropped++; |
439 | } else { | 439 | } else { |
440 | skb->dev = qep->dev; | ||
441 | skb_reserve(skb, 2); | 440 | skb_reserve(skb, 2); |
442 | skb_put(skb, len); | 441 | skb_put(skb, len); |
443 | eth_copy_and_sum(skb, (unsigned char *) this_qbuf, | 442 | eth_copy_and_sum(skb, (unsigned char *) this_qbuf, |
@@ -593,7 +592,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
593 | /* Avoid a race... */ | 592 | /* Avoid a race... */ |
594 | qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; | 593 | qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; |
595 | 594 | ||
596 | memcpy(txbuf, skb->data, len); | 595 | skb_copy_from_linear_data(skb, txbuf, len); |
597 | 596 | ||
598 | qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; | 597 | qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; |
599 | qep->qe_block->qe_txd[entry].tx_flags = | 598 | qep->qe_block->qe_txd[entry].tx_flags = |
@@ -845,6 +844,8 @@ static int __init qec_ether_init(struct sbus_dev *sdev) | |||
845 | if (!dev) | 844 | if (!dev) |
846 | return -ENOMEM; | 845 | return -ENOMEM; |
847 | 846 | ||
847 | memcpy(dev->dev_addr, idprom->id_ethaddr, 6); | ||
848 | |||
848 | qe = netdev_priv(dev); | 849 | qe = netdev_priv(dev); |
849 | 850 | ||
850 | i = of_getintprop_default(sdev->ofdev.node, "channel#", -1); | 851 | i = of_getintprop_default(sdev->ofdev.node, "channel#", -1); |
@@ -960,7 +961,7 @@ static int __devexit qec_sbus_remove(struct of_device *dev) | |||
960 | struct sunqe *qp = dev_get_drvdata(&dev->dev); | 961 | struct sunqe *qp = dev_get_drvdata(&dev->dev); |
961 | struct net_device *net_dev = qp->dev; | 962 | struct net_device *net_dev = qp->dev; |
962 | 963 | ||
963 | unregister_netdevice(net_dev); | 964 | unregister_netdev(net_dev); |
964 | 965 | ||
965 | sbus_iounmap(qp->qcregs, CREG_REG_SIZE); | 966 | sbus_iounmap(qp->qcregs, CREG_REG_SIZE); |
966 | sbus_iounmap(qp->mregs, MREGS_REG_SIZE); | 967 | sbus_iounmap(qp->mregs, MREGS_REG_SIZE); |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index e3a7e3ceab77..d7741e23f8de 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -1145,7 +1145,6 @@ tc35815_rx(struct net_device *dev) | |||
1145 | break; | 1145 | break; |
1146 | } | 1146 | } |
1147 | skb_reserve(skb, 2); /* 16 bit alignment */ | 1147 | skb_reserve(skb, 2); /* 16 bit alignment */ |
1148 | skb->dev = dev; | ||
1149 | 1148 | ||
1150 | data = skb_put(skb, pkt_len); | 1149 | data = skb_put(skb, pkt_len); |
1151 | 1150 | ||
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 256969e1300c..9488f49ea569 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -40,16 +40,16 @@ | |||
40 | #include <linux/dma-mapping.h> | 40 | #include <linux/dma-mapping.h> |
41 | 41 | ||
42 | #include <net/checksum.h> | 42 | #include <net/checksum.h> |
43 | #include <net/ip.h> | ||
43 | 44 | ||
44 | #include <asm/system.h> | 45 | #include <asm/system.h> |
45 | #include <asm/io.h> | 46 | #include <asm/io.h> |
46 | #include <asm/byteorder.h> | 47 | #include <asm/byteorder.h> |
47 | #include <asm/uaccess.h> | 48 | #include <asm/uaccess.h> |
48 | 49 | ||
49 | #ifdef CONFIG_SPARC64 | 50 | #ifdef CONFIG_SPARC |
50 | #include <asm/idprom.h> | 51 | #include <asm/idprom.h> |
51 | #include <asm/oplib.h> | 52 | #include <asm/prom.h> |
52 | #include <asm/pbm.h> | ||
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 55 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
@@ -3349,7 +3349,7 @@ static int tg3_rx(struct tg3 *tp, int budget) | |||
3349 | skb_reserve(copy_skb, 2); | 3349 | skb_reserve(copy_skb, 2); |
3350 | skb_put(copy_skb, len); | 3350 | skb_put(copy_skb, len); |
3351 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 3351 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
3352 | memcpy(copy_skb->data, skb->data, len); | 3352 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
3353 | pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 3353 | pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
3354 | 3354 | ||
3355 | /* We'll reuse the original ring buffer. */ | 3355 | /* We'll reuse the original ring buffer. */ |
@@ -3908,20 +3908,20 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3908 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 3908 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
3909 | mss |= (skb_headlen(skb) - ETH_HLEN) << 9; | 3909 | mss |= (skb_headlen(skb) - ETH_HLEN) << 9; |
3910 | else { | 3910 | else { |
3911 | tcp_opt_len = ((skb->h.th->doff - 5) * 4); | 3911 | struct iphdr *iph = ip_hdr(skb); |
3912 | ip_tcp_len = (skb->nh.iph->ihl * 4) + | 3912 | |
3913 | sizeof(struct tcphdr); | 3913 | tcp_opt_len = tcp_optlen(skb); |
3914 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); | ||
3914 | 3915 | ||
3915 | skb->nh.iph->check = 0; | 3916 | iph->check = 0; |
3916 | skb->nh.iph->tot_len = htons(mss + ip_tcp_len + | 3917 | iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); |
3917 | tcp_opt_len); | ||
3918 | mss |= (ip_tcp_len + tcp_opt_len) << 9; | 3918 | mss |= (ip_tcp_len + tcp_opt_len) << 9; |
3919 | } | 3919 | } |
3920 | 3920 | ||
3921 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | | 3921 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | |
3922 | TXD_FLAG_CPU_POST_DMA); | 3922 | TXD_FLAG_CPU_POST_DMA); |
3923 | 3923 | ||
3924 | skb->h.th->check = 0; | 3924 | tcp_hdr(skb)->check = 0; |
3925 | 3925 | ||
3926 | } | 3926 | } |
3927 | else if (skb->ip_summed == CHECKSUM_PARTIAL) | 3927 | else if (skb->ip_summed == CHECKSUM_PARTIAL) |
@@ -4055,6 +4055,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) | |||
4055 | mss = 0; | 4055 | mss = 0; |
4056 | if (skb->len > (tp->dev->mtu + ETH_HLEN) && | 4056 | if (skb->len > (tp->dev->mtu + ETH_HLEN) && |
4057 | (mss = skb_shinfo(skb)->gso_size) != 0) { | 4057 | (mss = skb_shinfo(skb)->gso_size) != 0) { |
4058 | struct iphdr *iph; | ||
4058 | int tcp_opt_len, ip_tcp_len, hdr_len; | 4059 | int tcp_opt_len, ip_tcp_len, hdr_len; |
4059 | 4060 | ||
4060 | if (skb_header_cloned(skb) && | 4061 | if (skb_header_cloned(skb) && |
@@ -4063,8 +4064,8 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) | |||
4063 | goto out_unlock; | 4064 | goto out_unlock; |
4064 | } | 4065 | } |
4065 | 4066 | ||
4066 | tcp_opt_len = ((skb->h.th->doff - 5) * 4); | 4067 | tcp_opt_len = tcp_optlen(skb); |
4067 | ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); | 4068 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); |
4068 | 4069 | ||
4069 | hdr_len = ip_tcp_len + tcp_opt_len; | 4070 | hdr_len = ip_tcp_len + tcp_opt_len; |
4070 | if (unlikely((ETH_HLEN + hdr_len) > 80) && | 4071 | if (unlikely((ETH_HLEN + hdr_len) > 80) && |
@@ -4074,34 +4075,31 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) | |||
4074 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | | 4075 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | |
4075 | TXD_FLAG_CPU_POST_DMA); | 4076 | TXD_FLAG_CPU_POST_DMA); |
4076 | 4077 | ||
4077 | skb->nh.iph->check = 0; | 4078 | iph = ip_hdr(skb); |
4078 | skb->nh.iph->tot_len = htons(mss + hdr_len); | 4079 | iph->check = 0; |
4080 | iph->tot_len = htons(mss + hdr_len); | ||
4079 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { | 4081 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { |
4080 | skb->h.th->check = 0; | 4082 | tcp_hdr(skb)->check = 0; |
4081 | base_flags &= ~TXD_FLAG_TCPUDP_CSUM; | 4083 | base_flags &= ~TXD_FLAG_TCPUDP_CSUM; |
4082 | } | 4084 | } else |
4083 | else { | 4085 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
4084 | skb->h.th->check = | 4086 | iph->daddr, 0, |
4085 | ~csum_tcpudp_magic(skb->nh.iph->saddr, | 4087 | IPPROTO_TCP, |
4086 | skb->nh.iph->daddr, | 4088 | 0); |
4087 | 0, IPPROTO_TCP, 0); | ||
4088 | } | ||
4089 | 4089 | ||
4090 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || | 4090 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || |
4091 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { | 4091 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { |
4092 | if (tcp_opt_len || skb->nh.iph->ihl > 5) { | 4092 | if (tcp_opt_len || iph->ihl > 5) { |
4093 | int tsflags; | 4093 | int tsflags; |
4094 | 4094 | ||
4095 | tsflags = ((skb->nh.iph->ihl - 5) + | 4095 | tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); |
4096 | (tcp_opt_len >> 2)); | ||
4097 | mss |= (tsflags << 11); | 4096 | mss |= (tsflags << 11); |
4098 | } | 4097 | } |
4099 | } else { | 4098 | } else { |
4100 | if (tcp_opt_len || skb->nh.iph->ihl > 5) { | 4099 | if (tcp_opt_len || iph->ihl > 5) { |
4101 | int tsflags; | 4100 | int tsflags; |
4102 | 4101 | ||
4103 | tsflags = ((skb->nh.iph->ihl - 5) + | 4102 | tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); |
4104 | (tcp_opt_len >> 2)); | ||
4105 | base_flags |= tsflags << 12; | 4103 | base_flags |= tsflags << 12; |
4106 | } | 4104 | } |
4107 | } | 4105 | } |
@@ -10988,24 +10986,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
10988 | return err; | 10986 | return err; |
10989 | } | 10987 | } |
10990 | 10988 | ||
10991 | #ifdef CONFIG_SPARC64 | 10989 | #ifdef CONFIG_SPARC |
10992 | static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) | 10990 | static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) |
10993 | { | 10991 | { |
10994 | struct net_device *dev = tp->dev; | 10992 | struct net_device *dev = tp->dev; |
10995 | struct pci_dev *pdev = tp->pdev; | 10993 | struct pci_dev *pdev = tp->pdev; |
10996 | struct pcidev_cookie *pcp = pdev->sysdata; | 10994 | struct device_node *dp = pci_device_to_OF_node(pdev); |
10997 | 10995 | const unsigned char *addr; | |
10998 | if (pcp != NULL) { | 10996 | int len; |
10999 | unsigned char *addr; | 10997 | |
11000 | int len; | 10998 | addr = of_get_property(dp, "local-mac-address", &len); |
11001 | 10999 | if (addr && len == 6) { | |
11002 | addr = of_get_property(pcp->prom_node, "local-mac-address", | 11000 | memcpy(dev->dev_addr, addr, 6); |
11003 | &len); | 11001 | memcpy(dev->perm_addr, dev->dev_addr, 6); |
11004 | if (addr && len == 6) { | 11002 | return 0; |
11005 | memcpy(dev->dev_addr, addr, 6); | ||
11006 | memcpy(dev->perm_addr, dev->dev_addr, 6); | ||
11007 | return 0; | ||
11008 | } | ||
11009 | } | 11003 | } |
11010 | return -ENODEV; | 11004 | return -ENODEV; |
11011 | } | 11005 | } |
@@ -11026,7 +11020,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) | |||
11026 | u32 hi, lo, mac_offset; | 11020 | u32 hi, lo, mac_offset; |
11027 | int addr_ok = 0; | 11021 | int addr_ok = 0; |
11028 | 11022 | ||
11029 | #ifdef CONFIG_SPARC64 | 11023 | #ifdef CONFIG_SPARC |
11030 | if (!tg3_get_macaddr_sparc(tp)) | 11024 | if (!tg3_get_macaddr_sparc(tp)) |
11031 | return 0; | 11025 | return 0; |
11032 | #endif | 11026 | #endif |
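The tg3 TSO hunks above drop the open-coded header arithmetic in favour of ip_hdr(), tcp_hdr(), ip_hdrlen() and tcp_optlen(), and on chips without full hardware TSO they seed the TCP checksum with the pseudo-header sum. A schematic sketch of that header preparation, with all of the tg3 flag handling omitted; my_tso_prep() is illustrative only:

```c
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <net/ip.h>

/* Illustrative TSO header fixup along the lines of the converted code. */
static void my_tso_prep(struct sk_buff *skb, unsigned int mss, int hw_tso)
{
	struct iphdr *iph = ip_hdr(skb);
	unsigned int hdr_len = ip_hdrlen(skb) + sizeof(struct tcphdr) +
			       tcp_optlen(skb);

	iph->check = 0;
	iph->tot_len = htons(mss + hdr_len);

	if (hw_tso)
		tcp_hdr(skb)->check = 0;   /* hardware computes the whole sum */
	else                               /* seed with the pseudo-header sum */
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
}
```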
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index f85f00251123..106dc1ef0acb 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c | |||
@@ -1112,7 +1112,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) | |||
1112 | 1112 | ||
1113 | if ( bbuf ) { | 1113 | if ( bbuf ) { |
1114 | tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); | 1114 | tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); |
1115 | memcpy( tail_buffer, skb->data, skb->len ); | 1115 | skb_copy_from_linear_data(skb, tail_buffer, skb->len); |
1116 | } else { | 1116 | } else { |
1117 | tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); | 1117 | tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); |
1118 | TLan_StoreSKB(tail_list, skb); | 1118 | TLan_StoreSKB(tail_list, skb); |
@@ -1577,7 +1577,6 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) | |||
1577 | printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n"); | 1577 | printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n"); |
1578 | else { | 1578 | else { |
1579 | head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE); | 1579 | head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE); |
1580 | skb->dev = dev; | ||
1581 | skb_reserve(skb, 2); | 1580 | skb_reserve(skb, 2); |
1582 | t = (void *) skb_put(skb, frameSize); | 1581 | t = (void *) skb_put(skb, frameSize); |
1583 | 1582 | ||
@@ -1608,7 +1607,6 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) | |||
1608 | skb->protocol = eth_type_trans( skb, dev ); | 1607 | skb->protocol = eth_type_trans( skb, dev ); |
1609 | netif_rx( skb ); | 1608 | netif_rx( skb ); |
1610 | 1609 | ||
1611 | new_skb->dev = dev; | ||
1612 | skb_reserve( new_skb, 2 ); | 1610 | skb_reserve( new_skb, 2 ); |
1613 | t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE ); | 1611 | t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE ); |
1614 | head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); | 1612 | head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); |
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 7580bdeacadc..e22a3f5333ef 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c | |||
@@ -933,20 +933,21 @@ static void xl_rx(struct net_device *dev) | |||
933 | return ; | 933 | return ; |
934 | } | 934 | } |
935 | 935 | ||
936 | skb->dev = dev ; | ||
937 | |||
938 | while (xl_priv->rx_ring_tail != temp_ring_loc) { | 936 | while (xl_priv->rx_ring_tail != temp_ring_loc) { |
939 | copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ; | 937 | copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ; |
940 | frame_length -= copy_len ; | 938 | frame_length -= copy_len ; |
941 | pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 939 | pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
942 | memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, copy_len) ; | 940 | skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail], |
941 | skb_put(skb, copy_len), | ||
942 | copy_len); | ||
943 | pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 943 | pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
944 | adv_rx_ring(dev) ; | 944 | adv_rx_ring(dev) ; |
945 | } | 945 | } |
946 | 946 | ||
947 | /* Now we have found the last fragment */ | 947 | /* Now we have found the last fragment */ |
948 | pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 948 | pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
949 | memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, frame_length) ; | 949 | skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail], |
950 | skb_put(skb,copy_len), frame_length); | ||
950 | /* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */ | 951 | /* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */ |
951 | pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 952 | pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
952 | adv_rx_ring(dev) ; | 953 | adv_rx_ring(dev) ; |
@@ -967,8 +968,6 @@ static void xl_rx(struct net_device *dev) | |||
967 | return ; | 968 | return ; |
968 | } | 969 | } |
969 | 970 | ||
970 | skb->dev = dev ; | ||
971 | |||
972 | skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ; | 971 | skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ; |
973 | pci_unmap_single(xl_priv->pdev, xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr, xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 972 | pci_unmap_single(xl_priv->pdev, xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr, xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
974 | skb_put(skb2, frame_length) ; | 973 | skb_put(skb2, frame_length) ; |
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 01d55315ee8c..1e8958ee2d0a 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c | |||
@@ -1771,7 +1771,6 @@ static void tr_rx(struct net_device *dev) | |||
1771 | /*BMS again, if she comes in with few but leaves with many */ | 1771 | /*BMS again, if she comes in with few but leaves with many */ |
1772 | skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len); | 1772 | skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len); |
1773 | skb_put(skb, length); | 1773 | skb_put(skb, length); |
1774 | skb->dev = dev; | ||
1775 | data = skb->data; | 1774 | data = skb->data; |
1776 | rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len))); | 1775 | rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len))); |
1777 | rbufdata = rbuf + offsetof(struct rec_buf, data); | 1776 | rbufdata = rbuf + offsetof(struct rec_buf, data); |
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index e999feb8c0bb..5d849c089a3b 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c | |||
@@ -944,8 +944,6 @@ static void streamer_rx(struct net_device *dev) | |||
944 | printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); | 944 | printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); |
945 | streamer_priv->streamer_stats.rx_dropped++; | 945 | streamer_priv->streamer_stats.rx_dropped++; |
946 | } else { /* we allocated an skb OK */ | 946 | } else { /* we allocated an skb OK */ |
947 | skb->dev = dev; | ||
948 | |||
949 | if (buffer_cnt == 1) { | 947 | if (buffer_cnt == 1) { |
950 | /* release the DMA mapping */ | 948 | /* release the DMA mapping */ |
951 | pci_unmap_single(streamer_priv->pci_dev, | 949 | pci_unmap_single(streamer_priv->pci_dev, |
@@ -1607,10 +1605,11 @@ static void streamer_arb_cmd(struct net_device *dev) | |||
1607 | frame_data, buffer_len); | 1605 | frame_data, buffer_len); |
1608 | } while (next_ptr && (buff_off = next_ptr)); | 1606 | } while (next_ptr && (buff_off = next_ptr)); |
1609 | 1607 | ||
1608 | mac_frame->protocol = tr_type_trans(mac_frame, dev); | ||
1610 | #if STREAMER_NETWORK_MONITOR | 1609 | #if STREAMER_NETWORK_MONITOR |
1611 | printk(KERN_WARNING "%s: Received MAC Frame, details: \n", | 1610 | printk(KERN_WARNING "%s: Received MAC Frame, details: \n", |
1612 | dev->name); | 1611 | dev->name); |
1613 | mac_hdr = (struct trh_hdr *) mac_frame->data; | 1612 | mac_hdr = tr_hdr(mac_frame); |
1614 | printk(KERN_WARNING | 1613 | printk(KERN_WARNING |
1615 | "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", | 1614 | "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", |
1616 | dev->name, mac_hdr->daddr[0], mac_hdr->daddr[1], | 1615 | dev->name, mac_hdr->daddr[0], mac_hdr->daddr[1], |
@@ -1622,8 +1621,6 @@ static void streamer_arb_cmd(struct net_device *dev) | |||
1622 | mac_hdr->saddr[2], mac_hdr->saddr[3], | 1621 | mac_hdr->saddr[2], mac_hdr->saddr[3], |
1623 | mac_hdr->saddr[4], mac_hdr->saddr[5]); | 1622 | mac_hdr->saddr[4], mac_hdr->saddr[5]); |
1624 | #endif | 1623 | #endif |
1625 | mac_frame->dev = dev; | ||
1626 | mac_frame->protocol = tr_type_trans(mac_frame, dev); | ||
1627 | netif_rx(mac_frame); | 1624 | netif_rx(mac_frame); |
1628 | 1625 | ||
1629 | /* Now tell the card we have dealt with the received frame */ | 1626 | /* Now tell the card we have dealt with the received frame */ |
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index 8f4ecc1109cb..09b3cfb8e809 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c | |||
@@ -814,8 +814,6 @@ static void olympic_rx(struct net_device *dev) | |||
814 | olympic_priv->rx_ring_last_received += i ; | 814 | olympic_priv->rx_ring_last_received += i ; |
815 | olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; | 815 | olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; |
816 | } else { | 816 | } else { |
817 | skb->dev = dev ; | ||
818 | |||
819 | /* Optimise based upon number of buffers used. | 817 | /* Optimise based upon number of buffers used. |
820 | If only one buffer is used we can simply swap the buffers around. | 818 | If only one buffer is used we can simply swap the buffers around. |
821 | If more than one then we must use the new buffer and copy the information | 819 | If more than one then we must use the new buffer and copy the information |
@@ -847,7 +845,9 @@ static void olympic_rx(struct net_device *dev) | |||
847 | pci_dma_sync_single_for_cpu(olympic_priv->pdev, | 845 | pci_dma_sync_single_for_cpu(olympic_priv->pdev, |
848 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), | 846 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), |
849 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 847 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
850 | memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ; | 848 | skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received], |
849 | skb_put(skb,length - 4), | ||
850 | length - 4); | ||
851 | pci_dma_sync_single_for_device(olympic_priv->pdev, | 851 | pci_dma_sync_single_for_device(olympic_priv->pdev, |
852 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), | 852 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), |
853 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 853 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
@@ -864,7 +864,9 @@ static void olympic_rx(struct net_device *dev) | |||
864 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 864 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
865 | rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]); | 865 | rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]); |
866 | cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length)); | 866 | cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length)); |
867 | memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ; | 867 | skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received], |
868 | skb_put(skb, cpy_length), | ||
869 | cpy_length); | ||
868 | pci_dma_sync_single_for_device(olympic_priv->pdev, | 870 | pci_dma_sync_single_for_device(olympic_priv->pdev, |
869 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), | 871 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), |
870 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 872 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
@@ -1440,16 +1442,16 @@ static void olympic_arb_cmd(struct net_device *dev) | |||
1440 | next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next)); | 1442 | next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next)); |
1441 | } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr))); | 1443 | } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr))); |
1442 | 1444 | ||
1445 | mac_frame->protocol = tr_type_trans(mac_frame, dev); | ||
1446 | |||
1443 | if (olympic_priv->olympic_network_monitor) { | 1447 | if (olympic_priv->olympic_network_monitor) { |
1444 | struct trh_hdr *mac_hdr ; | 1448 | struct trh_hdr *mac_hdr ; |
1445 | printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ; | 1449 | printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ; |
1446 | mac_hdr = (struct trh_hdr *)mac_frame->data ; | 1450 | mac_hdr = tr_hdr(mac_frame); |
1447 | printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ; | 1451 | printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ; |
1448 | printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ; | 1452 | printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ; |
1449 | } | 1453 | } |
1450 | mac_frame->dev = dev ; | 1454 | netif_rx(mac_frame); |
1451 | mac_frame->protocol = tr_type_trans(mac_frame,dev); | ||
1452 | netif_rx(mac_frame) ; | ||
1453 | dev->last_rx = jiffies; | 1455 | dev->last_rx = jiffies; |
1454 | 1456 | ||
1455 | drop_frame: | 1457 | drop_frame: |
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index cec282a6f62d..9bbea5c8acf4 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c | |||
@@ -3889,14 +3889,13 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, | |||
3889 | 3889 | ||
3890 | /* Slide data into a sleek skb. */ | 3890 | /* Slide data into a sleek skb. */ |
3891 | skb_put(skb, skb->len); | 3891 | skb_put(skb, skb->len); |
3892 | memcpy(skb->data, rmf, skb->len); | 3892 | skb_copy_to_linear_data(skb, rmf, skb->len); |
3893 | 3893 | ||
3894 | /* Update Counters */ | 3894 | /* Update Counters */ |
3895 | tp->MacStat.rx_packets++; | 3895 | tp->MacStat.rx_packets++; |
3896 | tp->MacStat.rx_bytes += skb->len; | 3896 | tp->MacStat.rx_bytes += skb->len; |
3897 | 3897 | ||
3898 | /* Kick the packet on up. */ | 3898 | /* Kick the packet on up. */ |
3899 | skb->dev = dev; | ||
3900 | skb->protocol = tr_type_trans(skb, dev); | 3899 | skb->protocol = tr_type_trans(skb, dev); |
3901 | netif_rx(skb); | 3900 | netif_rx(skb); |
3902 | dev->last_rx = jiffies; | 3901 | dev->last_rx = jiffies; |
@@ -4476,14 +4475,13 @@ static int smctr_rx_frame(struct net_device *dev) | |||
4476 | if (skb) { | 4475 | if (skb) { |
4477 | skb_put(skb, rx_size); | 4476 | skb_put(skb, rx_size); |
4478 | 4477 | ||
4479 | memcpy(skb->data, pbuff, rx_size); | 4478 | skb_copy_to_linear_data(skb, pbuff, rx_size); |
4480 | 4479 | ||
4481 | /* Update Counters */ | 4480 | /* Update Counters */ |
4482 | tp->MacStat.rx_packets++; | 4481 | tp->MacStat.rx_packets++; |
4483 | tp->MacStat.rx_bytes += skb->len; | 4482 | tp->MacStat.rx_bytes += skb->len; |
4484 | 4483 | ||
4485 | /* Kick the packet on up. */ | 4484 | /* Kick the packet on up. */ |
4486 | skb->dev = dev; | ||
4487 | skb->protocol = tr_type_trans(skb, dev); | 4485 | skb->protocol = tr_type_trans(skb, dev); |
4488 | netif_rx(skb); | 4486 | netif_rx(skb); |
4489 | dev->last_rx = jiffies; | 4487 | dev->last_rx = jiffies; |
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c index ea797ca2b988..12bd294045a7 100644 --- a/drivers/net/tokenring/tms380tr.c +++ b/drivers/net/tokenring/tms380tr.c | |||
@@ -644,7 +644,7 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device | |||
644 | dmabuf = 0; | 644 | dmabuf = 0; |
645 | i = tp->TplFree->TPLIndex; | 645 | i = tp->TplFree->TPLIndex; |
646 | buf = tp->LocalTxBuffers[i]; | 646 | buf = tp->LocalTxBuffers[i]; |
647 | memcpy(buf, skb->data, length); | 647 | skb_copy_from_linear_data(skb, buf, length); |
648 | newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer; | 648 | newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer; |
649 | } | 649 | } |
650 | else { | 650 | else { |
@@ -2168,7 +2168,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev) | |||
2168 | } | 2168 | } |
2169 | else | 2169 | else |
2170 | { | 2170 | { |
2171 | skb->dev = dev; | ||
2172 | skb_put(skb, tp->MaxPacketSize); | 2171 | skb_put(skb, tp->MaxPacketSize); |
2173 | rpl->SkbStat = SKB_DATA_COPY; | 2172 | rpl->SkbStat = SKB_DATA_COPY; |
2174 | ReceiveDataPtr = rpl->MData; | 2173 | ReceiveDataPtr = rpl->MData; |
@@ -2179,7 +2178,8 @@ static void tms380tr_rcv_status_irq(struct net_device *dev) | |||
2179 | || rpl->SkbStat == SKB_DMA_DIRECT)) | 2178 | || rpl->SkbStat == SKB_DMA_DIRECT)) |
2180 | { | 2179 | { |
2181 | if(rpl->SkbStat == SKB_DATA_COPY) | 2180 | if(rpl->SkbStat == SKB_DATA_COPY) |
2182 | memcpy(skb->data, ReceiveDataPtr, Length); | 2181 | skb_copy_to_linear_data(skb, ReceiveDataPtr, |
2182 | Length); | ||
2183 | 2183 | ||
2184 | /* Deliver frame to system */ | 2184 | /* Deliver frame to system */ |
2185 | rpl->Skb = NULL; | 2185 | rpl->Skb = NULL; |
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index d92c5c597e16..0bfc2c9c1c08 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c | |||
@@ -788,7 +788,6 @@ static int tsi108_complete_rx(struct net_device *dev, int budget) | |||
788 | printk(".\n"); | 788 | printk(".\n"); |
789 | } | 789 | } |
790 | 790 | ||
791 | skb->dev = dev; | ||
792 | skb_put(skb, data->rxring[rx].len); | 791 | skb_put(skb, data->rxring[rx].len); |
793 | skb->protocol = eth_type_trans(skb, dev); | 792 | skb->protocol = eth_type_trans(skb, dev); |
794 | netif_receive_skb(skb); | 793 | netif_receive_skb(skb); |
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index c82befa209a2..861729806dc1 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -63,7 +63,7 @@ MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number"); | |||
63 | 63 | ||
64 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ | 64 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ |
65 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ | 65 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ |
66 | || defined(__sparc__) || defined(__ia64__) \ | 66 | || defined(CONFIG_SPARC) || defined(__ia64__) \ |
67 | || defined(__sh__) || defined(__mips__) | 67 | || defined(__sh__) || defined(__mips__) |
68 | static int rx_copybreak = 1518; | 68 | static int rx_copybreak = 1518; |
69 | #else | 69 | #else |
@@ -435,7 +435,6 @@ static void de_rx (struct de_private *de) | |||
435 | rx_work = 100; | 435 | rx_work = 100; |
436 | goto rx_next; | 436 | goto rx_next; |
437 | } | 437 | } |
438 | copy_skb->dev = de->dev; | ||
439 | 438 | ||
440 | if (!copying_skb) { | 439 | if (!copying_skb) { |
441 | pci_unmap_single(de->pdev, mapping, | 440 | pci_unmap_single(de->pdev, mapping, |
@@ -450,8 +449,8 @@ static void de_rx (struct de_private *de) | |||
450 | } else { | 449 | } else { |
451 | pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); | 450 | pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); |
452 | skb_reserve(copy_skb, RX_OFFSET); | 451 | skb_reserve(copy_skb, RX_OFFSET); |
453 | memcpy(skb_put(copy_skb, len), skb->data, len); | 452 | skb_copy_from_linear_data(skb, skb_put(copy_skb, len), |
454 | 453 | len); | |
455 | pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); | 454 | pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); |
456 | 455 | ||
457 | /* We'll reuse the original ring buffer. */ | 456 | /* We'll reuse the original ring buffer. */ |
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index 4b3cd3d8b62a..62143f92c231 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c | |||
@@ -1160,7 +1160,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1160 | sprintf(lp->adapter_name,"%s (%s)", name, gendev->bus_id); | 1160 | sprintf(lp->adapter_name,"%s (%s)", name, gendev->bus_id); |
1161 | 1161 | ||
1162 | lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc); | 1162 | lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc); |
1163 | #if defined(__alpha__) || defined(__powerpc__) || defined(__sparc_v9__) || defined(DE4X5_DO_MEMCPY) | 1163 | #if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY) |
1164 | lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN; | 1164 | lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN; |
1165 | #endif | 1165 | #endif |
1166 | lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size, | 1166 | lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size, |
@@ -1175,7 +1175,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1175 | ** Set up the RX descriptor ring (Intels) | 1175 | ** Set up the RX descriptor ring (Intels) |
1176 | ** Allocate contiguous receive buffers, long word aligned (Alphas) | 1176 | ** Allocate contiguous receive buffers, long word aligned (Alphas) |
1177 | */ | 1177 | */ |
1178 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY) | 1178 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) |
1179 | for (i=0; i<NUM_RX_DESC; i++) { | 1179 | for (i=0; i<NUM_RX_DESC; i++) { |
1180 | lp->rx_ring[i].status = 0; | 1180 | lp->rx_ring[i].status = 0; |
1181 | lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); | 1181 | lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); |
@@ -1252,11 +1252,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1252 | mii_get_phy(dev); | 1252 | mii_get_phy(dev); |
1253 | } | 1253 | } |
1254 | 1254 | ||
1255 | #ifndef __sparc_v9__ | ||
1256 | printk(" and requires IRQ%d (provided by %s).\n", dev->irq, | 1255 | printk(" and requires IRQ%d (provided by %s).\n", dev->irq, |
1257 | #else | ||
1258 | printk(" and requires IRQ%x (provided by %s).\n", dev->irq, | ||
1259 | #endif | ||
1260 | ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); | 1256 | ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); |
1261 | } | 1257 | } |
1262 | 1258 | ||
@@ -3627,14 +3623,13 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len) | |||
3627 | struct de4x5_private *lp = netdev_priv(dev); | 3623 | struct de4x5_private *lp = netdev_priv(dev); |
3628 | struct sk_buff *p; | 3624 | struct sk_buff *p; |
3629 | 3625 | ||
3630 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY) | 3626 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) |
3631 | struct sk_buff *ret; | 3627 | struct sk_buff *ret; |
3632 | u_long i=0, tmp; | 3628 | u_long i=0, tmp; |
3633 | 3629 | ||
3634 | p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2); | 3630 | p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2); |
3635 | if (!p) return NULL; | 3631 | if (!p) return NULL; |
3636 | 3632 | ||
3637 | p->dev = dev; | ||
3638 | tmp = virt_to_bus(p->data); | 3633 | tmp = virt_to_bus(p->data); |
3639 | i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp; | 3634 | i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp; |
3640 | skb_reserve(p, i); | 3635 | skb_reserve(p, i); |
@@ -3655,7 +3650,6 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len) | |||
3655 | p = dev_alloc_skb(len + 2); | 3650 | p = dev_alloc_skb(len + 2); |
3656 | if (!p) return NULL; | 3651 | if (!p) return NULL; |
3657 | 3652 | ||
3658 | p->dev = dev; | ||
3659 | skb_reserve(p, 2); /* Align */ | 3653 | skb_reserve(p, 2); /* Align */ |
3660 | if (index < lp->rx_old) { /* Wrapped buffer */ | 3654 | if (index < lp->rx_old) { /* Wrapped buffer */ |
3661 | short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ; | 3655 | short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ; |
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index 9aeac76184f3..b3a64ca98634 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c | |||
@@ -682,7 +682,7 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev) | |||
682 | 682 | ||
683 | /* transmit this packet */ | 683 | /* transmit this packet */ |
684 | txptr = db->tx_insert_ptr; | 684 | txptr = db->tx_insert_ptr; |
685 | memcpy(txptr->tx_buf_ptr, skb->data, skb->len); | 685 | skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len); |
686 | txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); | 686 | txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); |
687 | 687 | ||
688 | /* Point to next transmit free descriptor */ | 688 | /* Point to next transmit free descriptor */ |
@@ -988,14 +988,14 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) | |||
988 | 988 | ||
989 | skb = newskb; | 989 | skb = newskb; |
990 | /* size less than COPY_SIZE, allocate a rxlen SKB */ | 990 | /* size less than COPY_SIZE, allocate a rxlen SKB */ |
991 | skb->dev = dev; | ||
992 | skb_reserve(skb, 2); /* 16byte align */ | 991 | skb_reserve(skb, 2); /* 16byte align */ |
993 | memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen); | 992 | skb_copy_from_linear_data(rxptr->rx_skb_ptr, |
993 | skb_put(skb, rxlen), | ||
994 | rxlen); | ||
994 | dmfe_reuse_skb(db, rxptr->rx_skb_ptr); | 995 | dmfe_reuse_skb(db, rxptr->rx_skb_ptr); |
995 | } else { | 996 | } else |
996 | skb->dev = dev; | ||
997 | skb_put(skb, rxlen); | 997 | skb_put(skb, rxlen); |
998 | } | 998 | |
999 | skb->protocol = eth_type_trans(skb, dev); | 999 | skb->protocol = eth_type_trans(skb, dev); |
1000 | netif_rx(skb); | 1000 | netif_rx(skb); |
1001 | dev->last_rx = jiffies; | 1001 | dev->last_rx = jiffies; |
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c index e3488d7b8ede..e86df07769a1 100644 --- a/drivers/net/tulip/interrupt.c +++ b/drivers/net/tulip/interrupt.c | |||
@@ -192,7 +192,6 @@ int tulip_poll(struct net_device *dev, int *budget) | |||
192 | to a minimally-sized skbuff. */ | 192 | to a minimally-sized skbuff. */ |
193 | if (pkt_len < tulip_rx_copybreak | 193 | if (pkt_len < tulip_rx_copybreak |
194 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 194 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
195 | skb->dev = dev; | ||
196 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 195 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
197 | pci_dma_sync_single_for_cpu(tp->pdev, | 196 | pci_dma_sync_single_for_cpu(tp->pdev, |
198 | tp->rx_buffers[entry].mapping, | 197 | tp->rx_buffers[entry].mapping, |
@@ -416,7 +415,6 @@ static int tulip_rx(struct net_device *dev) | |||
416 | to a minimally-sized skbuff. */ | 415 | to a minimally-sized skbuff. */ |
417 | if (pkt_len < tulip_rx_copybreak | 416 | if (pkt_len < tulip_rx_copybreak |
418 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 417 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
419 | skb->dev = dev; | ||
420 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 418 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
421 | pci_dma_sync_single_for_cpu(tp->pdev, | 419 | pci_dma_sync_single_for_cpu(tp->pdev, |
422 | tp->rx_buffers[entry].mapping, | 420 | tp->rx_buffers[entry].mapping, |
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index e3774a522372..e9bf526ec534 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -36,8 +36,8 @@ | |||
36 | #include <asm/unaligned.h> | 36 | #include <asm/unaligned.h> |
37 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
38 | 38 | ||
39 | #ifdef __sparc__ | 39 | #ifdef CONFIG_SPARC |
40 | #include <asm/pbm.h> | 40 | #include <asm/prom.h> |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | static char version[] __devinitdata = | 43 | static char version[] __devinitdata = |
@@ -67,7 +67,7 @@ const char * const medianame[32] = { | |||
67 | 67 | ||
68 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ | 68 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ |
69 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ | 69 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ |
70 | || defined(__sparc__) || defined(__ia64__) \ | 70 | || defined(CONFIG_SPARC) || defined(__ia64__) \ |
71 | || defined(__sh__) || defined(__mips__) | 71 | || defined(__sh__) || defined(__mips__) |
72 | static int rx_copybreak = 1518; | 72 | static int rx_copybreak = 1518; |
73 | #else | 73 | #else |
@@ -91,7 +91,7 @@ static int rx_copybreak = 100; | |||
91 | static int csr0 = 0x01A00000 | 0xE000; | 91 | static int csr0 = 0x01A00000 | 0xE000; |
92 | #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__) | 92 | #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__) |
93 | static int csr0 = 0x01A00000 | 0x8000; | 93 | static int csr0 = 0x01A00000 | 0x8000; |
94 | #elif defined(__sparc__) || defined(__hppa__) | 94 | #elif defined(CONFIG_SPARC) || defined(__hppa__) |
95 | /* The UltraSparc PCI controllers will disconnect at every 64-byte | 95 | /* The UltraSparc PCI controllers will disconnect at every 64-byte |
96 | * crossing anyways so it makes no sense to tell Tulip to burst | 96 | * crossing anyways so it makes no sense to tell Tulip to burst |
97 | * any more than that. | 97 | * any more than that. |
@@ -1315,7 +1315,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1315 | /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ | 1315 | /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ |
1316 | if (tulip_uli_dm_quirk(pdev)) { | 1316 | if (tulip_uli_dm_quirk(pdev)) { |
1317 | csr0 &= ~0x01f100ff; | 1317 | csr0 &= ~0x01f100ff; |
1318 | #if defined(__sparc__) | 1318 | #if defined(CONFIG_SPARC) |
1319 | csr0 = (csr0 & ~0xff00) | 0xe000; | 1319 | csr0 = (csr0 & ~0xff00) | 0xe000; |
1320 | #endif | 1320 | #endif |
1321 | } | 1321 | } |
@@ -1535,23 +1535,19 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1535 | Many PCI BIOSes also incorrectly report the IRQ line, so we correct | 1535 | Many PCI BIOSes also incorrectly report the IRQ line, so we correct |
1536 | that here as well. */ | 1536 | that here as well. */ |
1537 | if (sum == 0 || sum == 6*0xff) { | 1537 | if (sum == 0 || sum == 6*0xff) { |
1538 | #if defined(__sparc__) | 1538 | #if defined(CONFIG_SPARC) |
1539 | struct pcidev_cookie *pcp = pdev->sysdata; | 1539 | struct device_node *dp = pci_device_to_OF_node(pdev); |
1540 | const unsigned char *addr; | ||
1541 | int len; | ||
1540 | #endif | 1542 | #endif |
1541 | eeprom_missing = 1; | 1543 | eeprom_missing = 1; |
1542 | for (i = 0; i < 5; i++) | 1544 | for (i = 0; i < 5; i++) |
1543 | dev->dev_addr[i] = last_phys_addr[i]; | 1545 | dev->dev_addr[i] = last_phys_addr[i]; |
1544 | dev->dev_addr[i] = last_phys_addr[i] + 1; | 1546 | dev->dev_addr[i] = last_phys_addr[i] + 1; |
1545 | #if defined(__sparc__) | 1547 | #if defined(CONFIG_SPARC) |
1546 | if (pcp) { | 1548 | addr = of_get_property(dp, "local-mac-address", &len); |
1547 | unsigned char *addr; | 1549 | if (addr && len == 6) |
1548 | int len; | 1550 | memcpy(dev->dev_addr, addr, 6); |
1549 | |||
1550 | addr = of_get_property(pcp->prom_node, | ||
1551 | "local-mac-address", &len); | ||
1552 | if (addr && len == 6) | ||
1553 | memcpy(dev->dev_addr, addr, 6); | ||
1554 | } | ||
1555 | #endif | 1551 | #endif |
1556 | #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ | 1552 | #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ |
1557 | if (last_irq) | 1553 | if (last_irq) |
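On sparc the tulip probe now asks OpenFirmware for the MAC address instead of going through pcidev_cookie. A hedged sketch of that lookup; the asm/prom.h include and the NULL check on dp are assumptions of this illustration, not something the hunk above adds:

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <asm/prom.h>   /* assumed location of the OF helpers on sparc in this kernel generation */

/* Fallback used when the EEPROM checksum looks bogus: read the
 * "local-mac-address" property from the PCI device's OF node. */
static void example_of_mac_fallback(struct pci_dev *pdev, struct net_device *dev)
{
        struct device_node *dp = pci_device_to_OF_node(pdev);
        const unsigned char *addr;
        int len;

        if (!dp)                /* illustrative guard, not in the hunk above */
                return;
        addr = of_get_property(dp, "local-mac-address", &len);
        if (addr && len == 6)
                memcpy(dev->dev_addr, addr, 6);
}
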
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 229158e8e4be..ca2548eb7d63 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c | |||
@@ -583,7 +583,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
583 | 583 | ||
584 | /* transmit this packet */ | 584 | /* transmit this packet */ |
585 | txptr = db->tx_insert_ptr; | 585 | txptr = db->tx_insert_ptr; |
586 | memcpy(txptr->tx_buf_ptr, skb->data, skb->len); | 586 | skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len); |
587 | txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); | 587 | txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); |
588 | 588 | ||
589 | /* Point to next transmit free descriptor */ | 589 | /* Point to next transmit free descriptor */ |
@@ -828,14 +828,14 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info | |||
828 | ( (skb = dev_alloc_skb(rxlen + 2) ) | 828 | ( (skb = dev_alloc_skb(rxlen + 2) ) |
829 | != NULL) ) { | 829 | != NULL) ) { |
830 | /* size less than COPY_SIZE, allocate a rxlen SKB */ | 830 | /* size less than COPY_SIZE, allocate a rxlen SKB */ |
831 | skb->dev = dev; | ||
832 | skb_reserve(skb, 2); /* 16byte align */ | 831 | skb_reserve(skb, 2); /* 16byte align */ |
833 | memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen); | 832 | memcpy(skb_put(skb, rxlen), |
833 | skb_tail_pointer(rxptr->rx_skb_ptr), | ||
834 | rxlen); | ||
834 | uli526x_reuse_skb(db, rxptr->rx_skb_ptr); | 835 | uli526x_reuse_skb(db, rxptr->rx_skb_ptr); |
835 | } else { | 836 | } else |
836 | skb->dev = dev; | ||
837 | skb_put(skb, rxlen); | 837 | skb_put(skb, rxlen); |
838 | } | 838 | |
839 | skb->protocol = eth_type_trans(skb, dev); | 839 | skb->protocol = eth_type_trans(skb, dev); |
840 | netif_rx(skb); | 840 | netif_rx(skb); |
841 | dev->last_rx = jiffies; | 841 | dev->last_rx = jiffies; |
@@ -1177,7 +1177,10 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk | |||
1177 | 1177 | ||
1178 | if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { | 1178 | if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { |
1179 | rxptr->rx_skb_ptr = skb; | 1179 | rxptr->rx_skb_ptr = skb; |
1180 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | 1180 | rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, |
1181 | skb_tail_pointer(skb), | ||
1182 | RX_ALLOC_SIZE, | ||
1183 | PCI_DMA_FROMDEVICE)); | ||
1181 | wmb(); | 1184 | wmb(); |
1182 | rxptr->rdes0 = cpu_to_le32(0x80000000); | 1185 | rxptr->rdes0 = cpu_to_le32(0x80000000); |
1183 | db->rx_avail_cnt++; | 1186 | db->rx_avail_cnt++; |
@@ -1341,7 +1344,10 @@ static void allocate_rx_buffer(struct uli526x_board_info *db) | |||
1341 | if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) | 1344 | if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) |
1342 | break; | 1345 | break; |
1343 | rxptr->rx_skb_ptr = skb; /* FIXME (?) */ | 1346 | rxptr->rx_skb_ptr = skb; /* FIXME (?) */ |
1344 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | 1347 | rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, |
1348 | skb_tail_pointer(skb), | ||
1349 | RX_ALLOC_SIZE, | ||
1350 | PCI_DMA_FROMDEVICE)); | ||
1345 | wmb(); | 1351 | wmb(); |
1346 | rxptr->rdes0 = cpu_to_le32(0x80000000); | 1352 | rxptr->rdes0 = cpu_to_le32(0x80000000); |
1347 | rxptr = rxptr->next_rx_desc; | 1353 | rxptr = rxptr->next_rx_desc; |
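The uli526x hunks above replace direct skb->tail uses with skb_tail_pointer() when mapping receive buffers for DMA. A minimal sketch, with EXAMPLE_RX_ALLOC_SIZE and example_map_rx as hypothetical stand-ins for the driver's own definitions:

#include <linux/pci.h>
#include <linux/skbuff.h>

#define EXAMPLE_RX_ALLOC_SIZE 1664      /* stand-in for the driver's RX_ALLOC_SIZE */

/* Map the writable tail of a fresh receive skb using skb_tail_pointer()
 * instead of dereferencing skb->tail, which is stored as an offset rather
 * than a pointer on 64-bit builds of this kernel generation. */
static dma_addr_t example_map_rx(struct pci_dev *pdev, struct sk_buff *skb)
{
        return pci_map_single(pdev, skb_tail_pointer(skb),
                              EXAMPLE_RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
}
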
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 002a05e0722f..5b71ac78bca2 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c | |||
@@ -813,7 +813,6 @@ static void init_rxtx_rings(struct net_device *dev) | |||
813 | np->rx_skbuff[i] = skb; | 813 | np->rx_skbuff[i] = skb; |
814 | if (skb == NULL) | 814 | if (skb == NULL) |
815 | break; | 815 | break; |
816 | skb->dev = dev; /* Mark as being used by this device. */ | ||
817 | np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, | 816 | np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, |
818 | np->rx_buf_sz,PCI_DMA_FROMDEVICE); | 817 | np->rx_buf_sz,PCI_DMA_FROMDEVICE); |
819 | 818 | ||
@@ -903,7 +902,7 @@ static void init_registers(struct net_device *dev) | |||
903 | } | 902 | } |
904 | #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) | 903 | #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) |
905 | i |= 0xE000; | 904 | i |= 0xE000; |
906 | #elif defined(__sparc__) || defined (CONFIG_PARISC) | 905 | #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) |
907 | i |= 0x4800; | 906 | i |= 0x4800; |
908 | #else | 907 | #else |
909 | #warning Processor architecture undefined | 908 | #warning Processor architecture undefined |
@@ -1229,7 +1228,6 @@ static int netdev_rx(struct net_device *dev) | |||
1229 | to a minimally-sized skbuff. */ | 1228 | to a minimally-sized skbuff. */ |
1230 | if (pkt_len < rx_copybreak | 1229 | if (pkt_len < rx_copybreak |
1231 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1230 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1232 | skb->dev = dev; | ||
1233 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1231 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1234 | pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], | 1232 | pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], |
1235 | np->rx_skbuff[entry]->len, | 1233 | np->rx_skbuff[entry]->len, |
@@ -1278,7 +1276,6 @@ static int netdev_rx(struct net_device *dev) | |||
1278 | np->rx_skbuff[entry] = skb; | 1276 | np->rx_skbuff[entry] = skb; |
1279 | if (skb == NULL) | 1277 | if (skb == NULL) |
1280 | break; /* Better luck next round. */ | 1278 | break; /* Better luck next round. */ |
1281 | skb->dev = dev; /* Mark as being used by this device. */ | ||
1282 | np->rx_addr[entry] = pci_map_single(np->pci_dev, | 1279 | np->rx_addr[entry] = pci_map_single(np->pci_dev, |
1283 | skb->data, | 1280 | skb->data, |
1284 | np->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1281 | np->rx_buf_sz, PCI_DMA_FROMDEVICE); |
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index 61d313049dd0..985a1810ca59 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c | |||
@@ -411,9 +411,9 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
411 | sometimes sends more than you ask it to. */ | 411 | sometimes sends more than you ask it to. */ |
412 | 412 | ||
413 | memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); | 413 | memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); |
414 | memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len); | 414 | skb_copy_from_linear_data(skb, |
415 | 415 | &(card->tx_buffer[bufferoffsets[desc] / 4]), | |
416 | 416 | skb->len); | |
417 | /* FIXME: The specification tells us that the length we send HAS to be a multiple of | 417 | /* FIXME: The specification tells us that the length we send HAS to be a multiple of |
418 | 4 bytes. */ | 418 | 4 bytes. */ |
419 | 419 | ||
@@ -1207,7 +1207,6 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri | |||
1207 | card->stats.rx_dropped++; | 1207 | card->stats.rx_dropped++; |
1208 | goto out; | 1208 | goto out; |
1209 | } | 1209 | } |
1210 | skb->dev = dev; | ||
1211 | skb_reserve(skb, 2); | 1210 | skb_reserve(skb, 2); |
1212 | eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0); | 1211 | eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0); |
1213 | skb_put(skb, pkt_len); | 1212 | skb_put(skb, pkt_len); |
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c index a998c5d0ae9c..f64172927377 100644 --- a/drivers/net/tulip/xircom_tulip_cb.c +++ b/drivers/net/tulip/xircom_tulip_cb.c | |||
@@ -65,7 +65,7 @@ static int rx_copybreak = 100; | |||
65 | static int csr0 = 0x01A00000 | 0xE000; | 65 | static int csr0 = 0x01A00000 | 0xE000; |
66 | #elif defined(__powerpc__) | 66 | #elif defined(__powerpc__) |
67 | static int csr0 = 0x01B00000 | 0x8000; | 67 | static int csr0 = 0x01B00000 | 0x8000; |
68 | #elif defined(__sparc__) | 68 | #elif defined(CONFIG_SPARC) |
69 | static int csr0 = 0x01B00080 | 0x8000; | 69 | static int csr0 = 0x01B00080 | 0x8000; |
70 | #elif defined(__i386__) | 70 | #elif defined(__i386__) |
71 | static int csr0 = 0x01A00000 | 0x8000; | 71 | static int csr0 = 0x01A00000 | 0x8000; |
@@ -915,7 +915,9 @@ xircom_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
915 | 915 | ||
916 | tp->tx_skbuff[entry] = skb; | 916 | tp->tx_skbuff[entry] = skb; |
917 | if (tp->chip_id == X3201_3) { | 917 | if (tp->chip_id == X3201_3) { |
918 | memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len); | 918 | skb_copy_from_linear_data(skb, |
919 | tp->tx_aligned_skbuff[entry]->data, | ||
920 | skb->len); | ||
919 | tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data); | 921 | tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data); |
920 | } else | 922 | } else |
921 | tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data); | 923 | tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data); |
@@ -1238,7 +1240,6 @@ xircom_rx(struct net_device *dev) | |||
1238 | to a minimally-sized skbuff. */ | 1240 | to a minimally-sized skbuff. */ |
1239 | if (pkt_len < rx_copybreak | 1241 | if (pkt_len < rx_copybreak |
1240 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1242 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1241 | skb->dev = dev; | ||
1242 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1243 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1243 | #if ! defined(__alpha__) | 1244 | #if ! defined(__alpha__) |
1244 | eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1), | 1245 | eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1), |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 5643d1e84ed6..a2c6caaaae93 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -18,6 +18,10 @@ | |||
18 | /* | 18 | /* |
19 | * Changes: | 19 | * Changes: |
20 | * | 20 | * |
21 | * Brian Braunstein <linuxkernel@bristyle.com> 2007/03/23 | ||
22 | * Fixed hw address handling. Now net_device.dev_addr is kept consistent | ||
23 | * with tun.dev_addr when the address is set by this module. | ||
24 | * | ||
21 | * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 | 25 | * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 |
22 | * Add TUNSETLINK ioctl to set the link encapsulation | 26 | * Add TUNSETLINK ioctl to set the link encapsulation |
23 | * | 27 | * |
@@ -196,7 +200,10 @@ static void tun_net_init(struct net_device *dev) | |||
196 | dev->set_multicast_list = tun_net_mclist; | 200 | dev->set_multicast_list = tun_net_mclist; |
197 | 201 | ||
198 | ether_setup(dev); | 202 | ether_setup(dev); |
199 | random_ether_addr(dev->dev_addr); | 203 | |
204 | /* random address already created for us by tun_set_iff, use it */ | ||
205 | memcpy(dev->dev_addr, tun->dev_addr, min(sizeof(tun->dev_addr), sizeof(dev->dev_addr)) ); | ||
206 | |||
200 | dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ | 207 | dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ |
201 | break; | 208 | break; |
202 | } | 209 | } |
@@ -254,11 +261,11 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, | |||
254 | return -EFAULT; | 261 | return -EFAULT; |
255 | } | 262 | } |
256 | 263 | ||
257 | skb->dev = tun->dev; | ||
258 | switch (tun->flags & TUN_TYPE_MASK) { | 264 | switch (tun->flags & TUN_TYPE_MASK) { |
259 | case TUN_TUN_DEV: | 265 | case TUN_TUN_DEV: |
260 | skb->mac.raw = skb->data; | 266 | skb_reset_mac_header(skb); |
261 | skb->protocol = pi.proto; | 267 | skb->protocol = pi.proto; |
268 | skb->dev = tun->dev; | ||
262 | break; | 269 | break; |
263 | case TUN_TAP_DEV: | 270 | case TUN_TAP_DEV: |
264 | skb->protocol = eth_type_trans(skb, tun->dev); | 271 | skb->protocol = eth_type_trans(skb, tun->dev); |
@@ -386,8 +393,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, | |||
386 | * - we are multicast promiscous. | 393 | * - we are multicast promiscous. |
387 | * - we belong to the multicast group. | 394 | * - we belong to the multicast group. |
388 | */ | 395 | */ |
389 | memcpy(addr, skb->data, | 396 | skb_copy_from_linear_data(skb, addr, min_t(size_t, sizeof addr, |
390 | min_t(size_t, sizeof addr, skb->len)); | 397 | skb->len)); |
391 | bit_nr = ether_crc(sizeof addr, addr) >> 26; | 398 | bit_nr = ether_crc(sizeof addr, addr) >> 26; |
392 | if ((tun->if_flags & IFF_PROMISC) || | 399 | if ((tun->if_flags & IFF_PROMISC) || |
393 | memcmp(addr, tun->dev_addr, sizeof addr) == 0 || | 400 | memcmp(addr, tun->dev_addr, sizeof addr) == 0 || |
@@ -636,6 +643,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
636 | return 0; | 643 | return 0; |
637 | 644 | ||
638 | case SIOCGIFHWADDR: | 645 | case SIOCGIFHWADDR: |
646 | /* Note: the actual net device's address may be different */ | ||
639 | memcpy(ifr.ifr_hwaddr.sa_data, tun->dev_addr, | 647 | memcpy(ifr.ifr_hwaddr.sa_data, tun->dev_addr, |
640 | min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr)); | 648 | min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr)); |
641 | if (copy_to_user( argp, &ifr, sizeof ifr)) | 649 | if (copy_to_user( argp, &ifr, sizeof ifr)) |
@@ -643,16 +651,24 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
643 | return 0; | 651 | return 0; |
644 | 652 | ||
645 | case SIOCSIFHWADDR: | 653 | case SIOCSIFHWADDR: |
646 | /** Set the character device's hardware address. This is used when | 654 | { |
647 | * filtering packets being sent from the network device to the character | 655 | /* try to set the actual net device's hw address */ |
648 | * device. */ | 656 | int ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); |
649 | memcpy(tun->dev_addr, ifr.ifr_hwaddr.sa_data, | 657 | |
650 | min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr)); | 658 | if (ret == 0) { |
651 | DBG(KERN_DEBUG "%s: set hardware address: %x:%x:%x:%x:%x:%x\n", | 659 | /** Set the character device's hardware address. This is used when |
652 | tun->dev->name, | 660 | * filtering packets being sent from the network device to the character |
653 | tun->dev_addr[0], tun->dev_addr[1], tun->dev_addr[2], | 661 | * device. */ |
654 | tun->dev_addr[3], tun->dev_addr[4], tun->dev_addr[5]); | 662 | memcpy(tun->dev_addr, ifr.ifr_hwaddr.sa_data, |
655 | return 0; | 663 | min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr)); |
664 | DBG(KERN_DEBUG "%s: set hardware address: %x:%x:%x:%x:%x:%x\n", | ||
665 | tun->dev->name, | ||
666 | tun->dev_addr[0], tun->dev_addr[1], tun->dev_addr[2], | ||
667 | tun->dev_addr[3], tun->dev_addr[4], tun->dev_addr[5]); | ||
668 | } | ||
669 | |||
670 | return ret; | ||
671 | } | ||
656 | 672 | ||
657 | case SIOCADDMULTI: | 673 | case SIOCADDMULTI: |
658 | /** Add the specified group to the character device's multicast filter | 674 | /** Add the specified group to the character device's multicast filter |
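The tun SIOCSIFHWADDR path above now pushes the request to the underlying net_device first and only mirrors it into the character device's filter address when that succeeds. A sketch of that flow, assuming a simplified private struct (example_tun) in place of the driver's tun_struct:

#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/string.h>
#include <linux/kernel.h>

struct example_tun {
        struct net_device *dev;
        unsigned char dev_addr[6];
};

/* Apply the new hardware address to the real device; keep the character
 * device's filter copy consistent only on success. */
static int example_set_hwaddr(struct example_tun *tun, struct ifreq *ifr)
{
        int ret = dev_set_mac_address(tun->dev, &ifr->ifr_hwaddr);

        if (ret == 0)
                memcpy(tun->dev_addr, ifr->ifr_hwaddr.sa_data,
                       min(sizeof(ifr->ifr_hwaddr.sa_data),
                           sizeof(tun->dev_addr)));
        return ret;
}
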
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 0d91d094edd9..f2dd7763cd0b 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c | |||
@@ -1708,7 +1708,6 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready, | |||
1708 | 1708 | ||
1709 | if(pkt_len < rx_copybreak && | 1709 | if(pkt_len < rx_copybreak && |
1710 | (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1710 | (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1711 | new_skb->dev = tp->dev; | ||
1712 | skb_reserve(new_skb, 2); | 1711 | skb_reserve(new_skb, 2); |
1713 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, | 1712 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, |
1714 | PKT_BUF_SZ, | 1713 | PKT_BUF_SZ, |
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index f3a972e74e9a..adea290a9d5e 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
@@ -1486,7 +1486,6 @@ static int rhine_rx(struct net_device *dev, int limit) | |||
1486 | copying to a minimally-sized skbuff. */ | 1486 | copying to a minimally-sized skbuff. */ |
1487 | if (pkt_len < rx_copybreak && | 1487 | if (pkt_len < rx_copybreak && |
1488 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1488 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1489 | skb->dev = dev; | ||
1490 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1489 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1491 | pci_dma_sync_single_for_cpu(rp->pdev, | 1490 | pci_dma_sync_single_for_cpu(rp->pdev, |
1492 | rp->rx_skbuff_dma[entry], | 1491 | rp->rx_skbuff_dma[entry], |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 8e5d82051bd4..25b75b615188 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -1339,7 +1339,8 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, | |||
1339 | if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) | 1339 | if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) |
1340 | skb_reserve(new_skb, 2); | 1340 | skb_reserve(new_skb, 2); |
1341 | 1341 | ||
1342 | memcpy(new_skb->data, rx_skb[0]->data, pkt_size); | 1342 | skb_copy_from_linear_data(rx_skb[0], new_skb->data, |
1343 | pkt_size); | ||
1343 | *rx_skb = new_skb; | 1344 | *rx_skb = new_skb; |
1344 | ret = 0; | 1345 | ret = 0; |
1345 | } | 1346 | } |
@@ -1398,7 +1399,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
1398 | vptr->stats.multicast++; | 1399 | vptr->stats.multicast++; |
1399 | 1400 | ||
1400 | skb = rd_info->skb; | 1401 | skb = rd_info->skb; |
1401 | skb->dev = vptr->dev; | ||
1402 | 1402 | ||
1403 | pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, | 1403 | pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, |
1404 | vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1404 | vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); |
@@ -1428,7 +1428,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
1428 | PCI_DMA_FROMDEVICE); | 1428 | PCI_DMA_FROMDEVICE); |
1429 | 1429 | ||
1430 | skb_put(skb, pkt_len - 4); | 1430 | skb_put(skb, pkt_len - 4); |
1431 | skb->protocol = eth_type_trans(skb, skb->dev); | 1431 | skb->protocol = eth_type_trans(skb, vptr->dev); |
1432 | 1432 | ||
1433 | stats->rx_bytes += pkt_len; | 1433 | stats->rx_bytes += pkt_len; |
1434 | netif_rx(skb); | 1434 | netif_rx(skb); |
@@ -1928,7 +1928,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1928 | if (pktlen < ETH_ZLEN) { | 1928 | if (pktlen < ETH_ZLEN) { |
1929 | /* Cannot occur until ZC support */ | 1929 | /* Cannot occur until ZC support */ |
1930 | pktlen = ETH_ZLEN; | 1930 | pktlen = ETH_ZLEN; |
1931 | memcpy(tdinfo->buf, skb->data, skb->len); | 1931 | skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); |
1932 | memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); | 1932 | memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); |
1933 | tdinfo->skb = skb; | 1933 | tdinfo->skb = skb; |
1934 | tdinfo->skb_dma[0] = tdinfo->buf_dma; | 1934 | tdinfo->skb_dma[0] = tdinfo->buf_dma; |
@@ -1944,7 +1944,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1944 | int nfrags = skb_shinfo(skb)->nr_frags; | 1944 | int nfrags = skb_shinfo(skb)->nr_frags; |
1945 | tdinfo->skb = skb; | 1945 | tdinfo->skb = skb; |
1946 | if (nfrags > 6) { | 1946 | if (nfrags > 6) { |
1947 | memcpy(tdinfo->buf, skb->data, skb->len); | 1947 | skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); |
1948 | tdinfo->skb_dma[0] = tdinfo->buf_dma; | 1948 | tdinfo->skb_dma[0] = tdinfo->buf_dma; |
1949 | td_ptr->tdesc0.pktsize = | 1949 | td_ptr->tdesc0.pktsize = |
1950 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); | 1950 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); |
@@ -2007,7 +2007,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2007 | */ | 2007 | */ |
2008 | if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) | 2008 | if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) |
2009 | && (skb->ip_summed == CHECKSUM_PARTIAL)) { | 2009 | && (skb->ip_summed == CHECKSUM_PARTIAL)) { |
2010 | struct iphdr *ip = skb->nh.iph; | 2010 | const struct iphdr *ip = ip_hdr(skb); |
2011 | if (ip->protocol == IPPROTO_TCP) | 2011 | if (ip->protocol == IPPROTO_TCP) |
2012 | td_ptr->tdesc1.TCR |= TCR0_TCPCK; | 2012 | td_ptr->tdesc1.TCR |= TCR0_TCPCK; |
2013 | else if (ip->protocol == IPPROTO_UDP) | 2013 | else if (ip->protocol == IPPROTO_UDP) |
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 5b82e4fd0d73..23464735fa88 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c | |||
@@ -773,7 +773,7 @@ static int sppp_rx_done(struct channel_data *chan) | |||
773 | } | 773 | } |
774 | chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); | 774 | chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); |
775 | chan->rx_skb->dev = chan->pppdev.dev; | 775 | chan->rx_skb->dev = chan->pppdev.dev; |
776 | chan->rx_skb->mac.raw = chan->rx_skb->data; | 776 | skb_reset_mac_header(chan->rx_skb); |
777 | chan->stats.rx_packets++; | 777 | chan->stats.rx_packets++; |
778 | chan->stats.rx_bytes += chan->cosa->rxsize; | 778 | chan->stats.rx_bytes += chan->cosa->rxsize; |
779 | netif_rx(chan->rx_skb); | 779 | netif_rx(chan->rx_skb); |
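The WAN hunks above and below all convert the open-coded header markers to helper calls. A minimal sketch of the two calls involved (example_mark_headers is a hypothetical wrapper):

#include <linux/skbuff.h>

/* The old idiom skb->mac.raw = skb->data becomes skb_reset_mac_header();
 * skb_reset_network_header() is the matching replacement for
 * skb->nh.raw = skb->data. Both point the header at the current data offset. */
static void example_mark_headers(struct sk_buff *skb)
{
        skb_reset_mac_header(skb);        /* MAC header starts at skb->data */
        skb_reset_network_header(skb);    /* network header likewise */
}
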
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c index a631d1c2fa14..016b3ff3ea5e 100644 --- a/drivers/net/wan/cycx_x25.c +++ b/drivers/net/wan/cycx_x25.c | |||
@@ -834,7 +834,7 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd) | |||
834 | ++chan->ifstats.rx_packets; | 834 | ++chan->ifstats.rx_packets; |
835 | chan->ifstats.rx_bytes += pktlen; | 835 | chan->ifstats.rx_bytes += pktlen; |
836 | 836 | ||
837 | skb->mac.raw = skb->data; | 837 | skb_reset_mac_header(skb); |
838 | netif_rx(skb); | 838 | netif_rx(skb); |
839 | dev->last_rx = jiffies; /* timestamp */ | 839 | dev->last_rx = jiffies; /* timestamp */ |
840 | } | 840 | } |
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 736987559432..66be20c292b6 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c | |||
@@ -176,7 +176,7 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev) | |||
176 | if (process) | 176 | if (process) |
177 | { | 177 | { |
178 | /* we've set up the protocol, so discard the header */ | 178 | /* we've set up the protocol, so discard the header */ |
179 | skb->mac.raw = skb->data; | 179 | skb_reset_mac_header(skb); |
180 | skb_pull(skb, header); | 180 | skb_pull(skb, header); |
181 | dlp->stats.rx_bytes += skb->len; | 181 | dlp->stats.rx_bytes += skb->len; |
182 | netif_rx(skb); | 182 | netif_rx(skb); |
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 25021a7992a9..dca024471455 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c | |||
@@ -1904,7 +1904,8 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) | |||
1904 | struct TxFD *tx_fd = dpriv->tx_fd + last; | 1904 | struct TxFD *tx_fd = dpriv->tx_fd + last; |
1905 | 1905 | ||
1906 | skb->len = DUMMY_SKB_SIZE; | 1906 | skb->len = DUMMY_SKB_SIZE; |
1907 | memcpy(skb->data, version, strlen(version)%DUMMY_SKB_SIZE); | 1907 | skb_copy_to_linear_data(skb, version, |
1908 | strlen(version) % DUMMY_SKB_SIZE); | ||
1908 | tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE); | 1909 | tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE); |
1909 | tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data, | 1910 | tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data, |
1910 | DUMMY_SKB_SIZE, PCI_DMA_TODEVICE); | 1911 | DUMMY_SKB_SIZE, PCI_DMA_TODEVICE); |
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index c45d6a83339d..58a53b6d9b42 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c | |||
@@ -864,7 +864,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port, | |||
864 | static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev) | 864 | static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev) |
865 | { | 865 | { |
866 | skb->dev = dev; | 866 | skb->dev = dev; |
867 | skb->mac.raw = skb->data; | 867 | skb_reset_mac_header(skb); |
868 | skb->pkt_type = PACKET_HOST; | 868 | skb->pkt_type = PACKET_HOST; |
869 | return htons(ETH_P_CUST); | 869 | return htons(ETH_P_CUST); |
870 | } | 870 | } |
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index c9664fd8a917..00e0aaadabcc 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
@@ -124,7 +124,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type, | |||
124 | skb_put(skb, sizeof(struct cisco_packet)); | 124 | skb_put(skb, sizeof(struct cisco_packet)); |
125 | skb->priority = TC_PRIO_CONTROL; | 125 | skb->priority = TC_PRIO_CONTROL; |
126 | skb->dev = dev; | 126 | skb->dev = dev; |
127 | skb->nh.raw = skb->data; | 127 | skb_reset_network_header(skb); |
128 | 128 | ||
129 | dev_queue_xmit(skb); | 129 | dev_queue_xmit(skb); |
130 | } | 130 | } |
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index c6c3c757d6f1..aeb2789adf26 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c | |||
@@ -533,7 +533,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) | |||
533 | skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI); | 533 | skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI); |
534 | fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); | 534 | fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); |
535 | } | 535 | } |
536 | data = skb->tail; | 536 | data = skb_tail_pointer(skb); |
537 | data[i++] = LMI_CALLREF; | 537 | data[i++] = LMI_CALLREF; |
538 | data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY; | 538 | data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY; |
539 | if (lmi == LMI_ANSI) | 539 | if (lmi == LMI_ANSI) |
@@ -590,7 +590,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) | |||
590 | skb_put(skb, i); | 590 | skb_put(skb, i); |
591 | skb->priority = TC_PRIO_CONTROL; | 591 | skb->priority = TC_PRIO_CONTROL; |
592 | skb->dev = dev; | 592 | skb->dev = dev; |
593 | skb->nh.raw = skb->data; | 593 | skb_reset_network_header(skb); |
594 | 594 | ||
595 | dev_queue_xmit(skb); | 595 | dev_queue_xmit(skb); |
596 | } | 596 | } |
@@ -1011,7 +1011,6 @@ static int fr_rx(struct sk_buff *skb) | |||
1011 | stats->rx_bytes += skb->len; | 1011 | stats->rx_bytes += skb->len; |
1012 | if (pvc->state.becn) | 1012 | if (pvc->state.becn) |
1013 | stats->rx_compressed++; | 1013 | stats->rx_compressed++; |
1014 | skb->dev = dev; | ||
1015 | netif_rx(skb); | 1014 | netif_rx(skb); |
1016 | return NET_RX_SUCCESS; | 1015 | return NET_RX_SUCCESS; |
1017 | } else { | 1016 | } else { |
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index a02c5fb40567..9ba3e4ee6ec7 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c | |||
@@ -59,7 +59,7 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) | |||
59 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ | 59 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ |
60 | skb_trim(skb, skb->len-2); | 60 | skb_trim(skb, skb->len-2); |
61 | skb->protocol=__constant_htons(ETH_P_WAN_PPP); | 61 | skb->protocol=__constant_htons(ETH_P_WAN_PPP); |
62 | skb->mac.raw=skb->data; | 62 | skb_reset_mac_header(skb); |
63 | skb->dev=c->netdevice; | 63 | skb->dev=c->netdevice; |
64 | /* | 64 | /* |
65 | * Send it to the PPP layer. We don't have time to process | 65 | * Send it to the PPP layer. We don't have time to process |
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 2b54f1bc3a0d..ae132c1c5459 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c | |||
@@ -1636,7 +1636,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1636 | if (nsb) { | 1636 | if (nsb) { |
1637 | sc->lmc_rxq[i] = nsb; | 1637 | sc->lmc_rxq[i] = nsb; |
1638 | nsb->dev = dev; | 1638 | nsb->dev = dev; |
1639 | sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail); | 1639 | sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); |
1640 | } | 1640 | } |
1641 | sc->failed_recv_alloc = 1; | 1641 | sc->failed_recv_alloc = 1; |
1642 | goto skip_packet; | 1642 | goto skip_packet; |
@@ -1667,8 +1667,8 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1667 | skb_put (skb, len); | 1667 | skb_put (skb, len); |
1668 | skb->protocol = lmc_proto_type(sc, skb); | 1668 | skb->protocol = lmc_proto_type(sc, skb); |
1669 | skb->protocol = htons(ETH_P_WAN_PPP); | 1669 | skb->protocol = htons(ETH_P_WAN_PPP); |
1670 | skb->mac.raw = skb->data; | 1670 | skb_reset_mac_header(skb); |
1671 | // skb->nh.raw = skb->data; | 1671 | /* skb_reset_network_header(skb); */ |
1672 | skb->dev = dev; | 1672 | skb->dev = dev; |
1673 | lmc_proto_netif(sc, skb); | 1673 | lmc_proto_netif(sc, skb); |
1674 | 1674 | ||
@@ -1679,7 +1679,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1679 | if (nsb) { | 1679 | if (nsb) { |
1680 | sc->lmc_rxq[i] = nsb; | 1680 | sc->lmc_rxq[i] = nsb; |
1681 | nsb->dev = dev; | 1681 | nsb->dev = dev; |
1682 | sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail); | 1682 | sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); |
1683 | /* Transferred to 21140 below */ | 1683 | /* Transferred to 21140 below */ |
1684 | } | 1684 | } |
1685 | else { | 1685 | else { |
@@ -1702,11 +1702,11 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1702 | if(!nsb) { | 1702 | if(!nsb) { |
1703 | goto give_it_anyways; | 1703 | goto give_it_anyways; |
1704 | } | 1704 | } |
1705 | memcpy(skb_put(nsb, len), skb->data, len); | 1705 | skb_copy_from_linear_data(skb, skb_put(nsb, len), len); |
1706 | 1706 | ||
1707 | nsb->protocol = lmc_proto_type(sc, skb); | 1707 | nsb->protocol = lmc_proto_type(sc, skb); |
1708 | nsb->mac.raw = nsb->data; | 1708 | skb_reset_mac_header(nsb); |
1709 | // nsb->nh.raw = nsb->data; | 1709 | /* skb_reset_network_header(nsb); */ |
1710 | nsb->dev = dev; | 1710 | nsb->dev = dev; |
1711 | lmc_proto_netif(sc, nsb); | 1711 | lmc_proto_netif(sc, nsb); |
1712 | } | 1712 | } |
@@ -1932,7 +1932,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/ | |||
1932 | sc->lmc_rxring[i].status = 0x80000000; | 1932 | sc->lmc_rxring[i].status = 0x80000000; |
1933 | 1933 | ||
1934 | /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */ | 1934 | /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */ |
1935 | sc->lmc_rxring[i].length = skb->end - skb->data; | 1935 | sc->lmc_rxring[i].length = skb_tailroom(skb); |
1936 | 1936 | ||
1937 | /* use to be tail which is dumb since you're thinking why write | 1937 | /* use to be tail which is dumb since you're thinking why write |
1938 | * to the end of the packj,et but since there's nothing there tail == data | 1938 | * to the end of the packj,et but since there's nothing there tail == data |
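lmc_softreset() above now sizes the receive descriptor from skb_tailroom(). A one-line sketch of why that is equivalent for an skb that has had nothing skb_put() into it yet (example_rx_buf_len is hypothetical):

#include <linux/skbuff.h>

/* For a freshly allocated, still-empty skb, tail == data, so the usable
 * receive length is simply the tailroom; no raw end - data arithmetic,
 * which would break once end/tail become offsets on 64-bit kernels. */
static int example_rx_buf_len(const struct sk_buff *skb)
{
        return skb_tailroom(skb);   /* equals end - data while the skb is empty */
}
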
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index 62184dee377c..999bf71937ca 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c | |||
@@ -1755,17 +1755,17 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx) | |||
1755 | 1755 | ||
1756 | skb->dev = dev; | 1756 | skb->dev = dev; |
1757 | skb->protocol = htons(ETH_P_CUST); | 1757 | skb->protocol = htons(ETH_P_CUST); |
1758 | skb->mac.raw = skb->data; | 1758 | skb_reset_mac_header(skb); |
1759 | skb->pkt_type = PACKET_HOST; | 1759 | skb->pkt_type = PACKET_HOST; |
1760 | skb->len = 10 + skb_main->len; | 1760 | skb->len = 10 + skb_main->len; |
1761 | 1761 | ||
1762 | memcpy(skb->data, dev->name, 5); | 1762 | skb_copy_to_linear_data(skb, dev->name, 5); |
1763 | skb->data[5] = '['; | 1763 | skb->data[5] = '['; |
1764 | skb->data[6] = rx_tx; | 1764 | skb->data[6] = rx_tx; |
1765 | skb->data[7] = ']'; | 1765 | skb->data[7] = ']'; |
1766 | skb->data[8] = ':'; | 1766 | skb->data[8] = ':'; |
1767 | skb->data[9] = ' '; | 1767 | skb->data[9] = ' '; |
1768 | memcpy(&skb->data[10], skb_main->data, skb_main->len); | 1768 | skb_copy_from_linear_data(skb_main, &skb->data[10], skb_main->len); |
1769 | 1769 | ||
1770 | netif_rx(skb); | 1770 | netif_rx(skb); |
1771 | } | 1771 | } |
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c index 5873c346e7e9..07dbdfbfc15d 100644 --- a/drivers/net/wan/pc300_tty.c +++ b/drivers/net/wan/pc300_tty.c | |||
@@ -1003,17 +1003,17 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx) | |||
1003 | skb_put (skb, 10 + len); | 1003 | skb_put (skb, 10 + len); |
1004 | skb->dev = dev->dev; | 1004 | skb->dev = dev->dev; |
1005 | skb->protocol = htons(ETH_P_CUST); | 1005 | skb->protocol = htons(ETH_P_CUST); |
1006 | skb->mac.raw = skb->data; | 1006 | skb_reset_mac_header(skb); |
1007 | skb->pkt_type = PACKET_HOST; | 1007 | skb->pkt_type = PACKET_HOST; |
1008 | skb->len = 10 + len; | 1008 | skb->len = 10 + len; |
1009 | 1009 | ||
1010 | memcpy(skb->data,dev->dev->name,5); | 1010 | skb_copy_to_linear_data(skb, dev->dev->name, 5); |
1011 | skb->data[5] = '['; | 1011 | skb->data[5] = '['; |
1012 | skb->data[6] = rxtx; | 1012 | skb->data[6] = rxtx; |
1013 | skb->data[7] = ']'; | 1013 | skb->data[7] = ']'; |
1014 | skb->data[8] = ':'; | 1014 | skb->data[8] = ':'; |
1015 | skb->data[9] = ' '; | 1015 | skb->data[9] = ' '; |
1016 | memcpy(&skb->data[10], buf, len); | 1016 | skb_copy_to_linear_data_offset(skb, 10, buf, len); |
1017 | netif_rx(skb); | 1017 | netif_rx(skb); |
1018 | } | 1018 | } |
1019 | 1019 | ||
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index fc5c0c611ffd..35eded7ffb2d 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c | |||
@@ -999,11 +999,6 @@ get_rx_buf( struct net_device *dev ) | |||
999 | if( !skb ) | 999 | if( !skb ) |
1000 | return NULL; | 1000 | return NULL; |
1001 | 1001 | ||
1002 | #ifdef CONFIG_SBNI_MULTILINE | ||
1003 | skb->dev = ((struct net_local *) dev->priv)->master; | ||
1004 | #else | ||
1005 | skb->dev = dev; | ||
1006 | #endif | ||
1007 | skb_reserve( skb, 2 ); /* Align IP on longword boundaries */ | 1002 | skb_reserve( skb, 2 ); /* Align IP on longword boundaries */ |
1008 | return skb; | 1003 | return skb; |
1009 | } | 1004 | } |
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 70fb1b98b1dd..131358108c5a 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c | |||
@@ -61,7 +61,7 @@ static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) | |||
61 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ | 61 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ |
62 | skb_trim(skb, skb->len-2); | 62 | skb_trim(skb, skb->len-2); |
63 | skb->protocol=htons(ETH_P_WAN_PPP); | 63 | skb->protocol=htons(ETH_P_WAN_PPP); |
64 | skb->mac.raw=skb->data; | 64 | skb_reset_mac_header(skb); |
65 | skb->dev=c->netdevice; | 65 | skb->dev=c->netdevice; |
66 | /* | 66 | /* |
67 | * Send it to the PPP layer. We don't have time to process | 67 | * Send it to the PPP layer. We don't have time to process |
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c index 218f7b574ab3..67fc67cfd452 100644 --- a/drivers/net/wan/syncppp.c +++ b/drivers/net/wan/syncppp.c | |||
@@ -227,7 +227,7 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb) | |||
227 | unsigned long flags; | 227 | unsigned long flags; |
228 | 228 | ||
229 | skb->dev=dev; | 229 | skb->dev=dev; |
230 | skb->mac.raw=skb->data; | 230 | skb_reset_mac_header(skb); |
231 | 231 | ||
232 | if (dev->flags & IFF_RUNNING) | 232 | if (dev->flags & IFF_RUNNING) |
233 | { | 233 | { |
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index 8b4540bfc1b0..98ef400908b8 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c | |||
@@ -1656,7 +1656,7 @@ static void z8530_rx_done(struct z8530_channel *c) | |||
1656 | else | 1656 | else |
1657 | { | 1657 | { |
1658 | skb_put(skb, ct); | 1658 | skb_put(skb, ct); |
1659 | memcpy(skb->data, rxb, ct); | 1659 | skb_copy_to_linear_data(skb, rxb, ct); |
1660 | c->stats.rx_packets++; | 1660 | c->stats.rx_packets++; |
1661 | c->stats.rx_bytes+=ct; | 1661 | c->stats.rx_bytes+=ct; |
1662 | } | 1662 | } |
@@ -1782,7 +1782,7 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb) | |||
1782 | */ | 1782 | */ |
1783 | c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used]; | 1783 | c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used]; |
1784 | c->tx_dma_used^=1; /* Flip temp buffer */ | 1784 | c->tx_dma_used^=1; /* Flip temp buffer */ |
1785 | memcpy(c->tx_next_ptr, skb->data, skb->len); | 1785 | skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len); |
1786 | } | 1786 | } |
1787 | else | 1787 | else |
1788 | c->tx_next_ptr=skb->data; | 1788 | c->tx_next_ptr=skb->data; |
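The z85230 and pc300 hunks above use the to-linear-data helpers for copies into a freshly built skb. A minimal sketch mirroring the z8530_rx_done() ordering (example_rx_fill and its arguments are illustrative):

#include <linux/skbuff.h>

/* skb_copy_to_linear_data() replaces memcpy(skb->data, src, len); the
 * payload length is accounted for with skb_put(), as in the hunk above.
 * pc300_tty uses the _offset variant to copy to skb->data + offset. */
static void example_rx_fill(struct sk_buff *skb, const void *rxb,
                            unsigned int ct)
{
        skb_put(skb, ct);                       /* reserve ct bytes of payload */
        skb_copy_to_linear_data(skb, rxb, ct);  /* copy into skb->data */
}
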
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index ece3d9c2dc61..4426841b2be6 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig | |||
@@ -2,47 +2,21 @@ | |||
2 | # Wireless LAN device configuration | 2 | # Wireless LAN device configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | menu "Wireless LAN (non-hamradio)" | 5 | menu "Wireless LAN" |
6 | depends on NETDEVICES | ||
7 | |||
8 | config NET_RADIO | ||
9 | bool "Wireless LAN drivers (non-hamradio) & Wireless Extensions" | ||
10 | select WIRELESS_EXT | ||
11 | ---help--- | ||
12 | Support for wireless LANs and everything having to do with radio, | ||
13 | but not with amateur radio or FM broadcasting. | ||
14 | |||
15 | Saying Y here also enables the Wireless Extensions (creates | ||
16 | /proc/net/wireless and enables iwconfig access). The Wireless | ||
17 | Extension is a generic API allowing a driver to expose to the user | ||
18 | space configuration and statistics specific to common Wireless LANs. | ||
19 | The beauty of it is that a single set of tool can support all the | ||
20 | variations of Wireless LANs, regardless of their type (as long as | ||
21 | the driver supports Wireless Extension). Another advantage is that | ||
22 | these parameters may be changed on the fly without restarting the | ||
23 | driver (or Linux). If you wish to use Wireless Extensions with | ||
24 | wireless PCMCIA (PC-) cards, you need to say Y here; you can fetch | ||
25 | the tools from | ||
26 | <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. | ||
27 | 6 | ||
28 | config NET_WIRELESS_RTNETLINK | 7 | config WLAN_PRE80211 |
29 | bool "Wireless Extension API over RtNetlink" | 8 | bool "Wireless LAN (pre-802.11)" |
30 | depends on NET_RADIO | 9 | depends on NETDEVICES |
31 | ---help--- | 10 | ---help--- |
32 | Support the Wireless Extension API over the RtNetlink socket | 11 | Say Y if you have any pre-802.11 wireless LAN hardware. |
33 | in addition to the traditional ioctl interface (selected above). | ||
34 | 12 | ||
35 | For now, few tools use this facility, but it might grow in the | 13 | This option does not affect the kernel build, it only |
36 | future. The only downside is that it adds 4.5 kB to your kernel. | 14 | lets you choose drivers. |
37 | |||
38 | # Note : the cards are obsolete (can't buy them anymore), but the drivers | ||
39 | # are not, as people are still using them... | ||
40 | comment "Obsolete Wireless cards support (pre-802.11)" | ||
41 | depends on NET_RADIO && (INET || ISA || PCMCIA) | ||
42 | 15 | ||
43 | config STRIP | 16 | config STRIP |
44 | tristate "STRIP (Metricom starmode radio IP)" | 17 | tristate "STRIP (Metricom starmode radio IP)" |
45 | depends on NET_RADIO && INET | 18 | depends on INET && WLAN_PRE80211 |
19 | select WIRELESS_EXT | ||
46 | ---help--- | 20 | ---help--- |
47 | Say Y if you have a Metricom radio and intend to use Starmode Radio | 21 | Say Y if you have a Metricom radio and intend to use Starmode Radio |
48 | IP. STRIP is a radio protocol developed for the MosquitoNet project | 22 | IP. STRIP is a radio protocol developed for the MosquitoNet project |
@@ -65,7 +39,8 @@ config STRIP | |||
65 | 39 | ||
66 | config ARLAN | 40 | config ARLAN |
67 | tristate "Aironet Arlan 655 & IC2200 DS support" | 41 | tristate "Aironet Arlan 655 & IC2200 DS support" |
68 | depends on NET_RADIO && ISA && !64BIT | 42 | depends on ISA && !64BIT && WLAN_PRE80211 |
43 | select WIRELESS_EXT | ||
69 | ---help--- | 44 | ---help--- |
70 | Aironet makes Arlan, a class of wireless LAN adapters. These use the | 45 | Aironet makes Arlan, a class of wireless LAN adapters. These use the |
71 | www.Telxon.com chip, which is also used on several similar cards. | 46 | www.Telxon.com chip, which is also used on several similar cards. |
@@ -80,7 +55,8 @@ config ARLAN | |||
80 | 55 | ||
81 | config WAVELAN | 56 | config WAVELAN |
82 | tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support" | 57 | tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support" |
83 | depends on NET_RADIO && ISA | 58 | depends on ISA && WLAN_PRE80211 |
59 | select WIRELESS_EXT | ||
84 | ---help--- | 60 | ---help--- |
85 | The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is | 61 | The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is |
86 | a Radio LAN (wireless Ethernet-like Local Area Network) using the | 62 | a Radio LAN (wireless Ethernet-like Local Area Network) using the |
@@ -107,7 +83,8 @@ config WAVELAN | |||
107 | 83 | ||
108 | config PCMCIA_WAVELAN | 84 | config PCMCIA_WAVELAN |
109 | tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support" | 85 | tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support" |
110 | depends on NET_RADIO && PCMCIA | 86 | depends on PCMCIA && WLAN_PRE80211 |
87 | select WIRELESS_EXT | ||
111 | help | 88 | help |
112 | Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA | 89 | Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA |
113 | (PC-card) wireless Ethernet networking card to your computer. This | 90 | (PC-card) wireless Ethernet networking card to your computer. This |
@@ -118,7 +95,8 @@ config PCMCIA_WAVELAN | |||
118 | 95 | ||
119 | config PCMCIA_NETWAVE | 96 | config PCMCIA_NETWAVE |
120 | tristate "Xircom Netwave AirSurfer Pcmcia wireless support" | 97 | tristate "Xircom Netwave AirSurfer Pcmcia wireless support" |
121 | depends on NET_RADIO && PCMCIA | 98 | depends on PCMCIA && WLAN_PRE80211 |
99 | select WIRELESS_EXT | ||
122 | help | 100 | help |
123 | Say Y here if you intend to attach this type of PCMCIA (PC-card) | 101 | Say Y here if you intend to attach this type of PCMCIA (PC-card) |
124 | wireless Ethernet networking card to your computer. | 102 | wireless Ethernet networking card to your computer. |
@@ -126,12 +104,20 @@ config PCMCIA_NETWAVE | |||
126 | To compile this driver as a module, choose M here: the module will be | 104 | To compile this driver as a module, choose M here: the module will be |
127 | called netwave_cs. If unsure, say N. | 105 | called netwave_cs. If unsure, say N. |
128 | 106 | ||
129 | comment "Wireless 802.11 Frequency Hopping cards support" | 107 | |
130 | depends on NET_RADIO && PCMCIA | 108 | config WLAN_80211 |
109 | bool "Wireless LAN (IEEE 802.11)" | ||
110 | depends on NETDEVICES | ||
111 | ---help--- | ||
112 | Say Y if you have any 802.11 wireless LAN hardware. | ||
113 | |||
114 | This option does not affect the kernel build, it only | ||
115 | lets you choose drivers. | ||
131 | 116 | ||
132 | config PCMCIA_RAYCS | 117 | config PCMCIA_RAYCS |
133 | tristate "Aviator/Raytheon 2.4MHz wireless support" | 118 | tristate "Aviator/Raytheon 2.4MHz wireless support" |
134 | depends on NET_RADIO && PCMCIA | 119 | depends on PCMCIA && WLAN_80211 |
120 | select WIRELESS_EXT | ||
135 | ---help--- | 121 | ---help--- |
136 | Say Y here if you intend to attach an Aviator/Raytheon PCMCIA | 122 | Say Y here if you intend to attach an Aviator/Raytheon PCMCIA |
137 | (PC-card) wireless Ethernet networking card to your computer. | 123 | (PC-card) wireless Ethernet networking card to your computer. |
@@ -141,12 +127,10 @@ config PCMCIA_RAYCS | |||
141 | To compile this driver as a module, choose M here: the module will be | 127 | To compile this driver as a module, choose M here: the module will be |
142 | called ray_cs. If unsure, say N. | 128 | called ray_cs. If unsure, say N. |
143 | 129 | ||
144 | comment "Wireless 802.11b ISA/PCI cards support" | ||
145 | depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA) | ||
146 | |||
147 | config IPW2100 | 130 | config IPW2100 |
148 | tristate "Intel PRO/Wireless 2100 Network Connection" | 131 | tristate "Intel PRO/Wireless 2100 Network Connection" |
149 | depends on NET_RADIO && PCI | 132 | depends on PCI && WLAN_80211 |
133 | select WIRELESS_EXT | ||
150 | select FW_LOADER | 134 | select FW_LOADER |
151 | select IEEE80211 | 135 | select IEEE80211 |
152 | ---help--- | 136 | ---help--- |
@@ -200,7 +184,8 @@ config IPW2100_DEBUG | |||
200 | 184 | ||
201 | config IPW2200 | 185 | config IPW2200 |
202 | tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" | 186 | tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" |
203 | depends on NET_RADIO && PCI | 187 | depends on PCI && WLAN_80211 |
188 | select WIRELESS_EXT | ||
204 | select FW_LOADER | 189 | select FW_LOADER |
205 | select IEEE80211 | 190 | select IEEE80211 |
206 | ---help--- | 191 | ---help--- |
@@ -282,7 +267,8 @@ config IPW2200_DEBUG | |||
282 | 267 | ||
283 | config AIRO | 268 | config AIRO |
284 | tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" | 269 | tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" |
285 | depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN) | 270 | depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN) |
271 | select WIRELESS_EXT | ||
286 | select CRYPTO | 272 | select CRYPTO |
287 | ---help--- | 273 | ---help--- |
288 | This is the standard Linux driver to support Cisco/Aironet ISA and | 274 | This is the standard Linux driver to support Cisco/Aironet ISA and |
@@ -299,7 +285,8 @@ config AIRO | |||
299 | 285 | ||
300 | config HERMES | 286 | config HERMES |
301 | tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)" | 287 | tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)" |
302 | depends on NET_RADIO && (PPC_PMAC || PCI || PCMCIA) | 288 | depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211 |
289 | select WIRELESS_EXT | ||
303 | ---help--- | 290 | ---help--- |
304 | A driver for 802.11b wireless cards based on the "Hermes" or | 291 | A driver for 802.11b wireless cards based on the "Hermes" or |
305 | Intersil HFA384x (Prism 2) MAC controller. This includes the vast | 292 | Intersil HFA384x (Prism 2) MAC controller. This includes the vast |
@@ -373,7 +360,8 @@ config PCI_HERMES | |||
373 | 360 | ||
374 | config ATMEL | 361 | config ATMEL |
375 | tristate "Atmel at76c50x chipset 802.11b support" | 362 | tristate "Atmel at76c50x chipset 802.11b support" |
376 | depends on NET_RADIO && (PCI || PCMCIA) | 363 | depends on (PCI || PCMCIA) && WLAN_80211 |
364 | select WIRELESS_EXT | ||
377 | select FW_LOADER | 365 | select FW_LOADER |
378 | select CRC32 | 366 | select CRC32 |
379 | ---help--- | 367 | ---help--- |
@@ -394,13 +382,9 @@ config PCI_ATMEL | |||
394 | Enable support for PCI and mini-PCI cards containing the | 382 | Enable support for PCI and mini-PCI cards containing the |
395 | Atmel at76c506 chip. | 383 | Atmel at76c506 chip. |
396 | 384 | ||
397 | # If Pcmcia is compiled in, offer Pcmcia cards... | ||
398 | comment "Wireless 802.11b Pcmcia/Cardbus cards support" | ||
399 | depends on NET_RADIO && PCMCIA | ||
400 | |||
401 | config PCMCIA_HERMES | 385 | config PCMCIA_HERMES |
402 | tristate "Hermes PCMCIA card support" | 386 | tristate "Hermes PCMCIA card support" |
403 | depends on NET_RADIO && PCMCIA && HERMES | 387 | depends on PCMCIA && HERMES |
404 | ---help--- | 388 | ---help--- |
405 | A driver for "Hermes" chipset based PCMCIA wireless adaptors, such | 389 | A driver for "Hermes" chipset based PCMCIA wireless adaptors, such |
406 | as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/ | 390 | as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/ |
@@ -420,7 +404,7 @@ config PCMCIA_HERMES | |||
420 | 404 | ||
421 | config PCMCIA_SPECTRUM | 405 | config PCMCIA_SPECTRUM |
422 | tristate "Symbol Spectrum24 Trilogy PCMCIA card support" | 406 | tristate "Symbol Spectrum24 Trilogy PCMCIA card support" |
423 | depends on NET_RADIO && PCMCIA && HERMES | 407 | depends on PCMCIA && HERMES |
424 | select FW_LOADER | 408 | select FW_LOADER |
425 | ---help--- | 409 | ---help--- |
426 | 410 | ||
@@ -434,7 +418,8 @@ config PCMCIA_SPECTRUM | |||
434 | 418 | ||
435 | config AIRO_CS | 419 | config AIRO_CS |
436 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" | 420 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" |
437 | depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) | 421 | depends on PCMCIA && (BROKEN || !M32R) && WLAN_80211 |
422 | select WIRELESS_EXT | ||
438 | select CRYPTO | 423 | select CRYPTO |
439 | select CRYPTO_AES | 424 | select CRYPTO_AES |
440 | ---help--- | 425 | ---help--- |
@@ -458,7 +443,8 @@ config AIRO_CS | |||
458 | 443 | ||
459 | config PCMCIA_ATMEL | 444 | config PCMCIA_ATMEL |
460 | tristate "Atmel at76c502/at76c504 PCMCIA cards" | 445 | tristate "Atmel at76c502/at76c504 PCMCIA cards" |
461 | depends on NET_RADIO && ATMEL && PCMCIA | 446 | depends on ATMEL && PCMCIA |
447 | select WIRELESS_EXT | ||
462 | select FW_LOADER | 448 | select FW_LOADER |
463 | select CRC32 | 449 | select CRC32 |
464 | ---help--- | 450 | ---help--- |
@@ -467,17 +453,17 @@ config PCMCIA_ATMEL | |||
467 | 453 | ||
468 | config PCMCIA_WL3501 | 454 | config PCMCIA_WL3501 |
469 | tristate "Planet WL3501 PCMCIA cards" | 455 | tristate "Planet WL3501 PCMCIA cards" |
470 | depends on NET_RADIO && EXPERIMENTAL && PCMCIA | 456 | depends on EXPERIMENTAL && PCMCIA && WLAN_80211 |
457 | select WIRELESS_EXT | ||
471 | ---help--- | 458 | ---help--- |
472 | A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet. | 459 | A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet. |
473 | It has basic support for Linux wireless extensions and initial | 460 | It has basic support for Linux wireless extensions and initial |
474 | micro support for ethtool. | 461 | micro support for ethtool. |
475 | 462 | ||
476 | comment "Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support" | ||
477 | depends on NET_RADIO && PCI | ||
478 | config PRISM54 | 463 | config PRISM54 |
479 | tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus' | 464 | tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus' |
480 | depends on PCI && NET_RADIO && EXPERIMENTAL | 465 | depends on PCI && EXPERIMENTAL && WLAN_80211 |
466 | select WIRELESS_EXT | ||
481 | select FW_LOADER | 467 | select FW_LOADER |
482 | ---help--- | 468 | ---help--- |
483 | Enable PCI and Cardbus support for the following chipset based cards: | 469 | Enable PCI and Cardbus support for the following chipset based cards: |
@@ -523,7 +509,8 @@ config PRISM54 | |||
523 | 509 | ||
524 | config USB_ZD1201 | 510 | config USB_ZD1201 |
525 | tristate "USB ZD1201 based Wireless device support" | 511 | tristate "USB ZD1201 based Wireless device support" |
526 | depends on USB && NET_RADIO | 512 | depends on USB && WLAN_80211 |
513 | select WIRELESS_EXT | ||
527 | select FW_LOADER | 514 | select FW_LOADER |
528 | ---help--- | 515 | ---help--- |
529 | Say Y if you want to use wireless LAN adapters based on the ZyDAS | 516 | Say Y if you want to use wireless LAN adapters based on the ZyDAS |
@@ -542,11 +529,4 @@ source "drivers/net/wireless/hostap/Kconfig" | |||
542 | source "drivers/net/wireless/bcm43xx/Kconfig" | 529 | source "drivers/net/wireless/bcm43xx/Kconfig" |
543 | source "drivers/net/wireless/zd1211rw/Kconfig" | 530 | source "drivers/net/wireless/zd1211rw/Kconfig" |
544 | 531 | ||
545 | # yes, this works even when no drivers are selected | ||
546 | config NET_WIRELESS | ||
547 | bool | ||
548 | depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA) | ||
549 | default y | ||
550 | |||
551 | endmenu | 532 | endmenu |
552 | |||
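The Kconfig hunks above drop the blanket NET_RADIO dependency and instead make each driver depend on the new WLAN_80211 menu symbol and select WIRELESS_EXT itself. A minimal sketch of what the selected symbol provides to a driver, with hypothetical names, assuming the 2.6.22-era wireless-extensions API:

#include <linux/netdevice.h>
#include <net/iw_handler.h>

/* Illustrative only: not taken from any driver in this series. */
static const struct iw_handler_def example_handler_def;

static void example_setup_wext(struct net_device *dev)
{
        /* wireless_handlers exists in struct net_device only when
         * CONFIG_WIRELESS_EXT is set, which the per-driver
         * "select WIRELESS_EXT" above guarantees. */
        dev->wireless_handlers = &example_handler_def;
}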
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 2ada76a93cb6..7fe0a61091a6 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -2444,7 +2444,7 @@ static int add_airo_dev( struct net_device *dev ); | |||
2444 | 2444 | ||
2445 | static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr) | 2445 | static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr) |
2446 | { | 2446 | { |
2447 | memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); | 2447 | memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); |
2448 | return ETH_ALEN; | 2448 | return ETH_ALEN; |
2449 | } | 2449 | } |
2450 | 2450 | ||
@@ -3411,14 +3411,12 @@ badrx: | |||
3411 | OUT4500( apriv, EVACK, EV_RX); | 3411 | OUT4500( apriv, EVACK, EV_RX); |
3412 | 3412 | ||
3413 | if (test_bit(FLAG_802_11, &apriv->flags)) { | 3413 | if (test_bit(FLAG_802_11, &apriv->flags)) { |
3414 | skb->mac.raw = skb->data; | 3414 | skb_reset_mac_header(skb); |
3415 | skb->pkt_type = PACKET_OTHERHOST; | 3415 | skb->pkt_type = PACKET_OTHERHOST; |
3416 | skb->dev = apriv->wifidev; | 3416 | skb->dev = apriv->wifidev; |
3417 | skb->protocol = htons(ETH_P_802_2); | 3417 | skb->protocol = htons(ETH_P_802_2); |
3418 | } else { | 3418 | } else |
3419 | skb->dev = dev; | ||
3420 | skb->protocol = eth_type_trans(skb,dev); | 3419 | skb->protocol = eth_type_trans(skb,dev); |
3421 | } | ||
3422 | skb->dev->last_rx = jiffies; | 3420 | skb->dev->last_rx = jiffies; |
3423 | skb->ip_summed = CHECKSUM_NONE; | 3421 | skb->ip_summed = CHECKSUM_NONE; |
3424 | 3422 | ||
@@ -3641,7 +3639,6 @@ badmic: | |||
3641 | } | 3639 | } |
3642 | #endif /* WIRELESS_SPY */ | 3640 | #endif /* WIRELESS_SPY */ |
3643 | 3641 | ||
3644 | skb->dev = ai->dev; | ||
3645 | skb->ip_summed = CHECKSUM_NONE; | 3642 | skb->ip_summed = CHECKSUM_NONE; |
3646 | skb->protocol = eth_type_trans(skb, ai->dev); | 3643 | skb->protocol = eth_type_trans(skb, ai->dev); |
3647 | skb->dev->last_rx = jiffies; | 3644 | skb->dev->last_rx = jiffies; |
@@ -3749,7 +3746,7 @@ void mpi_receive_802_11 (struct airo_info *ai) | |||
3749 | wireless_spy_update(ai->dev, sa, &wstats); | 3746 | wireless_spy_update(ai->dev, sa, &wstats); |
3750 | } | 3747 | } |
3751 | #endif /* IW_WIRELESS_SPY */ | 3748 | #endif /* IW_WIRELESS_SPY */ |
3752 | skb->mac.raw = skb->data; | 3749 | skb_reset_mac_header(skb); |
3753 | skb->pkt_type = PACKET_OTHERHOST; | 3750 | skb->pkt_type = PACKET_OTHERHOST; |
3754 | skb->dev = ai->wifidev; | 3751 | skb->dev = ai->wifidev; |
3755 | skb->protocol = htons(ETH_P_802_2); | 3752 | skb->protocol = htons(ETH_P_802_2); |
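The airo.c hunks are representative of the sk_buff conversion repeated throughout this series: direct stores to skb->mac.raw become skb_reset_mac_header(), reads go through skb_mac_header(), and the deleted skb->dev assignments before eth_type_trans() are redundant because eth_type_trans() sets skb->dev itself. A sketch of both sides of the conversion, with hypothetical function and variable names:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/string.h>

/* RX side: mark the start of the 802.11 header before handing the
 * frame to the stack (was: skb->mac.raw = skb->data). */
static void example_rx_80211(struct sk_buff *skb, struct net_device *wifidev)
{
        skb_reset_mac_header(skb);
        skb->pkt_type = PACKET_OTHERHOST;
        skb->dev = wifidev;
        skb->protocol = htons(ETH_P_802_2);
        netif_rx(skb);
}

/* Parse side: addr2 sits 10 bytes into the 802.11 header
 * (was: memcpy(haddr, skb->mac.raw + 10, ETH_ALEN)). */
static int example_header_parse(struct sk_buff *skb, unsigned char *haddr)
{
        memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN);
        return ETH_ALEN;
}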
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c index 4688e56b69c7..498e8486d125 100644 --- a/drivers/net/wireless/arlan-main.c +++ b/drivers/net/wireless/arlan-main.c | |||
@@ -1500,7 +1500,6 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short | |||
1500 | break; | 1500 | break; |
1501 | } | 1501 | } |
1502 | skb_reserve(skb, 2); | 1502 | skb_reserve(skb, 2); |
1503 | skb->dev = dev; | ||
1504 | skbtmp = skb_put(skb, pkt_len); | 1503 | skbtmp = skb_put(skb, pkt_len); |
1505 | 1504 | ||
1506 | memcpy_fromio(skbtmp + ARLAN_FAKE_HDR_LEN, ((char __iomem *) arlan) + rxOffset, pkt_len - ARLAN_FAKE_HDR_LEN); | 1505 | memcpy_fromio(skbtmp + ARLAN_FAKE_HDR_LEN, ((char __iomem *) arlan) + rxOffset, pkt_len - ARLAN_FAKE_HDR_LEN); |
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 23eba698aec5..51a7db53afa5 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c | |||
@@ -827,14 +827,14 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) | |||
827 | if (priv->wep_is_on) | 827 | if (priv->wep_is_on) |
828 | frame_ctl |= IEEE80211_FCTL_PROTECTED; | 828 | frame_ctl |= IEEE80211_FCTL_PROTECTED; |
829 | if (priv->operating_mode == IW_MODE_ADHOC) { | 829 | if (priv->operating_mode == IW_MODE_ADHOC) { |
830 | memcpy(&header.addr1, skb->data, 6); | 830 | skb_copy_from_linear_data(skb, &header.addr1, 6); |
831 | memcpy(&header.addr2, dev->dev_addr, 6); | 831 | memcpy(&header.addr2, dev->dev_addr, 6); |
832 | memcpy(&header.addr3, priv->BSSID, 6); | 832 | memcpy(&header.addr3, priv->BSSID, 6); |
833 | } else { | 833 | } else { |
834 | frame_ctl |= IEEE80211_FCTL_TODS; | 834 | frame_ctl |= IEEE80211_FCTL_TODS; |
835 | memcpy(&header.addr1, priv->CurrentBSSID, 6); | 835 | memcpy(&header.addr1, priv->CurrentBSSID, 6); |
836 | memcpy(&header.addr2, dev->dev_addr, 6); | 836 | memcpy(&header.addr2, dev->dev_addr, 6); |
837 | memcpy(&header.addr3, skb->data, 6); | 837 | skb_copy_from_linear_data(skb, &header.addr3, 6); |
838 | } | 838 | } |
839 | 839 | ||
840 | if (priv->use_wpa) | 840 | if (priv->use_wpa) |
@@ -920,7 +920,6 @@ static void fast_rx_path(struct atmel_private *priv, | |||
920 | memcpy(&skbp[6], header->addr2, 6); /* source address */ | 920 | memcpy(&skbp[6], header->addr2, 6); /* source address */ |
921 | 921 | ||
922 | priv->dev->last_rx = jiffies; | 922 | priv->dev->last_rx = jiffies; |
923 | skb->dev = priv->dev; | ||
924 | skb->protocol = eth_type_trans(skb, priv->dev); | 923 | skb->protocol = eth_type_trans(skb, priv->dev); |
925 | skb->ip_summed = CHECKSUM_NONE; | 924 | skb->ip_summed = CHECKSUM_NONE; |
926 | netif_rx(skb); | 925 | netif_rx(skb); |
@@ -1028,7 +1027,6 @@ static void frag_rx_path(struct atmel_private *priv, | |||
1028 | priv->rx_buf, | 1027 | priv->rx_buf, |
1029 | priv->frag_len + 12); | 1028 | priv->frag_len + 12); |
1030 | priv->dev->last_rx = jiffies; | 1029 | priv->dev->last_rx = jiffies; |
1031 | skb->dev = priv->dev; | ||
1032 | skb->protocol = eth_type_trans(skb, priv->dev); | 1030 | skb->protocol = eth_type_trans(skb, priv->dev); |
1033 | skb->ip_summed = CHECKSUM_NONE; | 1031 | skb->ip_summed = CHECKSUM_NONE; |
1034 | netif_rx(skb); | 1032 | netif_rx(skb); |
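skb_copy_from_linear_data() and its variants, used heavily from here on, are thin wrappers that make the direction and source offset of a memcpy() on the linear skb area explicit. A sketch of the equivalences, assuming the helper signatures introduced alongside this series:

#include <linux/skbuff.h>
#include <linux/string.h>

static void example_linear_helpers(struct sk_buff *skb, void *buf,
                                   unsigned int len, int offset)
{
        /* == memcpy(buf, skb->data, len) */
        skb_copy_from_linear_data(skb, buf, len);

        /* == memcpy(buf, skb->data + offset, len) */
        skb_copy_from_linear_data_offset(skb, offset, buf, len);

        /* == memcpy(skb->data, buf, len) */
        skb_copy_to_linear_data(skb, buf, len);
}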
diff --git a/drivers/net/wireless/bcm43xx/Kconfig b/drivers/net/wireless/bcm43xx/Kconfig index 533993f538fc..ce397e4284f4 100644 --- a/drivers/net/wireless/bcm43xx/Kconfig +++ b/drivers/net/wireless/bcm43xx/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config BCM43XX | 1 | config BCM43XX |
2 | tristate "Broadcom BCM43xx wireless support" | 2 | tristate "Broadcom BCM43xx wireless support" |
3 | depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL | 3 | depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && WLAN_80211 && EXPERIMENTAL |
4 | select WIRELESS_EXT | ||
4 | select FW_LOADER | 5 | select FW_LOADER |
5 | select HW_RANDOM | 6 | select HW_RANDOM |
6 | ---help--- | 7 | ---help--- |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c index 6e0dc76400e5..e3d2e61a31ee 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c | |||
@@ -998,7 +998,8 @@ static void dma_tx_fragment(struct bcm43xx_dmaring *ring, | |||
998 | assert(0); | 998 | assert(0); |
999 | return; | 999 | return; |
1000 | } | 1000 | } |
1001 | memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); | 1001 | skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len), |
1002 | skb->len); | ||
1002 | dev_kfree_skb_any(skb); | 1003 | dev_kfree_skb_any(skb); |
1003 | skb = bounce_skb; | 1004 | skb = bounce_skb; |
1004 | } | 1005 | } |
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig index 308f773ad566..1fef33169fdd 100644 --- a/drivers/net/wireless/hostap/Kconfig +++ b/drivers/net/wireless/hostap/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config HOSTAP | 1 | config HOSTAP |
2 | tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)" | 2 | tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)" |
3 | depends on NET_RADIO | 3 | depends on WLAN_80211 |
4 | select WIRELESS_EXT | ||
4 | select IEEE80211 | 5 | select IEEE80211 |
5 | select IEEE80211_CRYPT_WEP | 6 | select IEEE80211_CRYPT_WEP |
6 | ---help--- | 7 | ---help--- |
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index 7e04dc94b3bc..cbedc9ee740a 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c | |||
@@ -167,7 +167,7 @@ hdr->f.status = s; hdr->f.len = l; hdr->f.data = d | |||
167 | 167 | ||
168 | ret = skb->len - phdrlen; | 168 | ret = skb->len - phdrlen; |
169 | skb->dev = dev; | 169 | skb->dev = dev; |
170 | skb->mac.raw = skb->data; | 170 | skb_reset_mac_header(skb); |
171 | skb_pull(skb, hdrlen); | 171 | skb_pull(skb, hdrlen); |
172 | if (prism_header) | 172 | if (prism_header) |
173 | skb_pull(skb, phdrlen); | 173 | skb_pull(skb, phdrlen); |
@@ -933,12 +933,14 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, | |||
933 | if (frag == 0) { | 933 | if (frag == 0) { |
934 | /* copy first fragment (including full headers) into | 934 | /* copy first fragment (including full headers) into |
935 | * beginning of the fragment cache skb */ | 935 | * beginning of the fragment cache skb */ |
936 | memcpy(skb_put(frag_skb, flen), skb->data, flen); | 936 | skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), |
937 | flen); | ||
937 | } else { | 938 | } else { |
938 | /* append frame payload to the end of the fragment | 939 | /* append frame payload to the end of the fragment |
939 | * cache skb */ | 940 | * cache skb */ |
940 | memcpy(skb_put(frag_skb, flen), skb->data + hdrlen, | 941 | skb_copy_from_linear_data_offset(skb, hdrlen, |
941 | flen); | 942 | skb_put(frag_skb, |
943 | flen), flen); | ||
942 | } | 944 | } |
943 | dev_kfree_skb(skb); | 945 | dev_kfree_skb(skb); |
944 | skb = NULL; | 946 | skb = NULL; |
@@ -1044,8 +1046,9 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, | |||
1044 | skb->len >= ETH_HLEN + ETH_ALEN) { | 1046 | skb->len >= ETH_HLEN + ETH_ALEN) { |
1045 | /* Non-standard frame: get addr4 from its bogus location after | 1047 | /* Non-standard frame: get addr4 from its bogus location after |
1046 | * the payload */ | 1048 | * the payload */ |
1047 | memcpy(skb->data + ETH_ALEN, | 1049 | skb_copy_from_linear_data_offset(skb, skb->len - ETH_ALEN, |
1048 | skb->data + skb->len - ETH_ALEN, ETH_ALEN); | 1050 | skb->data + ETH_ALEN, |
1051 | ETH_ALEN); | ||
1049 | skb_trim(skb, skb->len - ETH_ALEN); | 1052 | skb_trim(skb, skb->len - ETH_ALEN); |
1050 | } | 1053 | } |
1051 | 1054 | ||
@@ -1073,17 +1076,17 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, | |||
1073 | 1076 | ||
1074 | if (skb2 != NULL) { | 1077 | if (skb2 != NULL) { |
1075 | /* send to wireless media */ | 1078 | /* send to wireless media */ |
1076 | skb2->protocol = __constant_htons(ETH_P_802_3); | ||
1077 | skb2->mac.raw = skb2->nh.raw = skb2->data; | ||
1078 | /* skb2->nh.raw = skb2->data + ETH_HLEN; */ | ||
1079 | skb2->dev = dev; | 1079 | skb2->dev = dev; |
1080 | skb2->protocol = __constant_htons(ETH_P_802_3); | ||
1081 | skb_reset_mac_header(skb2); | ||
1082 | skb_reset_network_header(skb2); | ||
1083 | /* skb2->network_header += ETH_HLEN; */ | ||
1080 | dev_queue_xmit(skb2); | 1084 | dev_queue_xmit(skb2); |
1081 | } | 1085 | } |
1082 | 1086 | ||
1083 | if (skb) { | 1087 | if (skb) { |
1084 | skb->protocol = eth_type_trans(skb, dev); | 1088 | skb->protocol = eth_type_trans(skb, dev); |
1085 | memset(skb->cb, 0, sizeof(skb->cb)); | 1089 | memset(skb->cb, 0, sizeof(skb->cb)); |
1086 | skb->dev = dev; | ||
1087 | netif_rx(skb); | 1090 | netif_rx(skb); |
1088 | } | 1091 | } |
1089 | 1092 | ||
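The wireless-bridge path above re-injects the frame with dev_queue_xmit() after recording both header offsets with the new reset helpers. A condensed sketch, with the assignment order taken from the converted code:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static void example_bridge_to_wireless(struct sk_buff *skb2,
                                       struct net_device *dev)
{
        skb2->dev = dev;
        skb2->protocol = __constant_htons(ETH_P_802_3);
        skb_reset_mac_header(skb2);
        skb_reset_network_header(skb2);
        dev_queue_xmit(skb2);
}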
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 4a5be70c0419..246fac0e8001 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c | |||
@@ -146,7 +146,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
146 | fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; | 146 | fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; |
147 | /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA, | 147 | /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA, |
148 | * Addr4 = SA */ | 148 | * Addr4 = SA */ |
149 | memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | 149 | skb_copy_from_linear_data_offset(skb, ETH_ALEN, |
150 | &hdr.addr4, ETH_ALEN); | ||
150 | hdr_len += ETH_ALEN; | 151 | hdr_len += ETH_ALEN; |
151 | } else { | 152 | } else { |
152 | /* bogus 4-addr format to workaround Prism2 station | 153 | /* bogus 4-addr format to workaround Prism2 station |
@@ -159,7 +160,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
159 | /* SA from skb->data + ETH_ALEN will be added after | 160 | /* SA from skb->data + ETH_ALEN will be added after |
160 | * frame payload; use hdr.addr4 as a temporary buffer | 161 | * frame payload; use hdr.addr4 as a temporary buffer |
161 | */ | 162 | */ |
162 | memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | 163 | skb_copy_from_linear_data_offset(skb, ETH_ALEN, |
164 | &hdr.addr4, ETH_ALEN); | ||
163 | need_tailroom += ETH_ALEN; | 165 | need_tailroom += ETH_ALEN; |
164 | } | 166 | } |
165 | 167 | ||
@@ -174,24 +176,27 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
174 | else | 176 | else |
175 | memcpy(&hdr.addr1, local->bssid, ETH_ALEN); | 177 | memcpy(&hdr.addr1, local->bssid, ETH_ALEN); |
176 | memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); | 178 | memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); |
177 | memcpy(&hdr.addr3, skb->data, ETH_ALEN); | 179 | skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); |
178 | } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) { | 180 | } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) { |
179 | fc |= IEEE80211_FCTL_FROMDS; | 181 | fc |= IEEE80211_FCTL_FROMDS; |
180 | /* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */ | 182 | /* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */ |
181 | memcpy(&hdr.addr1, skb->data, ETH_ALEN); | 183 | skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); |
182 | memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); | 184 | memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); |
183 | memcpy(&hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); | 185 | skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3, |
186 | ETH_ALEN); | ||
184 | } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) { | 187 | } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) { |
185 | fc |= IEEE80211_FCTL_TODS; | 188 | fc |= IEEE80211_FCTL_TODS; |
186 | /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ | 189 | /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ |
187 | memcpy(&hdr.addr1, to_assoc_ap ? | 190 | memcpy(&hdr.addr1, to_assoc_ap ? |
188 | local->assoc_ap_addr : local->bssid, ETH_ALEN); | 191 | local->assoc_ap_addr : local->bssid, ETH_ALEN); |
189 | memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); | 192 | skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, |
190 | memcpy(&hdr.addr3, skb->data, ETH_ALEN); | 193 | ETH_ALEN); |
194 | skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); | ||
191 | } else if (local->iw_mode == IW_MODE_ADHOC) { | 195 | } else if (local->iw_mode == IW_MODE_ADHOC) { |
192 | /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ | 196 | /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ |
193 | memcpy(&hdr.addr1, skb->data, ETH_ALEN); | 197 | skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); |
194 | memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); | 198 | skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, |
199 | ETH_ALEN); | ||
195 | memcpy(&hdr.addr3, local->bssid, ETH_ALEN); | 200 | memcpy(&hdr.addr3, local->bssid, ETH_ALEN); |
196 | } | 201 | } |
197 | 202 | ||
@@ -237,7 +242,7 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
237 | iface->stats.tx_packets++; | 242 | iface->stats.tx_packets++; |
238 | iface->stats.tx_bytes += skb->len; | 243 | iface->stats.tx_bytes += skb->len; |
239 | 244 | ||
240 | skb->mac.raw = skb->data; | 245 | skb_reset_mac_header(skb); |
241 | meta = (struct hostap_skb_tx_data *) skb->cb; | 246 | meta = (struct hostap_skb_tx_data *) skb->cb; |
242 | memset(meta, 0, sizeof(*meta)); | 247 | memset(meta, 0, sizeof(*meta)); |
243 | meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; | 248 | meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; |
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index efb8cf3bd8ad..4ca8a27b8c55 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c | |||
@@ -982,7 +982,8 @@ static void prism2_send_mgmt(struct net_device *dev, | |||
982 | meta->tx_cb_idx = tx_cb_idx; | 982 | meta->tx_cb_idx = tx_cb_idx; |
983 | 983 | ||
984 | skb->dev = dev; | 984 | skb->dev = dev; |
985 | skb->mac.raw = skb->nh.raw = skb->data; | 985 | skb_reset_mac_header(skb); |
986 | skb_reset_network_header(skb); | ||
986 | dev_queue_xmit(skb); | 987 | dev_queue_xmit(skb); |
987 | } | 988 | } |
988 | #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ | 989 | #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ |
@@ -1276,8 +1277,8 @@ static char * ap_auth_make_challenge(struct ap_data *ap) | |||
1276 | return NULL; | 1277 | return NULL; |
1277 | } | 1278 | } |
1278 | 1279 | ||
1279 | memcpy(tmpbuf, skb->data + ap->crypt->extra_mpdu_prefix_len, | 1280 | skb_copy_from_linear_data_offset(skb, ap->crypt->extra_mpdu_prefix_len, |
1280 | WLAN_AUTH_CHALLENGE_LEN); | 1281 | tmpbuf, WLAN_AUTH_CHALLENGE_LEN); |
1281 | dev_kfree_skb(skb); | 1282 | dev_kfree_skb(skb); |
1282 | 1283 | ||
1283 | return tmpbuf; | 1284 | return tmpbuf; |
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index 3079378fb8cd..fb01fb95a9f0 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c | |||
@@ -1838,13 +1838,14 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev) | |||
1838 | 1838 | ||
1839 | /* skb->data starts with txdesc->frame_control */ | 1839 | /* skb->data starts with txdesc->frame_control */ |
1840 | hdr_len = 24; | 1840 | hdr_len = 24; |
1841 | memcpy(&txdesc.frame_control, skb->data, hdr_len); | 1841 | skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len); |
1842 | fc = le16_to_cpu(txdesc.frame_control); | 1842 | fc = le16_to_cpu(txdesc.frame_control); |
1843 | if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA && | 1843 | if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA && |
1844 | (fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS) && | 1844 | (fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS) && |
1845 | skb->len >= 30) { | 1845 | skb->len >= 30) { |
1846 | /* Addr4 */ | 1846 | /* Addr4 */ |
1847 | memcpy(txdesc.addr4, skb->data + hdr_len, ETH_ALEN); | 1847 | skb_copy_from_linear_data_offset(skb, hdr_len, txdesc.addr4, |
1848 | ETH_ALEN); | ||
1848 | hdr_len += ETH_ALEN; | 1849 | hdr_len += ETH_ALEN; |
1849 | } | 1850 | } |
1850 | 1851 | ||
@@ -2217,7 +2218,7 @@ static void hostap_tx_callback(local_info_t *local, | |||
2217 | memcpy(skb_put(skb, len), payload, len); | 2218 | memcpy(skb_put(skb, len), payload, len); |
2218 | 2219 | ||
2219 | skb->dev = local->dev; | 2220 | skb->dev = local->dev; |
2220 | skb->mac.raw = skb->data; | 2221 | skb_reset_mac_header(skb); |
2221 | 2222 | ||
2222 | cb->func(skb, ok, cb->data); | 2223 | cb->func(skb, ok, cb->data); |
2223 | } | 2224 | } |
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 9077e6edde34..1f9edd91565d 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c | |||
@@ -590,20 +590,20 @@ void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx) | |||
590 | 590 | ||
591 | int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr) | 591 | int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr) |
592 | { | 592 | { |
593 | memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); /* addr2 */ | 593 | memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ |
594 | return ETH_ALEN; | 594 | return ETH_ALEN; |
595 | } | 595 | } |
596 | 596 | ||
597 | 597 | ||
598 | int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr) | 598 | int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr) |
599 | { | 599 | { |
600 | if (*(u32 *)skb->mac.raw == LWNG_CAP_DID_BASE) { | 600 | const unsigned char *mac = skb_mac_header(skb); |
601 | memcpy(haddr, skb->mac.raw + | 601 | |
602 | sizeof(struct linux_wlan_ng_prism_hdr) + 10, | 602 | if (*(u32 *)mac == LWNG_CAP_DID_BASE) { |
603 | memcpy(haddr, mac + sizeof(struct linux_wlan_ng_prism_hdr) + 10, | ||
603 | ETH_ALEN); /* addr2 */ | 604 | ETH_ALEN); /* addr2 */ |
604 | } else { /* (*(u32 *)skb->mac.raw == htonl(LWNG_CAPHDR_VERSION)) */ | 605 | } else { /* (*(u32 *)mac == htonl(LWNG_CAPHDR_VERSION)) */ |
605 | memcpy(haddr, skb->mac.raw + | 606 | memcpy(haddr, mac + sizeof(struct linux_wlan_ng_cap_hdr) + 10, |
606 | sizeof(struct linux_wlan_ng_cap_hdr) + 10, | ||
607 | ETH_ALEN); /* addr2 */ | 607 | ETH_ALEN); /* addr2 */ |
608 | } | 608 | } |
609 | return ETH_ALEN; | 609 | return ETH_ALEN; |
@@ -1063,7 +1063,8 @@ int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype, | |||
1063 | meta->iface = netdev_priv(dev); | 1063 | meta->iface = netdev_priv(dev); |
1064 | 1064 | ||
1065 | skb->dev = dev; | 1065 | skb->dev = dev; |
1066 | skb->mac.raw = skb->nh.raw = skb->data; | 1066 | skb_reset_mac_header(skb); |
1067 | skb_reset_network_header(skb); | ||
1067 | dev_queue_xmit(skb); | 1068 | dev_queue_xmit(skb); |
1068 | 1069 | ||
1069 | return 0; | 1070 | return 0; |
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index ad6e4a428355..9137a4dd02eb 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -2416,8 +2416,9 @@ static void isr_rx(struct ipw2100_priv *priv, int i, | |||
2416 | #ifdef IPW2100_RX_DEBUG | 2416 | #ifdef IPW2100_RX_DEBUG |
2417 | /* Make a copy of the frame so we can dump it to the logs if | 2417 | /* Make a copy of the frame so we can dump it to the logs if |
2418 | * ieee80211_rx fails */ | 2418 | * ieee80211_rx fails */ |
2419 | memcpy(packet_data, packet->skb->data, | 2419 | skb_copy_from_linear_data(packet->skb, packet_data, |
2420 | min_t(u32, status->frame_size, IPW_RX_NIC_BUFFER_LENGTH)); | 2420 | min_t(u32, status->frame_size, |
2421 | IPW_RX_NIC_BUFFER_LENGTH)); | ||
2421 | #endif | 2422 | #endif |
2422 | 2423 | ||
2423 | if (!ieee80211_rx(priv->ieee, packet->skb, stats)) { | 2424 | if (!ieee80211_rx(priv->ieee, packet->skb, stats)) { |
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index c878a2f3239c..4839a45098cb 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -8133,7 +8133,7 @@ static void ipw_handle_mgmt_packet(struct ipw_priv *priv, | |||
8133 | skb->dev = priv->ieee->dev; | 8133 | skb->dev = priv->ieee->dev; |
8134 | 8134 | ||
8135 | /* Point raw at the ieee80211_stats */ | 8135 | /* Point raw at the ieee80211_stats */ |
8136 | skb->mac.raw = skb->data; | 8136 | skb_reset_mac_header(skb); |
8137 | 8137 | ||
8138 | skb->pkt_type = PACKET_OTHERHOST; | 8138 | skb->pkt_type = PACKET_OTHERHOST; |
8139 | skb->protocol = __constant_htons(ETH_P_80211_STATS); | 8139 | skb->protocol = __constant_htons(ETH_P_80211_STATS); |
@@ -10355,7 +10355,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, | |||
10355 | 10355 | ||
10356 | rt_hdr->it_len = dst->len; | 10356 | rt_hdr->it_len = dst->len; |
10357 | 10357 | ||
10358 | memcpy(skb_put(dst, len), src->data, len); | 10358 | skb_copy_from_linear_data(src, skb_put(dst, len), len); |
10359 | 10359 | ||
10360 | if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats)) | 10360 | if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats)) |
10361 | dev_kfree_skb_any(dst); | 10361 | dev_kfree_skb_any(dst); |
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c index a009ab517710..45b00e13ab2b 100644 --- a/drivers/net/wireless/netwave_cs.c +++ b/drivers/net/wireless/netwave_cs.c | |||
@@ -1283,7 +1283,6 @@ static int netwave_rx(struct net_device *dev) | |||
1283 | 1283 | ||
1284 | skb_reserve( skb, 2); /* Align IP on 16 byte */ | 1284 | skb_reserve( skb, 2); /* Align IP on 16 byte */ |
1285 | skb_put( skb, rcvLen); | 1285 | skb_put( skb, rcvLen); |
1286 | skb->dev = dev; | ||
1287 | 1286 | ||
1288 | /* Copy packet fragments to the skb data area */ | 1287 | /* Copy packet fragments to the skb data area */ |
1289 | ptr = (u_char*) skb->data; | 1288 | ptr = (u_char*) skb->data; |
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 4e7f6cf51436..062286dc8e15 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c | |||
@@ -689,7 +689,7 @@ static void orinoco_stat_gather(struct net_device *dev, | |||
689 | /* Note : gcc will optimise the whole section away if | 689 | /* Note : gcc will optimise the whole section away if |
690 | * WIRELESS_SPY is not defined... - Jean II */ | 690 | * WIRELESS_SPY is not defined... - Jean II */ |
691 | if (SPY_NUMBER(priv)) { | 691 | if (SPY_NUMBER(priv)) { |
692 | orinoco_spy_gather(dev, skb->mac.raw + ETH_ALEN, | 692 | orinoco_spy_gather(dev, skb_mac_header(skb) + ETH_ALEN, |
693 | desc->signal, desc->silence); | 693 | desc->signal, desc->silence); |
694 | } | 694 | } |
695 | } | 695 | } |
@@ -770,7 +770,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid, | |||
770 | 770 | ||
771 | /* Copy the 802.11 header to the skb */ | 771 | /* Copy the 802.11 header to the skb */ |
772 | memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen); | 772 | memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen); |
773 | skb->mac.raw = skb->data; | 773 | skb_reset_mac_header(skb); |
774 | 774 | ||
775 | /* If any, copy the data from the card to the skb */ | 775 | /* If any, copy the data from the card to the skb */ |
776 | if (datalen > 0) { | 776 | if (datalen > 0) { |
@@ -915,7 +915,6 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw) | |||
915 | memcpy(hdr->h_source, desc.addr2, ETH_ALEN); | 915 | memcpy(hdr->h_source, desc.addr2, ETH_ALEN); |
916 | 916 | ||
917 | dev->last_rx = jiffies; | 917 | dev->last_rx = jiffies; |
918 | skb->dev = dev; | ||
919 | skb->protocol = eth_type_trans(skb, dev); | 918 | skb->protocol = eth_type_trans(skb, dev); |
920 | skb->ip_summed = CHECKSUM_NONE; | 919 | skb->ip_summed = CHECKSUM_NONE; |
921 | if (fc & IEEE80211_FCTL_TODS) | 920 | if (fc & IEEE80211_FCTL_TODS) |
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index b1122912ee2d..dd070cccf324 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c | |||
@@ -136,7 +136,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) | |||
136 | printk("islpci_eth_transmit:wds_mac\n"); | 136 | printk("islpci_eth_transmit:wds_mac\n"); |
137 | #endif | 137 | #endif |
138 | memmove(skb->data + 6, src, skb->len); | 138 | memmove(skb->data + 6, src, skb->len); |
139 | memcpy(skb->data, wds_mac, 6); | 139 | skb_copy_to_linear_data(skb, wds_mac, 6); |
140 | } else { | 140 | } else { |
141 | memmove(skb->data, src, skb->len); | 141 | memmove(skb->data, src, skb->len); |
142 | } | 142 | } |
@@ -162,13 +162,16 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) | |||
162 | 162 | ||
163 | skb_put(newskb, init_wds ? skb->len + 6 : skb->len); | 163 | skb_put(newskb, init_wds ? skb->len + 6 : skb->len); |
164 | if (init_wds) { | 164 | if (init_wds) { |
165 | memcpy(newskb->data + 6, skb->data, skb->len); | 165 | skb_copy_from_linear_data(skb, |
166 | memcpy(newskb->data, wds_mac, 6); | 166 | newskb->data + 6, |
167 | skb->len); | ||
168 | skb_copy_to_linear_data(newskb, wds_mac, 6); | ||
167 | #ifdef ISLPCI_ETH_DEBUG | 169 | #ifdef ISLPCI_ETH_DEBUG |
168 | printk("islpci_eth_transmit:wds_mac\n"); | 170 | printk("islpci_eth_transmit:wds_mac\n"); |
169 | #endif | 171 | #endif |
170 | } else | 172 | } else |
171 | memcpy(newskb->data, skb->data, skb->len); | 173 | skb_copy_from_linear_data(skb, newskb->data, |
174 | skb->len); | ||
172 | 175 | ||
173 | #if VERBOSE > SHOW_ERROR_MESSAGES | 176 | #if VERBOSE > SHOW_ERROR_MESSAGES |
174 | DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n", | 177 | DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n", |
@@ -303,7 +306,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb) | |||
303 | skb_pull(*skb, sizeof (struct rfmon_header)); | 306 | skb_pull(*skb, sizeof (struct rfmon_header)); |
304 | 307 | ||
305 | (*skb)->protocol = htons(ETH_P_802_2); | 308 | (*skb)->protocol = htons(ETH_P_802_2); |
306 | (*skb)->mac.raw = (*skb)->data; | 309 | skb_reset_mac_header(*skb); |
307 | (*skb)->pkt_type = PACKET_OTHERHOST; | 310 | (*skb)->pkt_type = PACKET_OTHERHOST; |
308 | 311 | ||
309 | return 0; | 312 | return 0; |
@@ -374,10 +377,6 @@ islpci_eth_receive(islpci_private *priv) | |||
374 | DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data); | 377 | DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data); |
375 | display_buffer((char *) skb->data, skb->len); | 378 | display_buffer((char *) skb->data, skb->len); |
376 | #endif | 379 | #endif |
377 | |||
378 | /* do some additional sk_buff and network layer parameters */ | ||
379 | skb->dev = ndev; | ||
380 | |||
381 | /* take care of monitor mode and spy monitoring. */ | 380 | /* take care of monitor mode and spy monitoring. */ |
382 | if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) | 381 | if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) |
383 | discard = islpci_monitor_rx(priv, &skb); | 382 | discard = islpci_monitor_rx(priv, &skb); |
@@ -398,8 +397,10 @@ islpci_eth_receive(islpci_private *priv) | |||
398 | /* Update spy records */ | 397 | /* Update spy records */ |
399 | wireless_spy_update(ndev, annex->addr2, &wstats); | 398 | wireless_spy_update(ndev, annex->addr2, &wstats); |
400 | 399 | ||
401 | memcpy(skb->data + sizeof (struct rfmon_header), | 400 | skb_copy_from_linear_data(skb, |
402 | skb->data, 2 * ETH_ALEN); | 401 | (skb->data + |
402 | sizeof(struct rfmon_header)), | ||
403 | 2 * ETH_ALEN); | ||
403 | skb_pull(skb, sizeof (struct rfmon_header)); | 404 | skb_pull(skb, sizeof (struct rfmon_header)); |
404 | } | 405 | } |
405 | skb->protocol = eth_type_trans(skb, ndev); | 406 | skb->protocol = eth_type_trans(skb, ndev); |
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 47b2ccb6a633..3be624295a1f 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c | |||
@@ -2232,7 +2232,6 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i | |||
2232 | return; | 2232 | return; |
2233 | } | 2233 | } |
2234 | skb_reserve( skb, 2); /* Align IP on 16 byte (TBD check this)*/ | 2234 | skb_reserve( skb, 2); /* Align IP on 16 byte (TBD check this)*/ |
2235 | skb->dev = dev; | ||
2236 | 2235 | ||
2237 | DEBUG(4,"ray_cs rx_data total_len = %x, rx_len = %x\n",total_len,rx_len); | 2236 | DEBUG(4,"ray_cs rx_data total_len = %x, rx_len = %x\n",total_len,rx_len); |
2238 | 2237 | ||
@@ -2243,7 +2242,8 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i | |||
2243 | rx_ptr += copy_from_rx_buff(local, rx_ptr, pkt_addr & RX_BUFF_END, rx_len); | 2242 | rx_ptr += copy_from_rx_buff(local, rx_ptr, pkt_addr & RX_BUFF_END, rx_len); |
2244 | /* Get source address */ | 2243 | /* Get source address */ |
2245 | #ifdef WIRELESS_SPY | 2244 | #ifdef WIRELESS_SPY |
2246 | memcpy(linksrcaddr, ((struct mac_header *)skb->data)->addr_2, ETH_ALEN); | 2245 | skb_copy_from_linear_data_offset(skb, offsetof(struct mac_header, addr_2), |
2246 | linksrcaddr, ETH_ALEN); | ||
2247 | #endif | 2247 | #endif |
2248 | /* Now, deal with encapsulation/translation/sniffer */ | 2248 | /* Now, deal with encapsulation/translation/sniffer */ |
2249 | if (!sniffer) { | 2249 | if (!sniffer) { |
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index f5ce1c6063d8..2a299a0676a6 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c | |||
@@ -2009,7 +2009,7 @@ static void deliver_packet(struct strip *strip_info, STRIP_Header * header, | |||
2009 | packetlen); | 2009 | packetlen); |
2010 | skb->dev = get_strip_dev(strip_info); | 2010 | skb->dev = get_strip_dev(strip_info); |
2011 | skb->protocol = header->protocol; | 2011 | skb->protocol = header->protocol; |
2012 | skb->mac.raw = skb->data; | 2012 | skb_reset_mac_header(skb); |
2013 | 2013 | ||
2014 | /* Having put a fake header on the front of the sk_buff for the */ | 2014 | /* Having put a fake header on the front of the sk_buff for the */ |
2015 | /* benefit of tools like tcpdump, skb_pull now 'consumes' that */ | 2015 | /* benefit of tools like tcpdump, skb_pull now 'consumes' that */ |
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c index 2aa3c761dd83..1cf090d60edc 100644 --- a/drivers/net/wireless/wavelan.c +++ b/drivers/net/wireless/wavelan.c | |||
@@ -2512,14 +2512,13 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) | |||
2512 | return; | 2512 | return; |
2513 | } | 2513 | } |
2514 | 2514 | ||
2515 | skb->dev = dev; | ||
2516 | |||
2517 | /* Copy the packet to the buffer. */ | 2515 | /* Copy the packet to the buffer. */ |
2518 | obram_read(ioaddr, buf_off, skb_put(skb, sksize), sksize); | 2516 | obram_read(ioaddr, buf_off, skb_put(skb, sksize), sksize); |
2519 | skb->protocol = eth_type_trans(skb, dev); | 2517 | skb->protocol = eth_type_trans(skb, dev); |
2520 | 2518 | ||
2521 | #ifdef DEBUG_RX_INFO | 2519 | #ifdef DEBUG_RX_INFO |
2522 | wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read"); | 2520 | wv_packet_info(skb_mac_header(skb), sksize, dev->name, |
2521 | "wv_packet_read"); | ||
2523 | #endif /* DEBUG_RX_INFO */ | 2522 | #endif /* DEBUG_RX_INFO */ |
2524 | 2523 | ||
2525 | /* Statistics-gathering and associated stuff. | 2524 | /* Statistics-gathering and associated stuff. |
@@ -2555,7 +2554,7 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) | |||
2555 | 2554 | ||
2556 | /* Spying stuff */ | 2555 | /* Spying stuff */ |
2557 | #ifdef IW_WIRELESS_SPY | 2556 | #ifdef IW_WIRELESS_SPY |
2558 | wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, | 2557 | wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE, |
2559 | stats); | 2558 | stats); |
2560 | #endif /* IW_WIRELESS_SPY */ | 2559 | #endif /* IW_WIRELESS_SPY */ |
2561 | #ifdef HISTOGRAM | 2560 | #ifdef HISTOGRAM |
@@ -2939,7 +2938,7 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev) | |||
2939 | * need to pad. Jean II */ | 2938 | * need to pad. Jean II */ |
2940 | if (skb->len < ETH_ZLEN) { | 2939 | if (skb->len < ETH_ZLEN) { |
2941 | memset(data, 0, ETH_ZLEN); | 2940 | memset(data, 0, ETH_ZLEN); |
2942 | memcpy(data, skb->data, skb->len); | 2941 | skb_copy_from_linear_data(skb, data, skb->len); |
2943 | /* Write packet on the card */ | 2942 | /* Write packet on the card */ |
2944 | if(wv_packet_write(dev, data, ETH_ZLEN)) | 2943 | if(wv_packet_write(dev, data, ETH_ZLEN)) |
2945 | return 1; /* We failed */ | 2944 | return 1; /* We failed */ |
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index b04239792f63..67b867f837ca 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c | |||
@@ -2884,14 +2884,12 @@ wv_packet_read(struct net_device * dev, | |||
2884 | return; | 2884 | return; |
2885 | } | 2885 | } |
2886 | 2886 | ||
2887 | skb->dev = dev; | ||
2888 | |||
2889 | skb_reserve(skb, 2); | 2887 | skb_reserve(skb, 2); |
2890 | fd_p = read_ringbuf(dev, fd_p, (char *) skb_put(skb, sksize), sksize); | 2888 | fd_p = read_ringbuf(dev, fd_p, (char *) skb_put(skb, sksize), sksize); |
2891 | skb->protocol = eth_type_trans(skb, dev); | 2889 | skb->protocol = eth_type_trans(skb, dev); |
2892 | 2890 | ||
2893 | #ifdef DEBUG_RX_INFO | 2891 | #ifdef DEBUG_RX_INFO |
2894 | wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read"); | 2892 | wv_packet_info(skb_mac_header(skb), sksize, dev->name, "wv_packet_read"); |
2895 | #endif /* DEBUG_RX_INFO */ | 2893 | #endif /* DEBUG_RX_INFO */ |
2896 | 2894 | ||
2897 | /* Statistics gathering & stuff associated. | 2895 | /* Statistics gathering & stuff associated. |
@@ -2925,7 +2923,7 @@ wv_packet_read(struct net_device * dev, | |||
2925 | #endif /* WAVELAN_ROAMING */ | 2923 | #endif /* WAVELAN_ROAMING */ |
2926 | 2924 | ||
2927 | #ifdef WIRELESS_SPY | 2925 | #ifdef WIRELESS_SPY |
2928 | wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, stats); | 2926 | wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE, stats); |
2929 | #endif /* WIRELESS_SPY */ | 2927 | #endif /* WIRELESS_SPY */ |
2930 | #ifdef HISTOGRAM | 2928 | #ifdef HISTOGRAM |
2931 | wl_his_gather(dev, stats); | 2929 | wl_his_gather(dev, stats); |
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 6cb66a356c96..935b144d9b56 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c | |||
@@ -327,7 +327,6 @@ static void zd1201_usbrx(struct urb *urb) | |||
327 | memcpy(skb_put(skb, 6), &data[datalen-8], 6); | 327 | memcpy(skb_put(skb, 6), &data[datalen-8], 6); |
328 | memcpy(skb_put(skb, 2), &data[datalen-24], 2); | 328 | memcpy(skb_put(skb, 2), &data[datalen-24], 2); |
329 | memcpy(skb_put(skb, len), data, len); | 329 | memcpy(skb_put(skb, len), data, len); |
330 | skb->dev = zd->dev; | ||
331 | skb->dev->last_rx = jiffies; | 330 | skb->dev->last_rx = jiffies; |
332 | skb->protocol = eth_type_trans(skb, zd->dev); | 331 | skb->protocol = eth_type_trans(skb, zd->dev); |
333 | zd->stats.rx_packets++; | 332 | zd->stats.rx_packets++; |
@@ -385,7 +384,6 @@ static void zd1201_usbrx(struct urb *urb) | |||
385 | memcpy(skb_put(skb, 2), &data[6], 2); | 384 | memcpy(skb_put(skb, 2), &data[6], 2); |
386 | memcpy(skb_put(skb, len), data+8, len); | 385 | memcpy(skb_put(skb, len), data+8, len); |
387 | } | 386 | } |
388 | skb->dev = zd->dev; | ||
389 | skb->dev->last_rx = jiffies; | 387 | skb->dev->last_rx = jiffies; |
390 | skb->protocol = eth_type_trans(skb, zd->dev); | 388 | skb->protocol = eth_type_trans(skb, zd->dev); |
391 | zd->stats.rx_packets++; | 389 | zd->stats.rx_packets++; |
@@ -809,10 +807,10 @@ static int zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
809 | txbuf[4] = 0x00; | 807 | txbuf[4] = 0x00; |
810 | txbuf[5] = 0x00; | 808 | txbuf[5] = 0x00; |
811 | 809 | ||
812 | memcpy(txbuf+6, skb->data+12, skb->len-12); | 810 | skb_copy_from_linear_data_offset(skb, 12, txbuf + 6, skb->len - 12); |
813 | if (pad) | 811 | if (pad) |
814 | txbuf[skb->len-12+6]=0; | 812 | txbuf[skb->len-12+6]=0; |
815 | memcpy(txbuf+skb->len-12+6+pad, skb->data, 12); | 813 | skb_copy_from_linear_data(skb, txbuf + skb->len - 12 + 6 + pad, 12); |
816 | *(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6); | 814 | *(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6); |
817 | txbuf[txbuflen-1] = 0; | 815 | txbuf[txbuflen-1] = 0; |
818 | 816 | ||
diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zd1211rw/Kconfig index 66ed55bc5460..d1ab24a95630 100644 --- a/drivers/net/wireless/zd1211rw/Kconfig +++ b/drivers/net/wireless/zd1211rw/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config ZD1211RW | 1 | config ZD1211RW |
2 | tristate "ZyDAS ZD1211/ZD1211B USB-wireless support" | 2 | tristate "ZyDAS ZD1211/ZD1211B USB-wireless support" |
3 | depends on USB && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL | 3 | depends on USB && IEEE80211_SOFTMAC && WLAN_80211 && EXPERIMENTAL |
4 | select WIRELESS_EXT | ||
4 | select FW_LOADER | 5 | select FW_LOADER |
5 | ---help--- | 6 | ---help--- |
6 | This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless | 7 | This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless |
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index aac8a1c5ba08..edaaad2f648b 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -62,6 +62,7 @@ static struct usb_device_id usb_ids[] = { | |||
62 | { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, | 62 | { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, |
63 | { USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B }, | 63 | { USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B }, |
64 | { USB_DEVICE(0x0586, 0x340f), .driver_info = DEVICE_ZD1211B }, | 64 | { USB_DEVICE(0x0586, 0x340f), .driver_info = DEVICE_ZD1211B }, |
65 | { USB_DEVICE(0x0baf, 0x0121), .driver_info = DEVICE_ZD1211B }, | ||
65 | /* "Driverless" devices that need ejecting */ | 66 | /* "Driverless" devices that need ejecting */ |
66 | { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, | 67 | { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, |
67 | {} | 68 | {} |
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 2412ce4917f2..3f4a7cf9efea 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c | |||
@@ -1137,7 +1137,6 @@ static int yellowfin_rx(struct net_device *dev) | |||
1137 | skb = dev_alloc_skb(pkt_len + 2); | 1137 | skb = dev_alloc_skb(pkt_len + 2); |
1138 | if (skb == NULL) | 1138 | if (skb == NULL) |
1139 | break; | 1139 | break; |
1140 | skb->dev = dev; | ||
1141 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1140 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1142 | eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0); | 1141 | eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0); |
1143 | skb_put(skb, pkt_len); | 1142 | skb_put(skb, pkt_len); |
diff --git a/drivers/net/znet.c b/drivers/net/znet.c index b24b0727108c..4032e9f6f9b0 100644 --- a/drivers/net/znet.c +++ b/drivers/net/znet.c | |||
@@ -774,7 +774,6 @@ static void znet_rx(struct net_device *dev) | |||
774 | znet->stats.rx_dropped++; | 774 | znet->stats.rx_dropped++; |
775 | break; | 775 | break; |
776 | } | 776 | } |
777 | skb->dev = dev; | ||
778 | 777 | ||
779 | if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) { | 778 | if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) { |
780 | int semi_cnt = (znet->rx_end - znet->rx_cur)<<1; | 779 | int semi_cnt = (znet->rx_end - znet->rx_cur)<<1; |
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index d190c05d87ed..453e6829756c 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c | |||
@@ -372,9 +372,9 @@ static __inline__ int led_get_net_activity(void) | |||
372 | continue; | 372 | continue; |
373 | if (LOOPBACK(in_dev->ifa_list->ifa_local)) | 373 | if (LOOPBACK(in_dev->ifa_list->ifa_local)) |
374 | continue; | 374 | continue; |
375 | if (!dev->get_stats) | ||
376 | continue; | ||
377 | stats = dev->get_stats(dev); | 375 | stats = dev->get_stats(dev); |
376 | if (!stats) | ||
377 | continue; | ||
378 | rx_total += stats->rx_packets; | 378 | rx_total += stats->rx_packets; |
379 | tx_total += stats->tx_packets; | 379 | tx_total += stats->tx_packets; |
380 | } | 380 | } |
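The led.c hunk drops the NULL test on the dev->get_stats method and checks the returned statistics pointer instead. A sketch of the resulting per-device accounting step, with hypothetical names:

#include <linux/netdevice.h>

static void example_count_device(struct net_device *dev,
                                 unsigned long *rx_total,
                                 unsigned long *tx_total)
{
        struct net_device_stats *stats = dev->get_stats(dev);

        if (!stats)
                return;
        *rx_total += stats->rx_packets;
        *tx_total += stats->tx_packets;
}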
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c index 9793533276ec..400bb90084cf 100644 --- a/drivers/parport/parport_sunbpp.c +++ b/drivers/parport/parport_sunbpp.c | |||
@@ -126,7 +126,7 @@ static unsigned char status_sunbpp_to_pc(struct parport *p) | |||
126 | if (!(value_tcr & P_TCR_BUSY)) | 126 | if (!(value_tcr & P_TCR_BUSY)) |
127 | bits |= PARPORT_STATUS_BUSY; | 127 | bits |= PARPORT_STATUS_BUSY; |
128 | 128 | ||
129 | dprintk((KERN_DEBUG "tcr 0x%x ir 0x%x\n", regs->p_tcr, regs->p_ir)); | 129 | dprintk((KERN_DEBUG "tcr 0x%x ir 0x%x\n", value_tcr, value_ir)); |
130 | dprintk((KERN_DEBUG "read status 0x%x\n", bits)); | 130 | dprintk((KERN_DEBUG "read status 0x%x\n", bits)); |
131 | return bits; | 131 | return bits; |
132 | } | 132 | } |
@@ -147,7 +147,7 @@ static unsigned char control_sunbpp_to_pc(struct parport *p) | |||
147 | if (value_or & P_OR_SLCT_IN) | 147 | if (value_or & P_OR_SLCT_IN) |
148 | bits |= PARPORT_CONTROL_SELECT; | 148 | bits |= PARPORT_CONTROL_SELECT; |
149 | 149 | ||
150 | dprintk((KERN_DEBUG "tcr 0x%x or 0x%x\n", regs->p_tcr, regs->p_or)); | 150 | dprintk((KERN_DEBUG "tcr 0x%x or 0x%x\n", value_tcr, value_or)); |
151 | dprintk((KERN_DEBUG "read control 0x%x\n", bits)); | 151 | dprintk((KERN_DEBUG "read control 0x%x\n", bits)); |
152 | return bits; | 152 | return bits; |
153 | } | 153 | } |
@@ -165,7 +165,8 @@ static unsigned char parport_sunbpp_frob_control(struct parport *p, | |||
165 | unsigned char value_tcr = sbus_readb(®s->p_tcr); | 165 | unsigned char value_tcr = sbus_readb(®s->p_tcr); |
166 | unsigned char value_or = sbus_readb(®s->p_or); | 166 | unsigned char value_or = sbus_readb(®s->p_or); |
167 | 167 | ||
168 | dprintk((KERN_DEBUG "frob1: tcr 0x%x or 0x%x\n", regs->p_tcr, regs->p_or)); | 168 | dprintk((KERN_DEBUG "frob1: tcr 0x%x or 0x%x\n", |
169 | value_tcr, value_or)); | ||
169 | if (mask & PARPORT_CONTROL_STROBE) { | 170 | if (mask & PARPORT_CONTROL_STROBE) { |
170 | if (val & PARPORT_CONTROL_STROBE) { | 171 | if (val & PARPORT_CONTROL_STROBE) { |
171 | value_tcr &= ~P_TCR_DS; | 172 | value_tcr &= ~P_TCR_DS; |
@@ -197,7 +198,8 @@ static unsigned char parport_sunbpp_frob_control(struct parport *p, | |||
197 | 198 | ||
198 | sbus_writeb(value_or, ®s->p_or); | 199 | sbus_writeb(value_or, ®s->p_or); |
199 | sbus_writeb(value_tcr, ®s->p_tcr); | 200 | sbus_writeb(value_tcr, ®s->p_tcr); |
200 | dprintk((KERN_DEBUG "frob2: tcr 0x%x or 0x%x\n", regs->p_tcr, regs->p_or)); | 201 | dprintk((KERN_DEBUG "frob2: tcr 0x%x or 0x%x\n", |
202 | value_tcr, value_or)); | ||
201 | return parport_sunbpp_read_control(p); | 203 | return parport_sunbpp_read_control(p); |
202 | } | 204 | } |
203 | 205 | ||
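The parport_sunbpp hunks print the register values that were already fetched with sbus_readb() rather than dereferencing the register block again inside dprintk(). A sketch of the pattern (assumption: avoiding the repeated direct __iomem dereference is the point of the change; sbus_readb() is the sparc accessor used by this driver):

#include <linux/kernel.h>
#include <asm/io.h>

static void example_dump_regs(void __iomem *tcr_reg, void __iomem *or_reg)
{
        unsigned char value_tcr = sbus_readb(tcr_reg);
        unsigned char value_or = sbus_readb(or_reg);

        /* log the cached values; do not read the registers a second time
         * just for the debug message */
        printk(KERN_DEBUG "tcr 0x%x or 0x%x\n", value_tcr, value_or);
}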
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index a4a96826d9e0..2fe1d690eb13 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -682,34 +682,7 @@ static void pci_read_irq(struct pci_dev *dev) | |||
682 | dev->irq = irq; | 682 | dev->irq = irq; |
683 | } | 683 | } |
684 | 684 | ||
685 | static void change_legacy_io_resource(struct pci_dev * dev, unsigned index, | 685 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
686 | unsigned start, unsigned end) | ||
687 | { | ||
688 | unsigned base = start & PCI_BASE_ADDRESS_IO_MASK; | ||
689 | unsigned len = (end | ~PCI_BASE_ADDRESS_IO_MASK) - base + 1; | ||
690 | |||
691 | /* | ||
692 | * Some X versions get confused when the BARs reported through | ||
693 | * /sys or /proc differ from those seen in config space, thus | ||
694 | * try to update the config space values, too. | ||
695 | */ | ||
696 | if (!(pci_resource_flags(dev, index) & IORESOURCE_IO)) | ||
697 | printk(KERN_WARNING "%s: cannot adjust BAR%u (not I/O)\n", | ||
698 | pci_name(dev), index); | ||
699 | else if (pci_resource_len(dev, index) != len) | ||
700 | printk(KERN_WARNING "%s: cannot adjust BAR%u (size %04X)\n", | ||
701 | pci_name(dev), index, (unsigned)pci_resource_len(dev, index)); | ||
702 | else { | ||
703 | printk(KERN_INFO "%s: trying to change BAR%u from %04X to %04X\n", | ||
704 | pci_name(dev), index, | ||
705 | (unsigned)pci_resource_start(dev, index), base); | ||
706 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + index * 4, base); | ||
707 | } | ||
708 | pci_resource_start(dev, index) = start; | ||
709 | pci_resource_end(dev, index) = end; | ||
710 | pci_resource_flags(dev, index) = | ||
711 | IORESOURCE_IO | IORESOURCE_PCI_FIXED | PCI_BASE_ADDRESS_SPACE_IO; | ||
712 | } | ||
713 | 686 | ||
714 | /** | 687 | /** |
715 | * pci_setup_device - fill in class and map information of a device | 688 | * pci_setup_device - fill in class and map information of a device |
@@ -762,12 +735,20 @@ static int pci_setup_device(struct pci_dev * dev) | |||
762 | u8 progif; | 735 | u8 progif; |
763 | pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); | 736 | pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); |
764 | if ((progif & 1) == 0) { | 737 | if ((progif & 1) == 0) { |
765 | change_legacy_io_resource(dev, 0, 0x1F0, 0x1F7); | 738 | dev->resource[0].start = 0x1F0; |
766 | change_legacy_io_resource(dev, 1, 0x3F6, 0x3F6); | 739 | dev->resource[0].end = 0x1F7; |
740 | dev->resource[0].flags = LEGACY_IO_RESOURCE; | ||
741 | dev->resource[1].start = 0x3F6; | ||
742 | dev->resource[1].end = 0x3F6; | ||
743 | dev->resource[1].flags = LEGACY_IO_RESOURCE; | ||
767 | } | 744 | } |
768 | if ((progif & 4) == 0) { | 745 | if ((progif & 4) == 0) { |
769 | change_legacy_io_resource(dev, 2, 0x170, 0x177); | 746 | dev->resource[2].start = 0x170; |
770 | change_legacy_io_resource(dev, 3, 0x376, 0x376); | 747 | dev->resource[2].end = 0x177; |
748 | dev->resource[2].flags = LEGACY_IO_RESOURCE; | ||
749 | dev->resource[3].start = 0x376; | ||
750 | dev->resource[3].end = 0x376; | ||
751 | dev->resource[3].flags = LEGACY_IO_RESOURCE; | ||
771 | } | 752 | } |
772 | } | 753 | } |
773 | break; | 754 | break; |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index eb5dc62f0d9c..e71929db8b06 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -398,6 +398,9 @@ dasd_change_state(struct dasd_device *device) | |||
398 | 398 | ||
399 | if (device->state == device->target) | 399 | if (device->state == device->target) |
400 | wake_up(&dasd_init_waitq); | 400 | wake_up(&dasd_init_waitq); |
401 | |||
402 | /* let user-space know that the device status changed */ | ||
403 | kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); | ||
401 | } | 404 | } |
402 | 405 | ||
403 | /* | 406 | /* |
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index ed70852cc915..6a89cefe99bb 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <asm/debug.h> | 20 | #include <asm/debug.h> |
21 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
22 | #include <asm/ipl.h> | ||
22 | 23 | ||
23 | /* This is ugly... */ | 24 | /* This is ugly... */ |
24 | #define PRINTK_HEADER "dasd_devmap:" | 25 | #define PRINTK_HEADER "dasd_devmap:" |
@@ -133,6 +134,8 @@ dasd_call_setup(char *str) | |||
133 | __setup ("dasd=", dasd_call_setup); | 134 | __setup ("dasd=", dasd_call_setup); |
134 | #endif /* #ifndef MODULE */ | 135 | #endif /* #ifndef MODULE */ |
135 | 136 | ||
137 | #define DASD_IPLDEV "ipldev" | ||
138 | |||
136 | /* | 139 | /* |
137 | * Read a device busid/devno from a string. | 140 | * Read a device busid/devno from a string. |
138 | */ | 141 | */ |
@@ -141,6 +144,20 @@ dasd_busid(char **str, int *id0, int *id1, int *devno) | |||
141 | { | 144 | { |
142 | int val, old_style; | 145 | int val, old_style; |
143 | 146 | ||
147 | /* Interpret ipldev busid */ | ||
148 | if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) { | ||
149 | if (ipl_info.type != IPL_TYPE_CCW) { | ||
150 | MESSAGE(KERN_ERR, "%s", "ipl device is not a ccw " | ||
151 | "device"); | ||
152 | return -EINVAL; | ||
153 | } | ||
154 | *id0 = 0; | ||
155 | *id1 = ipl_info.data.ccw.dev_id.ssid; | ||
156 | *devno = ipl_info.data.ccw.dev_id.devno; | ||
157 | *str += strlen(DASD_IPLDEV); | ||
158 | |||
159 | return 0; | ||
160 | } | ||
144 | /* check for leading '0x' */ | 161 | /* check for leading '0x' */ |
145 | old_style = 0; | 162 | old_style = 0; |
146 | if ((*str)[0] == '0' && (*str)[1] == 'x') { | 163 | if ((*str)[0] == '0' && (*str)[1] == 'x') { |
@@ -829,6 +846,46 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr, | |||
829 | static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); | 846 | static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); |
830 | 847 | ||
831 | static ssize_t | 848 | static ssize_t |
849 | dasd_device_status_show(struct device *dev, struct device_attribute *attr, | ||
850 | char *buf) | ||
851 | { | ||
852 | struct dasd_device *device; | ||
853 | ssize_t len; | ||
854 | |||
855 | device = dasd_device_from_cdev(to_ccwdev(dev)); | ||
856 | if (!IS_ERR(device)) { | ||
857 | switch (device->state) { | ||
858 | case DASD_STATE_NEW: | ||
859 | len = snprintf(buf, PAGE_SIZE, "new\n"); | ||
860 | break; | ||
861 | case DASD_STATE_KNOWN: | ||
862 | len = snprintf(buf, PAGE_SIZE, "detected\n"); | ||
863 | break; | ||
864 | case DASD_STATE_BASIC: | ||
865 | len = snprintf(buf, PAGE_SIZE, "basic\n"); | ||
866 | break; | ||
867 | case DASD_STATE_UNFMT: | ||
868 | len = snprintf(buf, PAGE_SIZE, "unformatted\n"); | ||
869 | break; | ||
870 | case DASD_STATE_READY: | ||
871 | len = snprintf(buf, PAGE_SIZE, "ready\n"); | ||
872 | break; | ||
873 | case DASD_STATE_ONLINE: | ||
874 | len = snprintf(buf, PAGE_SIZE, "online\n"); | ||
875 | break; | ||
876 | default: | ||
877 | len = snprintf(buf, PAGE_SIZE, "no stat\n"); | ||
878 | break; | ||
879 | } | ||
880 | dasd_put_device(device); | ||
881 | } else | ||
882 | len = snprintf(buf, PAGE_SIZE, "unknown\n"); | ||
883 | return len; | ||
884 | } | ||
885 | |||
886 | static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL); | ||
887 | |||
888 | static ssize_t | ||
832 | dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf) | 889 | dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf) |
833 | { | 890 | { |
834 | struct dasd_devmap *devmap; | 891 | struct dasd_devmap *devmap; |
@@ -939,6 +996,7 @@ static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store); | |||
939 | static struct attribute * dasd_attrs[] = { | 996 | static struct attribute * dasd_attrs[] = { |
940 | &dev_attr_readonly.attr, | 997 | &dev_attr_readonly.attr, |
941 | &dev_attr_discipline.attr, | 998 | &dev_attr_discipline.attr, |
999 | &dev_attr_status.attr, | ||
942 | &dev_attr_alias.attr, | 1000 | &dev_attr_alias.attr, |
943 | &dev_attr_vendor.attr, | 1001 | &dev_attr_vendor.attr, |
944 | &dev_attr_uid.attr, | 1002 | &dev_attr_uid.attr, |
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 293e667b50f2..c210784bdf46 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ | 5 | obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ |
6 | sclp_info.o | 6 | sclp_info.o sclp_config.o sclp_chp.o |
7 | 7 | ||
8 | obj-$(CONFIG_TN3270) += raw3270.o | 8 | obj-$(CONFIG_TN3270) += raw3270.o |
9 | obj-$(CONFIG_TN3270_CONSOLE) += con3270.o | 9 | obj-$(CONFIG_TN3270_CONSOLE) += con3270.o |
@@ -29,3 +29,6 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o | |||
29 | obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o | 29 | obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o |
30 | obj-$(CONFIG_MONREADER) += monreader.o | 30 | obj-$(CONFIG_MONREADER) += monreader.o |
31 | obj-$(CONFIG_MONWRITER) += monwriter.o | 31 | obj-$(CONFIG_MONWRITER) += monwriter.o |
32 | |||
33 | zcore_mod-objs := sclp_sdias.o zcore.o | ||
34 | obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o | ||
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 9a328f14a641..6000bdee4082 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
@@ -813,12 +813,6 @@ con3215_unblank(void) | |||
813 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); | 813 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); |
814 | } | 814 | } |
815 | 815 | ||
816 | static int __init | ||
817 | con3215_consetup(struct console *co, char *options) | ||
818 | { | ||
819 | return 0; | ||
820 | } | ||
821 | |||
822 | /* | 816 | /* |
823 | * The console structure for the 3215 console | 817 | * The console structure for the 3215 console |
824 | */ | 818 | */ |
@@ -827,7 +821,6 @@ static struct console con3215 = { | |||
827 | .write = con3215_write, | 821 | .write = con3215_write, |
828 | .device = con3215_device, | 822 | .device = con3215_device, |
829 | .unblank = con3215_unblank, | 823 | .unblank = con3215_unblank, |
830 | .setup = con3215_consetup, | ||
831 | .flags = CON_PRINTBUFFER, | 824 | .flags = CON_PRINTBUFFER, |
832 | }; | 825 | }; |
833 | 826 | ||
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 8e7f2d7633d6..fd3479119eb4 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c | |||
@@ -555,12 +555,6 @@ con3270_unblank(void) | |||
555 | spin_unlock_irqrestore(&cp->view.lock, flags); | 555 | spin_unlock_irqrestore(&cp->view.lock, flags); |
556 | } | 556 | } |
557 | 557 | ||
558 | static int __init | ||
559 | con3270_consetup(struct console *co, char *options) | ||
560 | { | ||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | /* | 558 | /* |
565 | * The console structure for the 3270 console | 559 | * The console structure for the 3270 console |
566 | */ | 560 | */ |
@@ -569,7 +563,6 @@ static struct console con3270 = { | |||
569 | .write = con3270_write, | 563 | .write = con3270_write, |
570 | .device = con3270_device, | 564 | .device = con3270_device, |
571 | .unblank = con3270_unblank, | 565 | .unblank = con3270_unblank, |
572 | .setup = con3270_consetup, | ||
573 | .flags = CON_PRINTBUFFER, | 566 | .flags = CON_PRINTBUFFER, |
574 | }; | 567 | }; |
575 | 568 | ||
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index f171de3b0b11..fa62e6944057 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/timer.h> | 15 | #include <linux/timer.h> |
16 | #include <linux/reboot.h> | 16 | #include <linux/reboot.h> |
17 | #include <linux/jiffies.h> | 17 | #include <linux/jiffies.h> |
18 | #include <linux/init.h> | ||
18 | #include <asm/types.h> | 19 | #include <asm/types.h> |
19 | #include <asm/s390_ext.h> | 20 | #include <asm/s390_ext.h> |
20 | 21 | ||
@@ -510,7 +511,7 @@ sclp_state_change_cb(struct evbuf_header *evbuf) | |||
510 | } | 511 | } |
511 | 512 | ||
512 | static struct sclp_register sclp_state_change_event = { | 513 | static struct sclp_register sclp_state_change_event = { |
513 | .receive_mask = EvTyp_StateChange_Mask, | 514 | .receive_mask = EVTYP_STATECHANGE_MASK, |
514 | .receiver_fn = sclp_state_change_cb | 515 | .receiver_fn = sclp_state_change_cb |
515 | }; | 516 | }; |
516 | 517 | ||
@@ -930,3 +931,10 @@ sclp_init(void) | |||
930 | sclp_init_mask(1); | 931 | sclp_init_mask(1); |
931 | return 0; | 932 | return 0; |
932 | } | 933 | } |
934 | |||
935 | static __init int sclp_initcall(void) | ||
936 | { | ||
937 | return sclp_init(); | ||
938 | } | ||
939 | |||
940 | arch_initcall(sclp_initcall); | ||
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 7d29ab45a6ed..87ac4a3ad49d 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h | |||
@@ -19,33 +19,37 @@ | |||
19 | #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) | 19 | #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) |
20 | #define MAX_CONSOLE_PAGES 4 | 20 | #define MAX_CONSOLE_PAGES 4 |
21 | 21 | ||
22 | #define EvTyp_OpCmd 0x01 | 22 | #define EVTYP_OPCMD 0x01 |
23 | #define EvTyp_Msg 0x02 | 23 | #define EVTYP_MSG 0x02 |
24 | #define EvTyp_StateChange 0x08 | 24 | #define EVTYP_STATECHANGE 0x08 |
25 | #define EvTyp_PMsgCmd 0x09 | 25 | #define EVTYP_PMSGCMD 0x09 |
26 | #define EvTyp_CntlProgOpCmd 0x20 | 26 | #define EVTYP_CNTLPROGOPCMD 0x20 |
27 | #define EvTyp_CntlProgIdent 0x0B | 27 | #define EVTYP_CNTLPROGIDENT 0x0B |
28 | #define EvTyp_SigQuiesce 0x1D | 28 | #define EVTYP_SIGQUIESCE 0x1D |
29 | #define EvTyp_VT220Msg 0x1A | 29 | #define EVTYP_VT220MSG 0x1A |
30 | 30 | #define EVTYP_CONFMGMDATA 0x04 | |
31 | #define EvTyp_OpCmd_Mask 0x80000000 | 31 | #define EVTYP_SDIAS 0x1C |
32 | #define EvTyp_Msg_Mask 0x40000000 | 32 | |
33 | #define EvTyp_StateChange_Mask 0x01000000 | 33 | #define EVTYP_OPCMD_MASK 0x80000000 |
34 | #define EvTyp_PMsgCmd_Mask 0x00800000 | 34 | #define EVTYP_MSG_MASK 0x40000000 |
35 | #define EvTyp_CtlProgOpCmd_Mask 0x00000001 | 35 | #define EVTYP_STATECHANGE_MASK 0x01000000 |
36 | #define EvTyp_CtlProgIdent_Mask 0x00200000 | 36 | #define EVTYP_PMSGCMD_MASK 0x00800000 |
37 | #define EvTyp_SigQuiesce_Mask 0x00000008 | 37 | #define EVTYP_CTLPROGOPCMD_MASK 0x00000001 |
38 | #define EvTyp_VT220Msg_Mask 0x00000040 | 38 | #define EVTYP_CTLPROGIDENT_MASK 0x00200000 |
39 | 39 | #define EVTYP_SIGQUIESCE_MASK 0x00000008 | |
40 | #define GnrlMsgFlgs_DOM 0x8000 | 40 | #define EVTYP_VT220MSG_MASK 0x00000040 |
41 | #define GnrlMsgFlgs_SndAlrm 0x4000 | 41 | #define EVTYP_CONFMGMDATA_MASK 0x10000000 |
42 | #define GnrlMsgFlgs_HoldMsg 0x2000 | 42 | #define EVTYP_SDIAS_MASK 0x00000010 |
43 | 43 | ||
44 | #define LnTpFlgs_CntlText 0x8000 | 44 | #define GNRLMSGFLGS_DOM 0x8000 |
45 | #define LnTpFlgs_LabelText 0x4000 | 45 | #define GNRLMSGFLGS_SNDALRM 0x4000 |
46 | #define LnTpFlgs_DataText 0x2000 | 46 | #define GNRLMSGFLGS_HOLDMSG 0x2000 |
47 | #define LnTpFlgs_EndText 0x1000 | 47 | |
48 | #define LnTpFlgs_PromptText 0x0800 | 48 | #define LNTPFLGS_CNTLTEXT 0x8000 |
49 | #define LNTPFLGS_LABELTEXT 0x4000 | ||
50 | #define LNTPFLGS_DATATEXT 0x2000 | ||
51 | #define LNTPFLGS_ENDTEXT 0x1000 | ||
52 | #define LNTPFLGS_PROMPTTEXT 0x0800 | ||
49 | 53 | ||
50 | typedef unsigned int sclp_cmdw_t; | 54 | typedef unsigned int sclp_cmdw_t; |
51 | 55 | ||
@@ -56,15 +60,15 @@ typedef unsigned int sclp_cmdw_t; | |||
56 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 | 60 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 |
57 | 61 | ||
58 | #define GDS_ID_MDSMU 0x1310 | 62 | #define GDS_ID_MDSMU 0x1310 |
59 | #define GDS_ID_MDSRouteInfo 0x1311 | 63 | #define GDS_ID_MDSROUTEINFO 0x1311 |
60 | #define GDS_ID_AgUnWrkCorr 0x1549 | 64 | #define GDS_ID_AGUNWRKCORR 0x1549 |
61 | #define GDS_ID_SNACondReport 0x1532 | 65 | #define GDS_ID_SNACONDREPORT 0x1532 |
62 | #define GDS_ID_CPMSU 0x1212 | 66 | #define GDS_ID_CPMSU 0x1212 |
63 | #define GDS_ID_RoutTargInstr 0x154D | 67 | #define GDS_ID_ROUTTARGINSTR 0x154D |
64 | #define GDS_ID_OpReq 0x8070 | 68 | #define GDS_ID_OPREQ 0x8070 |
65 | #define GDS_ID_TextCmd 0x1320 | 69 | #define GDS_ID_TEXTCMD 0x1320 |
66 | 70 | ||
67 | #define GDS_KEY_SelfDefTextMsg 0x31 | 71 | #define GDS_KEY_SELFDEFTEXTMSG 0x31 |
68 | 72 | ||
69 | typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ | 73 | typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ |
70 | 74 | ||
diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c new file mode 100644 index 000000000000..a66b914519b5 --- /dev/null +++ b/drivers/s390/char/sclp_chp.c | |||
@@ -0,0 +1,196 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/sclp_chp.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/gfp.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <linux/completion.h> | ||
12 | #include <asm/sclp.h> | ||
13 | #include <asm/chpid.h> | ||
14 | |||
15 | #include "sclp.h" | ||
16 | |||
17 | #define TAG "sclp_chp: " | ||
18 | |||
19 | #define SCLP_CMDW_CONFIGURE_CHANNEL_PATH 0x000f0001 | ||
20 | #define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH 0x000e0001 | ||
21 | #define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION 0x00030001 | ||
22 | |||
23 | static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid) | ||
24 | { | ||
25 | return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8; | ||
26 | } | ||
27 | |||
28 | static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid) | ||
29 | { | ||
30 | return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8; | ||
31 | } | ||
32 | |||
33 | static void chp_callback(struct sclp_req *req, void *data) | ||
34 | { | ||
35 | struct completion *completion = data; | ||
36 | |||
37 | complete(completion); | ||
38 | } | ||
39 | |||
40 | struct chp_cfg_sccb { | ||
41 | struct sccb_header header; | ||
42 | u8 ccm; | ||
43 | u8 reserved[6]; | ||
44 | u8 cssid; | ||
45 | } __attribute__((packed)); | ||
46 | |||
47 | struct chp_cfg_data { | ||
48 | struct chp_cfg_sccb sccb; | ||
49 | struct sclp_req req; | ||
50 | struct completion completion; | ||
51 | } __attribute__((packed)); | ||
52 | |||
53 | static int do_configure(sclp_cmdw_t cmd) | ||
54 | { | ||
55 | struct chp_cfg_data *data; | ||
56 | int rc; | ||
57 | |||
58 | /* Prepare sccb. */ | ||
59 | data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
60 | if (!data) | ||
61 | return -ENOMEM; | ||
62 | data->sccb.header.length = sizeof(struct chp_cfg_sccb); | ||
63 | data->req.command = cmd; | ||
64 | data->req.sccb = &(data->sccb); | ||
65 | data->req.status = SCLP_REQ_FILLED; | ||
66 | data->req.callback = chp_callback; | ||
67 | data->req.callback_data = &(data->completion); | ||
68 | init_completion(&data->completion); | ||
69 | |||
70 | /* Perform sclp request. */ | ||
71 | rc = sclp_add_request(&(data->req)); | ||
72 | if (rc) | ||
73 | goto out; | ||
74 | wait_for_completion(&data->completion); | ||
75 | |||
76 | /* Check response. */ | ||
77 | if (data->req.status != SCLP_REQ_DONE) { | ||
78 | printk(KERN_WARNING TAG "configure channel-path request failed " | ||
79 | "(status=0x%02x)\n", data->req.status); | ||
80 | rc = -EIO; | ||
81 | goto out; | ||
82 | } | ||
83 | switch (data->sccb.header.response_code) { | ||
84 | case 0x0020: | ||
85 | case 0x0120: | ||
86 | case 0x0440: | ||
87 | case 0x0450: | ||
88 | break; | ||
89 | default: | ||
90 | printk(KERN_WARNING TAG "configure channel-path failed " | ||
91 | "(cmd=0x%08x, response=0x%04x)\n", cmd, | ||
92 | data->sccb.header.response_code); | ||
93 | rc = -EIO; | ||
94 | break; | ||
95 | } | ||
96 | out: | ||
97 | free_page((unsigned long) data); | ||
98 | |||
99 | return rc; | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * sclp_chp_configure - perform configure channel-path sclp command | ||
104 | * @chpid: channel-path ID | ||
105 | * | ||
106 | * Perform the configure channel-path sclp command for the specified chpid. | ||
107 | * Return 0 if the command finished successfully, non-zero otherwise. | ||
108 | */ | ||
109 | int sclp_chp_configure(struct chp_id chpid) | ||
110 | { | ||
111 | return do_configure(get_configure_cmdw(chpid)); | ||
112 | } | ||
113 | |||
114 | /** | ||
115 | * sclp_chp_deconfigure - perform deconfigure channel-path sclp command | ||
116 | * @chpid: channel-path ID | ||
117 | * | ||
118 | * Perform the deconfigure channel-path sclp command for the specified chpid | ||
119 | * and wait for completion. Return 0 on success, non-zero otherwise. | ||
120 | */ | ||
121 | int sclp_chp_deconfigure(struct chp_id chpid) | ||
122 | { | ||
123 | return do_configure(get_deconfigure_cmdw(chpid)); | ||
124 | } | ||
125 | |||
126 | struct chp_info_sccb { | ||
127 | struct sccb_header header; | ||
128 | u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; | ||
129 | u8 standby[SCLP_CHP_INFO_MASK_SIZE]; | ||
130 | u8 configured[SCLP_CHP_INFO_MASK_SIZE]; | ||
131 | u8 ccm; | ||
132 | u8 reserved[6]; | ||
133 | u8 cssid; | ||
134 | } __attribute__((packed)); | ||
135 | |||
136 | struct chp_info_data { | ||
137 | struct chp_info_sccb sccb; | ||
138 | struct sclp_req req; | ||
139 | struct completion completion; | ||
140 | } __attribute__((packed)); | ||
141 | |||
142 | /** | ||
143 | * sclp_chp_read_info - perform read channel-path information sclp command | ||
144 | * @info: resulting channel-path information data | ||
145 | * | ||
146 | * Perform read channel-path information sclp command and wait for completion. | ||
147 | * On success, store channel-path information in @info and return 0. Return | ||
148 | * non-zero otherwise. | ||
149 | */ | ||
150 | int sclp_chp_read_info(struct sclp_chp_info *info) | ||
151 | { | ||
152 | struct chp_info_data *data; | ||
153 | int rc; | ||
154 | |||
155 | /* Prepare sccb. */ | ||
156 | data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
157 | if (!data) | ||
158 | return -ENOMEM; | ||
159 | data->sccb.header.length = sizeof(struct chp_info_sccb); | ||
160 | data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION; | ||
161 | data->req.sccb = &(data->sccb); | ||
162 | data->req.status = SCLP_REQ_FILLED; | ||
163 | data->req.callback = chp_callback; | ||
164 | data->req.callback_data = &(data->completion); | ||
165 | init_completion(&data->completion); | ||
166 | |||
167 | /* Perform sclp request. */ | ||
168 | rc = sclp_add_request(&(data->req)); | ||
169 | if (rc) | ||
170 | goto out; | ||
171 | wait_for_completion(&data->completion); | ||
172 | |||
173 | /* Check response. */ | ||
174 | if (data->req.status != SCLP_REQ_DONE) { | ||
175 | printk(KERN_WARNING TAG "read channel-path info request failed " | ||
176 | "(status=0x%02x)\n", data->req.status); | ||
177 | rc = -EIO; | ||
178 | goto out; | ||
179 | } | ||
180 | if (data->sccb.header.response_code != 0x0010) { | ||
181 | printk(KERN_WARNING TAG "read channel-path info failed " | ||
182 | "(response=0x%04x)\n", data->sccb.header.response_code); | ||
183 | rc = -EIO; | ||
184 | goto out; | ||
185 | } | ||
186 | memcpy(info->recognized, data->sccb.recognized, | ||
187 | SCLP_CHP_INFO_MASK_SIZE); | ||
188 | memcpy(info->standby, data->sccb.standby, | ||
189 | SCLP_CHP_INFO_MASK_SIZE); | ||
190 | memcpy(info->configured, data->sccb.configured, | ||
191 | SCLP_CHP_INFO_MASK_SIZE); | ||
192 | out: | ||
193 | free_page((unsigned long) data); | ||
194 | |||
195 | return rc; | ||
196 | } | ||
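A minimal caller sketch for the channel-path interface added above. It assumes the sclp_chp_* prototypes and struct sclp_chp_info are exported through <asm/sclp.h> (which the new file includes), and it only touches the chp_id member that sclp_chp.c itself uses; the example_* name is hypothetical:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/chpid.h>
#include <asm/sclp.h>

static int example_configure_chp(u8 id)
{
	struct chp_id chpid;
	struct sclp_chp_info info;
	int rc;

	/* Query the recognized/standby/configured masks first. */
	rc = sclp_chp_read_info(&info);
	if (rc)
		return rc;

	memset(&chpid, 0, sizeof(chpid));
	chpid.id = id;

	/* Blocks until the SCLP request completes (do_configure() waits on a
	 * completion) and returns 0, -ENOMEM or -EIO. */
	return sclp_chp_configure(chpid);
}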
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c new file mode 100644 index 000000000000..5322e5e54a98 --- /dev/null +++ b/drivers/s390/char/sclp_config.c | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/sclp_config.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/cpu.h> | ||
11 | #include <linux/sysdev.h> | ||
12 | #include <linux/workqueue.h> | ||
13 | #include "sclp.h" | ||
14 | |||
15 | #define TAG "sclp_config: " | ||
16 | |||
17 | struct conf_mgm_data { | ||
18 | u8 reserved; | ||
19 | u8 ev_qualifier; | ||
20 | } __attribute__((packed)); | ||
21 | |||
22 | #define EV_QUAL_CAP_CHANGE 3 | ||
23 | |||
24 | static struct work_struct sclp_cpu_capability_work; | ||
25 | |||
26 | static void sclp_cpu_capability_notify(struct work_struct *work) | ||
27 | { | ||
28 | int cpu; | ||
29 | struct sys_device *sysdev; | ||
30 | |||
31 | printk(KERN_WARNING TAG "cpu capability changed.\n"); | ||
32 | lock_cpu_hotplug(); | ||
33 | for_each_online_cpu(cpu) { | ||
34 | sysdev = get_cpu_sysdev(cpu); | ||
35 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); | ||
36 | } | ||
37 | unlock_cpu_hotplug(); | ||
38 | } | ||
39 | |||
40 | static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) | ||
41 | { | ||
42 | struct conf_mgm_data *cdata; | ||
43 | |||
44 | cdata = (struct conf_mgm_data *)(evbuf + 1); | ||
45 | if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE) | ||
46 | schedule_work(&sclp_cpu_capability_work); | ||
47 | } | ||
48 | |||
49 | static struct sclp_register sclp_conf_register = | ||
50 | { | ||
51 | .receive_mask = EVTYP_CONFMGMDATA_MASK, | ||
52 | .receiver_fn = sclp_conf_receiver_fn, | ||
53 | }; | ||
54 | |||
55 | static int __init sclp_conf_init(void) | ||
56 | { | ||
57 | int rc; | ||
58 | |||
59 | INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify); | ||
60 | |||
61 | rc = sclp_register(&sclp_conf_register); | ||
62 | if (rc) { | ||
63 | printk(KERN_ERR TAG "failed to register (%d).\n", rc); | ||
64 | return rc; | ||
65 | } | ||
66 | |||
67 | if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) { | ||
68 | printk(KERN_WARNING TAG "no configuration management.\n"); | ||
69 | sclp_unregister(&sclp_conf_register); | ||
70 | rc = -ENOSYS; | ||
71 | } | ||
72 | return rc; | ||
73 | } | ||
74 | |||
75 | __initcall(sclp_conf_init); | ||
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c index 65aa2c85737f..29fe2a5ec2fe 100644 --- a/drivers/s390/char/sclp_cpi.c +++ b/drivers/s390/char/sclp_cpi.c | |||
@@ -46,7 +46,7 @@ struct cpi_sccb { | |||
46 | /* Event type structure for write message and write priority message */ | 46 | /* Event type structure for write message and write priority message */ |
47 | static struct sclp_register sclp_cpi_event = | 47 | static struct sclp_register sclp_cpi_event = |
48 | { | 48 | { |
49 | .send_mask = EvTyp_CtlProgIdent_Mask | 49 | .send_mask = EVTYP_CTLPROGIDENT_MASK |
50 | }; | 50 | }; |
51 | 51 | ||
52 | MODULE_LICENSE("GPL"); | 52 | MODULE_LICENSE("GPL"); |
@@ -201,7 +201,7 @@ cpi_module_init(void) | |||
201 | "console.\n"); | 201 | "console.\n"); |
202 | return -EINVAL; | 202 | return -EINVAL; |
203 | } | 203 | } |
204 | if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) { | 204 | if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) { |
205 | printk(KERN_WARNING "cpi: no control program identification " | 205 | printk(KERN_WARNING "cpi: no control program identification " |
206 | "support\n"); | 206 | "support\n"); |
207 | sclp_unregister(&sclp_cpi_event); | 207 | sclp_unregister(&sclp_cpi_event); |
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index baa8fe669ed2..45ff25e787cb 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c | |||
@@ -43,7 +43,7 @@ sclp_quiesce_handler(struct evbuf_header *evbuf) | |||
43 | } | 43 | } |
44 | 44 | ||
45 | static struct sclp_register sclp_quiesce_event = { | 45 | static struct sclp_register sclp_quiesce_event = { |
46 | .receive_mask = EvTyp_SigQuiesce_Mask, | 46 | .receive_mask = EVTYP_SIGQUIESCE_MASK, |
47 | .receiver_fn = sclp_quiesce_handler | 47 | .receiver_fn = sclp_quiesce_handler |
48 | }; | 48 | }; |
49 | 49 | ||
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 2486783ea58e..bbd5b8b66f42 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | /* Event type structure for write message and write priority message */ | 31 | /* Event type structure for write message and write priority message */ |
32 | static struct sclp_register sclp_rw_event = { | 32 | static struct sclp_register sclp_rw_event = { |
33 | .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask | 33 | .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK |
34 | }; | 34 | }; |
35 | 35 | ||
36 | /* | 36 | /* |
@@ -64,7 +64,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab) | |||
64 | memset(sccb, 0, sizeof(struct write_sccb)); | 64 | memset(sccb, 0, sizeof(struct write_sccb)); |
65 | sccb->header.length = sizeof(struct write_sccb); | 65 | sccb->header.length = sizeof(struct write_sccb); |
66 | sccb->msg_buf.header.length = sizeof(struct msg_buf); | 66 | sccb->msg_buf.header.length = sizeof(struct msg_buf); |
67 | sccb->msg_buf.header.type = EvTyp_Msg; | 67 | sccb->msg_buf.header.type = EVTYP_MSG; |
68 | sccb->msg_buf.mdb.header.length = sizeof(struct mdb); | 68 | sccb->msg_buf.mdb.header.length = sizeof(struct mdb); |
69 | sccb->msg_buf.mdb.header.type = 1; | 69 | sccb->msg_buf.mdb.header.type = 1; |
70 | sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ | 70 | sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ |
@@ -114,7 +114,7 @@ sclp_initialize_mto(struct sclp_buffer *buffer, int max_len) | |||
114 | memset(mto, 0, sizeof(struct mto)); | 114 | memset(mto, 0, sizeof(struct mto)); |
115 | mto->length = sizeof(struct mto); | 115 | mto->length = sizeof(struct mto); |
116 | mto->type = 4; /* message text object */ | 116 | mto->type = 4; /* message text object */ |
117 | mto->line_type_flags = LnTpFlgs_EndText; /* end text */ | 117 | mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */ |
118 | 118 | ||
119 | /* set pointer to first byte after struct mto. */ | 119 | /* set pointer to first byte after struct mto. */ |
120 | buffer->current_line = (char *) (mto + 1); | 120 | buffer->current_line = (char *) (mto + 1); |
@@ -215,7 +215,7 @@ sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count) | |||
215 | case '\a': /* bell, one for several times */ | 215 | case '\a': /* bell, one for several times */ |
216 | /* set SCLP sound alarm bit in General Object */ | 216 | /* set SCLP sound alarm bit in General Object */ |
217 | buffer->sccb->msg_buf.mdb.go.general_msg_flags |= | 217 | buffer->sccb->msg_buf.mdb.go.general_msg_flags |= |
218 | GnrlMsgFlgs_SndAlrm; | 218 | GNRLMSGFLGS_SNDALRM; |
219 | break; | 219 | break; |
220 | case '\t': /* horizontal tabulator */ | 220 | case '\t': /* horizontal tabulator */ |
221 | /* check if new mto needs to be created */ | 221 | /* check if new mto needs to be created */ |
@@ -452,12 +452,12 @@ sclp_emit_buffer(struct sclp_buffer *buffer, | |||
452 | return -EIO; | 452 | return -EIO; |
453 | 453 | ||
454 | sccb = buffer->sccb; | 454 | sccb = buffer->sccb; |
455 | if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask) | 455 | if (sclp_rw_event.sclp_send_mask & EVTYP_MSG_MASK) |
456 | /* Use normal write message */ | 456 | /* Use normal write message */ |
457 | sccb->msg_buf.header.type = EvTyp_Msg; | 457 | sccb->msg_buf.header.type = EVTYP_MSG; |
458 | else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask) | 458 | else if (sclp_rw_event.sclp_send_mask & EVTYP_PMSGCMD_MASK) |
459 | /* Use write priority message */ | 459 | /* Use write priority message */ |
460 | sccb->msg_buf.header.type = EvTyp_PMsgCmd; | 460 | sccb->msg_buf.header.type = EVTYP_PMSGCMD; |
461 | else | 461 | else |
462 | return -ENOSYS; | 462 | return -ENOSYS; |
463 | buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; | 463 | buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; |
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c new file mode 100644 index 000000000000..52283daddaef --- /dev/null +++ b/drivers/s390/char/sclp_sdias.c | |||
@@ -0,0 +1,255 @@ | |||
1 | /* | ||
2 | * Sclp "store data in absolute storage" | ||
3 | * | ||
4 | * Copyright IBM Corp. 2003,2007 | ||
5 | * Author(s): Michael Holzheu | ||
6 | */ | ||
7 | |||
8 | #include <linux/sched.h> | ||
9 | #include <asm/sclp.h> | ||
10 | #include <asm/debug.h> | ||
11 | #include <asm/ipl.h> | ||
12 | #include "sclp.h" | ||
13 | #include "sclp_rw.h" | ||
14 | |||
15 | #define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) | ||
16 | #define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x ) | ||
17 | |||
18 | #define SDIAS_RETRIES 300 | ||
19 | #define SDIAS_SLEEP_TICKS 50 | ||
20 | |||
21 | #define EQ_STORE_DATA 0x0 | ||
22 | #define EQ_SIZE 0x1 | ||
23 | #define DI_FCP_DUMP 0x0 | ||
24 | #define ASA_SIZE_32 0x0 | ||
25 | #define ASA_SIZE_64 0x1 | ||
26 | #define EVSTATE_ALL_STORED 0x0 | ||
27 | #define EVSTATE_NO_DATA 0x3 | ||
28 | #define EVSTATE_PART_STORED 0x10 | ||
29 | |||
30 | static struct debug_info *sdias_dbf; | ||
31 | |||
32 | static struct sclp_register sclp_sdias_register = { | ||
33 | .send_mask = EVTYP_SDIAS_MASK, | ||
34 | }; | ||
35 | |||
36 | struct sdias_evbuf { | ||
37 | struct evbuf_header hdr; | ||
38 | u8 event_qual; | ||
39 | u8 data_id; | ||
40 | u64 reserved2; | ||
41 | u32 event_id; | ||
42 | u16 reserved3; | ||
43 | u8 asa_size; | ||
44 | u8 event_status; | ||
45 | u32 reserved4; | ||
46 | u32 blk_cnt; | ||
47 | u64 asa; | ||
48 | u32 reserved5; | ||
49 | u32 fbn; | ||
50 | u32 reserved6; | ||
51 | u32 lbn; | ||
52 | u16 reserved7; | ||
53 | u16 dbs; | ||
54 | } __attribute__((packed)); | ||
55 | |||
56 | struct sdias_sccb { | ||
57 | struct sccb_header hdr; | ||
58 | struct sdias_evbuf evbuf; | ||
59 | } __attribute__((packed)); | ||
60 | |||
61 | static struct sdias_sccb sccb __attribute__((aligned(4096))); | ||
62 | |||
63 | static int sclp_req_done; | ||
64 | static wait_queue_head_t sdias_wq; | ||
65 | static DEFINE_MUTEX(sdias_mutex); | ||
66 | |||
67 | static void sdias_callback(struct sclp_req *request, void *data) | ||
68 | { | ||
69 | struct sdias_sccb *sccb; | ||
70 | |||
71 | sccb = (struct sdias_sccb *) request->sccb; | ||
72 | sclp_req_done = 1; | ||
73 | wake_up(&sdias_wq); /* Inform caller that request is complete */ | ||
74 | TRACE("callback done\n"); | ||
75 | } | ||
76 | |||
77 | static int sdias_sclp_send(struct sclp_req *req) | ||
78 | { | ||
79 | int retries; | ||
80 | int rc; | ||
81 | |||
82 | for (retries = SDIAS_RETRIES; retries; retries--) { | ||
83 | sclp_req_done = 0; | ||
84 | TRACE("add request\n"); | ||
85 | rc = sclp_add_request(req); | ||
86 | if (rc) { | ||
87 | /* not initiated, wait some time and retry */ | ||
88 | set_current_state(TASK_INTERRUPTIBLE); | ||
89 | TRACE("add request failed: rc = %i\n",rc); | ||
90 | schedule_timeout(SDIAS_SLEEP_TICKS); | ||
91 | continue; | ||
92 | } | ||
93 | /* initiated, wait for completion of service call */ | ||
94 | wait_event(sdias_wq, (sclp_req_done == 1)); | ||
95 | if (req->status == SCLP_REQ_FAILED) { | ||
96 | TRACE("sclp request failed\n"); | ||
97 | rc = -EIO; | ||
98 | continue; | ||
99 | } | ||
100 | TRACE("request done\n"); | ||
101 | break; | ||
102 | } | ||
103 | return rc; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Get number of blocks (4K) available in the HSA | ||
108 | */ | ||
109 | int sclp_sdias_blk_count(void) | ||
110 | { | ||
111 | struct sclp_req request; | ||
112 | int rc; | ||
113 | |||
114 | mutex_lock(&sdias_mutex); | ||
115 | |||
116 | memset(&sccb, 0, sizeof(sccb)); | ||
117 | memset(&request, 0, sizeof(request)); | ||
118 | |||
119 | sccb.hdr.length = sizeof(sccb); | ||
120 | sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); | ||
121 | sccb.evbuf.hdr.type = EVTYP_SDIAS; | ||
122 | sccb.evbuf.event_qual = EQ_SIZE; | ||
123 | sccb.evbuf.data_id = DI_FCP_DUMP; | ||
124 | sccb.evbuf.event_id = 4712; | ||
125 | sccb.evbuf.dbs = 1; | ||
126 | |||
127 | request.sccb = &sccb; | ||
128 | request.command = SCLP_CMDW_WRITE_EVENT_DATA; | ||
129 | request.status = SCLP_REQ_FILLED; | ||
130 | request.callback = sdias_callback; | ||
131 | |||
132 | rc = sdias_sclp_send(&request); | ||
133 | if (rc) { | ||
134 | ERROR_MSG("sclp_send failed for get_nr_blocks\n"); | ||
135 | goto out; | ||
136 | } | ||
137 | if (sccb.hdr.response_code != 0x0020) { | ||
138 | TRACE("send failed: %x\n", sccb.hdr.response_code); | ||
139 | rc = -EIO; | ||
140 | goto out; | ||
141 | } | ||
142 | |||
143 | switch (sccb.evbuf.event_status) { | ||
144 | case 0: | ||
145 | rc = sccb.evbuf.blk_cnt; | ||
146 | break; | ||
147 | default: | ||
148 | ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status); | ||
149 | rc = -EIO; | ||
150 | goto out; | ||
151 | } | ||
152 | TRACE("%i blocks\n", rc); | ||
153 | out: | ||
154 | mutex_unlock(&sdias_mutex); | ||
155 | return rc; | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * Copy from HSA to absolute storage (not reentrant): | ||
160 | * | ||
161 | * @dest : Address of buffer where data should be copied | ||
162 | * @start_blk: Start Block (beginning with 1) | ||
163 | * @nr_blks : Number of 4K blocks to copy | ||
164 | * | ||
165 | * Return Value: 0 : Requested number of blocks of data copied | ||
166 | * <0: ERROR - negative event status | ||
167 | */ | ||
168 | int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) | ||
169 | { | ||
170 | struct sclp_req request; | ||
171 | int rc; | ||
172 | |||
173 | mutex_lock(&sdias_mutex); | ||
174 | |||
175 | memset(&sccb, 0, sizeof(sccb)); | ||
176 | memset(&request, 0, sizeof(request)); | ||
177 | |||
178 | sccb.hdr.length = sizeof(sccb); | ||
179 | sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); | ||
180 | sccb.evbuf.hdr.type = EVTYP_SDIAS; | ||
181 | sccb.evbuf.hdr.flags = 0; | ||
182 | sccb.evbuf.event_qual = EQ_STORE_DATA; | ||
183 | sccb.evbuf.data_id = DI_FCP_DUMP; | ||
184 | sccb.evbuf.event_id = 4712; | ||
185 | #ifdef __s390x__ | ||
186 | sccb.evbuf.asa_size = ASA_SIZE_64; | ||
187 | #else | ||
188 | sccb.evbuf.asa_size = ASA_SIZE_32; | ||
189 | #endif | ||
190 | sccb.evbuf.event_status = 0; | ||
191 | sccb.evbuf.blk_cnt = nr_blks; | ||
192 | sccb.evbuf.asa = (unsigned long)dest; | ||
193 | sccb.evbuf.fbn = start_blk; | ||
194 | sccb.evbuf.lbn = 0; | ||
195 | sccb.evbuf.dbs = 1; | ||
196 | |||
197 | request.sccb = &sccb; | ||
198 | request.command = SCLP_CMDW_WRITE_EVENT_DATA; | ||
199 | request.status = SCLP_REQ_FILLED; | ||
200 | request.callback = sdias_callback; | ||
201 | |||
202 | rc = sdias_sclp_send(&request); | ||
203 | if (rc) { | ||
204 | ERROR_MSG("sclp_send failed: %x\n", rc); | ||
205 | goto out; | ||
206 | } | ||
207 | if (sccb.hdr.response_code != 0x0020) { | ||
208 | TRACE("copy failed: %x\n", sccb.hdr.response_code); | ||
209 | rc = -EIO; | ||
210 | goto out; | ||
211 | } | ||
212 | |||
213 | switch (sccb.evbuf.event_status) { | ||
214 | case EVSTATE_ALL_STORED: | ||
215 | TRACE("all stored\n"); | ||
216 | case EVSTATE_PART_STORED: | ||
217 | TRACE("part stored: %i\n", sccb.evbuf.blk_cnt); | ||
218 | break; | ||
219 | case EVSTATE_NO_DATA: | ||
220 | TRACE("no data\n"); | ||
221 | default: | ||
222 | ERROR_MSG("Error from SCLP while copying hsa. " | ||
223 | "Event status = %x\n", | ||
224 | sccb.evbuf.event_status); | ||
225 | rc = -EIO; | ||
226 | } | ||
227 | out: | ||
228 | mutex_unlock(&sdias_mutex); | ||
229 | return rc; | ||
230 | } | ||
231 | |||
232 | int __init sdias_init(void) | ||
233 | { | ||
234 | int rc; | ||
235 | |||
236 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | ||
237 | return 0; | ||
238 | sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long)); | ||
239 | debug_register_view(sdias_dbf, &debug_sprintf_view); | ||
240 | debug_set_level(sdias_dbf, 6); | ||
241 | rc = sclp_register(&sclp_sdias_register); | ||
242 | if (rc) { | ||
243 | ERROR_MSG("sclp register failed\n"); | ||
244 | return rc; | ||
245 | } | ||
246 | init_waitqueue_head(&sdias_wq); | ||
247 | TRACE("init done\n"); | ||
248 | return 0; | ||
249 | } | ||
250 | |||
251 | void __exit sdias_exit(void) | ||
252 | { | ||
253 | debug_unregister(sdias_dbf); | ||
254 | sclp_unregister(&sclp_sdias_register); | ||
255 | } | ||
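The two helpers above are what the zcore dumper later in this commit builds on. A simplified usage sketch, copying the first few 4K blocks of the HSA into a kernel buffer; it assumes the sclp_sdias_* prototypes are visible (zcore.c gets them via <asm/sclp.h>) and that the buffer lies in identity-mapped kernel memory so its address can be passed as an absolute storage address:

static int example_copy_hsa(void *buf, int blocks)
{
	int available, blk, rc;

	/* Number of 4K blocks the HSA can deliver, or a negative error. */
	available = sclp_sdias_blk_count();
	if (available < 0)
		return available;
	if (blocks > available)
		blocks = available;

	for (blk = 0; blk < blocks; blk++) {
		/* Block numbering starts with 1, per sclp_sdias_copy(). */
		rc = sclp_sdias_copy(buf + blk * 4096, blk + 1, 1);
		if (rc)
			return rc;
	}
	return 0;
}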
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 076816b9d528..e3b3d390b4a3 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c | |||
@@ -648,7 +648,7 @@ sclp_eval_textcmd(struct gds_subvector *start, | |||
648 | subvec = start; | 648 | subvec = start; |
649 | while (subvec < end) { | 649 | while (subvec < end) { |
650 | subvec = find_gds_subvector(subvec, end, | 650 | subvec = find_gds_subvector(subvec, end, |
651 | GDS_KEY_SelfDefTextMsg); | 651 | GDS_KEY_SELFDEFTEXTMSG); |
652 | if (!subvec) | 652 | if (!subvec) |
653 | break; | 653 | break; |
654 | sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1), | 654 | sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1), |
@@ -664,7 +664,7 @@ sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end) | |||
664 | 664 | ||
665 | vec = start; | 665 | vec = start; |
666 | while (vec < end) { | 666 | while (vec < end) { |
667 | vec = find_gds_vector(vec, end, GDS_ID_TextCmd); | 667 | vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD); |
668 | if (!vec) | 668 | if (!vec) |
669 | break; | 669 | break; |
670 | sclp_eval_textcmd((struct gds_subvector *)(vec + 1), | 670 | sclp_eval_textcmd((struct gds_subvector *)(vec + 1), |
@@ -703,7 +703,7 @@ sclp_tty_state_change(struct sclp_register *reg) | |||
703 | 703 | ||
704 | static struct sclp_register sclp_input_event = | 704 | static struct sclp_register sclp_input_event = |
705 | { | 705 | { |
706 | .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask, | 706 | .receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK, |
707 | .state_change_fn = sclp_tty_state_change, | 707 | .state_change_fn = sclp_tty_state_change, |
708 | .receiver_fn = sclp_tty_receiver | 708 | .receiver_fn = sclp_tty_receiver |
709 | }; | 709 | }; |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index f77dc33b5f8d..726334757bbf 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -99,8 +99,8 @@ static void sclp_vt220_emit_current(void); | |||
99 | 99 | ||
100 | /* Registration structure for our interest in SCLP event buffers */ | 100 | /* Registration structure for our interest in SCLP event buffers */ |
101 | static struct sclp_register sclp_vt220_register = { | 101 | static struct sclp_register sclp_vt220_register = { |
102 | .send_mask = EvTyp_VT220Msg_Mask, | 102 | .send_mask = EVTYP_VT220MSG_MASK, |
103 | .receive_mask = EvTyp_VT220Msg_Mask, | 103 | .receive_mask = EVTYP_VT220MSG_MASK, |
104 | .state_change_fn = NULL, | 104 | .state_change_fn = NULL, |
105 | .receiver_fn = sclp_vt220_receiver_fn | 105 | .receiver_fn = sclp_vt220_receiver_fn |
106 | }; | 106 | }; |
@@ -202,7 +202,7 @@ sclp_vt220_callback(struct sclp_req *request, void *data) | |||
202 | static int | 202 | static int |
203 | __sclp_vt220_emit(struct sclp_vt220_request *request) | 203 | __sclp_vt220_emit(struct sclp_vt220_request *request) |
204 | { | 204 | { |
205 | if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) { | 205 | if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) { |
206 | request->sclp_req.status = SCLP_REQ_FAILED; | 206 | request->sclp_req.status = SCLP_REQ_FAILED; |
207 | return -EIO; | 207 | return -EIO; |
208 | } | 208 | } |
@@ -284,7 +284,7 @@ sclp_vt220_initialize_page(void *page) | |||
284 | sccb->header.length = sizeof(struct sclp_vt220_sccb); | 284 | sccb->header.length = sizeof(struct sclp_vt220_sccb); |
285 | sccb->header.function_code = SCLP_NORMAL_WRITE; | 285 | sccb->header.function_code = SCLP_NORMAL_WRITE; |
286 | sccb->header.response_code = 0x0000; | 286 | sccb->header.response_code = 0x0000; |
287 | sccb->evbuf.type = EvTyp_VT220Msg; | 287 | sccb->evbuf.type = EVTYP_VT220MSG; |
288 | sccb->evbuf.length = sizeof(struct evbuf_header); | 288 | sccb->evbuf.length = sizeof(struct evbuf_header); |
289 | 289 | ||
290 | return request; | 290 | return request; |
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index b87d3b019936..a5a00e9ae4d0 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -125,7 +125,7 @@ static struct vmlogrdr_priv_t sys_ser[] = { | |||
125 | .recording_name = "EREP", | 125 | .recording_name = "EREP", |
126 | .minor_num = 0, | 126 | .minor_num = 0, |
127 | .buffer_free = 1, | 127 | .buffer_free = 1, |
128 | .priv_lock = SPIN_LOCK_UNLOCKED, | 128 | .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock), |
129 | .autorecording = 1, | 129 | .autorecording = 1, |
130 | .autopurge = 1, | 130 | .autopurge = 1, |
131 | }, | 131 | }, |
@@ -134,7 +134,7 @@ static struct vmlogrdr_priv_t sys_ser[] = { | |||
134 | .recording_name = "ACCOUNT", | 134 | .recording_name = "ACCOUNT", |
135 | .minor_num = 1, | 135 | .minor_num = 1, |
136 | .buffer_free = 1, | 136 | .buffer_free = 1, |
137 | .priv_lock = SPIN_LOCK_UNLOCKED, | 137 | .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock), |
138 | .autorecording = 1, | 138 | .autorecording = 1, |
139 | .autopurge = 1, | 139 | .autopurge = 1, |
140 | }, | 140 | }, |
@@ -143,7 +143,7 @@ static struct vmlogrdr_priv_t sys_ser[] = { | |||
143 | .recording_name = "SYMPTOM", | 143 | .recording_name = "SYMPTOM", |
144 | .minor_num = 2, | 144 | .minor_num = 2, |
145 | .buffer_free = 1, | 145 | .buffer_free = 1, |
146 | .priv_lock = SPIN_LOCK_UNLOCKED, | 146 | .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock), |
147 | .autorecording = 1, | 147 | .autorecording = 1, |
148 | .autopurge = 1, | 148 | .autopurge = 1, |
149 | } | 149 | } |
@@ -385,6 +385,9 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp) | |||
385 | 385 | ||
386 | struct vmlogrdr_priv_t * logptr = filp->private_data; | 386 | struct vmlogrdr_priv_t * logptr = filp->private_data; |
387 | 387 | ||
388 | iucv_path_sever(logptr->path, NULL); | ||
389 | kfree(logptr->path); | ||
390 | logptr->path = NULL; | ||
388 | if (logptr->autorecording) { | 391 | if (logptr->autorecording) { |
389 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); | 392 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); |
390 | if (ret) | 393 | if (ret) |
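The vmlogrdr hunks above replace the deprecated SPIN_LOCK_UNLOCKED initializer with __SPIN_LOCK_UNLOCKED, which takes the lock variable itself so that lockdep can name each statically initialized lock individually. A generic sketch of the pattern with a hypothetical structure:

#include <linux/spinlock.h>

/* Hypothetical structure; only the initializer pattern mirrors the patch. */
struct example_dev {
	int minor;
	spinlock_t lock;
};

static struct example_dev example_devs[] = {
	{ .minor = 0, .lock = __SPIN_LOCK_UNLOCKED(example_devs[0].lock) },
	{ .minor = 1, .lock = __SPIN_LOCK_UNLOCKED(example_devs[1].lock) },
};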
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c new file mode 100644 index 000000000000..89d439316a53 --- /dev/null +++ b/drivers/s390/char/zcore.c | |||
@@ -0,0 +1,651 @@ | |||
1 | /* | ||
2 | * zcore module to export memory content and register sets for creating system | ||
3 | * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same | ||
4 | * dump format as s390 standalone dumps. | ||
5 | * | ||
6 | * For more information please refer to Documentation/s390/zfcpdump.txt | ||
7 | * | ||
8 | * Copyright IBM Corp. 2003,2007 | ||
9 | * Author(s): Michael Holzheu | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/miscdevice.h> | ||
14 | #include <linux/utsname.h> | ||
15 | #include <linux/debugfs.h> | ||
16 | #include <asm/ipl.h> | ||
17 | #include <asm/sclp.h> | ||
18 | #include <asm/setup.h> | ||
19 | #include <asm/sigp.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | #include <asm/debug.h> | ||
22 | #include <asm/processor.h> | ||
23 | #include <asm/irqflags.h> | ||
24 | |||
25 | #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) | ||
26 | #define MSG(x...) printk( KERN_ALERT x ) | ||
27 | #define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x ) | ||
28 | |||
29 | #define TO_USER 0 | ||
30 | #define TO_KERNEL 1 | ||
31 | |||
32 | enum arch_id { | ||
33 | ARCH_S390 = 0, | ||
34 | ARCH_S390X = 1, | ||
35 | }; | ||
36 | |||
37 | /* dump system info */ | ||
38 | |||
39 | struct sys_info { | ||
40 | enum arch_id arch; | ||
41 | unsigned long sa_base; | ||
42 | u32 sa_size; | ||
43 | int cpu_map[NR_CPUS]; | ||
44 | unsigned long mem_size; | ||
45 | union save_area lc_mask; | ||
46 | }; | ||
47 | |||
48 | static struct sys_info sys_info; | ||
49 | static struct debug_info *zcore_dbf; | ||
50 | static int hsa_available; | ||
51 | static struct dentry *zcore_dir; | ||
52 | static struct dentry *zcore_file; | ||
53 | |||
54 | /* | ||
55 | * Copy memory from HSA to kernel or user memory (not reentrant): | ||
56 | * | ||
57 | * @dest: Kernel or user buffer where memory should be copied to | ||
58 | * @src: Start address within HSA where data should be copied | ||
59 | * @count: Size of buffer, which should be copied | ||
60 | * @mode: Either TO_KERNEL or TO_USER | ||
61 | */ | ||
62 | static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) | ||
63 | { | ||
64 | int offs, blk_num; | ||
65 | static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); | ||
66 | |||
67 | if (count == 0) | ||
68 | return 0; | ||
69 | |||
70 | /* copy first block */ | ||
71 | offs = 0; | ||
72 | if ((src % PAGE_SIZE) != 0) { | ||
73 | blk_num = src / PAGE_SIZE + 2; | ||
74 | if (sclp_sdias_copy(buf, blk_num, 1)) { | ||
75 | TRACE("sclp_sdias_copy() failed\n"); | ||
76 | return -EIO; | ||
77 | } | ||
78 | offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count); | ||
79 | if (mode == TO_USER) { | ||
80 | if (copy_to_user((__force __user void*) dest, | ||
81 | buf + (src % PAGE_SIZE), offs)) | ||
82 | return -EFAULT; | ||
83 | } else | ||
84 | memcpy(dest, buf + (src % PAGE_SIZE), offs); | ||
85 | } | ||
86 | if (offs == count) | ||
87 | goto out; | ||
88 | |||
89 | /* copy middle */ | ||
90 | for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) { | ||
91 | blk_num = (src + offs) / PAGE_SIZE + 2; | ||
92 | if (sclp_sdias_copy(buf, blk_num, 1)) { | ||
93 | TRACE("sclp_sdias_copy() failed\n"); | ||
94 | return -EIO; | ||
95 | } | ||
96 | if (mode == TO_USER) { | ||
97 | if (copy_to_user((__force __user void*) dest + offs, | ||
98 | buf, PAGE_SIZE)) | ||
99 | return -EFAULT; | ||
100 | } else | ||
101 | memcpy(dest + offs, buf, PAGE_SIZE); | ||
102 | } | ||
103 | if (offs == count) | ||
104 | goto out; | ||
105 | |||
106 | /* copy last block */ | ||
107 | blk_num = (src + offs) / PAGE_SIZE + 2; | ||
108 | if (sclp_sdias_copy(buf, blk_num, 1)) { | ||
109 | TRACE("sclp_sdias_copy() failed\n"); | ||
110 | return -EIO; | ||
111 | } | ||
112 | if (mode == TO_USER) { | ||
113 | if (copy_to_user((__force __user void*) dest + offs, buf, | ||
114 | PAGE_SIZE)) | ||
115 | return -EFAULT; | ||
116 | } else | ||
117 | memcpy(dest + offs, buf, count - offs); | ||
118 | out: | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count) | ||
123 | { | ||
124 | return memcpy_hsa((void __force *) dest, src, count, TO_USER); | ||
125 | } | ||
126 | |||
127 | static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count) | ||
128 | { | ||
129 | return memcpy_hsa(dest, src, count, TO_KERNEL); | ||
130 | } | ||
131 | |||
132 | static int memcpy_real(void *dest, unsigned long src, size_t count) | ||
133 | { | ||
134 | unsigned long flags; | ||
135 | int rc = -EFAULT; | ||
136 | register unsigned long _dest asm("2") = (unsigned long) dest; | ||
137 | register unsigned long _len1 asm("3") = (unsigned long) count; | ||
138 | register unsigned long _src asm("4") = src; | ||
139 | register unsigned long _len2 asm("5") = (unsigned long) count; | ||
140 | |||
141 | if (count == 0) | ||
142 | return 0; | ||
143 | flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */ | ||
144 | asm volatile ( | ||
145 | "0: mvcle %1,%2,0x0\n" | ||
146 | "1: jo 0b\n" | ||
147 | " lhi %0,0x0\n" | ||
148 | "2:\n" | ||
149 | EX_TABLE(1b,2b) | ||
150 | : "+d" (rc) | ||
151 | : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2) | ||
152 | : "cc", "memory"); | ||
153 | __raw_local_irq_ssm(flags); | ||
154 | |||
155 | return rc; | ||
156 | } | ||
157 | |||
158 | static int memcpy_real_user(__user void *dest, unsigned long src, size_t count) | ||
159 | { | ||
160 | static char buf[4096]; | ||
161 | int offs = 0, size; | ||
162 | |||
163 | while (offs < count) { | ||
164 | size = min(sizeof(buf), count - offs); | ||
165 | if (memcpy_real(buf, src + offs, size)) | ||
166 | return -EFAULT; | ||
167 | if (copy_to_user(dest + offs, buf, size)) | ||
168 | return -EFAULT; | ||
169 | offs += size; | ||
170 | } | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | #ifdef __s390x__ | ||
175 | /* | ||
176 | * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info | ||
177 | */ | ||
178 | static void __init s390x_to_s390_regs(union save_area *out, union save_area *in, | ||
179 | int cpu) | ||
180 | { | ||
181 | int i; | ||
182 | |||
183 | for (i = 0; i < 16; i++) { | ||
184 | out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff; | ||
185 | out->s390.acc_regs[i] = in->s390x.acc_regs[i]; | ||
186 | out->s390.ctrl_regs[i] = | ||
187 | in->s390x.ctrl_regs[i] & 0x00000000ffffffff; | ||
188 | } | ||
189 | /* lowcore for 31 bit has only space for fpregs 0,2,4,6 */ | ||
190 | out->s390.fp_regs[0] = in->s390x.fp_regs[0]; | ||
191 | out->s390.fp_regs[1] = in->s390x.fp_regs[2]; | ||
192 | out->s390.fp_regs[2] = in->s390x.fp_regs[4]; | ||
193 | out->s390.fp_regs[3] = in->s390x.fp_regs[6]; | ||
194 | memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4); | ||
195 | out->s390.psw[1] |= 0x8; /* set bit 12 */ | ||
196 | memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4); | ||
197 | out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */ | ||
198 | out->s390.pref_reg = in->s390x.pref_reg; | ||
199 | out->s390.timer = in->s390x.timer; | ||
200 | out->s390.clk_cmp = in->s390x.clk_cmp; | ||
201 | } | ||
202 | |||
203 | static void __init s390x_to_s390_save_areas(void) | ||
204 | { | ||
205 | int i = 1; | ||
206 | static union save_area tmp; | ||
207 | |||
208 | while (zfcpdump_save_areas[i]) { | ||
209 | s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i); | ||
210 | memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp)); | ||
211 | i++; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | #endif /* __s390x__ */ | ||
216 | |||
217 | static int __init init_cpu_info(enum arch_id arch) | ||
218 | { | ||
219 | union save_area *sa; | ||
220 | |||
221 | /* get info for boot cpu from lowcore, stored in the HSA */ | ||
222 | |||
223 | sa = kmalloc(sizeof(*sa), GFP_KERNEL); | ||
224 | if (!sa) { | ||
225 | ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__); | ||
226 | return -ENOMEM; | ||
227 | } | ||
228 | if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { | ||
229 | ERROR_MSG("could not copy from HSA\n"); | ||
230 | kfree(sa); | ||
231 | return -EIO; | ||
232 | } | ||
233 | zfcpdump_save_areas[0] = sa; | ||
234 | |||
235 | #ifdef __s390x__ | ||
236 | /* convert s390x regs to s390, if we are dumping an s390 Linux */ | ||
237 | |||
238 | if (arch == ARCH_S390) | ||
239 | s390x_to_s390_save_areas(); | ||
240 | #endif | ||
241 | |||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | static DEFINE_MUTEX(zcore_mutex); | ||
246 | |||
247 | #define DUMP_VERSION 0x3 | ||
248 | #define DUMP_MAGIC 0xa8190173618f23fdULL | ||
249 | #define DUMP_ARCH_S390X 2 | ||
250 | #define DUMP_ARCH_S390 1 | ||
251 | #define HEADER_SIZE 4096 | ||
252 | |||
253 | /* dump header dumped according to s390 crash dump format */ | ||
254 | |||
255 | struct zcore_header { | ||
256 | u64 magic; | ||
257 | u32 version; | ||
258 | u32 header_size; | ||
259 | u32 dump_level; | ||
260 | u32 page_size; | ||
261 | u64 mem_size; | ||
262 | u64 mem_start; | ||
263 | u64 mem_end; | ||
264 | u32 num_pages; | ||
265 | u32 pad1; | ||
266 | u64 tod; | ||
267 | cpuid_t cpu_id; | ||
268 | u32 arch_id; | ||
269 | u32 build_arch; | ||
270 | char pad2[4016]; | ||
271 | } __attribute__((packed,__aligned__(16))); | ||
272 | |||
273 | static struct zcore_header zcore_header = { | ||
274 | .magic = DUMP_MAGIC, | ||
275 | .version = DUMP_VERSION, | ||
276 | .header_size = 4096, | ||
277 | .dump_level = 0, | ||
278 | .page_size = PAGE_SIZE, | ||
279 | .mem_start = 0, | ||
280 | #ifdef __s390x__ | ||
281 | .build_arch = DUMP_ARCH_S390X, | ||
282 | #else | ||
283 | .build_arch = DUMP_ARCH_S390, | ||
284 | #endif | ||
285 | }; | ||
286 | |||
287 | /* | ||
288 | * Copy lowcore info to buffer. Use map in order to copy only register parts. | ||
289 | * | ||
290 | * @buf: User buffer | ||
291 | * @sa: Pointer to save area | ||
292 | * @sa_off: Offset in save area to copy | ||
293 | * @len: Number of bytes to copy | ||
294 | */ | ||
295 | static int copy_lc(void __user *buf, void *sa, int sa_off, int len) | ||
296 | { | ||
297 | int i; | ||
298 | char *lc_mask = (char*)&sys_info.lc_mask; | ||
299 | |||
300 | for (i = 0; i < len; i++) { | ||
301 | if (!lc_mask[i + sa_off]) | ||
302 | continue; | ||
303 | if (copy_to_user(buf + i, sa + sa_off + i, 1)) | ||
304 | return -EFAULT; | ||
305 | } | ||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | /* | ||
310 | * Copy lowcore info to memory, if necessary | ||
311 | * | ||
312 | * @buf: User buffer | ||
313 | * @start: Start address of buffer in dump memory | ||
314 | * @count: Size of buffer | ||
315 | */ | ||
316 | static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) | ||
317 | { | ||
318 | unsigned long end; | ||
319 | int i = 0; | ||
320 | |||
321 | if (count == 0) | ||
322 | return 0; | ||
323 | |||
324 | end = start + count; | ||
325 | while (zfcpdump_save_areas[i]) { | ||
326 | unsigned long cp_start, cp_end; /* copy range */ | ||
327 | unsigned long sa_start, sa_end; /* save area range */ | ||
328 | unsigned long prefix; | ||
329 | unsigned long sa_off, len, buf_off; | ||
330 | |||
331 | if (sys_info.arch == ARCH_S390) | ||
332 | prefix = zfcpdump_save_areas[i]->s390.pref_reg; | ||
333 | else | ||
334 | prefix = zfcpdump_save_areas[i]->s390x.pref_reg; | ||
335 | |||
336 | sa_start = prefix + sys_info.sa_base; | ||
337 | sa_end = prefix + sys_info.sa_base + sys_info.sa_size; | ||
338 | |||
339 | if ((end < sa_start) || (start > sa_end)) | ||
340 | goto next; | ||
341 | cp_start = max(start, sa_start); | ||
342 | cp_end = min(end, sa_end); | ||
343 | |||
344 | buf_off = cp_start - start; | ||
345 | sa_off = cp_start - sa_start; | ||
346 | len = cp_end - cp_start; | ||
347 | |||
348 | TRACE("copy_lc for: %lx\n", start); | ||
349 | if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len)) | ||
350 | return -EFAULT; | ||
351 | next: | ||
352 | i++; | ||
353 | } | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | /* | ||
358 | * Read routine for zcore character device | ||
359 | * First 4K are the dump header | ||
360 | * Next 32MB are HSA memory | ||
361 | * Rest is read from absolute memory | ||
362 | */ | ||
363 | static ssize_t zcore_read(struct file *file, char __user *buf, size_t count, | ||
364 | loff_t *ppos) | ||
365 | { | ||
366 | unsigned long mem_start; /* Start address in memory */ | ||
367 | size_t mem_offs; /* Offset in dump memory */ | ||
368 | size_t hdr_count; /* Size of header part of output buffer */ | ||
369 | size_t size; | ||
370 | int rc; | ||
371 | |||
372 | mutex_lock(&zcore_mutex); | ||
373 | |||
374 | if (*ppos > (sys_info.mem_size + HEADER_SIZE)) { | ||
375 | rc = -EINVAL; | ||
376 | goto fail; | ||
377 | } | ||
378 | |||
379 | count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos)); | ||
380 | |||
381 | /* Copy dump header */ | ||
382 | if (*ppos < HEADER_SIZE) { | ||
383 | size = min(count, (size_t) (HEADER_SIZE - *ppos)); | ||
384 | if (copy_to_user(buf, &zcore_header + *ppos, size)) { | ||
385 | rc = -EFAULT; | ||
386 | goto fail; | ||
387 | } | ||
388 | hdr_count = size; | ||
389 | mem_start = 0; | ||
390 | } else { | ||
391 | hdr_count = 0; | ||
392 | mem_start = *ppos - HEADER_SIZE; | ||
393 | } | ||
394 | |||
395 | mem_offs = 0; | ||
396 | |||
397 | /* Copy from HSA data */ | ||
398 | if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) { | ||
399 | size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE | ||
400 | - mem_start)); | ||
401 | rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); | ||
402 | if (rc) | ||
403 | goto fail; | ||
404 | |||
405 | mem_offs += size; | ||
406 | } | ||
407 | |||
408 | /* Copy from real mem */ | ||
409 | size = count - mem_offs - hdr_count; | ||
410 | rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs, | ||
411 | size); | ||
412 | if (rc) | ||
413 | goto fail; | ||
414 | |||
415 | /* | ||
416 | * Since s390 dump analysis tools like lcrash or crash | ||
417 | * expect register sets in the prefix pages of the cpus, | ||
418 | * we copy them into the read buffer, if necessary. | ||
419 | * buf + hdr_count: Start of memory part of output buffer | ||
420 | * mem_start: Start memory address to copy from | ||
421 | * count - hdr_count: Size of memory area to copy | ||
422 | */ | ||
423 | if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) { | ||
424 | rc = -EFAULT; | ||
425 | goto fail; | ||
426 | } | ||
427 | *ppos += count; | ||
428 | fail: | ||
429 | mutex_unlock(&zcore_mutex); | ||
430 | return (rc < 0) ? rc : count; | ||
431 | } | ||
432 | |||
433 | static int zcore_open(struct inode *inode, struct file *filp) | ||
434 | { | ||
435 | if (!hsa_available) | ||
436 | return -ENODATA; | ||
437 | else | ||
438 | return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; | ||
439 | } | ||
440 | |||
441 | static int zcore_release(struct inode *inode, struct file *filep) | ||
442 | { | ||
443 | diag308(DIAG308_REL_HSA, NULL); | ||
444 | hsa_available = 0; | ||
445 | return 0; | ||
446 | } | ||
447 | |||
448 | static loff_t zcore_lseek(struct file *file, loff_t offset, int orig) | ||
449 | { | ||
450 | loff_t rc; | ||
451 | |||
452 | mutex_lock(&zcore_mutex); | ||
453 | switch (orig) { | ||
454 | case 0: | ||
455 | file->f_pos = offset; | ||
456 | rc = file->f_pos; | ||
457 | break; | ||
458 | case 1: | ||
459 | file->f_pos += offset; | ||
460 | rc = file->f_pos; | ||
461 | break; | ||
462 | default: | ||
463 | rc = -EINVAL; | ||
464 | } | ||
465 | mutex_unlock(&zcore_mutex); | ||
466 | return rc; | ||
467 | } | ||
468 | |||
469 | static struct file_operations zcore_fops = { | ||
470 | .owner = THIS_MODULE, | ||
471 | .llseek = zcore_lseek, | ||
472 | .read = zcore_read, | ||
473 | .open = zcore_open, | ||
474 | .release = zcore_release, | ||
475 | }; | ||
476 | |||
477 | |||
478 | static void __init set_s390_lc_mask(union save_area *map) | ||
479 | { | ||
480 | memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save)); | ||
481 | memset(&map->s390.timer, 0xff, sizeof(map->s390.timer)); | ||
482 | memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp)); | ||
483 | memset(&map->s390.psw, 0xff, sizeof(map->s390.psw)); | ||
484 | memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg)); | ||
485 | memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs)); | ||
486 | memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs)); | ||
487 | memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs)); | ||
488 | memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs)); | ||
489 | } | ||
490 | |||
491 | static void __init set_s390x_lc_mask(union save_area *map) | ||
492 | { | ||
493 | memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs)); | ||
494 | memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs)); | ||
495 | memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw)); | ||
496 | memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg)); | ||
497 | memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg)); | ||
498 | memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg)); | ||
499 | memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer)); | ||
500 | memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp)); | ||
501 | memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs)); | ||
502 | memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs)); | ||
503 | } | ||
504 | |||
505 | /* | ||
506 | * Initialize dump globals for a given architecture | ||
507 | */ | ||
508 | static int __init sys_info_init(enum arch_id arch) | ||
509 | { | ||
510 | switch (arch) { | ||
511 | case ARCH_S390X: | ||
512 | MSG("DETECTED 'S390X (64 bit) OS'\n"); | ||
513 | sys_info.sa_base = SAVE_AREA_BASE_S390X; | ||
514 | sys_info.sa_size = sizeof(struct save_area_s390x); | ||
515 | set_s390x_lc_mask(&sys_info.lc_mask); | ||
516 | break; | ||
517 | case ARCH_S390: | ||
518 | MSG("DETECTED 'S390 (32 bit) OS'\n"); | ||
519 | sys_info.sa_base = SAVE_AREA_BASE_S390; | ||
520 | sys_info.sa_size = sizeof(struct save_area_s390); | ||
521 | set_s390_lc_mask(&sys_info.lc_mask); | ||
522 | break; | ||
523 | default: | ||
524 | ERROR_MSG("unknown architecture 0x%x.\n",arch); | ||
525 | return -EINVAL; | ||
526 | } | ||
527 | sys_info.arch = arch; | ||
528 | if (init_cpu_info(arch)) { | ||
529 | ERROR_MSG("get cpu info failed\n"); | ||
530 | return -ENOMEM; | ||
531 | } | ||
532 | sys_info.mem_size = real_memory_size; | ||
533 | |||
534 | return 0; | ||
535 | } | ||
536 | |||
537 | static int __init check_sdias(void) | ||
538 | { | ||
539 | int rc, act_hsa_size; | ||
540 | |||
541 | rc = sclp_sdias_blk_count(); | ||
542 | if (rc < 0) { | ||
543 | ERROR_MSG("Could not determine HSA size\n"); | ||
544 | return rc; | ||
545 | } | ||
546 | act_hsa_size = (rc - 1) * PAGE_SIZE; | ||
547 | if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { | ||
548 | ERROR_MSG("HSA size too small: %i\n", act_hsa_size); | ||
549 | return -EINVAL; | ||
550 | } | ||
551 | return 0; | ||
552 | } | ||
553 | |||
554 | static void __init zcore_header_init(int arch, struct zcore_header *hdr) | ||
555 | { | ||
556 | if (arch == ARCH_S390X) | ||
557 | hdr->arch_id = DUMP_ARCH_S390X; | ||
558 | else | ||
559 | hdr->arch_id = DUMP_ARCH_S390; | ||
560 | hdr->mem_size = sys_info.mem_size; | ||
561 | hdr->mem_end = sys_info.mem_size; | ||
562 | hdr->num_pages = sys_info.mem_size / PAGE_SIZE; | ||
563 | hdr->tod = get_clock(); | ||
564 | get_cpu_id(&hdr->cpu_id); | ||
565 | } | ||
566 | |||
567 | extern int sdias_init(void); | ||
568 | |||
569 | static int __init zcore_init(void) | ||
570 | { | ||
571 | unsigned char arch; | ||
572 | int rc; | ||
573 | |||
574 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | ||
575 | return -ENODATA; | ||
576 | |||
577 | zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); | ||
578 | debug_register_view(zcore_dbf, &debug_sprintf_view); | ||
579 | debug_set_level(zcore_dbf, 6); | ||
580 | |||
581 | TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno); | ||
582 | TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn); | ||
583 | TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun); | ||
584 | |||
585 | rc = sdias_init(); | ||
586 | if (rc) | ||
587 | goto fail; | ||
588 | |||
589 | rc = check_sdias(); | ||
590 | if (rc) { | ||
591 | ERROR_MSG("Dump initialization failed\n"); | ||
592 | goto fail; | ||
593 | } | ||
594 | |||
595 | rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); | ||
596 | if (rc) { | ||
597 | ERROR_MSG("sdial memcpy for arch id failed\n"); | ||
598 | goto fail; | ||
599 | } | ||
600 | |||
601 | #ifndef __s390x__ | ||
602 | if (arch == ARCH_S390X) { | ||
603 | ERROR_MSG("32 bit dumper can't dump 64 bit system!\n"); | ||
604 | rc = -EINVAL; | ||
605 | goto fail; | ||
606 | } | ||
607 | #endif | ||
608 | |||
609 | rc = sys_info_init(arch); | ||
610 | if (rc) { | ||
611 | ERROR_MSG("arch init failed\n"); | ||
612 | goto fail; | ||
613 | } | ||
614 | |||
615 | zcore_header_init(arch, &zcore_header); | ||
616 | |||
617 | zcore_dir = debugfs_create_dir("zcore" , NULL); | ||
618 | if (!zcore_dir) { | ||
619 | rc = -ENOMEM; | ||
620 | goto fail; | ||
621 | } | ||
622 | zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL, | ||
623 | &zcore_fops); | ||
624 | if (!zcore_file) { | ||
625 | debugfs_remove(zcore_dir); | ||
626 | rc = -ENOMEM; | ||
627 | goto fail; | ||
628 | } | ||
629 | hsa_available = 1; | ||
630 | return 0; | ||
631 | |||
632 | fail: | ||
633 | diag308(DIAG308_REL_HSA, NULL); | ||
634 | return rc; | ||
635 | } | ||
636 | |||
637 | extern void sdias_exit(void); | ||
638 | |||
639 | static void __exit zcore_exit(void) | ||
640 | { | ||
641 | debug_unregister(zcore_dbf); | ||
642 | sdias_exit(); | ||
643 | diag308(DIAG308_REL_HSA, NULL); | ||
644 | } | ||
645 | |||
646 | MODULE_AUTHOR("Copyright IBM Corp. 2003,2007"); | ||
647 | MODULE_DESCRIPTION("zcore module for zfcpdump support"); | ||
648 | MODULE_LICENSE("GPL"); | ||
649 | |||
650 | subsys_initcall(zcore_init); | ||
651 | module_exit(zcore_exit); | ||
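A quick aside on the arithmetic in check_sdias() above: the SCLP block count is taken as the number of pages backing the HSA, one page is set aside, and the remainder must still cover ZFCPDUMP_HSA_SIZE. A minimal user-space sketch of that check, assuming 4 KiB pages and an invented 32 MiB requirement (hsa_usable_size() and both constants are made up for the example):

```c
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE   4096UL          /* assumption: 4 KiB pages as on s390 */
#define EXAMPLE_HSA_SIZE    (32UL << 20)    /* hypothetical required dump area size */

/* Mirror of the check in check_sdias(): one block is reserved, the rest
 * must be at least as large as the required HSA size. Returns the usable
 * size in bytes, or 0 if the HSA is too small. */
static unsigned long hsa_usable_size(long blk_count)
{
	unsigned long act_hsa_size;

	if (blk_count < 1)
		return 0;
	act_hsa_size = (blk_count - 1) * EXAMPLE_PAGE_SIZE;
	return (act_hsa_size < EXAMPLE_HSA_SIZE) ? 0 : act_hsa_size;
}

int main(void)
{
	/* 8193 blocks -> 8192 usable pages -> exactly 32 MiB here */
	printf("%lu\n", hsa_usable_size(8193));
	return 0;
}
```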
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index c490c2a1c2fc..cfaf77b320f5 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the S/390 common i/o drivers | 2 | # Makefile for the S/390 common i/o drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o | 5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o |
6 | ccw_device-objs += device.o device_fsm.o device_ops.o | 6 | ccw_device-objs += device.o device_fsm.o device_ops.o |
7 | ccw_device-objs += device_id.o device_pgid.o device_status.o | 7 | ccw_device-objs += device_id.o device_pgid.o device_status.o |
8 | obj-y += ccw_device.o cmf.o | 8 | obj-y += ccw_device.o cmf.o |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 5aeb68e732b0..e5ccda63e883 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev) | |||
75 | { | 75 | { |
76 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); | 76 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); |
77 | 77 | ||
78 | mutex_lock(&gdev->reg_mutex); | ||
78 | __ccwgroup_remove_symlinks(gdev); | 79 | __ccwgroup_remove_symlinks(gdev); |
79 | device_unregister(dev); | 80 | device_unregister(dev); |
81 | mutex_unlock(&gdev->reg_mutex); | ||
80 | } | 82 | } |
81 | 83 | ||
82 | static ssize_t | 84 | static ssize_t |
@@ -173,7 +175,8 @@ ccwgroup_create(struct device *root, | |||
173 | return -ENOMEM; | 175 | return -ENOMEM; |
174 | 176 | ||
175 | atomic_set(&gdev->onoff, 0); | 177 | atomic_set(&gdev->onoff, 0); |
176 | 178 | mutex_init(&gdev->reg_mutex); | |
179 | mutex_lock(&gdev->reg_mutex); | ||
177 | for (i = 0; i < argc; i++) { | 180 | for (i = 0; i < argc; i++) { |
178 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); | 181 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); |
179 | 182 | ||
@@ -183,12 +186,12 @@ ccwgroup_create(struct device *root, | |||
183 | || gdev->cdev[i]->id.driver_info != | 186 | || gdev->cdev[i]->id.driver_info != |
184 | gdev->cdev[0]->id.driver_info) { | 187 | gdev->cdev[0]->id.driver_info) { |
185 | rc = -EINVAL; | 188 | rc = -EINVAL; |
186 | goto free_dev; | 189 | goto error; |
187 | } | 190 | } |
188 | /* Don't allow a device to belong to more than one group. */ | 191 | /* Don't allow a device to belong to more than one group. */ |
189 | if (gdev->cdev[i]->dev.driver_data) { | 192 | if (gdev->cdev[i]->dev.driver_data) { |
190 | rc = -EINVAL; | 193 | rc = -EINVAL; |
191 | goto free_dev; | 194 | goto error; |
192 | } | 195 | } |
193 | gdev->cdev[i]->dev.driver_data = gdev; | 196 | gdev->cdev[i]->dev.driver_data = gdev; |
194 | } | 197 | } |
@@ -203,9 +206,8 @@ ccwgroup_create(struct device *root, | |||
203 | gdev->cdev[0]->dev.bus_id); | 206 | gdev->cdev[0]->dev.bus_id); |
204 | 207 | ||
205 | rc = device_register(&gdev->dev); | 208 | rc = device_register(&gdev->dev); |
206 | |||
207 | if (rc) | 209 | if (rc) |
208 | goto free_dev; | 210 | goto error; |
209 | get_device(&gdev->dev); | 211 | get_device(&gdev->dev); |
210 | rc = device_create_file(&gdev->dev, &dev_attr_ungroup); | 212 | rc = device_create_file(&gdev->dev, &dev_attr_ungroup); |
211 | 213 | ||
@@ -216,6 +218,7 @@ ccwgroup_create(struct device *root, | |||
216 | 218 | ||
217 | rc = __ccwgroup_create_symlinks(gdev); | 219 | rc = __ccwgroup_create_symlinks(gdev); |
218 | if (!rc) { | 220 | if (!rc) { |
221 | mutex_unlock(&gdev->reg_mutex); | ||
219 | put_device(&gdev->dev); | 222 | put_device(&gdev->dev); |
220 | return 0; | 223 | return 0; |
221 | } | 224 | } |
@@ -224,19 +227,12 @@ ccwgroup_create(struct device *root, | |||
224 | error: | 227 | error: |
225 | for (i = 0; i < argc; i++) | 228 | for (i = 0; i < argc; i++) |
226 | if (gdev->cdev[i]) { | 229 | if (gdev->cdev[i]) { |
227 | put_device(&gdev->cdev[i]->dev); | ||
228 | gdev->cdev[i]->dev.driver_data = NULL; | ||
229 | } | ||
230 | put_device(&gdev->dev); | ||
231 | return rc; | ||
232 | free_dev: | ||
233 | for (i = 0; i < argc; i++) | ||
234 | if (gdev->cdev[i]) { | ||
235 | if (gdev->cdev[i]->dev.driver_data == gdev) | 230 | if (gdev->cdev[i]->dev.driver_data == gdev) |
236 | gdev->cdev[i]->dev.driver_data = NULL; | 231 | gdev->cdev[i]->dev.driver_data = NULL; |
237 | put_device(&gdev->cdev[i]->dev); | 232 | put_device(&gdev->cdev[i]->dev); |
238 | } | 233 | } |
239 | kfree(gdev); | 234 | mutex_unlock(&gdev->reg_mutex); |
235 | put_device(&gdev->dev); | ||
240 | return rc; | 236 | return rc; |
241 | } | 237 | } |
242 | 238 | ||
@@ -422,8 +418,12 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver) | |||
422 | get_driver(&cdriver->driver); | 418 | get_driver(&cdriver->driver); |
423 | while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, | 419 | while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, |
424 | __ccwgroup_match_all))) { | 420 | __ccwgroup_match_all))) { |
425 | __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); | 421 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); |
422 | |||
423 | mutex_lock(&gdev->reg_mutex); | ||
424 | __ccwgroup_remove_symlinks(gdev); | ||
426 | device_unregister(dev); | 425 | device_unregister(dev); |
426 | mutex_unlock(&gdev->reg_mutex); | ||
427 | put_device(dev); | 427 | put_device(dev); |
428 | } | 428 | } |
429 | put_driver(&cdriver->driver); | 429 | put_driver(&cdriver->driver); |
@@ -444,8 +444,10 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) | |||
444 | if (cdev->dev.driver_data) { | 444 | if (cdev->dev.driver_data) { |
445 | gdev = (struct ccwgroup_device *)cdev->dev.driver_data; | 445 | gdev = (struct ccwgroup_device *)cdev->dev.driver_data; |
446 | if (get_device(&gdev->dev)) { | 446 | if (get_device(&gdev->dev)) { |
447 | mutex_lock(&gdev->reg_mutex); | ||
447 | if (device_is_registered(&gdev->dev)) | 448 | if (device_is_registered(&gdev->dev)) |
448 | return gdev; | 449 | return gdev; |
450 | mutex_unlock(&gdev->reg_mutex); | ||
449 | put_device(&gdev->dev); | 451 | put_device(&gdev->dev); |
450 | } | 452 | } |
451 | return NULL; | 453 | return NULL; |
@@ -465,6 +467,7 @@ ccwgroup_remove_ccwdev(struct ccw_device *cdev) | |||
465 | if (gdev) { | 467 | if (gdev) { |
466 | __ccwgroup_remove_symlinks(gdev); | 468 | __ccwgroup_remove_symlinks(gdev); |
467 | device_unregister(&gdev->dev); | 469 | device_unregister(&gdev->dev); |
470 | mutex_unlock(&gdev->reg_mutex); | ||
468 | put_device(&gdev->dev); | 471 | put_device(&gdev->dev); |
469 | } | 472 | } |
470 | } | 473 | } |
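The ccwgroup changes above introduce a per-group reg_mutex so that grouping, ungrouping and driver unregistration cannot race; note in particular that __ccwgroup_get_gdev_by_cdev() now returns with the mutex held whenever the group is still registered, and ccwgroup_remove_ccwdev() drops it after unregistering. A rough user-space analogue of that "lookup returns locked" convention, with all names invented for the sketch:

```c
/* build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct group {
	pthread_mutex_t reg_mutex;
	int registered;
};

/* Analogue of __ccwgroup_get_gdev_by_cdev(): on success the group is
 * returned with reg_mutex held, so the caller can tear it down without
 * racing against a concurrent ungroup. */
static struct group *get_group_locked(struct group *g)
{
	pthread_mutex_lock(&g->reg_mutex);
	if (g->registered)
		return g;		/* caller must unlock */
	pthread_mutex_unlock(&g->reg_mutex);
	return NULL;
}

static void remove_group(struct group *g)
{
	struct group *locked = get_group_locked(g);

	if (!locked)
		return;
	locked->registered = 0;		/* "device_unregister" step */
	pthread_mutex_unlock(&locked->reg_mutex);
}

int main(void)
{
	struct group g = { PTHREAD_MUTEX_INITIALIZER, 1 };

	remove_group(&g);
	printf("registered=%d\n", g.registered);
	return 0;
}
```

In the driver itself the same roles are played by mutex_lock(&gdev->reg_mutex) in __ccwgroup_get_gdev_by_cdev() and the unlock in ccwgroup_remove_ccwdev().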
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c new file mode 100644 index 000000000000..ac289e6eadfe --- /dev/null +++ b/drivers/s390/cio/chp.c | |||
@@ -0,0 +1,683 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/chp.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 1999,2007 | ||
5 | * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) | ||
6 | * Arnd Bergmann (arndb@de.ibm.com) | ||
7 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/bug.h> | ||
11 | #include <linux/workqueue.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/jiffies.h> | ||
15 | #include <linux/wait.h> | ||
16 | #include <linux/mutex.h> | ||
17 | #include <asm/errno.h> | ||
18 | #include <asm/chpid.h> | ||
19 | #include <asm/sclp.h> | ||
20 | |||
21 | #include "cio.h" | ||
22 | #include "css.h" | ||
23 | #include "ioasm.h" | ||
24 | #include "cio_debug.h" | ||
25 | #include "chp.h" | ||
26 | |||
27 | #define to_channelpath(device) container_of(device, struct channel_path, dev) | ||
28 | #define CHP_INFO_UPDATE_INTERVAL 1*HZ | ||
29 | |||
30 | enum cfg_task_t { | ||
31 | cfg_none, | ||
32 | cfg_configure, | ||
33 | cfg_deconfigure | ||
34 | }; | ||
35 | |||
36 | /* Map for pending configure tasks. */ | ||
37 | static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1]; | ||
38 | static DEFINE_MUTEX(cfg_lock); | ||
39 | static int cfg_busy; | ||
40 | |||
41 | /* Map for channel-path status. */ | ||
42 | static struct sclp_chp_info chp_info; | ||
43 | static DEFINE_MUTEX(info_lock); | ||
44 | |||
45 | /* Time after which channel-path status may be outdated. */ | ||
46 | static unsigned long chp_info_expires; | ||
47 | |||
48 | /* Workqueue to perform pending configure tasks. */ | ||
49 | static struct workqueue_struct *chp_wq; | ||
50 | static struct work_struct cfg_work; | ||
51 | |||
52 | /* Wait queue for configure completion events. */ | ||
53 | static wait_queue_head_t cfg_wait_queue; | ||
54 | |||
55 | /* Return channel_path struct for given chpid. */ | ||
56 | static inline struct channel_path *chpid_to_chp(struct chp_id chpid) | ||
57 | { | ||
58 | return css[chpid.cssid]->chps[chpid.id]; | ||
59 | } | ||
60 | |||
61 | /* Set vary state for given chpid. */ | ||
62 | static void set_chp_logically_online(struct chp_id chpid, int onoff) | ||
63 | { | ||
64 | chpid_to_chp(chpid)->state = onoff; | ||
65 | } | ||
66 | |||
67 | /* On success return 0 if channel-path is varied offline, 1 if it is varied | ||
68 | * online. Return -ENODEV if channel-path is not registered. */ | ||
69 | int chp_get_status(struct chp_id chpid) | ||
70 | { | ||
71 | return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV); | ||
72 | } | ||
73 | |||
74 | /** | ||
75 | * chp_get_sch_opm - return opm for subchannel | ||
76 | * @sch: subchannel | ||
77 | * | ||
78 | * Calculate and return the operational path mask (opm) based on the chpids | ||
79 | * used by the subchannel and the status of the associated channel-paths. | ||
80 | */ | ||
81 | u8 chp_get_sch_opm(struct subchannel *sch) | ||
82 | { | ||
83 | struct chp_id chpid; | ||
84 | int opm; | ||
85 | int i; | ||
86 | |||
87 | opm = 0; | ||
88 | chp_id_init(&chpid); | ||
89 | for (i=0; i < 8; i++) { | ||
90 | opm <<= 1; | ||
91 | chpid.id = sch->schib.pmcw.chpid[i]; | ||
92 | if (chp_get_status(chpid) != 0) | ||
93 | opm |= 1; | ||
94 | } | ||
95 | return opm; | ||
96 | } | ||
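chp_get_sch_opm() accumulates the operational path mask most-significant-bit first, so path 0 of the subchannel ends up in bit 0x80 and path 7 in bit 0x01. A standalone sketch of the same shift-and-accumulate loop (chpid_online[] stands in for the chp_get_status() lookup):

```c
#include <stdio.h>

/* Build an 8-bit path mask the way chp_get_sch_opm() does: iterate the
 * eight possible paths in order and shift the accumulated mask left, so
 * path 0 lands in bit 0x80 and path 7 in bit 0x01. */
static unsigned char build_opm(const int chpid_online[8])
{
	unsigned char opm = 0;
	int i;

	for (i = 0; i < 8; i++) {
		opm <<= 1;
		if (chpid_online[i])
			opm |= 1;
	}
	return opm;
}

int main(void)
{
	int online[8] = { 1, 0, 1, 0, 0, 0, 0, 1 };

	printf("opm=0x%02x\n", build_opm(online)); /* prints opm=0xa1 */
	return 0;
}
```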
97 | |||
98 | /** | ||
99 | * chp_is_registered - check if a channel-path is registered | ||
100 | * @chpid: channel-path ID | ||
101 | * | ||
102 | * Return non-zero if a channel-path with the given chpid is registered, | ||
103 | * zero otherwise. | ||
104 | */ | ||
105 | int chp_is_registered(struct chp_id chpid) | ||
106 | { | ||
107 | return chpid_to_chp(chpid) != NULL; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * Function: s390_vary_chpid | ||
112 | * Varies the specified chpid online or offline | ||
113 | */ | ||
114 | static int s390_vary_chpid(struct chp_id chpid, int on) | ||
115 | { | ||
116 | char dbf_text[15]; | ||
117 | int status; | ||
118 | |||
119 | sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid, | ||
120 | chpid.id); | ||
121 | CIO_TRACE_EVENT( 2, dbf_text); | ||
122 | |||
123 | status = chp_get_status(chpid); | ||
124 | if (status < 0) { | ||
125 | printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n", | ||
126 | chpid.cssid, chpid.id); | ||
127 | return -EINVAL; | ||
128 | } | ||
129 | |||
130 | if (!on && !status) { | ||
131 | printk(KERN_ERR "chpid %x.%02x is already offline\n", | ||
132 | chpid.cssid, chpid.id); | ||
133 | return -EINVAL; | ||
134 | } | ||
135 | |||
136 | set_chp_logically_online(chpid, on); | ||
137 | chsc_chp_vary(chpid, on); | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * Channel measurement related functions | ||
143 | */ | ||
144 | static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf, | ||
145 | loff_t off, size_t count) | ||
146 | { | ||
147 | struct channel_path *chp; | ||
148 | unsigned int size; | ||
149 | |||
150 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
151 | if (!chp->cmg_chars) | ||
152 | return 0; | ||
153 | |||
154 | size = sizeof(struct cmg_chars); | ||
155 | |||
156 | if (off > size) | ||
157 | return 0; | ||
158 | if (off + count > size) | ||
159 | count = size - off; | ||
160 | memcpy(buf, chp->cmg_chars + off, count); | ||
161 | return count; | ||
162 | } | ||
163 | |||
164 | static struct bin_attribute chp_measurement_chars_attr = { | ||
165 | .attr = { | ||
166 | .name = "measurement_chars", | ||
167 | .mode = S_IRUSR, | ||
168 | .owner = THIS_MODULE, | ||
169 | }, | ||
170 | .size = sizeof(struct cmg_chars), | ||
171 | .read = chp_measurement_chars_read, | ||
172 | }; | ||
173 | |||
174 | static void chp_measurement_copy_block(struct cmg_entry *buf, | ||
175 | struct channel_subsystem *css, | ||
176 | struct chp_id chpid) | ||
177 | { | ||
178 | void *area; | ||
179 | struct cmg_entry *entry, reference_buf; | ||
180 | int idx; | ||
181 | |||
182 | if (chpid.id < 128) { | ||
183 | area = css->cub_addr1; | ||
184 | idx = chpid.id; | ||
185 | } else { | ||
186 | area = css->cub_addr2; | ||
187 | idx = chpid.id - 128; | ||
188 | } | ||
189 | entry = area + (idx * sizeof(struct cmg_entry)); | ||
190 | do { | ||
191 | memcpy(buf, entry, sizeof(*entry)); | ||
192 | memcpy(&reference_buf, entry, sizeof(*entry)); | ||
193 | } while (reference_buf.values[0] != buf->values[0]); | ||
194 | } | ||
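chp_measurement_copy_block() snapshots a measurement entry that the machine may update underneath it: the entry is copied twice and the copy is retried until both copies agree on values[0], giving a consistent view without any locking. A small user-space sketch of that retry-until-stable idea (the entry layout and main() are invented for the example):

```c
#include <string.h>
#include <stdio.h>

struct entry {
	unsigned int values[8];	/* values[0] acts as the consistency marker */
};

/* Copy *src into *dst, retrying until two consecutive copies agree on
 * values[0]; mirrors the loop in chp_measurement_copy_block(). */
static void copy_entry_consistent(struct entry *dst, const struct entry *src)
{
	struct entry reference;

	do {
		memcpy(dst, src, sizeof(*dst));
		memcpy(&reference, src, sizeof(reference));
	} while (reference.values[0] != dst->values[0]);
}

int main(void)
{
	struct entry shared = { { 42, 1, 2, 3, 4, 5, 6, 7 } };
	struct entry snapshot;

	copy_entry_consistent(&snapshot, &shared);
	printf("values[0]=%u\n", snapshot.values[0]);
	return 0;
}
```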
195 | |||
196 | static ssize_t chp_measurement_read(struct kobject *kobj, char *buf, | ||
197 | loff_t off, size_t count) | ||
198 | { | ||
199 | struct channel_path *chp; | ||
200 | struct channel_subsystem *css; | ||
201 | unsigned int size; | ||
202 | |||
203 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
204 | css = to_css(chp->dev.parent); | ||
205 | |||
206 | size = sizeof(struct cmg_entry); | ||
207 | |||
208 | /* Only allow single reads. */ | ||
209 | if (off || count < size) | ||
210 | return 0; | ||
211 | chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid); | ||
212 | count = size; | ||
213 | return count; | ||
214 | } | ||
215 | |||
216 | static struct bin_attribute chp_measurement_attr = { | ||
217 | .attr = { | ||
218 | .name = "measurement", | ||
219 | .mode = S_IRUSR, | ||
220 | .owner = THIS_MODULE, | ||
221 | }, | ||
222 | .size = sizeof(struct cmg_entry), | ||
223 | .read = chp_measurement_read, | ||
224 | }; | ||
225 | |||
226 | void chp_remove_cmg_attr(struct channel_path *chp) | ||
227 | { | ||
228 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
229 | device_remove_bin_file(&chp->dev, &chp_measurement_attr); | ||
230 | } | ||
231 | |||
232 | int chp_add_cmg_attr(struct channel_path *chp) | ||
233 | { | ||
234 | int ret; | ||
235 | |||
236 | ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
237 | if (ret) | ||
238 | return ret; | ||
239 | ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); | ||
240 | if (ret) | ||
241 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
242 | return ret; | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Files for the channel path entries. | ||
247 | */ | ||
248 | static ssize_t chp_status_show(struct device *dev, | ||
249 | struct device_attribute *attr, char *buf) | ||
250 | { | ||
251 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
252 | |||
253 | if (!chp) | ||
254 | return 0; | ||
255 | return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") : | ||
256 | sprintf(buf, "offline\n")); | ||
257 | } | ||
258 | |||
259 | static ssize_t chp_status_write(struct device *dev, | ||
260 | struct device_attribute *attr, | ||
261 | const char *buf, size_t count) | ||
262 | { | ||
263 | struct channel_path *cp = container_of(dev, struct channel_path, dev); | ||
264 | char cmd[10]; | ||
265 | int num_args; | ||
266 | int error; | ||
267 | |||
268 | num_args = sscanf(buf, "%5s", cmd); | ||
269 | if (!num_args) | ||
270 | return count; | ||
271 | |||
272 | if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) | ||
273 | error = s390_vary_chpid(cp->chpid, 1); | ||
274 | else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) | ||
275 | error = s390_vary_chpid(cp->chpid, 0); | ||
276 | else | ||
277 | error = -EINVAL; | ||
278 | |||
279 | return error < 0 ? error : count; | ||
280 | |||
281 | } | ||
282 | |||
283 | static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); | ||
284 | |||
285 | static ssize_t chp_configure_show(struct device *dev, | ||
286 | struct device_attribute *attr, char *buf) | ||
287 | { | ||
288 | struct channel_path *cp; | ||
289 | int status; | ||
290 | |||
291 | cp = container_of(dev, struct channel_path, dev); | ||
292 | status = chp_info_get_status(cp->chpid); | ||
293 | if (status < 0) | ||
294 | return status; | ||
295 | |||
296 | return snprintf(buf, PAGE_SIZE, "%d\n", status); | ||
297 | } | ||
298 | |||
299 | static int cfg_wait_idle(void); | ||
300 | |||
301 | static ssize_t chp_configure_write(struct device *dev, | ||
302 | struct device_attribute *attr, | ||
303 | const char *buf, size_t count) | ||
304 | { | ||
305 | struct channel_path *cp; | ||
306 | int val; | ||
307 | char delim; | ||
308 | |||
309 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | ||
310 | return -EINVAL; | ||
311 | if (val != 0 && val != 1) | ||
312 | return -EINVAL; | ||
313 | cp = container_of(dev, struct channel_path, dev); | ||
314 | chp_cfg_schedule(cp->chpid, val); | ||
315 | cfg_wait_idle(); | ||
316 | |||
317 | return count; | ||
318 | } | ||
319 | |||
320 | static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write); | ||
321 | |||
322 | static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr, | ||
323 | char *buf) | ||
324 | { | ||
325 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
326 | |||
327 | if (!chp) | ||
328 | return 0; | ||
329 | return sprintf(buf, "%x\n", chp->desc.desc); | ||
330 | } | ||
331 | |||
332 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); | ||
333 | |||
334 | static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr, | ||
335 | char *buf) | ||
336 | { | ||
337 | struct channel_path *chp = to_channelpath(dev); | ||
338 | |||
339 | if (!chp) | ||
340 | return 0; | ||
341 | if (chp->cmg == -1) /* channel measurements not available */ | ||
342 | return sprintf(buf, "unknown\n"); | ||
343 | return sprintf(buf, "%x\n", chp->cmg); | ||
344 | } | ||
345 | |||
346 | static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); | ||
347 | |||
348 | static ssize_t chp_shared_show(struct device *dev, | ||
349 | struct device_attribute *attr, char *buf) | ||
350 | { | ||
351 | struct channel_path *chp = to_channelpath(dev); | ||
352 | |||
353 | if (!chp) | ||
354 | return 0; | ||
355 | if (chp->shared == -1) /* channel measurements not available */ | ||
356 | return sprintf(buf, "unknown\n"); | ||
357 | return sprintf(buf, "%x\n", chp->shared); | ||
358 | } | ||
359 | |||
360 | static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); | ||
361 | |||
362 | static struct attribute * chp_attrs[] = { | ||
363 | &dev_attr_status.attr, | ||
364 | &dev_attr_configure.attr, | ||
365 | &dev_attr_type.attr, | ||
366 | &dev_attr_cmg.attr, | ||
367 | &dev_attr_shared.attr, | ||
368 | NULL, | ||
369 | }; | ||
370 | |||
371 | static struct attribute_group chp_attr_group = { | ||
372 | .attrs = chp_attrs, | ||
373 | }; | ||
374 | |||
375 | static void chp_release(struct device *dev) | ||
376 | { | ||
377 | struct channel_path *cp; | ||
378 | |||
379 | cp = container_of(dev, struct channel_path, dev); | ||
380 | kfree(cp); | ||
381 | } | ||
382 | |||
383 | /** | ||
384 | * chp_new - register a new channel-path | ||
385 | * @chpid - channel-path ID | ||
386 | * | ||
387 | * Create and register data structure representing new channel-path. Return | ||
388 | * zero on success, non-zero otherwise. | ||
389 | */ | ||
390 | int chp_new(struct chp_id chpid) | ||
391 | { | ||
392 | struct channel_path *chp; | ||
393 | int ret; | ||
394 | |||
395 | if (chp_is_registered(chpid)) | ||
396 | return 0; | ||
397 | chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); | ||
398 | if (!chp) | ||
399 | return -ENOMEM; | ||
400 | |||
401 | /* fill in status, etc. */ | ||
402 | chp->chpid = chpid; | ||
403 | chp->state = 1; | ||
404 | chp->dev.parent = &css[chpid.cssid]->device; | ||
405 | chp->dev.release = chp_release; | ||
406 | snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid, | ||
407 | chpid.id); | ||
408 | |||
409 | /* Obtain channel path description and fill it in. */ | ||
410 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); | ||
411 | if (ret) | ||
412 | goto out_free; | ||
413 | if ((chp->desc.flags & 0x80) == 0) { | ||
414 | ret = -ENODEV; | ||
415 | goto out_free; | ||
416 | } | ||
417 | /* Get channel-measurement characteristics. */ | ||
418 | if (css_characteristics_avail && css_chsc_characteristics.scmc | ||
419 | && css_chsc_characteristics.secm) { | ||
420 | ret = chsc_get_channel_measurement_chars(chp); | ||
421 | if (ret) | ||
422 | goto out_free; | ||
423 | } else { | ||
424 | static int msg_done; | ||
425 | |||
426 | if (!msg_done) { | ||
427 | printk(KERN_WARNING "cio: Channel measurements not " | ||
428 | "available, continuing.\n"); | ||
429 | msg_done = 1; | ||
430 | } | ||
431 | chp->cmg = -1; | ||
432 | } | ||
433 | |||
434 | /* make it known to the system */ | ||
435 | ret = device_register(&chp->dev); | ||
436 | if (ret) { | ||
437 | printk(KERN_WARNING "%s: could not register %x.%02x\n", | ||
438 | __func__, chpid.cssid, chpid.id); | ||
439 | goto out_free; | ||
440 | } | ||
441 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); | ||
442 | if (ret) { | ||
443 | device_unregister(&chp->dev); | ||
444 | goto out_free; | ||
445 | } | ||
446 | mutex_lock(&css[chpid.cssid]->mutex); | ||
447 | if (css[chpid.cssid]->cm_enabled) { | ||
448 | ret = chp_add_cmg_attr(chp); | ||
449 | if (ret) { | ||
450 | sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); | ||
451 | device_unregister(&chp->dev); | ||
452 | mutex_unlock(&css[chpid.cssid]->mutex); | ||
453 | goto out_free; | ||
454 | } | ||
455 | } | ||
456 | css[chpid.cssid]->chps[chpid.id] = chp; | ||
457 | mutex_unlock(&css[chpid.cssid]->mutex); | ||
458 | return ret; | ||
459 | out_free: | ||
460 | kfree(chp); | ||
461 | return ret; | ||
462 | } | ||
463 | |||
464 | /** | ||
465 | * chp_get_chp_desc - return newly allocated channel-path description | ||
466 | * @chpid: channel-path ID | ||
467 | * | ||
468 | * On success return a newly allocated copy of the channel-path description | ||
469 | * data associated with the given channel-path ID. Return %NULL on error. | ||
470 | */ | ||
471 | void *chp_get_chp_desc(struct chp_id chpid) | ||
472 | { | ||
473 | struct channel_path *chp; | ||
474 | struct channel_path_desc *desc; | ||
475 | |||
476 | chp = chpid_to_chp(chpid); | ||
477 | if (!chp) | ||
478 | return NULL; | ||
479 | desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); | ||
480 | if (!desc) | ||
481 | return NULL; | ||
482 | memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); | ||
483 | return desc; | ||
484 | } | ||
485 | |||
486 | /** | ||
487 | * chp_process_crw - process channel-path status change | ||
488 | * @id: channel-path ID number | ||
489 | * @status: non-zero if channel-path has become available, zero otherwise | ||
490 | * | ||
491 | * Handle channel-report-words indicating that the status of a channel-path | ||
492 | * has changed. | ||
493 | */ | ||
494 | void chp_process_crw(int id, int status) | ||
495 | { | ||
496 | struct chp_id chpid; | ||
497 | |||
498 | chp_id_init(&chpid); | ||
499 | chpid.id = id; | ||
500 | if (status) { | ||
501 | if (!chp_is_registered(chpid)) | ||
502 | chp_new(chpid); | ||
503 | chsc_chp_online(chpid); | ||
504 | } else | ||
505 | chsc_chp_offline(chpid); | ||
506 | } | ||
507 | |||
508 | static inline int info_bit_num(struct chp_id id) | ||
509 | { | ||
510 | return id.id + id.cssid * (__MAX_CHPID + 1); | ||
511 | } | ||
512 | |||
513 | /* Force chp_info refresh on next call to info_validate(). */ | ||
514 | static void info_expire(void) | ||
515 | { | ||
516 | mutex_lock(&info_lock); | ||
517 | chp_info_expires = jiffies - 1; | ||
518 | mutex_unlock(&info_lock); | ||
519 | } | ||
520 | |||
521 | /* Ensure that chp_info is up-to-date. */ | ||
522 | static int info_update(void) | ||
523 | { | ||
524 | int rc; | ||
525 | |||
526 | mutex_lock(&info_lock); | ||
527 | rc = 0; | ||
528 | if (time_after(jiffies, chp_info_expires)) { | ||
529 | /* Data is too old, update. */ | ||
530 | rc = sclp_chp_read_info(&chp_info); | ||
531 | chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL; | ||
532 | } | ||
533 | mutex_unlock(&info_lock); | ||
534 | |||
535 | return rc; | ||
536 | } | ||
537 | |||
538 | /** | ||
539 | * chp_info_get_status - retrieve configure status of a channel-path | ||
540 | * @chpid: channel-path ID | ||
541 | * | ||
542 | * On success, return 0 for standby, 1 for configured, 2 for reserved, | ||
543 | * 3 for not recognized. Return negative error code on error. | ||
544 | */ | ||
545 | int chp_info_get_status(struct chp_id chpid) | ||
546 | { | ||
547 | int rc; | ||
548 | int bit; | ||
549 | |||
550 | rc = info_update(); | ||
551 | if (rc) | ||
552 | return rc; | ||
553 | |||
554 | bit = info_bit_num(chpid); | ||
555 | mutex_lock(&info_lock); | ||
556 | if (!chp_test_bit(chp_info.recognized, bit)) | ||
557 | rc = CHP_STATUS_NOT_RECOGNIZED; | ||
558 | else if (chp_test_bit(chp_info.configured, bit)) | ||
559 | rc = CHP_STATUS_CONFIGURED; | ||
560 | else if (chp_test_bit(chp_info.standby, bit)) | ||
561 | rc = CHP_STATUS_STANDBY; | ||
562 | else | ||
563 | rc = CHP_STATUS_RESERVED; | ||
564 | mutex_unlock(&info_lock); | ||
565 | |||
566 | return rc; | ||
567 | } | ||
568 | |||
569 | /* Return configure task for chpid. */ | ||
570 | static enum cfg_task_t cfg_get_task(struct chp_id chpid) | ||
571 | { | ||
572 | return chp_cfg_task[chpid.cssid][chpid.id]; | ||
573 | } | ||
574 | |||
575 | /* Set configure task for chpid. */ | ||
576 | static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg) | ||
577 | { | ||
578 | chp_cfg_task[chpid.cssid][chpid.id] = cfg; | ||
579 | } | ||
580 | |||
581 | /* Perform one configure/deconfigure request. Reschedule work function until | ||
582 | * last request. */ | ||
583 | static void cfg_func(struct work_struct *work) | ||
584 | { | ||
585 | struct chp_id chpid; | ||
586 | enum cfg_task_t t; | ||
587 | |||
588 | mutex_lock(&cfg_lock); | ||
589 | t = cfg_none; | ||
590 | chp_id_for_each(&chpid) { | ||
591 | t = cfg_get_task(chpid); | ||
592 | if (t != cfg_none) { | ||
593 | cfg_set_task(chpid, cfg_none); | ||
594 | break; | ||
595 | } | ||
596 | } | ||
597 | mutex_unlock(&cfg_lock); | ||
598 | |||
599 | switch (t) { | ||
600 | case cfg_configure: | ||
601 | sclp_chp_configure(chpid); | ||
602 | info_expire(); | ||
603 | chsc_chp_online(chpid); | ||
604 | break; | ||
605 | case cfg_deconfigure: | ||
606 | sclp_chp_deconfigure(chpid); | ||
607 | info_expire(); | ||
608 | chsc_chp_offline(chpid); | ||
609 | break; | ||
610 | case cfg_none: | ||
611 | /* Get updated information after last change. */ | ||
612 | info_update(); | ||
613 | mutex_lock(&cfg_lock); | ||
614 | cfg_busy = 0; | ||
615 | mutex_unlock(&cfg_lock); | ||
616 | wake_up_interruptible(&cfg_wait_queue); | ||
617 | return; | ||
618 | } | ||
619 | queue_work(chp_wq, &cfg_work); | ||
620 | } | ||
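cfg_func() handles at most one pending configure/deconfigure task per invocation: it picks and clears an entry under cfg_lock, performs the SCLP call with the lock dropped, and requeues itself until the map is empty, at which point cfg_busy is cleared and waiters are woken. A single-threaded sketch of that drain pattern (the task map, ids and handlers are simplified stand-ins):

```c
#include <stdio.h>

#define NR_IDS 16

enum task { TASK_NONE, TASK_CONFIGURE, TASK_DECONFIGURE };

static enum task task_map[NR_IDS];

/* Pick the first pending task and clear it, as cfg_func() does under
 * cfg_lock. Returns 1 and fills *id/*t, or 0 when the map is empty. */
static int get_next_task(int *id, enum task *t)
{
	int i;

	for (i = 0; i < NR_IDS; i++) {
		if (task_map[i] != TASK_NONE) {
			*id = i;
			*t = task_map[i];
			task_map[i] = TASK_NONE;
			return 1;
		}
	}
	return 0;
}

/* Drain loop standing in for the self-requeueing work function. */
static void drain_tasks(void)
{
	enum task t;
	int id;

	while (get_next_task(&id, &t))
		printf("%s chpid %d\n",
		       t == TASK_CONFIGURE ? "configure" : "deconfigure", id);
	/* map empty: this is where cfg_busy would be cleared and waiters woken */
}

int main(void)
{
	task_map[3] = TASK_CONFIGURE;
	task_map[7] = TASK_DECONFIGURE;
	drain_tasks();
	return 0;
}
```

The real work function additionally refreshes chp_info via info_update() once the map is drained.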
621 | |||
622 | /** | ||
623 | * chp_cfg_schedule - schedule chpid configuration request | ||
624 | * @chpid - channel-path ID | ||
625 | * @configure - Non-zero for configure, zero for deconfigure | ||
626 | * | ||
627 | * Schedule a channel-path configuration/deconfiguration request. | ||
628 | */ | ||
629 | void chp_cfg_schedule(struct chp_id chpid, int configure) | ||
630 | { | ||
631 | CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id, | ||
632 | configure); | ||
633 | mutex_lock(&cfg_lock); | ||
634 | cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure); | ||
635 | cfg_busy = 1; | ||
636 | mutex_unlock(&cfg_lock); | ||
637 | queue_work(chp_wq, &cfg_work); | ||
638 | } | ||
639 | |||
640 | /** | ||
641 | * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request | ||
642 | * @chpid - channel-path ID | ||
643 | * | ||
644 | * Cancel an active channel-path deconfiguration request if it has not yet | ||
645 | * been performed. | ||
646 | */ | ||
647 | void chp_cfg_cancel_deconfigure(struct chp_id chpid) | ||
648 | { | ||
649 | CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id); | ||
650 | mutex_lock(&cfg_lock); | ||
651 | if (cfg_get_task(chpid) == cfg_deconfigure) | ||
652 | cfg_set_task(chpid, cfg_none); | ||
653 | mutex_unlock(&cfg_lock); | ||
654 | } | ||
655 | |||
656 | static int cfg_wait_idle(void) | ||
657 | { | ||
658 | if (wait_event_interruptible(cfg_wait_queue, !cfg_busy)) | ||
659 | return -ERESTARTSYS; | ||
660 | return 0; | ||
661 | } | ||
662 | |||
663 | static int __init chp_init(void) | ||
664 | { | ||
665 | struct chp_id chpid; | ||
666 | |||
667 | chp_wq = create_singlethread_workqueue("cio_chp"); | ||
668 | if (!chp_wq) | ||
669 | return -ENOMEM; | ||
670 | INIT_WORK(&cfg_work, cfg_func); | ||
671 | init_waitqueue_head(&cfg_wait_queue); | ||
672 | if (info_update()) | ||
673 | return 0; | ||
674 | /* Register available channel-paths. */ | ||
675 | chp_id_for_each(&chpid) { | ||
676 | if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED) | ||
677 | chp_new(chpid); | ||
678 | } | ||
679 | |||
680 | return 0; | ||
681 | } | ||
682 | |||
683 | subsys_initcall(chp_init); | ||
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h new file mode 100644 index 000000000000..65286563c592 --- /dev/null +++ b/drivers/s390/cio/chp.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/chp.h | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef S390_CHP_H | ||
9 | #define S390_CHP_H S390_CHP_H | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <linux/device.h> | ||
13 | #include <asm/chpid.h> | ||
14 | #include "chsc.h" | ||
15 | |||
16 | #define CHP_STATUS_STANDBY 0 | ||
17 | #define CHP_STATUS_CONFIGURED 1 | ||
18 | #define CHP_STATUS_RESERVED 2 | ||
19 | #define CHP_STATUS_NOT_RECOGNIZED 3 | ||
20 | |||
21 | static inline int chp_test_bit(u8 *bitmap, int num) | ||
22 | { | ||
23 | int byte = num >> 3; | ||
24 | int mask = 128 >> (num & 7); | ||
25 | |||
26 | return (bitmap[byte] & mask) ? 1 : 0; | ||
27 | } | ||
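Note that chp_test_bit() counts bits from the most significant bit of each byte (mask 128 >> (num & 7)), matching the layout of the SCLP bitmaps it is used on. A tiny worked example of that numbering, retyped for user space with arbitrary bitmap contents:

```c
#include <stdio.h>

/* Same helper as in chp.h, with u8 replaced by unsigned char: bit 0 is
 * the MSB of byte 0, bit 8 the MSB of byte 1, and so on. */
static int chp_test_bit(const unsigned char *bitmap, int num)
{
	int byte = num >> 3;
	int mask = 128 >> (num & 7);

	return (bitmap[byte] & mask) ? 1 : 0;
}

int main(void)
{
	unsigned char bitmap[2] = { 0x80, 0x40 }; /* bits 0 and 9 set */

	printf("%d %d %d\n",
	       chp_test_bit(bitmap, 0),   /* 1: MSB of byte 0 */
	       chp_test_bit(bitmap, 7),   /* 0: LSB of byte 0 */
	       chp_test_bit(bitmap, 9));  /* 1: second bit of byte 1 */
	return 0;
}
```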
28 | |||
29 | |||
30 | struct channel_path { | ||
31 | struct chp_id chpid; | ||
32 | int state; | ||
33 | struct channel_path_desc desc; | ||
34 | /* Channel-measurement related stuff: */ | ||
35 | int cmg; | ||
36 | int shared; | ||
37 | void *cmg_chars; | ||
38 | struct device dev; | ||
39 | }; | ||
40 | |||
41 | int chp_get_status(struct chp_id chpid); | ||
42 | u8 chp_get_sch_opm(struct subchannel *sch); | ||
43 | int chp_is_registered(struct chp_id chpid); | ||
44 | void *chp_get_chp_desc(struct chp_id chpid); | ||
45 | void chp_process_crw(int id, int available); | ||
46 | void chp_remove_cmg_attr(struct channel_path *chp); | ||
47 | int chp_add_cmg_attr(struct channel_path *chp); | ||
48 | int chp_new(struct chp_id chpid); | ||
49 | void chp_cfg_schedule(struct chp_id chpid, int configure); | ||
50 | void chp_cfg_cancel_deconfigure(struct chp_id chpid); | ||
51 | int chp_info_get_status(struct chp_id chpid); | ||
52 | |||
53 | #endif /* S390_CHP_H */ | ||
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 6f05a44e3817..ea92ac4d6577 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -15,202 +15,124 @@ | |||
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | 16 | ||
17 | #include <asm/cio.h> | 17 | #include <asm/cio.h> |
18 | #include <asm/chpid.h> | ||
18 | 19 | ||
19 | #include "css.h" | 20 | #include "css.h" |
20 | #include "cio.h" | 21 | #include "cio.h" |
21 | #include "cio_debug.h" | 22 | #include "cio_debug.h" |
22 | #include "ioasm.h" | 23 | #include "ioasm.h" |
24 | #include "chp.h" | ||
23 | #include "chsc.h" | 25 | #include "chsc.h" |
24 | 26 | ||
25 | static void *sei_page; | 27 | static void *sei_page; |
26 | 28 | ||
27 | static int new_channel_path(int chpid); | 29 | struct chsc_ssd_area { |
28 | 30 | struct chsc_header request; | |
29 | static inline void | 31 | u16 :10; |
30 | set_chp_logically_online(int chp, int onoff) | 32 | u16 ssid:2; |
31 | { | 33 | u16 :4; |
32 | css[0]->chps[chp]->state = onoff; | 34 | u16 f_sch; /* first subchannel */ |
33 | } | 35 | u16 :16; |
34 | 36 | u16 l_sch; /* last subchannel */ | |
35 | static int | 37 | u32 :32; |
36 | get_chp_status(int chp) | 38 | struct chsc_header response; |
37 | { | 39 | u32 :32; |
38 | return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV); | 40 | u8 sch_valid : 1; |
39 | } | 41 | u8 dev_valid : 1; |
40 | 42 | u8 st : 3; /* subchannel type */ | |
41 | void | 43 | u8 zeroes : 3; |
42 | chsc_validate_chpids(struct subchannel *sch) | 44 | u8 unit_addr; /* unit address */ |
43 | { | 45 | u16 devno; /* device number */ |
44 | int mask, chp; | 46 | u8 path_mask; |
45 | 47 | u8 fla_valid_mask; | |
46 | for (chp = 0; chp <= 7; chp++) { | 48 | u16 sch; /* subchannel */ |
47 | mask = 0x80 >> chp; | 49 | u8 chpid[8]; /* chpids 0-7 */ |
48 | if (!get_chp_status(sch->schib.pmcw.chpid[chp])) | 50 | u16 fla[8]; /* full link addresses 0-7 */ |
49 | /* disable using this path */ | 51 | } __attribute__ ((packed)); |
50 | sch->opm &= ~mask; | ||
51 | } | ||
52 | } | ||
53 | |||
54 | void | ||
55 | chpid_is_actually_online(int chp) | ||
56 | { | ||
57 | int state; | ||
58 | |||
59 | state = get_chp_status(chp); | ||
60 | if (state < 0) { | ||
61 | need_rescan = 1; | ||
62 | queue_work(slow_path_wq, &slow_path_work); | ||
63 | } else | ||
64 | WARN_ON(!state); | ||
65 | } | ||
66 | 52 | ||
67 | /* FIXME: this is _always_ called for every subchannel. shouldn't we | 53 | int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) |
68 | * process more than one at a time? */ | ||
69 | static int | ||
70 | chsc_get_sch_desc_irq(struct subchannel *sch, void *page) | ||
71 | { | 54 | { |
72 | int ccode, j; | 55 | unsigned long page; |
73 | 56 | struct chsc_ssd_area *ssd_area; | |
74 | struct { | 57 | int ccode; |
75 | struct chsc_header request; | 58 | int ret; |
76 | u16 reserved1a:10; | 59 | int i; |
77 | u16 ssid:2; | 60 | int mask; |
78 | u16 reserved1b:4; | ||
79 | u16 f_sch; /* first subchannel */ | ||
80 | u16 reserved2; | ||
81 | u16 l_sch; /* last subchannel */ | ||
82 | u32 reserved3; | ||
83 | struct chsc_header response; | ||
84 | u32 reserved4; | ||
85 | u8 sch_valid : 1; | ||
86 | u8 dev_valid : 1; | ||
87 | u8 st : 3; /* subchannel type */ | ||
88 | u8 zeroes : 3; | ||
89 | u8 unit_addr; /* unit address */ | ||
90 | u16 devno; /* device number */ | ||
91 | u8 path_mask; | ||
92 | u8 fla_valid_mask; | ||
93 | u16 sch; /* subchannel */ | ||
94 | u8 chpid[8]; /* chpids 0-7 */ | ||
95 | u16 fla[8]; /* full link addresses 0-7 */ | ||
96 | } __attribute__ ((packed)) *ssd_area; | ||
97 | |||
98 | ssd_area = page; | ||
99 | 61 | ||
62 | page = get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
63 | if (!page) | ||
64 | return -ENOMEM; | ||
65 | ssd_area = (struct chsc_ssd_area *) page; | ||
100 | ssd_area->request.length = 0x0010; | 66 | ssd_area->request.length = 0x0010; |
101 | ssd_area->request.code = 0x0004; | 67 | ssd_area->request.code = 0x0004; |
102 | 68 | ssd_area->ssid = schid.ssid; | |
103 | ssd_area->ssid = sch->schid.ssid; | 69 | ssd_area->f_sch = schid.sch_no; |
104 | ssd_area->f_sch = sch->schid.sch_no; | 70 | ssd_area->l_sch = schid.sch_no; |
105 | ssd_area->l_sch = sch->schid.sch_no; | ||
106 | 71 | ||
107 | ccode = chsc(ssd_area); | 72 | ccode = chsc(ssd_area); |
73 | /* Check response. */ | ||
108 | if (ccode > 0) { | 74 | if (ccode > 0) { |
109 | pr_debug("chsc returned with ccode = %d\n", ccode); | 75 | ret = (ccode == 3) ? -ENODEV : -EBUSY; |
110 | return (ccode == 3) ? -ENODEV : -EBUSY; | 76 | goto out_free; |
111 | } | 77 | } |
112 | 78 | if (ssd_area->response.code != 0x0001) { | |
113 | switch (ssd_area->response.code) { | 79 | CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", |
114 | case 0x0001: /* everything ok */ | 80 | schid.ssid, schid.sch_no, |
115 | break; | ||
116 | case 0x0002: | ||
117 | CIO_CRW_EVENT(2, "Invalid command!\n"); | ||
118 | return -EINVAL; | ||
119 | case 0x0003: | ||
120 | CIO_CRW_EVENT(2, "Error in chsc request block!\n"); | ||
121 | return -EINVAL; | ||
122 | case 0x0004: | ||
123 | CIO_CRW_EVENT(2, "Model does not provide ssd\n"); | ||
124 | return -EOPNOTSUPP; | ||
125 | default: | ||
126 | CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", | ||
127 | ssd_area->response.code); | 81 | ssd_area->response.code); |
128 | return -EIO; | 82 | ret = -EIO; |
83 | goto out_free; | ||
129 | } | 84 | } |
130 | 85 | if (!ssd_area->sch_valid) { | |
131 | /* | 86 | ret = -ENODEV; |
132 | * ssd_area->st stores the type of the detected | 87 | goto out_free; |
133 | * subchannel, with the following definitions: | ||
134 | * | ||
135 | * 0: I/O subchannel: All fields have meaning | ||
136 | * 1: CHSC subchannel: Only sch_val, st and sch | ||
137 | * have meaning | ||
138 | * 2: Message subchannel: All fields except unit_addr | ||
139 | * have meaning | ||
140 | * 3: ADM subchannel: Only sch_val, st and sch | ||
141 | * have meaning | ||
142 | * | ||
143 | * Other types are currently undefined. | ||
144 | */ | ||
145 | if (ssd_area->st > 3) { /* uhm, that looks strange... */ | ||
146 | CIO_CRW_EVENT(0, "Strange subchannel type %d" | ||
147 | " for sch 0.%x.%04x\n", ssd_area->st, | ||
148 | sch->schid.ssid, sch->schid.sch_no); | ||
149 | /* | ||
150 | * There may have been a new subchannel type defined in the | ||
151 | * time since this code was written; since we don't know which | ||
152 | * fields have meaning and what to do with it we just jump out | ||
153 | */ | ||
154 | return 0; | ||
155 | } else { | ||
156 | const char *type[4] = {"I/O", "chsc", "message", "ADM"}; | ||
157 | CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n", | ||
158 | sch->schid.ssid, sch->schid.sch_no, | ||
159 | type[ssd_area->st]); | ||
160 | |||
161 | sch->ssd_info.valid = 1; | ||
162 | sch->ssd_info.type = ssd_area->st; | ||
163 | } | 88 | } |
164 | 89 | /* Copy data */ | |
165 | if (ssd_area->st == 0 || ssd_area->st == 2) { | 90 | ret = 0; |
166 | for (j = 0; j < 8; j++) { | 91 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); |
167 | if (!((0x80 >> j) & ssd_area->path_mask & | 92 | if ((ssd_area->st != 0) && (ssd_area->st != 2)) |
168 | ssd_area->fla_valid_mask)) | 93 | goto out_free; |
169 | continue; | 94 | ssd->path_mask = ssd_area->path_mask; |
170 | sch->ssd_info.chpid[j] = ssd_area->chpid[j]; | 95 | ssd->fla_valid_mask = ssd_area->fla_valid_mask; |
171 | sch->ssd_info.fla[j] = ssd_area->fla[j]; | 96 | for (i = 0; i < 8; i++) { |
97 | mask = 0x80 >> i; | ||
98 | if (ssd_area->path_mask & mask) { | ||
99 | chp_id_init(&ssd->chpid[i]); | ||
100 | ssd->chpid[i].id = ssd_area->chpid[i]; | ||
172 | } | 101 | } |
102 | if (ssd_area->fla_valid_mask & mask) | ||
103 | ssd->fla[i] = ssd_area->fla[i]; | ||
173 | } | 104 | } |
174 | return 0; | 105 | out_free: |
106 | free_page(page); | ||
107 | return ret; | ||
175 | } | 108 | } |
176 | 109 | ||
177 | int | 110 | static int check_for_io_on_path(struct subchannel *sch, int mask) |
178 | css_get_ssd_info(struct subchannel *sch) | ||
179 | { | 111 | { |
180 | int ret; | 112 | int cc; |
181 | void *page; | ||
182 | 113 | ||
183 | page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 114 | cc = stsch(sch->schid, &sch->schib); |
184 | if (!page) | 115 | if (cc) |
185 | return -ENOMEM; | 116 | return 0; |
186 | spin_lock_irq(sch->lock); | 117 | if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask) |
187 | ret = chsc_get_sch_desc_irq(sch, page); | 118 | return 1; |
188 | if (ret) { | 119 | return 0; |
189 | static int cio_chsc_err_msg; | 120 | } |
190 | 121 | ||
191 | if (!cio_chsc_err_msg) { | 122 | static void terminate_internal_io(struct subchannel *sch) |
192 | printk(KERN_ERR | 123 | { |
193 | "chsc_get_sch_descriptions:" | 124 | if (cio_clear(sch)) { |
194 | " Error %d while doing chsc; " | 125 | /* Recheck device in case clear failed. */ |
195 | "processing some machine checks may " | 126 | sch->lpm = 0; |
196 | "not work\n", ret); | 127 | if (device_trigger_verify(sch) != 0) |
197 | cio_chsc_err_msg = 1; | 128 | css_schedule_eval(sch->schid); |
198 | } | 129 | return; |
199 | } | ||
200 | spin_unlock_irq(sch->lock); | ||
201 | free_page((unsigned long)page); | ||
202 | if (!ret) { | ||
203 | int j, chpid, mask; | ||
204 | /* Allocate channel path structures, if needed. */ | ||
205 | for (j = 0; j < 8; j++) { | ||
206 | mask = 0x80 >> j; | ||
207 | chpid = sch->ssd_info.chpid[j]; | ||
208 | if ((sch->schib.pmcw.pim & mask) && | ||
209 | (get_chp_status(chpid) < 0)) | ||
210 | new_channel_path(chpid); | ||
211 | } | ||
212 | } | 130 | } |
213 | return ret; | 131 | /* Request retry of internal operation. */ |
132 | device_set_intretry(sch); | ||
133 | /* Call handler. */ | ||
134 | if (sch->driver && sch->driver->termination) | ||
135 | sch->driver->termination(&sch->dev); | ||
214 | } | 136 | } |
215 | 137 | ||
216 | static int | 138 | static int |
@@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) | |||
219 | int j; | 141 | int j; |
220 | int mask; | 142 | int mask; |
221 | struct subchannel *sch; | 143 | struct subchannel *sch; |
222 | struct channel_path *chpid; | 144 | struct chp_id *chpid; |
223 | struct schib schib; | 145 | struct schib schib; |
224 | 146 | ||
225 | sch = to_subchannel(dev); | 147 | sch = to_subchannel(dev); |
@@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) | |||
243 | if (sch->schib.pmcw.pim == 0x80) | 165 | if (sch->schib.pmcw.pim == 0x80) |
244 | goto out_unreg; | 166 | goto out_unreg; |
245 | 167 | ||
246 | if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && | 168 | if (check_for_io_on_path(sch, mask)) { |
247 | (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && | 169 | if (device_is_online(sch)) |
248 | (sch->schib.pmcw.lpum == mask)) { | 170 | device_kill_io(sch); |
249 | int cc; | 171 | else { |
250 | 172 | terminate_internal_io(sch); | |
251 | cc = cio_clear(sch); | 173 | /* Re-start path verification. */ |
252 | if (cc == -ENODEV) | 174 | if (sch->driver && sch->driver->verify) |
175 | sch->driver->verify(&sch->dev); | ||
176 | } | ||
177 | } else { | ||
178 | /* trigger path verification. */ | ||
179 | if (sch->driver && sch->driver->verify) | ||
180 | sch->driver->verify(&sch->dev); | ||
181 | else if (sch->lpm == mask) | ||
253 | goto out_unreg; | 182 | goto out_unreg; |
254 | /* Request retry of internal operation. */ | ||
255 | device_set_intretry(sch); | ||
256 | /* Call handler. */ | ||
257 | if (sch->driver && sch->driver->termination) | ||
258 | sch->driver->termination(&sch->dev); | ||
259 | goto out_unlock; | ||
260 | } | 183 | } |
261 | 184 | ||
262 | /* trigger path verification. */ | ||
263 | if (sch->driver && sch->driver->verify) | ||
264 | sch->driver->verify(&sch->dev); | ||
265 | else if (sch->lpm == mask) | ||
266 | goto out_unreg; | ||
267 | out_unlock: | ||
268 | spin_unlock_irq(sch->lock); | 185 | spin_unlock_irq(sch->lock); |
269 | return 0; | 186 | return 0; |
187 | |||
270 | out_unreg: | 188 | out_unreg: |
271 | spin_unlock_irq(sch->lock); | ||
272 | sch->lpm = 0; | 189 | sch->lpm = 0; |
273 | if (css_enqueue_subchannel_slow(sch->schid)) { | 190 | spin_unlock_irq(sch->lock); |
274 | css_clear_subchannel_slow_list(); | 191 | css_schedule_eval(sch->schid); |
275 | need_rescan = 1; | ||
276 | } | ||
277 | return 0; | 192 | return 0; |
278 | } | 193 | } |
279 | 194 | ||
280 | static void | 195 | void chsc_chp_offline(struct chp_id chpid) |
281 | s390_set_chpid_offline( __u8 chpid) | ||
282 | { | 196 | { |
283 | char dbf_txt[15]; | 197 | char dbf_txt[15]; |
284 | struct device *dev; | ||
285 | 198 | ||
286 | sprintf(dbf_txt, "chpr%x", chpid); | 199 | sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); |
287 | CIO_TRACE_EVENT(2, dbf_txt); | 200 | CIO_TRACE_EVENT(2, dbf_txt); |
288 | 201 | ||
289 | if (get_chp_status(chpid) <= 0) | 202 | if (chp_get_status(chpid) <= 0) |
290 | return; | 203 | return; |
291 | dev = get_device(&css[0]->chps[chpid]->dev); | 204 | bus_for_each_dev(&css_bus_type, NULL, &chpid, |
292 | bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev), | ||
293 | s390_subchannel_remove_chpid); | 205 | s390_subchannel_remove_chpid); |
294 | |||
295 | if (need_rescan || css_slow_subchannels_exist()) | ||
296 | queue_work(slow_path_wq, &slow_path_work); | ||
297 | put_device(dev); | ||
298 | } | ||
299 | |||
300 | struct res_acc_data { | ||
301 | struct channel_path *chp; | ||
302 | u32 fla_mask; | ||
303 | u16 fla; | ||
304 | }; | ||
305 | |||
306 | static int | ||
307 | s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch) | ||
308 | { | ||
309 | int found; | ||
310 | int chp; | ||
311 | int ccode; | ||
312 | |||
313 | found = 0; | ||
314 | for (chp = 0; chp <= 7; chp++) | ||
315 | /* | ||
316 | * check if chpid is in information updated by ssd | ||
317 | */ | ||
318 | if (sch->ssd_info.valid && | ||
319 | sch->ssd_info.chpid[chp] == res_data->chp->id && | ||
320 | (sch->ssd_info.fla[chp] & res_data->fla_mask) | ||
321 | == res_data->fla) { | ||
322 | found = 1; | ||
323 | break; | ||
324 | } | ||
325 | |||
326 | if (found == 0) | ||
327 | return 0; | ||
328 | |||
329 | /* | ||
330 | * Do a stsch to update our subchannel structure with the | ||
331 | * new path information and eventually check for logically | ||
332 | * offline chpids. | ||
333 | */ | ||
334 | ccode = stsch(sch->schid, &sch->schib); | ||
335 | if (ccode > 0) | ||
336 | return 0; | ||
337 | |||
338 | return 0x80 >> chp; | ||
339 | } | 206 | } |
340 | 207 | ||
341 | static int | 208 | static int |
342 | s390_process_res_acc_new_sch(struct subchannel_id schid) | 209 | s390_process_res_acc_new_sch(struct subchannel_id schid) |
343 | { | 210 | { |
344 | struct schib schib; | 211 | struct schib schib; |
345 | int ret; | ||
346 | /* | 212 | /* |
347 | * We don't know the device yet, but since a path | 213 | * We don't know the device yet, but since a path |
348 | * may be available now to the device we'll have | 214 | * may be available now to the device we'll have |
@@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid) | |||
353 | */ | 219 | */ |
354 | if (stsch_err(schid, &schib)) | 220 | if (stsch_err(schid, &schib)) |
355 | /* We're through */ | 221 | /* We're through */ |
356 | return need_rescan ? -EAGAIN : -ENXIO; | 222 | return -ENXIO; |
357 | 223 | ||
358 | /* Put it on the slow path. */ | 224 | /* Put it on the slow path. */ |
359 | ret = css_enqueue_subchannel_slow(schid); | 225 | css_schedule_eval(schid); |
360 | if (ret) { | 226 | return 0; |
361 | css_clear_subchannel_slow_list(); | 227 | } |
362 | need_rescan = 1; | 228 | |
363 | return -EAGAIN; | 229 | struct res_acc_data { |
230 | struct chp_id chpid; | ||
231 | u32 fla_mask; | ||
232 | u16 fla; | ||
233 | }; | ||
234 | |||
235 | static int get_res_chpid_mask(struct chsc_ssd_info *ssd, | ||
236 | struct res_acc_data *data) | ||
237 | { | ||
238 | int i; | ||
239 | int mask; | ||
240 | |||
241 | for (i = 0; i < 8; i++) { | ||
242 | mask = 0x80 >> i; | ||
243 | if (!(ssd->path_mask & mask)) | ||
244 | continue; | ||
245 | if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid)) | ||
246 | continue; | ||
247 | if ((ssd->fla_valid_mask & mask) && | ||
248 | ((ssd->fla[i] & data->fla_mask) != data->fla)) | ||
249 | continue; | ||
250 | return mask; | ||
364 | } | 251 | } |
365 | return 0; | 252 | return 0; |
366 | } | 253 | } |
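get_res_chpid_mask() compares the subchannel's stored full link address against the reported one under fla_mask, which chsc_process_sei_res_acc() sets to 0xffff when the event carries a full link address and to 0xff00 when only the link address is valid. A small worked example of that masked comparison (the values are invented):

```c
#include <stdio.h>

/* Masked full-link-address comparison as used for resource accessibility
 * events: with mask 0xff00 only the link address (high byte) must match,
 * with mask 0xffff the whole full link address must match. */
static int fla_matches(unsigned short stored_fla, unsigned short event_fla,
		       unsigned int fla_mask)
{
	return (stored_fla & fla_mask) == event_fla;
}

int main(void)
{
	/* Event reports only link address 0x47 -> fla = 0x4700, mask = 0xff00 */
	printf("%d\n", fla_matches(0x4711, 0x4700, 0xff00)); /* 1: link address matches */
	printf("%d\n", fla_matches(0x4711, 0x4700, 0xffff)); /* 0: full address differs */
	return 0;
}
```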
@@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) | |||
379 | return s390_process_res_acc_new_sch(schid); | 266 | return s390_process_res_acc_new_sch(schid); |
380 | 267 | ||
381 | spin_lock_irq(sch->lock); | 268 | spin_lock_irq(sch->lock); |
382 | 269 | chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); | |
383 | chp_mask = s390_process_res_acc_sch(res_data, sch); | 270 | if (chp_mask == 0) |
384 | 271 | goto out; | |
385 | if (chp_mask == 0) { | 272 | if (stsch(sch->schid, &sch->schib)) |
386 | spin_unlock_irq(sch->lock); | 273 | goto out; |
387 | put_device(&sch->dev); | ||
388 | return 0; | ||
389 | } | ||
390 | old_lpm = sch->lpm; | 274 | old_lpm = sch->lpm; |
391 | sch->lpm = ((sch->schib.pmcw.pim & | 275 | sch->lpm = ((sch->schib.pmcw.pim & |
392 | sch->schib.pmcw.pam & | 276 | sch->schib.pmcw.pam & |
@@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) | |||
396 | device_trigger_reprobe(sch); | 280 | device_trigger_reprobe(sch); |
397 | else if (sch->driver && sch->driver->verify) | 281 | else if (sch->driver && sch->driver->verify) |
398 | sch->driver->verify(&sch->dev); | 282 | sch->driver->verify(&sch->dev); |
399 | 283 | out: | |
400 | spin_unlock_irq(sch->lock); | 284 | spin_unlock_irq(sch->lock); |
401 | put_device(&sch->dev); | 285 | put_device(&sch->dev); |
402 | return 0; | 286 | return 0; |
403 | } | 287 | } |
404 | 288 | ||
405 | 289 | static void s390_process_res_acc (struct res_acc_data *res_data) | |
406 | static int | ||
407 | s390_process_res_acc (struct res_acc_data *res_data) | ||
408 | { | 290 | { |
409 | int rc; | ||
410 | char dbf_txt[15]; | 291 | char dbf_txt[15]; |
411 | 292 | ||
412 | sprintf(dbf_txt, "accpr%x", res_data->chp->id); | 293 | sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid, |
294 | res_data->chpid.id); | ||
413 | CIO_TRACE_EVENT( 2, dbf_txt); | 295 | CIO_TRACE_EVENT( 2, dbf_txt); |
414 | if (res_data->fla != 0) { | 296 | if (res_data->fla != 0) { |
415 | sprintf(dbf_txt, "fla%x", res_data->fla); | 297 | sprintf(dbf_txt, "fla%x", res_data->fla); |
@@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data) | |||
423 | * The more information we have (info), the less scanning | 305 | * The more information we have (info), the less scanning |
424 | * will we have to do. | 306 | * will we have to do. |
425 | */ | 307 | */ |
426 | rc = for_each_subchannel(__s390_process_res_acc, res_data); | 308 | for_each_subchannel(__s390_process_res_acc, res_data); |
427 | if (css_slow_subchannels_exist()) | ||
428 | rc = -EAGAIN; | ||
429 | else if (rc != -EAGAIN) | ||
430 | rc = 0; | ||
431 | return rc; | ||
432 | } | 309 | } |
433 | 310 | ||
434 | static int | 311 | static int |
@@ -480,43 +357,45 @@ struct chsc_sei_area { | |||
480 | /* ccdf has to be big enough for a link-incident record */ | 357 | /* ccdf has to be big enough for a link-incident record */ |
481 | } __attribute__ ((packed)); | 358 | } __attribute__ ((packed)); |
482 | 359 | ||
483 | static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) | 360 | static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) |
484 | { | 361 | { |
485 | int chpid; | 362 | struct chp_id chpid; |
363 | int id; | ||
486 | 364 | ||
487 | CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", | 365 | CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", |
488 | sei_area->rs, sei_area->rsid); | 366 | sei_area->rs, sei_area->rsid); |
489 | if (sei_area->rs != 4) | 367 | if (sei_area->rs != 4) |
490 | return 0; | 368 | return; |
491 | chpid = __get_chpid_from_lir(sei_area->ccdf); | 369 | id = __get_chpid_from_lir(sei_area->ccdf); |
492 | if (chpid < 0) | 370 | if (id < 0) |
493 | CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); | 371 | CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); |
494 | else | 372 | else { |
495 | s390_set_chpid_offline(chpid); | 373 | chp_id_init(&chpid); |
496 | 374 | chpid.id = id; | |
497 | return 0; | 375 | chsc_chp_offline(chpid); |
376 | } | ||
498 | } | 377 | } |
499 | 378 | ||
500 | static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | 379 | static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) |
501 | { | 380 | { |
502 | struct res_acc_data res_data; | 381 | struct res_acc_data res_data; |
503 | struct device *dev; | 382 | struct chp_id chpid; |
504 | int status; | 383 | int status; |
505 | int rc; | ||
506 | 384 | ||
507 | CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " | 385 | CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " |
508 | "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); | 386 | "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); |
509 | if (sei_area->rs != 4) | 387 | if (sei_area->rs != 4) |
510 | return 0; | 388 | return; |
389 | chp_id_init(&chpid); | ||
390 | chpid.id = sei_area->rsid; | ||
511 | /* allocate a new channel path structure, if needed */ | 391 | /* allocate a new channel path structure, if needed */ |
512 | status = get_chp_status(sei_area->rsid); | 392 | status = chp_get_status(chpid); |
513 | if (status < 0) | 393 | if (status < 0) |
514 | new_channel_path(sei_area->rsid); | 394 | chp_new(chpid); |
515 | else if (!status) | 395 | else if (!status) |
516 | return 0; | 396 | return; |
517 | dev = get_device(&css[0]->chps[sei_area->rsid]->dev); | ||
518 | memset(&res_data, 0, sizeof(struct res_acc_data)); | 397 | memset(&res_data, 0, sizeof(struct res_acc_data)); |
519 | res_data.chp = to_channelpath(dev); | 398 | res_data.chpid = chpid; |
520 | if ((sei_area->vf & 0xc0) != 0) { | 399 | if ((sei_area->vf & 0xc0) != 0) { |
521 | res_data.fla = sei_area->fla; | 400 | res_data.fla = sei_area->fla; |
522 | if ((sei_area->vf & 0xc0) == 0xc0) | 401 | if ((sei_area->vf & 0xc0) == 0xc0) |
@@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | |||
526 | /* link address */ | 405 | /* link address */ |
527 | res_data.fla_mask = 0xff00; | 406 | res_data.fla_mask = 0xff00; |
528 | } | 407 | } |
529 | rc = s390_process_res_acc(&res_data); | 408 | s390_process_res_acc(&res_data); |
530 | put_device(dev); | ||
531 | |||
532 | return rc; | ||
533 | } | 409 | } |
534 | 410 | ||
535 | static int chsc_process_sei(struct chsc_sei_area *sei_area) | 411 | struct chp_config_data { |
412 | u8 map[32]; | ||
413 | u8 op; | ||
414 | u8 pc; | ||
415 | }; | ||
416 | |||
417 | static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) | ||
536 | { | 418 | { |
537 | int rc; | 419 | struct chp_config_data *data; |
420 | struct chp_id chpid; | ||
421 | int num; | ||
422 | |||
423 | CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); | ||
424 | if (sei_area->rs != 0) | ||
425 | return; | ||
426 | data = (struct chp_config_data *) &(sei_area->ccdf); | ||
427 | chp_id_init(&chpid); | ||
428 | for (num = 0; num <= __MAX_CHPID; num++) { | ||
429 | if (!chp_test_bit(data->map, num)) | ||
430 | continue; | ||
431 | chpid.id = num; | ||
432 | printk(KERN_WARNING "cio: processing configure event %d for " | ||
433 | "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id); | ||
434 | switch (data->op) { | ||
435 | case 0: | ||
436 | chp_cfg_schedule(chpid, 1); | ||
437 | break; | ||
438 | case 1: | ||
439 | chp_cfg_schedule(chpid, 0); | ||
440 | break; | ||
441 | case 2: | ||
442 | chp_cfg_cancel_deconfigure(chpid); | ||
443 | break; | ||
444 | } | ||
445 | } | ||
446 | } | ||
538 | 447 | ||
448 | static void chsc_process_sei(struct chsc_sei_area *sei_area) | ||
449 | { | ||
539 | /* Check if we might have lost some information. */ | 450 | /* Check if we might have lost some information. */ |
540 | if (sei_area->flags & 0x40) | 451 | if (sei_area->flags & 0x40) { |
541 | CIO_CRW_EVENT(2, "chsc: event overflow\n"); | 452 | CIO_CRW_EVENT(2, "chsc: event overflow\n"); |
453 | css_schedule_eval_all(); | ||
454 | } | ||
542 | /* which kind of information was stored? */ | 455 | /* which kind of information was stored? */ |
543 | rc = 0; | ||
544 | switch (sei_area->cc) { | 456 | switch (sei_area->cc) { |
545 | case 1: /* link incident*/ | 457 | case 1: /* link incident*/ |
546 | rc = chsc_process_sei_link_incident(sei_area); | 458 | chsc_process_sei_link_incident(sei_area); |
547 | break; | 459 | break; |
548 | case 2: /* i/o resource accessibility */ | 460 | chsc_process_sei_res_acc(sei_area); |
549 | rc = chsc_process_sei_res_acc(sei_area); | 461 | chsc_process_sei_res_acc(sei_area); |
462 | break; | ||
463 | case 8: /* channel-path-configuration notification */ | ||
464 | chsc_process_sei_chp_config(sei_area); | ||
550 | break; | 465 | break; |
551 | default: /* other stuff */ | 466 | default: /* other stuff */ |
552 | CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", | 467 | CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", |
553 | sei_area->cc); | 468 | sei_area->cc); |
554 | break; | 469 | break; |
555 | } | 470 | } |
556 | |||
557 | return rc; | ||
558 | } | 471 | } |
559 | 472 | ||
560 | int chsc_process_crw(void) | 473 | void chsc_process_crw(void) |
561 | { | 474 | { |
562 | struct chsc_sei_area *sei_area; | 475 | struct chsc_sei_area *sei_area; |
563 | int ret; | ||
564 | int rc; | ||
565 | 476 | ||
566 | if (!sei_page) | 477 | if (!sei_page) |
567 | return 0; | 478 | return; |
568 | /* Access to sei_page is serialized through machine check handler | 479 | /* Access to sei_page is serialized through machine check handler |
569 | * thread, so no need for locking. */ | 480 | * thread, so no need for locking. */ |
570 | sei_area = sei_page; | 481 | sei_area = sei_page; |
571 | 482 | ||
572 | CIO_TRACE_EVENT( 2, "prcss"); | 483 | CIO_TRACE_EVENT( 2, "prcss"); |
573 | ret = 0; | ||
574 | do { | 484 | do { |
575 | memset(sei_area, 0, sizeof(*sei_area)); | 485 | memset(sei_area, 0, sizeof(*sei_area)); |
576 | sei_area->request.length = 0x0010; | 486 | sei_area->request.length = 0x0010; |
@@ -580,37 +490,26 @@ int chsc_process_crw(void) | |||
580 | 490 | ||
581 | if (sei_area->response.code == 0x0001) { | 491 | if (sei_area->response.code == 0x0001) { |
582 | CIO_CRW_EVENT(4, "chsc: sei successful\n"); | 492 | CIO_CRW_EVENT(4, "chsc: sei successful\n"); |
583 | rc = chsc_process_sei(sei_area); | 493 | chsc_process_sei(sei_area); |
584 | if (rc) | ||
585 | ret = rc; | ||
586 | } else { | 494 | } else { |
587 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", | 495 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", |
588 | sei_area->response.code); | 496 | sei_area->response.code); |
589 | ret = 0; | ||
590 | break; | 497 | break; |
591 | } | 498 | } |
592 | } while (sei_area->flags & 0x80); | 499 | } while (sei_area->flags & 0x80); |
593 | |||
594 | return ret; | ||
595 | } | 500 | } |
596 | 501 | ||
597 | static int | 502 | static int |
598 | __chp_add_new_sch(struct subchannel_id schid) | 503 | __chp_add_new_sch(struct subchannel_id schid) |
599 | { | 504 | { |
600 | struct schib schib; | 505 | struct schib schib; |
601 | int ret; | ||
602 | 506 | ||
603 | if (stsch_err(schid, &schib)) | 507 | if (stsch_err(schid, &schib)) |
604 | /* We're through */ | 508 | /* We're through */ |
605 | return need_rescan ? -EAGAIN : -ENXIO; | 509 | return -ENXIO; |
606 | 510 | ||
607 | /* Put it on the slow path. */ | 511 | /* Put it on the slow path. */ |
608 | ret = css_enqueue_subchannel_slow(schid); | 512 | css_schedule_eval(schid); |
609 | if (ret) { | ||
610 | css_clear_subchannel_slow_list(); | ||
611 | need_rescan = 1; | ||
612 | return -EAGAIN; | ||
613 | } | ||
614 | return 0; | 513 | return 0; |
615 | } | 514 | } |
616 | 515 | ||
@@ -619,10 +518,10 @@ static int | |||
619 | __chp_add(struct subchannel_id schid, void *data) | 518 | __chp_add(struct subchannel_id schid, void *data) |
620 | { | 519 | { |
621 | int i, mask; | 520 | int i, mask; |
622 | struct channel_path *chp; | 521 | struct chp_id *chpid; |
623 | struct subchannel *sch; | 522 | struct subchannel *sch; |
624 | 523 | ||
625 | chp = data; | 524 | chpid = data; |
626 | sch = get_subchannel_by_schid(schid); | 525 | sch = get_subchannel_by_schid(schid); |
627 | if (!sch) | 526 | if (!sch) |
628 | /* Check if the subchannel is now available. */ | 527 | /* Check if the subchannel is now available. */ |
@@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data) | |||
631 | for (i=0; i<8; i++) { | 530 | for (i=0; i<8; i++) { |
632 | mask = 0x80 >> i; | 531 | mask = 0x80 >> i; |
633 | if ((sch->schib.pmcw.pim & mask) && | 532 | if ((sch->schib.pmcw.pim & mask) && |
634 | (sch->schib.pmcw.chpid[i] == chp->id)) { | 533 | (sch->schib.pmcw.chpid[i] == chpid->id)) { |
635 | if (stsch(sch->schid, &sch->schib) != 0) { | 534 | if (stsch(sch->schid, &sch->schib) != 0) { |
636 | /* Endgame. */ | 535 | /* Endgame. */ |
637 | spin_unlock_irq(sch->lock); | 536 | spin_unlock_irq(sch->lock); |
@@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data) | |||
657 | return 0; | 556 | return 0; |
658 | } | 557 | } |
659 | 558 | ||
660 | static int | 559 | void chsc_chp_online(struct chp_id chpid) |
661 | chp_add(int chpid) | ||
662 | { | 560 | { |
663 | int rc; | ||
664 | char dbf_txt[15]; | 561 | char dbf_txt[15]; |
665 | struct device *dev; | ||
666 | 562 | ||
667 | if (!get_chp_status(chpid)) | 563 | sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); |
668 | return 0; /* no need to do the rest */ | ||
669 | |||
670 | sprintf(dbf_txt, "cadd%x", chpid); | ||
671 | CIO_TRACE_EVENT(2, dbf_txt); | 564 | CIO_TRACE_EVENT(2, dbf_txt); |
672 | 565 | ||
673 | dev = get_device(&css[0]->chps[chpid]->dev); | 566 | if (chp_get_status(chpid) != 0) |
674 | rc = for_each_subchannel(__chp_add, to_channelpath(dev)); | 567 | for_each_subchannel(__chp_add, &chpid); |
675 | if (css_slow_subchannels_exist()) | ||
676 | rc = -EAGAIN; | ||
677 | if (rc != -EAGAIN) | ||
678 | rc = 0; | ||
679 | put_device(dev); | ||
680 | return rc; | ||
681 | } | 568 | } |
682 | 569 | ||
683 | /* | 570 | static void __s390_subchannel_vary_chpid(struct subchannel *sch, |
684 | * Handling of crw machine checks with channel path source. | 571 | struct chp_id chpid, int on) |
685 | */ | ||
686 | int | ||
687 | chp_process_crw(int chpid, int on) | ||
688 | { | ||
689 | if (on == 0) { | ||
690 | /* Path has gone. We use the link incident routine.*/ | ||
691 | s390_set_chpid_offline(chpid); | ||
692 | return 0; /* De-register is async anyway. */ | ||
693 | } | ||
694 | /* | ||
695 | * Path has come. Allocate a new channel path structure, | ||
696 | * if needed. | ||
697 | */ | ||
698 | if (get_chp_status(chpid) < 0) | ||
699 | new_channel_path(chpid); | ||
700 | /* Avoid the extra overhead in process_rec_acc. */ | ||
701 | return chp_add(chpid); | ||
702 | } | ||
703 | |||
704 | static int check_for_io_on_path(struct subchannel *sch, int index) | ||
705 | { | ||
706 | int cc; | ||
707 | |||
708 | cc = stsch(sch->schid, &sch->schib); | ||
709 | if (cc) | ||
710 | return 0; | ||
711 | if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) | ||
712 | return 1; | ||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | static void terminate_internal_io(struct subchannel *sch) | ||
717 | { | ||
718 | if (cio_clear(sch)) { | ||
719 | /* Recheck device in case clear failed. */ | ||
720 | sch->lpm = 0; | ||
721 | if (device_trigger_verify(sch) != 0) { | ||
722 | if(css_enqueue_subchannel_slow(sch->schid)) { | ||
723 | css_clear_subchannel_slow_list(); | ||
724 | need_rescan = 1; | ||
725 | } | ||
726 | } | ||
727 | return; | ||
728 | } | ||
729 | /* Request retry of internal operation. */ | ||
730 | device_set_intretry(sch); | ||
731 | /* Call handler. */ | ||
732 | if (sch->driver && sch->driver->termination) | ||
733 | sch->driver->termination(&sch->dev); | ||
734 | } | ||
735 | |||
736 | static void | ||
737 | __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) | ||
738 | { | 572 | { |
739 | int chp, old_lpm; | 573 | int chp, old_lpm; |
574 | int mask; | ||
740 | unsigned long flags; | 575 | unsigned long flags; |
741 | 576 | ||
742 | if (!sch->ssd_info.valid) | ||
743 | return; | ||
744 | |||
745 | spin_lock_irqsave(sch->lock, flags); | 577 | spin_lock_irqsave(sch->lock, flags); |
746 | old_lpm = sch->lpm; | 578 | old_lpm = sch->lpm; |
747 | for (chp = 0; chp < 8; chp++) { | 579 | for (chp = 0; chp < 8; chp++) { |
748 | if (sch->ssd_info.chpid[chp] != chpid) | 580 | mask = 0x80 >> chp; |
581 | if (!(sch->ssd_info.path_mask & mask)) | ||
582 | continue; | ||
583 | if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid)) | ||
749 | continue; | 584 | continue; |
750 | 585 | ||
751 | if (on) { | 586 | if (on) { |
752 | sch->opm |= (0x80 >> chp); | 587 | sch->opm |= mask; |
753 | sch->lpm |= (0x80 >> chp); | 588 | sch->lpm |= mask; |
754 | if (!old_lpm) | 589 | if (!old_lpm) |
755 | device_trigger_reprobe(sch); | 590 | device_trigger_reprobe(sch); |
756 | else if (sch->driver && sch->driver->verify) | 591 | else if (sch->driver && sch->driver->verify) |
757 | sch->driver->verify(&sch->dev); | 592 | sch->driver->verify(&sch->dev); |
758 | break; | 593 | break; |
759 | } | 594 | } |
760 | sch->opm &= ~(0x80 >> chp); | 595 | sch->opm &= ~mask; |
761 | sch->lpm &= ~(0x80 >> chp); | 596 | sch->lpm &= ~mask; |
762 | if (check_for_io_on_path(sch, chp)) { | 597 | if (check_for_io_on_path(sch, mask)) { |
763 | if (device_is_online(sch)) | 598 | if (device_is_online(sch)) |
764 | /* Path verification is done after killing. */ | 599 | /* Path verification is done after killing. */ |
765 | device_kill_io(sch); | 600 | device_kill_io(sch); |
766 | else | 601 | else { |
767 | /* Kill and retry internal I/O. */ | 602 | /* Kill and retry internal I/O. */ |
768 | terminate_internal_io(sch); | 603 | terminate_internal_io(sch); |
769 | } else if (!sch->lpm) { | 604 | /* Re-start path verification. */ |
770 | if (device_trigger_verify(sch) != 0) { | 605 | if (sch->driver && sch->driver->verify) |
771 | if (css_enqueue_subchannel_slow(sch->schid)) { | 606 | sch->driver->verify(&sch->dev); |
772 | css_clear_subchannel_slow_list(); | ||
773 | need_rescan = 1; | ||
774 | } | ||
775 | } | 607 | } |
608 | } else if (!sch->lpm) { | ||
609 | if (device_trigger_verify(sch) != 0) | ||
610 | css_schedule_eval(sch->schid); | ||
776 | } else if (sch->driver && sch->driver->verify) | 611 | } else if (sch->driver && sch->driver->verify) |
777 | sch->driver->verify(&sch->dev); | 612 | sch->driver->verify(&sch->dev); |
778 | break; | 613 | break; |
@@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) | |||
780 | spin_unlock_irqrestore(sch->lock, flags); | 615 | spin_unlock_irqrestore(sch->lock, flags); |
781 | } | 616 | } |
782 | 617 | ||
783 | static int | 618 | static int s390_subchannel_vary_chpid_off(struct device *dev, void *data) |
784 | s390_subchannel_vary_chpid_off(struct device *dev, void *data) | ||
785 | { | 619 | { |
786 | struct subchannel *sch; | 620 | struct subchannel *sch; |
787 | __u8 *chpid; | 621 | struct chp_id *chpid; |
788 | 622 | ||
789 | sch = to_subchannel(dev); | 623 | sch = to_subchannel(dev); |
790 | chpid = data; | 624 | chpid = data; |
@@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data) | |||
793 | return 0; | 627 | return 0; |
794 | } | 628 | } |
795 | 629 | ||
796 | static int | 630 | static int s390_subchannel_vary_chpid_on(struct device *dev, void *data) |
797 | s390_subchannel_vary_chpid_on(struct device *dev, void *data) | ||
798 | { | 631 | { |
799 | struct subchannel *sch; | 632 | struct subchannel *sch; |
800 | __u8 *chpid; | 633 | struct chp_id *chpid; |
801 | 634 | ||
802 | sch = to_subchannel(dev); | 635 | sch = to_subchannel(dev); |
803 | chpid = data; | 636 | chpid = data; |
@@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) | |||
821 | /* We're through */ | 654 | /* We're through */ |
822 | return -ENXIO; | 655 | return -ENXIO; |
823 | /* Put it on the slow path. */ | 656 | /* Put it on the slow path. */ |
824 | if (css_enqueue_subchannel_slow(schid)) { | 657 | css_schedule_eval(schid); |
825 | css_clear_subchannel_slow_list(); | ||
826 | need_rescan = 1; | ||
827 | return -EAGAIN; | ||
828 | } | ||
829 | return 0; | 658 | return 0; |
830 | } | 659 | } |
831 | 660 | ||
832 | /* | 661 | /** |
833 | * Function: s390_vary_chpid | 662 | * chsc_chp_vary - propagate channel-path vary operation to subchannels |
834 | * Varies the specified chpid online or offline | 663 | * @chpid: channel-path ID |
664 | * @on: non-zero for vary online, zero for vary offline | ||
835 | */ | 665 | */ |
836 | static int | 666 | int chsc_chp_vary(struct chp_id chpid, int on) |
837 | s390_vary_chpid( __u8 chpid, int on) | ||
838 | { | 667 | { |
839 | char dbf_text[15]; | ||
840 | int status; | ||
841 | |||
842 | sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); | ||
843 | CIO_TRACE_EVENT( 2, dbf_text); | ||
844 | |||
845 | status = get_chp_status(chpid); | ||
846 | if (status < 0) { | ||
847 | printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid); | ||
848 | return -EINVAL; | ||
849 | } | ||
850 | |||
851 | if (!on && !status) { | ||
852 | printk(KERN_ERR "chpid %x is already offline\n", chpid); | ||
853 | return -EINVAL; | ||
854 | } | ||
855 | |||
856 | set_chp_logically_online(chpid, on); | ||
857 | |||
858 | /* | 668 | /* |
859 | * Redo PathVerification on the devices the chpid connects to | 669 | * Redo PathVerification on the devices the chpid connects to |
860 | */ | 670 | */ |
@@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on) | |||
865 | if (on) | 675 | if (on) |
866 | /* Scan for new devices on varied on path. */ | 676 | /* Scan for new devices on varied on path. */ |
867 | for_each_subchannel(__s390_vary_chpid_on, NULL); | 677 | for_each_subchannel(__s390_vary_chpid_on, NULL); |
868 | if (need_rescan || css_slow_subchannels_exist()) | ||
869 | queue_work(slow_path_wq, &slow_path_work); | ||
870 | return 0; | 678 | return 0; |
871 | } | 679 | } |
872 | 680 | ||
873 | /* | ||
874 | * Channel measurement related functions | ||
875 | */ | ||
876 | static ssize_t | ||
877 | chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off, | ||
878 | size_t count) | ||
879 | { | ||
880 | struct channel_path *chp; | ||
881 | unsigned int size; | ||
882 | |||
883 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
884 | if (!chp->cmg_chars) | ||
885 | return 0; | ||
886 | |||
887 | size = sizeof(struct cmg_chars); | ||
888 | |||
889 | if (off > size) | ||
890 | return 0; | ||
891 | if (off + count > size) | ||
892 | count = size - off; | ||
893 | memcpy(buf, chp->cmg_chars + off, count); | ||
894 | return count; | ||
895 | } | ||
896 | |||
897 | static struct bin_attribute chp_measurement_chars_attr = { | ||
898 | .attr = { | ||
899 | .name = "measurement_chars", | ||
900 | .mode = S_IRUSR, | ||
901 | .owner = THIS_MODULE, | ||
902 | }, | ||
903 | .size = sizeof(struct cmg_chars), | ||
904 | .read = chp_measurement_chars_read, | ||
905 | }; | ||
906 | |||
907 | static void | ||
908 | chp_measurement_copy_block(struct cmg_entry *buf, | ||
909 | struct channel_subsystem *css, int chpid) | ||
910 | { | ||
911 | void *area; | ||
912 | struct cmg_entry *entry, reference_buf; | ||
913 | int idx; | ||
914 | |||
915 | if (chpid < 128) { | ||
916 | area = css->cub_addr1; | ||
917 | idx = chpid; | ||
918 | } else { | ||
919 | area = css->cub_addr2; | ||
920 | idx = chpid - 128; | ||
921 | } | ||
922 | entry = area + (idx * sizeof(struct cmg_entry)); | ||
923 | do { | ||
924 | memcpy(buf, entry, sizeof(*entry)); | ||
925 | memcpy(&reference_buf, entry, sizeof(*entry)); | ||
926 | } while (reference_buf.values[0] != buf->values[0]); | ||
927 | } | ||
928 | |||
929 | static ssize_t | ||
930 | chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count) | ||
931 | { | ||
932 | struct channel_path *chp; | ||
933 | struct channel_subsystem *css; | ||
934 | unsigned int size; | ||
935 | |||
936 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
937 | css = to_css(chp->dev.parent); | ||
938 | |||
939 | size = sizeof(struct cmg_entry); | ||
940 | |||
941 | /* Only allow single reads. */ | ||
942 | if (off || count < size) | ||
943 | return 0; | ||
944 | chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id); | ||
945 | count = size; | ||
946 | return count; | ||
947 | } | ||
948 | |||
949 | static struct bin_attribute chp_measurement_attr = { | ||
950 | .attr = { | ||
951 | .name = "measurement", | ||
952 | .mode = S_IRUSR, | ||
953 | .owner = THIS_MODULE, | ||
954 | }, | ||
955 | .size = sizeof(struct cmg_entry), | ||
956 | .read = chp_measurement_read, | ||
957 | }; | ||
958 | |||
959 | static void | ||
960 | chsc_remove_chp_cmg_attr(struct channel_path *chp) | ||
961 | { | ||
962 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
963 | device_remove_bin_file(&chp->dev, &chp_measurement_attr); | ||
964 | } | ||
965 | |||
966 | static int | ||
967 | chsc_add_chp_cmg_attr(struct channel_path *chp) | ||
968 | { | ||
969 | int ret; | ||
970 | |||
971 | ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
972 | if (ret) | ||
973 | return ret; | ||
974 | ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); | ||
975 | if (ret) | ||
976 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
977 | return ret; | ||
978 | } | ||
979 | |||
980 | static void | 681 | static void |
981 | chsc_remove_cmg_attr(struct channel_subsystem *css) | 682 | chsc_remove_cmg_attr(struct channel_subsystem *css) |
982 | { | 683 | { |
@@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css) | |||
985 | for (i = 0; i <= __MAX_CHPID; i++) { | 686 | for (i = 0; i <= __MAX_CHPID; i++) { |
986 | if (!css->chps[i]) | 687 | if (!css->chps[i]) |
987 | continue; | 688 | continue; |
988 | chsc_remove_chp_cmg_attr(css->chps[i]); | 689 | chp_remove_cmg_attr(css->chps[i]); |
989 | } | 690 | } |
990 | } | 691 | } |
991 | 692 | ||
@@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css) | |||
998 | for (i = 0; i <= __MAX_CHPID; i++) { | 699 | for (i = 0; i <= __MAX_CHPID; i++) { |
999 | if (!css->chps[i]) | 700 | if (!css->chps[i]) |
1000 | continue; | 701 | continue; |
1001 | ret = chsc_add_chp_cmg_attr(css->chps[i]); | 702 | ret = chp_add_cmg_attr(css->chps[i]); |
1002 | if (ret) | 703 | if (ret) |
1003 | goto cleanup; | 704 | goto cleanup; |
1004 | } | 705 | } |
@@ -1007,12 +708,11 @@ cleanup: | |||
1007 | for (--i; i >= 0; i--) { | 708 | for (--i; i >= 0; i--) { |
1008 | if (!css->chps[i]) | 709 | if (!css->chps[i]) |
1009 | continue; | 710 | continue; |
1010 | chsc_remove_chp_cmg_attr(css->chps[i]); | 711 | chp_remove_cmg_attr(css->chps[i]); |
1011 | } | 712 | } |
1012 | return ret; | 713 | return ret; |
1013 | } | 714 | } |
1014 | 715 | ||
1015 | |||
1016 | static int | 716 | static int |
1017 | __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | 717 | __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) |
1018 | { | 718 | { |
@@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable) | |||
1118 | } else | 818 | } else |
1119 | chsc_remove_cmg_attr(css); | 819 | chsc_remove_cmg_attr(css); |
1120 | } | 820 | } |
1121 | if (enable && !css->cm_enabled) { | 821 | if (!css->cm_enabled) { |
1122 | free_page((unsigned long)css->cub_addr1); | 822 | free_page((unsigned long)css->cub_addr1); |
1123 | free_page((unsigned long)css->cub_addr2); | 823 | free_page((unsigned long)css->cub_addr2); |
1124 | } | 824 | } |
@@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable) | |||
1127 | return ret; | 827 | return ret; |
1128 | } | 828 | } |
1129 | 829 | ||
1130 | /* | 830 | int chsc_determine_channel_path_description(struct chp_id chpid, |
1131 | * Files for the channel path entries. | 831 | struct channel_path_desc *desc) |
1132 | */ | ||
1133 | static ssize_t | ||
1134 | chp_status_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1135 | { | ||
1136 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
1137 | |||
1138 | if (!chp) | ||
1139 | return 0; | ||
1140 | return (get_chp_status(chp->id) ? sprintf(buf, "online\n") : | ||
1141 | sprintf(buf, "offline\n")); | ||
1142 | } | ||
1143 | |||
1144 | static ssize_t | ||
1145 | chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | ||
1146 | { | ||
1147 | struct channel_path *cp = container_of(dev, struct channel_path, dev); | ||
1148 | char cmd[10]; | ||
1149 | int num_args; | ||
1150 | int error; | ||
1151 | |||
1152 | num_args = sscanf(buf, "%5s", cmd); | ||
1153 | if (!num_args) | ||
1154 | return count; | ||
1155 | |||
1156 | if (!strnicmp(cmd, "on", 2)) | ||
1157 | error = s390_vary_chpid(cp->id, 1); | ||
1158 | else if (!strnicmp(cmd, "off", 3)) | ||
1159 | error = s390_vary_chpid(cp->id, 0); | ||
1160 | else | ||
1161 | error = -EINVAL; | ||
1162 | |||
1163 | return error < 0 ? error : count; | ||
1164 | |||
1165 | } | ||
1166 | |||
1167 | static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); | ||
1168 | |||
1169 | static ssize_t | ||
1170 | chp_type_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1171 | { | ||
1172 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
1173 | |||
1174 | if (!chp) | ||
1175 | return 0; | ||
1176 | return sprintf(buf, "%x\n", chp->desc.desc); | ||
1177 | } | ||
1178 | |||
1179 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); | ||
1180 | |||
1181 | static ssize_t | ||
1182 | chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1183 | { | ||
1184 | struct channel_path *chp = to_channelpath(dev); | ||
1185 | |||
1186 | if (!chp) | ||
1187 | return 0; | ||
1188 | if (chp->cmg == -1) /* channel measurements not available */ | ||
1189 | return sprintf(buf, "unknown\n"); | ||
1190 | return sprintf(buf, "%x\n", chp->cmg); | ||
1191 | } | ||
1192 | |||
1193 | static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); | ||
1194 | |||
1195 | static ssize_t | ||
1196 | chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1197 | { | ||
1198 | struct channel_path *chp = to_channelpath(dev); | ||
1199 | |||
1200 | if (!chp) | ||
1201 | return 0; | ||
1202 | if (chp->shared == -1) /* channel measurements not available */ | ||
1203 | return sprintf(buf, "unknown\n"); | ||
1204 | return sprintf(buf, "%x\n", chp->shared); | ||
1205 | } | ||
1206 | |||
1207 | static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); | ||
1208 | |||
1209 | static struct attribute * chp_attrs[] = { | ||
1210 | &dev_attr_status.attr, | ||
1211 | &dev_attr_type.attr, | ||
1212 | &dev_attr_cmg.attr, | ||
1213 | &dev_attr_shared.attr, | ||
1214 | NULL, | ||
1215 | }; | ||
1216 | |||
1217 | static struct attribute_group chp_attr_group = { | ||
1218 | .attrs = chp_attrs, | ||
1219 | }; | ||
1220 | |||
1221 | static void | ||
1222 | chp_release(struct device *dev) | ||
1223 | { | ||
1224 | struct channel_path *cp; | ||
1225 | |||
1226 | cp = container_of(dev, struct channel_path, dev); | ||
1227 | kfree(cp); | ||
1228 | } | ||
1229 | |||
1230 | static int | ||
1231 | chsc_determine_channel_path_description(int chpid, | ||
1232 | struct channel_path_desc *desc) | ||
1233 | { | 832 | { |
1234 | int ccode, ret; | 833 | int ccode, ret; |
1235 | 834 | ||
@@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid, | |||
1252 | scpd_area->request.length = 0x0010; | 851 | scpd_area->request.length = 0x0010; |
1253 | scpd_area->request.code = 0x0002; | 852 | scpd_area->request.code = 0x0002; |
1254 | 853 | ||
1255 | scpd_area->first_chpid = chpid; | 854 | scpd_area->first_chpid = chpid.id; |
1256 | scpd_area->last_chpid = chpid; | 855 | scpd_area->last_chpid = chpid.id; |
1257 | 856 | ||
1258 | ccode = chsc(scpd_area); | 857 | ccode = chsc(scpd_area); |
1259 | if (ccode > 0) { | 858 | if (ccode > 0) { |
@@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, | |||
1316 | } | 915 | } |
1317 | } | 916 | } |
1318 | 917 | ||
1319 | static int | 918 | int chsc_get_channel_measurement_chars(struct channel_path *chp) |
1320 | chsc_get_channel_measurement_chars(struct channel_path *chp) | ||
1321 | { | 919 | { |
1322 | int ccode, ret; | 920 | int ccode, ret; |
1323 | 921 | ||
@@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
1349 | scmc_area->request.length = 0x0010; | 947 | scmc_area->request.length = 0x0010; |
1350 | scmc_area->request.code = 0x0022; | 948 | scmc_area->request.code = 0x0022; |
1351 | 949 | ||
1352 | scmc_area->first_chpid = chp->id; | 950 | scmc_area->first_chpid = chp->chpid.id; |
1353 | scmc_area->last_chpid = chp->id; | 951 | scmc_area->last_chpid = chp->chpid.id; |
1354 | 952 | ||
1355 | ccode = chsc(scmc_area); | 953 | ccode = chsc(scmc_area); |
1356 | if (ccode > 0) { | 954 | if (ccode > 0) { |
@@ -1392,94 +990,6 @@ out: | |||
1392 | return ret; | 990 | return ret; |
1393 | } | 991 | } |
1394 | 992 | ||
1395 | /* | ||
1396 | * Entries for chpids on the system bus. | ||
1397 | * This replaces /proc/chpids. | ||
1398 | */ | ||
1399 | static int | ||
1400 | new_channel_path(int chpid) | ||
1401 | { | ||
1402 | struct channel_path *chp; | ||
1403 | int ret; | ||
1404 | |||
1405 | chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); | ||
1406 | if (!chp) | ||
1407 | return -ENOMEM; | ||
1408 | |||
1409 | /* fill in status, etc. */ | ||
1410 | chp->id = chpid; | ||
1411 | chp->state = 1; | ||
1412 | chp->dev.parent = &css[0]->device; | ||
1413 | chp->dev.release = chp_release; | ||
1414 | snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); | ||
1415 | |||
1416 | /* Obtain channel path description and fill it in. */ | ||
1417 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); | ||
1418 | if (ret) | ||
1419 | goto out_free; | ||
1420 | /* Get channel-measurement characteristics. */ | ||
1421 | if (css_characteristics_avail && css_chsc_characteristics.scmc | ||
1422 | && css_chsc_characteristics.secm) { | ||
1423 | ret = chsc_get_channel_measurement_chars(chp); | ||
1424 | if (ret) | ||
1425 | goto out_free; | ||
1426 | } else { | ||
1427 | static int msg_done; | ||
1428 | |||
1429 | if (!msg_done) { | ||
1430 | printk(KERN_WARNING "cio: Channel measurements not " | ||
1431 | "available, continuing.\n"); | ||
1432 | msg_done = 1; | ||
1433 | } | ||
1434 | chp->cmg = -1; | ||
1435 | } | ||
1436 | |||
1437 | /* make it known to the system */ | ||
1438 | ret = device_register(&chp->dev); | ||
1439 | if (ret) { | ||
1440 | printk(KERN_WARNING "%s: could not register %02x\n", | ||
1441 | __func__, chpid); | ||
1442 | goto out_free; | ||
1443 | } | ||
1444 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); | ||
1445 | if (ret) { | ||
1446 | device_unregister(&chp->dev); | ||
1447 | goto out_free; | ||
1448 | } | ||
1449 | mutex_lock(&css[0]->mutex); | ||
1450 | if (css[0]->cm_enabled) { | ||
1451 | ret = chsc_add_chp_cmg_attr(chp); | ||
1452 | if (ret) { | ||
1453 | sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); | ||
1454 | device_unregister(&chp->dev); | ||
1455 | mutex_unlock(&css[0]->mutex); | ||
1456 | goto out_free; | ||
1457 | } | ||
1458 | } | ||
1459 | css[0]->chps[chpid] = chp; | ||
1460 | mutex_unlock(&css[0]->mutex); | ||
1461 | return ret; | ||
1462 | out_free: | ||
1463 | kfree(chp); | ||
1464 | return ret; | ||
1465 | } | ||
1466 | |||
1467 | void * | ||
1468 | chsc_get_chp_desc(struct subchannel *sch, int chp_no) | ||
1469 | { | ||
1470 | struct channel_path *chp; | ||
1471 | struct channel_path_desc *desc; | ||
1472 | |||
1473 | chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]]; | ||
1474 | if (!chp) | ||
1475 | return NULL; | ||
1476 | desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); | ||
1477 | if (!desc) | ||
1478 | return NULL; | ||
1479 | memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); | ||
1480 | return desc; | ||
1481 | } | ||
1482 | |||
1483 | static int __init | 993 | static int __init |
1484 | chsc_alloc_sei_area(void) | 994 | chsc_alloc_sei_area(void) |
1485 | { | 995 | { |
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 0fb2b024208f..2ad81d11cf7b 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -1,9 +1,10 @@ | |||
1 | #ifndef S390_CHSC_H | 1 | #ifndef S390_CHSC_H |
2 | #define S390_CHSC_H | 2 | #define S390_CHSC_H |
3 | 3 | ||
4 | #define CHSC_SEI_ACC_CHPID 1 | 4 | #include <linux/types.h> |
5 | #define CHSC_SEI_ACC_LINKADDR 2 | 5 | #include <linux/device.h> |
6 | #define CHSC_SEI_ACC_FULLLINKADDR 3 | 6 | #include <asm/chpid.h> |
7 | #include "schid.h" | ||
7 | 8 | ||
8 | #define CHSC_SDA_OC_MSS 0x2 | 9 | #define CHSC_SDA_OC_MSS 0x2 |
9 | 10 | ||
@@ -33,23 +34,9 @@ struct channel_path_desc { | |||
33 | u8 chpp; | 34 | u8 chpp; |
34 | } __attribute__ ((packed)); | 35 | } __attribute__ ((packed)); |
35 | 36 | ||
36 | struct channel_path { | 37 | struct channel_path; |
37 | int id; | ||
38 | int state; | ||
39 | struct channel_path_desc desc; | ||
40 | /* Channel-measurement related stuff: */ | ||
41 | int cmg; | ||
42 | int shared; | ||
43 | void *cmg_chars; | ||
44 | struct device dev; | ||
45 | }; | ||
46 | 38 | ||
47 | extern void s390_process_css( void ); | 39 | extern void chsc_process_crw(void); |
48 | extern void chsc_validate_chpids(struct subchannel *); | ||
49 | extern void chpid_is_actually_online(int); | ||
50 | extern int css_get_ssd_info(struct subchannel *); | ||
51 | extern int chsc_process_crw(void); | ||
52 | extern int chp_process_crw(int, int); | ||
53 | 40 | ||
54 | struct css_general_char { | 41 | struct css_general_char { |
55 | u64 : 41; | 42 | u64 : 41; |
@@ -82,15 +69,26 @@ struct css_chsc_char { | |||
82 | extern struct css_general_char css_general_characteristics; | 69 | extern struct css_general_char css_general_characteristics; |
83 | extern struct css_chsc_char css_chsc_characteristics; | 70 | extern struct css_chsc_char css_chsc_characteristics; |
84 | 71 | ||
72 | struct chsc_ssd_info { | ||
73 | u8 path_mask; | ||
74 | u8 fla_valid_mask; | ||
75 | struct chp_id chpid[8]; | ||
76 | u16 fla[8]; | ||
77 | }; | ||
78 | extern int chsc_get_ssd_info(struct subchannel_id schid, | ||
79 | struct chsc_ssd_info *ssd); | ||
85 | extern int chsc_determine_css_characteristics(void); | 80 | extern int chsc_determine_css_characteristics(void); |
86 | extern int css_characteristics_avail; | 81 | extern int css_characteristics_avail; |
87 | 82 | ||
88 | extern void *chsc_get_chp_desc(struct subchannel*, int); | ||
89 | |||
90 | extern int chsc_enable_facility(int); | 83 | extern int chsc_enable_facility(int); |
91 | struct channel_subsystem; | 84 | struct channel_subsystem; |
92 | extern int chsc_secm(struct channel_subsystem *, int); | 85 | extern int chsc_secm(struct channel_subsystem *, int); |
93 | 86 | ||
94 | #define to_channelpath(device) container_of(device, struct channel_path, dev) | 87 | int chsc_chp_vary(struct chp_id chpid, int on); |
88 | int chsc_determine_channel_path_description(struct chp_id chpid, | ||
89 | struct channel_path_desc *desc); | ||
90 | void chsc_chp_online(struct chp_id chpid); | ||
91 | void chsc_chp_offline(struct chp_id chpid); | ||
92 | int chsc_get_channel_measurement_chars(struct channel_path *chp); | ||
95 | 93 | ||
96 | #endif | 94 | #endif |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 9cb129ab5be5..ea1defba5693 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/setup.h> | 22 | #include <asm/setup.h> |
23 | #include <asm/reset.h> | 23 | #include <asm/reset.h> |
24 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
25 | #include <asm/chpid.h> | ||
25 | #include "airq.h" | 26 | #include "airq.h" |
26 | #include "cio.h" | 27 | #include "cio.h" |
27 | #include "css.h" | 28 | #include "css.h" |
@@ -29,6 +30,7 @@ | |||
29 | #include "ioasm.h" | 30 | #include "ioasm.h" |
30 | #include "blacklist.h" | 31 | #include "blacklist.h" |
31 | #include "cio_debug.h" | 32 | #include "cio_debug.h" |
33 | #include "chp.h" | ||
32 | #include "../s390mach.h" | 34 | #include "../s390mach.h" |
33 | 35 | ||
34 | debug_info_t *cio_debug_msg_id; | 36 | debug_info_t *cio_debug_msg_id; |
@@ -592,9 +594,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
592 | err = -ENODEV; | 594 | err = -ENODEV; |
593 | goto out; | 595 | goto out; |
594 | } | 596 | } |
595 | sch->opm = 0xff; | 597 | if (cio_is_console(sch->schid)) |
596 | if (!cio_is_console(sch->schid)) | 598 | sch->opm = 0xff; |
597 | chsc_validate_chpids(sch); | 599 | else |
600 | sch->opm = chp_get_sch_opm(sch); | ||
598 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | 601 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
599 | 602 | ||
600 | CIO_DEBUG(KERN_INFO, 0, | 603 | CIO_DEBUG(KERN_INFO, 0, |
@@ -954,6 +957,7 @@ static void css_reset(void) | |||
954 | { | 957 | { |
955 | int i, ret; | 958 | int i, ret; |
956 | unsigned long long timeout; | 959 | unsigned long long timeout; |
960 | struct chp_id chpid; | ||
957 | 961 | ||
958 | /* Reset subchannels. */ | 962 | /* Reset subchannels. */ |
959 | for_each_subchannel(__shutdown_subchannel_easy, NULL); | 963 | for_each_subchannel(__shutdown_subchannel_easy, NULL); |
@@ -963,8 +967,10 @@ static void css_reset(void) | |||
963 | __ctl_set_bit(14, 28); | 967 | __ctl_set_bit(14, 28); |
964 | /* Temporarily reenable machine checks. */ | 968 | /* Temporarily reenable machine checks. */ |
965 | local_mcck_enable(); | 969 | local_mcck_enable(); |
970 | chp_id_init(&chpid); | ||
966 | for (i = 0; i <= __MAX_CHPID; i++) { | 971 | for (i = 0; i <= __MAX_CHPID; i++) { |
967 | ret = rchp(i); | 972 | chpid.id = i; |
973 | ret = rchp(chpid); | ||
968 | if ((ret == 0) || (ret == 2)) | 974 | if ((ret == 0) || (ret == 2)) |
969 | /* | 975 | /* |
970 | * rchp either succeeded, or another rchp is already | 976 | * rchp either succeeded, or another rchp is already |
@@ -1048,37 +1054,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid) | |||
1048 | do_reipl_asm(*((__u32*)&schid)); | 1054 | do_reipl_asm(*((__u32*)&schid)); |
1049 | } | 1055 | } |
1050 | 1056 | ||
1051 | static struct schib __initdata ipl_schib; | 1057 | int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) |
1052 | |||
1053 | /* | ||
1054 | * ipl_save_parameters gets called very early. It is not allowed to access | ||
1055 | * anything in the bss section at all. The bss section is not cleared yet, | ||
1056 | * but may contain some ipl parameters written by the firmware. | ||
1057 | * These parameters (if present) are copied to 0x2000. | ||
1058 | * To avoid corruption of the ipl parameters, all variables used by this | ||
1059 | * function must reside on the stack or in the data section. | ||
1060 | */ | ||
1061 | void ipl_save_parameters(void) | ||
1062 | { | 1058 | { |
1063 | struct subchannel_id schid; | 1059 | struct subchannel_id schid; |
1064 | unsigned int *ipl_ptr; | 1060 | struct schib schib; |
1065 | void *src, *dst; | ||
1066 | 1061 | ||
1067 | schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; | 1062 | schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; |
1068 | if (!schid.one) | 1063 | if (!schid.one) |
1069 | return; | 1064 | return -ENODEV; |
1070 | if (stsch(schid, &ipl_schib)) | 1065 | if (stsch(schid, &schib)) |
1071 | return; | 1066 | return -ENODEV; |
1072 | if (!ipl_schib.pmcw.dnv) | 1067 | if (!schib.pmcw.dnv) |
1073 | return; | 1068 | return -ENODEV; |
1074 | ipl_devno = ipl_schib.pmcw.dev; | 1069 | iplinfo->devno = schib.pmcw.dev; |
1075 | ipl_flags |= IPL_DEVNO_VALID; | 1070 | iplinfo->is_qdio = schib.pmcw.qf; |
1076 | if (!ipl_schib.pmcw.qf) | 1071 | return 0; |
1077 | return; | ||
1078 | ipl_flags |= IPL_PARMBLOCK_VALID; | ||
1079 | ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; | ||
1080 | src = (void *)(unsigned long)*ipl_ptr; | ||
1081 | dst = (void *)IPL_PARMBLOCK_ORIGIN; | ||
1082 | memmove(dst, src, PAGE_SIZE); | ||
1083 | *ipl_ptr = IPL_PARMBLOCK_ORIGIN; | ||
1084 | } | 1072 | } |
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 35154a210357..7446c39951a7 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
@@ -1,18 +1,11 @@ | |||
1 | #ifndef S390_CIO_H | 1 | #ifndef S390_CIO_H |
2 | #define S390_CIO_H | 2 | #define S390_CIO_H |
3 | 3 | ||
4 | #include "schid.h" | ||
5 | #include <linux/mutex.h> | 4 | #include <linux/mutex.h> |
6 | 5 | #include <linux/device.h> | |
7 | /* | 6 | #include <asm/chpid.h> |
8 | * where we put the ssd info | 7 | #include "chsc.h" |
9 | */ | 8 | #include "schid.h" |
10 | struct ssd_info { | ||
11 | __u8 valid:1; | ||
12 | __u8 type:7; /* subchannel type */ | ||
13 | __u8 chpid[8]; /* chpids */ | ||
14 | __u16 fla[8]; /* full link addresses */ | ||
15 | } __attribute__ ((packed)); | ||
16 | 9 | ||
17 | /* | 10 | /* |
18 | * path management control word | 11 | * path management control word |
@@ -108,7 +101,7 @@ struct subchannel { | |||
108 | struct schib schib; /* subchannel information block */ | 101 | struct schib schib; /* subchannel information block */ |
109 | struct orb orb; /* operation request block */ | 102 | struct orb orb; /* operation request block */ |
110 | struct ccw1 sense_ccw; /* static ccw for sense command */ | 103 | struct ccw1 sense_ccw; /* static ccw for sense command */ |
111 | struct ssd_info ssd_info; /* subchannel description */ | 104 | struct chsc_ssd_info ssd_info; /* subchannel description */ |
112 | struct device dev; /* entry in device tree */ | 105 | struct device dev; /* entry in device tree */ |
113 | struct css_driver *driver; | 106 | struct css_driver *driver; |
114 | } __attribute__ ((aligned(8))); | 107 | } __attribute__ ((aligned(8))); |
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 90b22faabbf7..28abd697be1a 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
@@ -476,7 +476,7 @@ struct cmb_area { | |||
476 | }; | 476 | }; |
477 | 477 | ||
478 | static struct cmb_area cmb_area = { | 478 | static struct cmb_area cmb_area = { |
479 | .lock = SPIN_LOCK_UNLOCKED, | 479 | .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock), |
480 | .list = LIST_HEAD_INIT(cmb_area.list), | 480 | .list = LIST_HEAD_INIT(cmb_area.list), |
481 | .num_channels = 1024, | 481 | .num_channels = 1024, |
482 | }; | 482 | }; |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index fe0ace7aece8..27c6d9e55b23 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -20,8 +20,9 @@ | |||
20 | #include "ioasm.h" | 20 | #include "ioasm.h" |
21 | #include "chsc.h" | 21 | #include "chsc.h" |
22 | #include "device.h" | 22 | #include "device.h" |
23 | #include "idset.h" | ||
24 | #include "chp.h" | ||
23 | 25 | ||
24 | int need_rescan = 0; | ||
25 | int css_init_done = 0; | 26 | int css_init_done = 0; |
26 | static int need_reprobe = 0; | 27 | static int need_reprobe = 0; |
27 | static int max_ssid = 0; | 28 | static int max_ssid = 0; |
@@ -125,8 +126,52 @@ void css_sch_device_unregister(struct subchannel *sch) | |||
125 | mutex_unlock(&sch->reg_mutex); | 126 | mutex_unlock(&sch->reg_mutex); |
126 | } | 127 | } |
127 | 128 | ||
128 | static int | 129 | static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) |
129 | css_register_subchannel(struct subchannel *sch) | 130 | { |
131 | int i; | ||
132 | int mask; | ||
133 | |||
134 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); | ||
135 | ssd->path_mask = pmcw->pim; | ||
136 | for (i = 0; i < 8; i++) { | ||
137 | mask = 0x80 >> i; | ||
138 | if (pmcw->pim & mask) { | ||
139 | chp_id_init(&ssd->chpid[i]); | ||
140 | ssd->chpid[i].id = pmcw->chpid[i]; | ||
141 | } | ||
142 | } | ||
143 | } | ||
144 | |||
145 | static void ssd_register_chpids(struct chsc_ssd_info *ssd) | ||
146 | { | ||
147 | int i; | ||
148 | int mask; | ||
149 | |||
150 | for (i = 0; i < 8; i++) { | ||
151 | mask = 0x80 >> i; | ||
152 | if (ssd->path_mask & mask) | ||
153 | if (!chp_is_registered(ssd->chpid[i])) | ||
154 | chp_new(ssd->chpid[i]); | ||
155 | } | ||
156 | } | ||
157 | |||
158 | void css_update_ssd_info(struct subchannel *sch) | ||
159 | { | ||
160 | int ret; | ||
161 | |||
162 | if (cio_is_console(sch->schid)) { | ||
163 | /* Console is initialized too early for functions requiring | ||
164 | * memory allocation. */ | ||
165 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); | ||
166 | } else { | ||
167 | ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); | ||
168 | if (ret) | ||
169 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); | ||
170 | ssd_register_chpids(&sch->ssd_info); | ||
171 | } | ||
172 | } | ||
173 | |||
174 | static int css_register_subchannel(struct subchannel *sch) | ||
130 | { | 175 | { |
131 | int ret; | 176 | int ret; |
132 | 177 | ||
@@ -135,9 +180,7 @@ css_register_subchannel(struct subchannel *sch) | |||
135 | sch->dev.bus = &css_bus_type; | 180 | sch->dev.bus = &css_bus_type; |
136 | sch->dev.release = &css_subchannel_release; | 181 | sch->dev.release = &css_subchannel_release; |
137 | sch->dev.groups = subch_attr_groups; | 182 | sch->dev.groups = subch_attr_groups; |
138 | 183 | css_update_ssd_info(sch); | |
139 | css_get_ssd_info(sch); | ||
140 | |||
141 | /* make it known to the system */ | 184 | /* make it known to the system */ |
142 | ret = css_sch_device_register(sch); | 185 | ret = css_sch_device_register(sch); |
143 | if (ret) { | 186 | if (ret) { |
@@ -306,7 +349,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | |||
306 | return css_probe_device(schid); | 349 | return css_probe_device(schid); |
307 | } | 350 | } |
308 | 351 | ||
309 | static int css_evaluate_subchannel(struct subchannel_id schid, int slow) | 352 | static void css_evaluate_subchannel(struct subchannel_id schid, int slow) |
310 | { | 353 | { |
311 | struct subchannel *sch; | 354 | struct subchannel *sch; |
312 | int ret; | 355 | int ret; |
@@ -317,53 +360,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow) | |||
317 | put_device(&sch->dev); | 360 | put_device(&sch->dev); |
318 | } else | 361 | } else |
319 | ret = css_evaluate_new_subchannel(schid, slow); | 362 | ret = css_evaluate_new_subchannel(schid, slow); |
320 | 363 | if (ret == -EAGAIN) | |
321 | return ret; | 364 | css_schedule_eval(schid); |
322 | } | 365 | } |
323 | 366 | ||
324 | static int | 367 | static struct idset *slow_subchannel_set; |
325 | css_rescan_devices(struct subchannel_id schid, void *data) | 368 | static spinlock_t slow_subchannel_lock; |
369 | |||
370 | static int __init slow_subchannel_init(void) | ||
326 | { | 371 | { |
327 | return css_evaluate_subchannel(schid, 1); | 372 | spin_lock_init(&slow_subchannel_lock); |
373 | slow_subchannel_set = idset_sch_new(); | ||
374 | if (!slow_subchannel_set) { | ||
375 | printk(KERN_WARNING "cio: could not allocate slow subchannel " | ||
376 | "set\n"); | ||
377 | return -ENOMEM; | ||
378 | } | ||
379 | return 0; | ||
328 | } | 380 | } |
329 | 381 | ||
330 | struct slow_subchannel { | 382 | subsys_initcall(slow_subchannel_init); |
331 | struct list_head slow_list; | ||
332 | struct subchannel_id schid; | ||
333 | }; | ||
334 | |||
335 | static LIST_HEAD(slow_subchannels_head); | ||
336 | static DEFINE_SPINLOCK(slow_subchannel_lock); | ||
337 | 383 | ||
338 | static void | 384 | static void css_slow_path_func(struct work_struct *unused) |
339 | css_trigger_slow_path(struct work_struct *unused) | ||
340 | { | 385 | { |
341 | CIO_TRACE_EVENT(4, "slowpath"); | 386 | struct subchannel_id schid; |
342 | |||
343 | if (need_rescan) { | ||
344 | need_rescan = 0; | ||
345 | for_each_subchannel(css_rescan_devices, NULL); | ||
346 | return; | ||
347 | } | ||
348 | 387 | ||
388 | CIO_TRACE_EVENT(4, "slowpath"); | ||
349 | spin_lock_irq(&slow_subchannel_lock); | 389 | spin_lock_irq(&slow_subchannel_lock); |
350 | while (!list_empty(&slow_subchannels_head)) { | 390 | init_subchannel_id(&schid); |
351 | struct slow_subchannel *slow_sch = | 391 | while (idset_sch_get_first(slow_subchannel_set, &schid)) { |
352 | list_entry(slow_subchannels_head.next, | 392 | idset_sch_del(slow_subchannel_set, schid); |
353 | struct slow_subchannel, slow_list); | ||
354 | |||
355 | list_del_init(slow_subchannels_head.next); | ||
356 | spin_unlock_irq(&slow_subchannel_lock); | 393 | spin_unlock_irq(&slow_subchannel_lock); |
357 | css_evaluate_subchannel(slow_sch->schid, 1); | 394 | css_evaluate_subchannel(schid, 1); |
358 | spin_lock_irq(&slow_subchannel_lock); | 395 | spin_lock_irq(&slow_subchannel_lock); |
359 | kfree(slow_sch); | ||
360 | } | 396 | } |
361 | spin_unlock_irq(&slow_subchannel_lock); | 397 | spin_unlock_irq(&slow_subchannel_lock); |
362 | } | 398 | } |
363 | 399 | ||
364 | DECLARE_WORK(slow_path_work, css_trigger_slow_path); | 400 | static DECLARE_WORK(slow_path_work, css_slow_path_func); |
365 | struct workqueue_struct *slow_path_wq; | 401 | struct workqueue_struct *slow_path_wq; |
366 | 402 | ||
403 | void css_schedule_eval(struct subchannel_id schid) | ||
404 | { | ||
405 | unsigned long flags; | ||
406 | |||
407 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
408 | idset_sch_add(slow_subchannel_set, schid); | ||
409 | queue_work(slow_path_wq, &slow_path_work); | ||
410 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
411 | } | ||
412 | |||
413 | void css_schedule_eval_all(void) | ||
414 | { | ||
415 | unsigned long flags; | ||
416 | |||
417 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
418 | idset_fill(slow_subchannel_set); | ||
419 | queue_work(slow_path_wq, &slow_path_work); | ||
420 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
421 | } | ||
422 | |||
367 | /* Reprobe subchannel if unregistered. */ | 423 | /* Reprobe subchannel if unregistered. */ |
368 | static int reprobe_subchannel(struct subchannel_id schid, void *data) | 424 | static int reprobe_subchannel(struct subchannel_id schid, void *data) |
369 | { | 425 | { |
@@ -426,33 +482,14 @@ void css_schedule_reprobe(void) | |||
426 | EXPORT_SYMBOL_GPL(css_schedule_reprobe); | 482 | EXPORT_SYMBOL_GPL(css_schedule_reprobe); |
427 | 483 | ||
428 | /* | 484 | /* |
429 | * Rescan for new devices. FIXME: This is slow. | ||
430 | * This function is called when we have lost CRWs due to overflows and we have | ||
431 | * to do subchannel housekeeping. | ||
432 | */ | ||
433 | void | ||
434 | css_reiterate_subchannels(void) | ||
435 | { | ||
436 | css_clear_subchannel_slow_list(); | ||
437 | need_rescan = 1; | ||
438 | } | ||
439 | |||
440 | /* | ||
441 | * Called from the machine check handler for subchannel report words. | 485 | * Called from the machine check handler for subchannel report words. |
442 | */ | 486 | */ |
443 | int | 487 | void css_process_crw(int rsid1, int rsid2) |
444 | css_process_crw(int rsid1, int rsid2) | ||
445 | { | 488 | { |
446 | int ret; | ||
447 | struct subchannel_id mchk_schid; | 489 | struct subchannel_id mchk_schid; |
448 | 490 | ||
449 | CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", | 491 | CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", |
450 | rsid1, rsid2); | 492 | rsid1, rsid2); |
451 | |||
452 | if (need_rescan) | ||
453 | /* We need to iterate all subchannels anyway. */ | ||
454 | return -EAGAIN; | ||
455 | |||
456 | init_subchannel_id(&mchk_schid); | 493 | init_subchannel_id(&mchk_schid); |
457 | mchk_schid.sch_no = rsid1; | 494 | mchk_schid.sch_no = rsid1; |
458 | if (rsid2 != 0) | 495 | if (rsid2 != 0) |
@@ -463,14 +500,7 @@ css_process_crw(int rsid1, int rsid2) | |||
463 | * use stsch() to find out if the subchannel in question has come | 500 | * use stsch() to find out if the subchannel in question has come |
464 | * or gone. | 501 | * or gone. |
465 | */ | 502 | */ |
466 | ret = css_evaluate_subchannel(mchk_schid, 0); | 503 | css_evaluate_subchannel(mchk_schid, 0); |
467 | if (ret == -EAGAIN) { | ||
468 | if (css_enqueue_subchannel_slow(mchk_schid)) { | ||
469 | css_clear_subchannel_slow_list(); | ||
470 | need_rescan = 1; | ||
471 | } | ||
472 | } | ||
473 | return ret; | ||
474 | } | 504 | } |
475 | 505 | ||
476 | static int __init | 506 | static int __init |
@@ -745,47 +775,6 @@ struct bus_type css_bus_type = { | |||
745 | 775 | ||
746 | subsys_initcall(init_channel_subsystem); | 776 | subsys_initcall(init_channel_subsystem); |
747 | 777 | ||
748 | int | ||
749 | css_enqueue_subchannel_slow(struct subchannel_id schid) | ||
750 | { | ||
751 | struct slow_subchannel *new_slow_sch; | ||
752 | unsigned long flags; | ||
753 | |||
754 | new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC); | ||
755 | if (!new_slow_sch) | ||
756 | return -ENOMEM; | ||
757 | new_slow_sch->schid = schid; | ||
758 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
759 | list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head); | ||
760 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
761 | return 0; | ||
762 | } | ||
763 | |||
764 | void | ||
765 | css_clear_subchannel_slow_list(void) | ||
766 | { | ||
767 | unsigned long flags; | ||
768 | |||
769 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
770 | while (!list_empty(&slow_subchannels_head)) { | ||
771 | struct slow_subchannel *slow_sch = | ||
772 | list_entry(slow_subchannels_head.next, | ||
773 | struct slow_subchannel, slow_list); | ||
774 | |||
775 | list_del_init(slow_subchannels_head.next); | ||
776 | kfree(slow_sch); | ||
777 | } | ||
778 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
779 | } | ||
780 | |||
781 | |||
782 | |||
783 | int | ||
784 | css_slow_subchannels_exist(void) | ||
785 | { | ||
786 | return (!list_empty(&slow_subchannels_head)); | ||
787 | } | ||
788 | |||
789 | MODULE_LICENSE("GPL"); | 778 | MODULE_LICENSE("GPL"); |
790 | EXPORT_SYMBOL(css_bus_type); | 779 | EXPORT_SYMBOL(css_bus_type); |
791 | EXPORT_SYMBOL_GPL(css_characteristics_avail); | 780 | EXPORT_SYMBOL_GPL(css_characteristics_avail); |
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index ca2bab932a8a..71fcfdc42800 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
@@ -4,8 +4,11 @@ | |||
4 | #include <linux/mutex.h> | 4 | #include <linux/mutex.h> |
5 | #include <linux/wait.h> | 5 | #include <linux/wait.h> |
6 | #include <linux/workqueue.h> | 6 | #include <linux/workqueue.h> |
7 | #include <linux/device.h> | ||
8 | #include <linux/types.h> | ||
7 | 9 | ||
8 | #include <asm/cio.h> | 10 | #include <asm/cio.h> |
11 | #include <asm/chpid.h> | ||
9 | 12 | ||
10 | #include "schid.h" | 13 | #include "schid.h" |
11 | 14 | ||
@@ -143,13 +146,12 @@ extern void css_sch_device_unregister(struct subchannel *); | |||
143 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); | 146 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); |
144 | extern int css_init_done; | 147 | extern int css_init_done; |
145 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); | 148 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); |
146 | extern int css_process_crw(int, int); | 149 | extern void css_process_crw(int, int); |
147 | extern void css_reiterate_subchannels(void); | 150 | extern void css_reiterate_subchannels(void); |
151 | void css_update_ssd_info(struct subchannel *sch); | ||
148 | 152 | ||
149 | #define __MAX_SUBCHANNEL 65535 | 153 | #define __MAX_SUBCHANNEL 65535 |
150 | #define __MAX_SSID 3 | 154 | #define __MAX_SSID 3 |
151 | #define __MAX_CHPID 255 | ||
152 | #define __MAX_CSSID 0 | ||
153 | 155 | ||
154 | struct channel_subsystem { | 156 | struct channel_subsystem { |
155 | u8 cssid; | 157 | u8 cssid; |
@@ -185,16 +187,12 @@ int device_trigger_verify(struct subchannel *sch); | |||
185 | void device_kill_pending_timer(struct subchannel *); | 187 | void device_kill_pending_timer(struct subchannel *); |
186 | 188 | ||
187 | /* Helper functions to build lists for the slow path. */ | 189 | /* Helper functions to build lists for the slow path. */ |
188 | extern int css_enqueue_subchannel_slow(struct subchannel_id schid); | 190 | void css_schedule_eval(struct subchannel_id schid); |
189 | void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); | 191 | void css_schedule_eval_all(void); |
190 | void css_clear_subchannel_slow_list(void); | ||
191 | int css_slow_subchannels_exist(void); | ||
192 | extern int need_rescan; | ||
193 | 192 | ||
194 | int sch_is_pseudo_sch(struct subchannel *); | 193 | int sch_is_pseudo_sch(struct subchannel *); |
195 | 194 | ||
196 | extern struct workqueue_struct *slow_path_wq; | 195 | extern struct workqueue_struct *slow_path_wq; |
197 | extern struct work_struct slow_path_work; | ||
198 | 196 | ||
199 | int subchannel_add_files (struct device *); | 197 | int subchannel_add_files (struct device *); |
200 | extern struct attribute_group *subch_attr_groups[]; | 198 | extern struct attribute_group *subch_attr_groups[]; |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e322111fb369..03355902c582 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -56,13 +56,12 @@ ccw_bus_match (struct device * dev, struct device_driver * drv) | |||
56 | /* Store modalias string delimited by prefix/suffix string into buffer with | 56 | /* Store modalias string delimited by prefix/suffix string into buffer with |
57 | * specified size. Return length of resulting string (excluding trailing '\0') | 57 | * specified size. Return length of resulting string (excluding trailing '\0') |
58 | * even if string doesn't fit buffer (snprintf semantics). */ | 58 | * even if string doesn't fit buffer (snprintf semantics). */ |
59 | static int snprint_alias(char *buf, size_t size, const char *prefix, | 59 | static int snprint_alias(char *buf, size_t size, |
60 | struct ccw_device_id *id, const char *suffix) | 60 | struct ccw_device_id *id, const char *suffix) |
61 | { | 61 | { |
62 | int len; | 62 | int len; |
63 | 63 | ||
64 | len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type, | 64 | len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model); |
65 | id->cu_model); | ||
66 | if (len > size) | 65 | if (len > size) |
67 | return len; | 66 | return len; |
68 | buf += len; | 67 | buf += len; |
@@ -85,53 +84,40 @@ static int ccw_uevent(struct device *dev, char **envp, int num_envp, | |||
85 | struct ccw_device *cdev = to_ccwdev(dev); | 84 | struct ccw_device *cdev = to_ccwdev(dev); |
86 | struct ccw_device_id *id = &(cdev->id); | 85 | struct ccw_device_id *id = &(cdev->id); |
87 | int i = 0; | 86 | int i = 0; |
88 | int len; | 87 | int len = 0; |
88 | int ret; | ||
89 | char modalias_buf[30]; | ||
89 | 90 | ||
90 | /* CU_TYPE= */ | 91 | /* CU_TYPE= */ |
91 | len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1; | 92 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
92 | if (len > buffer_size || i >= num_envp) | 93 | "CU_TYPE=%04X", id->cu_type); |
93 | return -ENOMEM; | 94 | if (ret) |
94 | envp[i++] = buffer; | 95 | return ret; |
95 | buffer += len; | ||
96 | buffer_size -= len; | ||
97 | 96 | ||
98 | /* CU_MODEL= */ | 97 | /* CU_MODEL= */ |
99 | len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1; | 98 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
100 | if (len > buffer_size || i >= num_envp) | 99 | "CU_MODEL=%02X", id->cu_model); |
101 | return -ENOMEM; | 100 | if (ret) |
102 | envp[i++] = buffer; | 101 | return ret; |
103 | buffer += len; | ||
104 | buffer_size -= len; | ||
105 | 102 | ||
106 | /* The next two can be zero, that's ok for us */ | 103 | /* The next two can be zero, that's ok for us */ |
107 | /* DEV_TYPE= */ | 104 | /* DEV_TYPE= */ |
108 | len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1; | 105 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
109 | if (len > buffer_size || i >= num_envp) | 106 | "DEV_TYPE=%04X", id->dev_type); |
110 | return -ENOMEM; | 107 | if (ret) |
111 | envp[i++] = buffer; | 108 | return ret; |
112 | buffer += len; | ||
113 | buffer_size -= len; | ||
114 | 109 | ||
115 | /* DEV_MODEL= */ | 110 | /* DEV_MODEL= */ |
116 | len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X", | 111 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
117 | (unsigned char) id->dev_model) + 1; | 112 | "DEV_MODEL=%02X", id->dev_model); |
118 | if (len > buffer_size || i >= num_envp) | 113 | if (ret) |
119 | return -ENOMEM; | 114 | return ret; |
120 | envp[i++] = buffer; | ||
121 | buffer += len; | ||
122 | buffer_size -= len; | ||
123 | 115 | ||
124 | /* MODALIAS= */ | 116 | /* MODALIAS= */ |
125 | len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1; | 117 | snprint_alias(modalias_buf, sizeof(modalias_buf), id, ""); |
126 | if (len > buffer_size || i >= num_envp) | 118 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
127 | return -ENOMEM; | 119 | "MODALIAS=%s", modalias_buf); |
128 | envp[i++] = buffer; | 120 | return ret; |
129 | buffer += len; | ||
130 | buffer_size -= len; | ||
131 | |||
132 | envp[i] = NULL; | ||
133 | |||
134 | return 0; | ||
135 | } | 121 | } |
136 | 122 | ||
137 | struct bus_type ccw_bus_type; | 123 | struct bus_type ccw_bus_type; |
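Editor's note: the hunk above replaces the hand-rolled snprintf/envp bookkeeping in ccw_uevent() with add_uevent_var(), which performs the slot and buffer bounds checks internally and returns non-zero when the environment is exhausted. A minimal sketch of the same pattern for a hypothetical bus follows (not from this patch; to_foo_device(), fdev->type and fdev->model are made up, and the six-argument form shown is the add_uevent_var() signature of this kernel era):

static int foo_uevent(struct device *dev, char **envp, int num_envp,
		      char *buffer, int buffer_size)
{
	struct foo_device *fdev = to_foo_device(dev);	/* hypothetical */
	int i = 0;
	int len = 0;
	int ret;

	/* Each call appends one "KEY=value" string and advances i and len. */
	ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
			     "FOO_TYPE=%04X", fdev->type);
	if (ret)
		return ret;
	ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
			     "FOO_MODEL=%02X", fdev->model);
	return ret;
}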
@@ -230,12 +216,18 @@ static ssize_t | |||
230 | chpids_show (struct device * dev, struct device_attribute *attr, char * buf) | 216 | chpids_show (struct device * dev, struct device_attribute *attr, char * buf) |
231 | { | 217 | { |
232 | struct subchannel *sch = to_subchannel(dev); | 218 | struct subchannel *sch = to_subchannel(dev); |
233 | struct ssd_info *ssd = &sch->ssd_info; | 219 | struct chsc_ssd_info *ssd = &sch->ssd_info; |
234 | ssize_t ret = 0; | 220 | ssize_t ret = 0; |
235 | int chp; | 221 | int chp; |
222 | int mask; | ||
236 | 223 | ||
237 | for (chp = 0; chp < 8; chp++) | 224 | for (chp = 0; chp < 8; chp++) { |
238 | ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); | 225 | mask = 0x80 >> chp; |
226 | if (ssd->path_mask & mask) | ||
227 | ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); | ||
228 | else | ||
229 | ret += sprintf(buf + ret, "00 "); | ||
230 | } | ||
239 | ret += sprintf (buf+ret, "\n"); | 231 | ret += sprintf (buf+ret, "\n"); |
240 | return min((ssize_t)PAGE_SIZE, ret); | 232 | return min((ssize_t)PAGE_SIZE, ret); |
241 | } | 233 | } |
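Editor's note: chpids_show() now checks ssd->path_mask before printing each CHPID, so unused path positions read "00" instead of stale IDs. The masking idiom in isolation (illustrative only; bit 0x80 of path_mask corresponds to path position 0):

/* Format the eight channel-path IDs of a subchannel, "00" for unused slots. */
static size_t format_chpids(const struct chsc_ssd_info *ssd, char *buf)
{
	size_t ret = 0;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		int mask = 0x80 >> chp;		/* bit for path position chp */

		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}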
@@ -280,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
280 | struct ccw_device_id *id = &(cdev->id); | 272 | struct ccw_device_id *id = &(cdev->id); |
281 | int len; | 273 | int len; |
282 | 274 | ||
283 | len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1; | 275 | len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1; |
284 | 276 | ||
285 | return len > PAGE_SIZE ? PAGE_SIZE : len; | 277 | return len > PAGE_SIZE ? PAGE_SIZE : len; |
286 | } | 278 | } |
@@ -298,16 +290,10 @@ int ccw_device_is_orphan(struct ccw_device *cdev) | |||
298 | return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); | 290 | return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); |
299 | } | 291 | } |
300 | 292 | ||
301 | static void ccw_device_unregister(struct work_struct *work) | 293 | static void ccw_device_unregister(struct ccw_device *cdev) |
302 | { | 294 | { |
303 | struct ccw_device_private *priv; | ||
304 | struct ccw_device *cdev; | ||
305 | |||
306 | priv = container_of(work, struct ccw_device_private, kick_work); | ||
307 | cdev = priv->cdev; | ||
308 | if (test_and_clear_bit(1, &cdev->private->registered)) | 295 | if (test_and_clear_bit(1, &cdev->private->registered)) |
309 | device_unregister(&cdev->dev); | 296 | device_del(&cdev->dev); |
310 | put_device(&cdev->dev); | ||
311 | } | 297 | } |
312 | 298 | ||
313 | static void | 299 | static void |
@@ -324,11 +310,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev) | |||
324 | spin_lock_irqsave(cdev->ccwlock, flags); | 310 | spin_lock_irqsave(cdev->ccwlock, flags); |
325 | cdev->private->state = DEV_STATE_NOT_OPER; | 311 | cdev->private->state = DEV_STATE_NOT_OPER; |
326 | spin_unlock_irqrestore(cdev->ccwlock, flags); | 312 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
327 | if (get_device(&cdev->dev)) { | 313 | ccw_device_unregister(cdev); |
328 | PREPARE_WORK(&cdev->private->kick_work, | 314 | put_device(&cdev->dev); |
329 | ccw_device_unregister); | ||
330 | queue_work(ccw_device_work, &cdev->private->kick_work); | ||
331 | } | ||
332 | return ; | 315 | return ; |
333 | } | 316 | } |
334 | sch = to_subchannel(cdev->dev.parent); | 317 | sch = to_subchannel(cdev->dev.parent); |
@@ -413,11 +396,60 @@ ccw_device_set_online(struct ccw_device *cdev) | |||
413 | return (ret == 0) ? -ENODEV : ret; | 396 | return (ret == 0) ? -ENODEV : ret; |
414 | } | 397 | } |
415 | 398 | ||
416 | static ssize_t | 399 | static void online_store_handle_offline(struct ccw_device *cdev) |
417 | online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 400 | { |
401 | if (cdev->private->state == DEV_STATE_DISCONNECTED) | ||
402 | ccw_device_remove_disconnected(cdev); | ||
403 | else if (cdev->drv && cdev->drv->set_offline) | ||
404 | ccw_device_set_offline(cdev); | ||
405 | } | ||
406 | |||
407 | static int online_store_recog_and_online(struct ccw_device *cdev) | ||
408 | { | ||
409 | int ret; | ||
410 | |||
411 | /* Do device recognition, if needed. */ | ||
412 | if (cdev->id.cu_type == 0) { | ||
413 | ret = ccw_device_recognition(cdev); | ||
414 | if (ret) { | ||
415 | printk(KERN_WARNING"Couldn't start recognition " | ||
416 | "for device %s (ret=%d)\n", | ||
417 | cdev->dev.bus_id, ret); | ||
418 | return ret; | ||
419 | } | ||
420 | wait_event(cdev->private->wait_q, | ||
421 | cdev->private->flags.recog_done); | ||
422 | } | ||
423 | if (cdev->drv && cdev->drv->set_online) | ||
424 | ccw_device_set_online(cdev); | ||
425 | return 0; | ||
426 | } | ||
427 | static void online_store_handle_online(struct ccw_device *cdev, int force) | ||
428 | { | ||
429 | int ret; | ||
430 | |||
431 | ret = online_store_recog_and_online(cdev); | ||
432 | if (ret) | ||
433 | return; | ||
434 | if (force && cdev->private->state == DEV_STATE_BOXED) { | ||
435 | ret = ccw_device_stlck(cdev); | ||
436 | if (ret) { | ||
437 | printk(KERN_WARNING"ccw_device_stlck for device %s " | ||
438 | "returned %d!\n", cdev->dev.bus_id, ret); | ||
439 | return; | ||
440 | } | ||
441 | if (cdev->id.cu_type == 0) | ||
442 | cdev->private->state = DEV_STATE_NOT_OPER; | ||
443 | online_store_recog_and_online(cdev); | ||
444 | } | ||
445 | |||
446 | } | ||
447 | |||
448 | static ssize_t online_store (struct device *dev, struct device_attribute *attr, | ||
449 | const char *buf, size_t count) | ||
418 | { | 450 | { |
419 | struct ccw_device *cdev = to_ccwdev(dev); | 451 | struct ccw_device *cdev = to_ccwdev(dev); |
420 | int i, force, ret; | 452 | int i, force; |
421 | char *tmp; | 453 | char *tmp; |
422 | 454 | ||
423 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) | 455 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) |
@@ -434,51 +466,17 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf | |||
434 | force = 0; | 466 | force = 0; |
435 | i = simple_strtoul(buf, &tmp, 16); | 467 | i = simple_strtoul(buf, &tmp, 16); |
436 | } | 468 | } |
437 | if (i == 1) { | 469 | |
438 | /* Do device recognition, if needed. */ | 470 | switch (i) { |
439 | if (cdev->id.cu_type == 0) { | 471 | case 0: |
440 | ret = ccw_device_recognition(cdev); | 472 | online_store_handle_offline(cdev); |
441 | if (ret) { | 473 | break; |
442 | printk(KERN_WARNING"Couldn't start recognition " | 474 | case 1: |
443 | "for device %s (ret=%d)\n", | 475 | online_store_handle_online(cdev, force); |
444 | cdev->dev.bus_id, ret); | 476 | break; |
445 | goto out; | 477 | default: |
446 | } | 478 | count = -EINVAL; |
447 | wait_event(cdev->private->wait_q, | ||
448 | cdev->private->flags.recog_done); | ||
449 | } | ||
450 | if (cdev->drv && cdev->drv->set_online) | ||
451 | ccw_device_set_online(cdev); | ||
452 | } else if (i == 0) { | ||
453 | if (cdev->private->state == DEV_STATE_DISCONNECTED) | ||
454 | ccw_device_remove_disconnected(cdev); | ||
455 | else if (cdev->drv && cdev->drv->set_offline) | ||
456 | ccw_device_set_offline(cdev); | ||
457 | } | ||
458 | if (force && cdev->private->state == DEV_STATE_BOXED) { | ||
459 | ret = ccw_device_stlck(cdev); | ||
460 | if (ret) { | ||
461 | printk(KERN_WARNING"ccw_device_stlck for device %s " | ||
462 | "returned %d!\n", cdev->dev.bus_id, ret); | ||
463 | goto out; | ||
464 | } | ||
465 | /* Do device recognition, if needed. */ | ||
466 | if (cdev->id.cu_type == 0) { | ||
467 | cdev->private->state = DEV_STATE_NOT_OPER; | ||
468 | ret = ccw_device_recognition(cdev); | ||
469 | if (ret) { | ||
470 | printk(KERN_WARNING"Couldn't start recognition " | ||
471 | "for device %s (ret=%d)\n", | ||
472 | cdev->dev.bus_id, ret); | ||
473 | goto out; | ||
474 | } | ||
475 | wait_event(cdev->private->wait_q, | ||
476 | cdev->private->flags.recog_done); | ||
477 | } | ||
478 | if (cdev->drv && cdev->drv->set_online) | ||
479 | ccw_device_set_online(cdev); | ||
480 | } | 479 | } |
481 | out: | ||
482 | if (cdev->drv) | 480 | if (cdev->drv) |
483 | module_put(cdev->drv->owner); | 481 | module_put(cdev->drv->owner); |
484 | atomic_set(&cdev->private->onoff, 0); | 482 | atomic_set(&cdev->private->onoff, 0); |
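Editor's note: the rewrite above splits online_store() into small helpers and dispatches on the parsed value with a switch. A sketch of the resulting store-attribute shape for a hypothetical device (foo_set_offline()/foo_set_online() and the onoff guard field are assumptions, not part of this patch):

static ssize_t foo_online_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct foo_device *fdev = to_foo_device(dev);	/* hypothetical */
	unsigned long val;
	char *end;

	if (atomic_cmpxchg(&fdev->onoff, 0, 1) != 0)
		return -EAGAIN;			/* another writer is active */

	val = simple_strtoul(buf, &end, 16);
	switch (val) {
	case 0:
		foo_set_offline(fdev);
		break;
	case 1:
		foo_set_online(fdev);
		break;
	default:
		count = -EINVAL;
	}
	atomic_set(&fdev->onoff, 0);		/* release the guard */
	return count;
}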
@@ -548,17 +546,10 @@ static struct attribute_group ccwdev_attr_group = { | |||
548 | .attrs = ccwdev_attrs, | 546 | .attrs = ccwdev_attrs, |
549 | }; | 547 | }; |
550 | 548 | ||
551 | static int | 549 | struct attribute_group *ccwdev_attr_groups[] = { |
552 | device_add_files (struct device *dev) | 550 | &ccwdev_attr_group, |
553 | { | 551 | NULL, |
554 | return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); | 552 | }; |
555 | } | ||
556 | |||
557 | static void | ||
558 | device_remove_files(struct device *dev) | ||
559 | { | ||
560 | sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); | ||
561 | } | ||
562 | 553 | ||
563 | /* this is a simple abstraction for device_register that sets the | 554 | /* this is a simple abstraction for device_register that sets the |
564 | * correct bus type and adds the bus specific files */ | 555 | * correct bus type and adds the bus specific files */ |
@@ -573,10 +564,6 @@ static int ccw_device_register(struct ccw_device *cdev) | |||
573 | return ret; | 564 | return ret; |
574 | 565 | ||
575 | set_bit(1, &cdev->private->registered); | 566 | set_bit(1, &cdev->private->registered); |
576 | if ((ret = device_add_files(dev))) { | ||
577 | if (test_and_clear_bit(1, &cdev->private->registered)) | ||
578 | device_del(dev); | ||
579 | } | ||
580 | return ret; | 567 | return ret; |
581 | } | 568 | } |
582 | 569 | ||
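Editor's note: with ccwdev_attr_groups assigned to cdev->dev.groups before registration, the driver core creates and removes the sysfs group itself, which is why the explicit device_add_files()/device_remove_files() helpers can be deleted above. Minimal sketch of the mechanism for a hypothetical device (attribute names are illustrative):

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR(foo, 0444, foo_show, NULL);

static struct attribute *foo_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};

static struct attribute_group foo_attr_group = {
	.attrs = foo_attrs,
};

static struct attribute_group *foo_attr_groups[] = {
	&foo_attr_group,
	NULL,
};

static int foo_register(struct device *dev)
{
	dev->groups = foo_attr_groups;	/* core adds/removes the files */
	return device_register(dev);
}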
@@ -648,10 +635,6 @@ ccw_device_add_changed(struct work_struct *work) | |||
648 | return; | 635 | return; |
649 | } | 636 | } |
650 | set_bit(1, &cdev->private->registered); | 637 | set_bit(1, &cdev->private->registered); |
651 | if (device_add_files(&cdev->dev)) { | ||
652 | if (test_and_clear_bit(1, &cdev->private->registered)) | ||
653 | device_unregister(&cdev->dev); | ||
654 | } | ||
655 | } | 638 | } |
656 | 639 | ||
657 | void ccw_device_do_unreg_rereg(struct work_struct *work) | 640 | void ccw_device_do_unreg_rereg(struct work_struct *work) |
@@ -664,9 +647,7 @@ void ccw_device_do_unreg_rereg(struct work_struct *work) | |||
664 | cdev = priv->cdev; | 647 | cdev = priv->cdev; |
665 | sch = to_subchannel(cdev->dev.parent); | 648 | sch = to_subchannel(cdev->dev.parent); |
666 | 649 | ||
667 | device_remove_files(&cdev->dev); | 650 | ccw_device_unregister(cdev); |
668 | if (test_and_clear_bit(1, &cdev->private->registered)) | ||
669 | device_del(&cdev->dev); | ||
670 | PREPARE_WORK(&cdev->private->kick_work, | 651 | PREPARE_WORK(&cdev->private->kick_work, |
671 | ccw_device_add_changed); | 652 | ccw_device_add_changed); |
672 | queue_work(ccw_device_work, &cdev->private->kick_work); | 653 | queue_work(ccw_device_work, &cdev->private->kick_work); |
@@ -705,6 +686,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, | |||
705 | cdev->dev.parent = &sch->dev; | 686 | cdev->dev.parent = &sch->dev; |
706 | cdev->dev.release = ccw_device_release; | 687 | cdev->dev.release = ccw_device_release; |
707 | INIT_LIST_HEAD(&cdev->private->kick_work.entry); | 688 | INIT_LIST_HEAD(&cdev->private->kick_work.entry); |
689 | cdev->dev.groups = ccwdev_attr_groups; | ||
708 | /* Do first half of device_register. */ | 690 | /* Do first half of device_register. */ |
709 | device_initialize(&cdev->dev); | 691 | device_initialize(&cdev->dev); |
710 | if (!get_device(&sch->dev)) { | 692 | if (!get_device(&sch->dev)) { |
@@ -736,6 +718,7 @@ static int io_subchannel_recog(struct ccw_device *, struct subchannel *); | |||
736 | static void sch_attach_device(struct subchannel *sch, | 718 | static void sch_attach_device(struct subchannel *sch, |
737 | struct ccw_device *cdev) | 719 | struct ccw_device *cdev) |
738 | { | 720 | { |
721 | css_update_ssd_info(sch); | ||
739 | spin_lock_irq(sch->lock); | 722 | spin_lock_irq(sch->lock); |
740 | sch->dev.driver_data = cdev; | 723 | sch->dev.driver_data = cdev; |
741 | cdev->private->schid = sch->schid; | 724 | cdev->private->schid = sch->schid; |
@@ -871,7 +854,7 @@ io_subchannel_register(struct work_struct *work) | |||
871 | priv = container_of(work, struct ccw_device_private, kick_work); | 854 | priv = container_of(work, struct ccw_device_private, kick_work); |
872 | cdev = priv->cdev; | 855 | cdev = priv->cdev; |
873 | sch = to_subchannel(cdev->dev.parent); | 856 | sch = to_subchannel(cdev->dev.parent); |
874 | 857 | css_update_ssd_info(sch); | |
875 | /* | 858 | /* |
876 | * io_subchannel_register() will also be called after device | 859 | * io_subchannel_register() will also be called after device |
877 | * recognition has been done for a boxed device (which will already | 860 | * recognition has been done for a boxed device (which will already |
@@ -1133,15 +1116,8 @@ io_subchannel_remove (struct subchannel *sch) | |||
1133 | sch->dev.driver_data = NULL; | 1116 | sch->dev.driver_data = NULL; |
1134 | cdev->private->state = DEV_STATE_NOT_OPER; | 1117 | cdev->private->state = DEV_STATE_NOT_OPER; |
1135 | spin_unlock_irqrestore(cdev->ccwlock, flags); | 1118 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
1136 | /* | 1119 | ccw_device_unregister(cdev); |
1137 | * Put unregistration on workqueue to avoid livelocks on the css bus | 1120 | put_device(&cdev->dev); |
1138 | * semaphore. | ||
1139 | */ | ||
1140 | if (get_device(&cdev->dev)) { | ||
1141 | PREPARE_WORK(&cdev->private->kick_work, | ||
1142 | ccw_device_unregister); | ||
1143 | queue_work(ccw_device_work, &cdev->private->kick_work); | ||
1144 | } | ||
1145 | return 0; | 1121 | return 0; |
1146 | } | 1122 | } |
1147 | 1123 | ||
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 089a3ddd6265..898ec3b2bebb 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <asm/ccwdev.h> | 16 | #include <asm/ccwdev.h> |
17 | #include <asm/cio.h> | 17 | #include <asm/cio.h> |
18 | #include <asm/chpid.h> | ||
18 | 19 | ||
19 | #include "cio.h" | 20 | #include "cio.h" |
20 | #include "cio_debug.h" | 21 | #include "cio_debug.h" |
@@ -22,6 +23,7 @@ | |||
22 | #include "device.h" | 23 | #include "device.h" |
23 | #include "chsc.h" | 24 | #include "chsc.h" |
24 | #include "ioasm.h" | 25 | #include "ioasm.h" |
26 | #include "chp.h" | ||
25 | 27 | ||
26 | int | 28 | int |
27 | device_is_online(struct subchannel *sch) | 29 | device_is_online(struct subchannel *sch) |
@@ -210,14 +212,18 @@ static void | |||
210 | __recover_lost_chpids(struct subchannel *sch, int old_lpm) | 212 | __recover_lost_chpids(struct subchannel *sch, int old_lpm) |
211 | { | 213 | { |
212 | int mask, i; | 214 | int mask, i; |
215 | struct chp_id chpid; | ||
213 | 216 | ||
217 | chp_id_init(&chpid); | ||
214 | for (i = 0; i<8; i++) { | 218 | for (i = 0; i<8; i++) { |
215 | mask = 0x80 >> i; | 219 | mask = 0x80 >> i; |
216 | if (!(sch->lpm & mask)) | 220 | if (!(sch->lpm & mask)) |
217 | continue; | 221 | continue; |
218 | if (old_lpm & mask) | 222 | if (old_lpm & mask) |
219 | continue; | 223 | continue; |
220 | chpid_is_actually_online(sch->schib.pmcw.chpid[i]); | 224 | chpid.id = sch->schib.pmcw.chpid[i]; |
225 | if (!chp_is_registered(chpid)) | ||
226 | css_schedule_eval_all(); | ||
221 | } | 227 | } |
222 | } | 228 | } |
223 | 229 | ||
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 7c7775aae38a..16f59fcb66b1 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -16,12 +16,14 @@ | |||
16 | 16 | ||
17 | #include <asm/ccwdev.h> | 17 | #include <asm/ccwdev.h> |
18 | #include <asm/idals.h> | 18 | #include <asm/idals.h> |
19 | #include <asm/chpid.h> | ||
19 | 20 | ||
20 | #include "cio.h" | 21 | #include "cio.h" |
21 | #include "cio_debug.h" | 22 | #include "cio_debug.h" |
22 | #include "css.h" | 23 | #include "css.h" |
23 | #include "chsc.h" | 24 | #include "chsc.h" |
24 | #include "device.h" | 25 | #include "device.h" |
26 | #include "chp.h" | ||
25 | 27 | ||
26 | int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) | 28 | int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) |
27 | { | 29 | { |
@@ -606,9 +608,12 @@ void * | |||
606 | ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) | 608 | ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) |
607 | { | 609 | { |
608 | struct subchannel *sch; | 610 | struct subchannel *sch; |
611 | struct chp_id chpid; | ||
609 | 612 | ||
610 | sch = to_subchannel(cdev->dev.parent); | 613 | sch = to_subchannel(cdev->dev.parent); |
611 | return chsc_get_chp_desc(sch, chp_no); | 614 | chp_id_init(&chpid); |
615 | chpid.id = sch->schib.pmcw.chpid[chp_no]; | ||
616 | return chp_get_chp_desc(chpid); | ||
612 | } | 617 | } |
613 | 618 | ||
614 | // FIXME: these have to go: | 619 | // FIXME: these have to go: |
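Editor's note: channel paths are now addressed through struct chp_id from <asm/chpid.h> rather than a bare integer; callers initialize the structure with chp_id_init() and set the .id byte from the PMCW, as ccw_device_get_chp_desc() does above. Illustrative sketch only (the return type mirrors the void * used in the patch):

#include <asm/chpid.h>

static void *get_desc_for_path(struct subchannel *sch, int chp_no)
{
	struct chp_id chpid;

	chp_id_init(&chpid);			/* zero the whole id first */
	chpid.id = sch->schib.pmcw.chpid[chp_no];
	return chp_get_chp_desc(chpid);		/* helper from cio/chp.h */
}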
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c new file mode 100644 index 000000000000..16ea828e99f7 --- /dev/null +++ b/drivers/s390/cio/idset.c | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/idset.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/slab.h> | ||
9 | #include <asm/bitops.h> | ||
10 | #include "idset.h" | ||
11 | #include "css.h" | ||
12 | |||
13 | struct idset { | ||
14 | int num_ssid; | ||
15 | int num_id; | ||
16 | unsigned long bitmap[0]; | ||
17 | }; | ||
18 | |||
19 | static inline unsigned long bitmap_size(int num_ssid, int num_id) | ||
20 | { | ||
21 | return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long); | ||
22 | } | ||
23 | |||
24 | static struct idset *idset_new(int num_ssid, int num_id) | ||
25 | { | ||
26 | struct idset *set; | ||
27 | |||
28 | set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id), | ||
29 | GFP_KERNEL); | ||
30 | if (set) { | ||
31 | set->num_ssid = num_ssid; | ||
32 | set->num_id = num_id; | ||
33 | } | ||
34 | return set; | ||
35 | } | ||
36 | |||
37 | void idset_free(struct idset *set) | ||
38 | { | ||
39 | kfree(set); | ||
40 | } | ||
41 | |||
42 | void idset_clear(struct idset *set) | ||
43 | { | ||
44 | memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id)); | ||
45 | } | ||
46 | |||
47 | void idset_fill(struct idset *set) | ||
48 | { | ||
49 | memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id)); | ||
50 | } | ||
51 | |||
52 | static inline void idset_add(struct idset *set, int ssid, int id) | ||
53 | { | ||
54 | set_bit(ssid * set->num_id + id, set->bitmap); | ||
55 | } | ||
56 | |||
57 | static inline void idset_del(struct idset *set, int ssid, int id) | ||
58 | { | ||
59 | clear_bit(ssid * set->num_id + id, set->bitmap); | ||
60 | } | ||
61 | |||
62 | static inline int idset_contains(struct idset *set, int ssid, int id) | ||
63 | { | ||
64 | return test_bit(ssid * set->num_id + id, set->bitmap); | ||
65 | } | ||
66 | |||
67 | static inline int idset_get_first(struct idset *set, int *ssid, int *id) | ||
68 | { | ||
69 | int bitnum; | ||
70 | |||
71 | bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id); | ||
72 | if (bitnum >= set->num_ssid * set->num_id) | ||
73 | return 0; | ||
74 | *ssid = bitnum / set->num_id; | ||
75 | *id = bitnum % set->num_id; | ||
76 | return 1; | ||
77 | } | ||
78 | |||
79 | struct idset *idset_sch_new(void) | ||
80 | { | ||
81 | return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1); | ||
82 | } | ||
83 | |||
84 | void idset_sch_add(struct idset *set, struct subchannel_id schid) | ||
85 | { | ||
86 | idset_add(set, schid.ssid, schid.sch_no); | ||
87 | } | ||
88 | |||
89 | void idset_sch_del(struct idset *set, struct subchannel_id schid) | ||
90 | { | ||
91 | idset_del(set, schid.ssid, schid.sch_no); | ||
92 | } | ||
93 | |||
94 | int idset_sch_contains(struct idset *set, struct subchannel_id schid) | ||
95 | { | ||
96 | return idset_contains(set, schid.ssid, schid.sch_no); | ||
97 | } | ||
98 | |||
99 | int idset_sch_get_first(struct idset *set, struct subchannel_id *schid) | ||
100 | { | ||
101 | int ssid = 0; | ||
102 | int id = 0; | ||
103 | int rc; | ||
104 | |||
105 | rc = idset_get_first(set, &ssid, &id); | ||
106 | if (rc) { | ||
107 | init_subchannel_id(schid); | ||
108 | schid->ssid = ssid; | ||
109 | schid->sch_no = id; | ||
110 | } | ||
111 | return rc; | ||
112 | } | ||
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h new file mode 100644 index 000000000000..144466ab8c15 --- /dev/null +++ b/drivers/s390/cio/idset.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/idset.h | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef S390_IDSET_H | ||
9 | #define S390_IDSET_H S390_IDSET_H | ||
10 | |||
11 | #include "schid.h" | ||
12 | |||
13 | struct idset; | ||
14 | |||
15 | void idset_free(struct idset *set); | ||
16 | void idset_clear(struct idset *set); | ||
17 | void idset_fill(struct idset *set); | ||
18 | |||
19 | struct idset *idset_sch_new(void); | ||
20 | void idset_sch_add(struct idset *set, struct subchannel_id id); | ||
21 | void idset_sch_del(struct idset *set, struct subchannel_id id); | ||
22 | int idset_sch_contains(struct idset *set, struct subchannel_id id); | ||
23 | int idset_sch_get_first(struct idset *set, struct subchannel_id *id); | ||
24 | |||
25 | #endif /* S390_IDSET_H */ | ||
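Editor's note: the new idset is a bitmap keyed by (ssid, subchannel number) that the css code uses to remember which subchannels still need evaluation. A hedged usage sketch based only on the interface declared above; the drain loop and the process_subchannel() call are illustrative, not taken from css.c:

#include "idset.h"

static void evaluate_marked_subchannels(void)
{
	struct idset *set;
	struct subchannel_id schid;

	set = idset_sch_new();
	if (!set)
		return;				/* allocation failed */
	idset_fill(set);			/* mark every possible subchannel */
	/* ... callers would clear the entries that need no work ... */
	while (idset_sch_get_first(set, &schid)) {
		/* process_subchannel(schid);	hypothetical worker */
		idset_sch_del(set, schid);	/* remove it so the loop advances */
	}
	idset_free(set);
}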
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index ad6d82940069..7153dd959082 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef S390_CIO_IOASM_H | 1 | #ifndef S390_CIO_IOASM_H |
2 | #define S390_CIO_IOASM_H | 2 | #define S390_CIO_IOASM_H |
3 | 3 | ||
4 | #include <asm/chpid.h> | ||
4 | #include "schid.h" | 5 | #include "schid.h" |
5 | 6 | ||
6 | /* | 7 | /* |
@@ -189,9 +190,9 @@ static inline int chsc(void *chsc_area) | |||
189 | return cc; | 190 | return cc; |
190 | } | 191 | } |
191 | 192 | ||
192 | static inline int rchp(int chpid) | 193 | static inline int rchp(struct chp_id chpid) |
193 | { | 194 | { |
194 | register unsigned int reg1 asm ("1") = chpid; | 195 | register struct chp_id reg1 asm ("1") = chpid; |
195 | int ccode; | 196 | int ccode; |
196 | 197 | ||
197 | asm volatile( | 198 | asm volatile( |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 7809a79feec7..6dd64d0c8d45 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -3525,8 +3525,8 @@ unpack_next: | |||
3525 | memcpy(skb_put(skb,len_of_data), | 3525 | memcpy(skb_put(skb,len_of_data), |
3526 | privptr->p_mtc_envelope, | 3526 | privptr->p_mtc_envelope, |
3527 | len_of_data); | 3527 | len_of_data); |
3528 | skb->mac.raw=skb->data; | ||
3529 | skb->dev=dev; | 3528 | skb->dev=dev; |
3529 | skb_reset_mac_header(skb); | ||
3530 | skb->protocol=htons(ETH_P_IP); | 3530 | skb->protocol=htons(ETH_P_IP); |
3531 | skb->ip_summed=CHECKSUM_UNNECESSARY; | 3531 | skb->ip_summed=CHECKSUM_UNNECESSARY; |
3532 | privptr->stats.rx_packets++; | 3532 | privptr->stats.rx_packets++; |
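Editor's note: throughout the s390 network drivers, direct assignments such as skb->mac.raw = skb->data are replaced by skb_reset_mac_header(), part of the sk_buff layered-header accessor conversion. A small sketch of the receive-path pattern, assuming a freshly allocated linear skb (the device and protocol values are placeholders):

static void deliver_frame(struct net_device *dev, const void *data, int len)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(len);
	if (!skb)
		return;				/* drop on allocation failure */
	memcpy(skb_put(skb, len), data, len);	/* reserve and fill len bytes */
	skb->dev = dev;
	skb_reset_mac_header(skb);		/* mac header = current data pointer */
	skb->protocol = htons(ETH_P_IP);	/* placeholder protocol */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	netif_rx(skb);
}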
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 0d6d5fcc128b..b20fd0681733 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c | |||
@@ -455,7 +455,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
455 | return; | 455 | return; |
456 | } | 456 | } |
457 | skb_put(pskb, header->length); | 457 | skb_put(pskb, header->length); |
458 | pskb->mac.raw = pskb->data; | 458 | skb_reset_mac_header(pskb); |
459 | len -= header->length; | 459 | len -= header->length; |
460 | skb = dev_alloc_skb(pskb->len); | 460 | skb = dev_alloc_skb(pskb->len); |
461 | if (!skb) { | 461 | if (!skb) { |
@@ -472,8 +472,9 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
472 | privptr->stats.rx_dropped++; | 472 | privptr->stats.rx_dropped++; |
473 | return; | 473 | return; |
474 | } | 474 | } |
475 | memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len); | 475 | skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), |
476 | skb->mac.raw = skb->data; | 476 | pskb->len); |
477 | skb_reset_mac_header(skb); | ||
477 | skb->dev = pskb->dev; | 478 | skb->dev = pskb->dev; |
478 | skb->protocol = pskb->protocol; | 479 | skb->protocol = pskb->protocol; |
479 | pskb->ip_summed = CHECKSUM_UNNECESSARY; | 480 | pskb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -706,7 +707,8 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg) | |||
706 | spin_unlock(&ch->collect_lock); | 707 | spin_unlock(&ch->collect_lock); |
707 | return; | 708 | return; |
708 | } | 709 | } |
709 | ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data; | 710 | ch->trans_skb->data = ch->trans_skb_data; |
711 | skb_reset_tail_pointer(ch->trans_skb); | ||
710 | ch->trans_skb->len = 0; | 712 | ch->trans_skb->len = 0; |
711 | if (ch->prof.maxmulti < (ch->collect_len + 2)) | 713 | if (ch->prof.maxmulti < (ch->collect_len + 2)) |
712 | ch->prof.maxmulti = ch->collect_len + 2; | 714 | ch->prof.maxmulti = ch->collect_len + 2; |
@@ -715,8 +717,9 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg) | |||
715 | *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2; | 717 | *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2; |
716 | i = 0; | 718 | i = 0; |
717 | while ((skb = skb_dequeue(&ch->collect_queue))) { | 719 | while ((skb = skb_dequeue(&ch->collect_queue))) { |
718 | memcpy(skb_put(ch->trans_skb, skb->len), skb->data, | 720 | skb_copy_from_linear_data(skb, skb_put(ch->trans_skb, |
719 | skb->len); | 721 | skb->len), |
722 | skb->len); | ||
720 | privptr->stats.tx_packets++; | 723 | privptr->stats.tx_packets++; |
721 | privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; | 724 | privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; |
722 | atomic_dec(&skb->users); | 725 | atomic_dec(&skb->users); |
@@ -831,7 +834,8 @@ ch_action_rx(fsm_instance * fi, int event, void *arg) | |||
831 | ctc_unpack_skb(ch, skb); | 834 | ctc_unpack_skb(ch, skb); |
832 | } | 835 | } |
833 | again: | 836 | again: |
834 | skb->data = skb->tail = ch->trans_skb_data; | 837 | skb->data = ch->trans_skb_data; |
838 | skb_reset_tail_pointer(skb); | ||
835 | skb->len = 0; | 839 | skb->len = 0; |
836 | if (ctc_checkalloc_buffer(ch, 1)) | 840 | if (ctc_checkalloc_buffer(ch, 1)) |
837 | return; | 841 | return; |
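Editor's note: resetting a long-lived transfer buffer used to be written as skb->data = skb->tail = base; since the tail may now be stored as an offset, the conversion uses skb_reset_tail_pointer(). Sketch of recycling a preallocated skb back to its original head (illustrative):

static void recycle_trans_skb(struct sk_buff *skb, unsigned char *base)
{
	skb->data = base;		/* original start of the buffer */
	skb_reset_tail_pointer(skb);	/* tail follows data again */
	skb->len = 0;			/* buffer is empty */
}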
@@ -1638,21 +1642,19 @@ add_channel(struct ccw_device *cdev, enum channel_types type) | |||
1638 | struct channel *ch; | 1642 | struct channel *ch; |
1639 | 1643 | ||
1640 | DBF_TEXT(trace, 2, __FUNCTION__); | 1644 | DBF_TEXT(trace, 2, __FUNCTION__); |
1641 | if ((ch = | 1645 | ch = kzalloc(sizeof(struct channel), GFP_KERNEL); |
1642 | (struct channel *) kmalloc(sizeof (struct channel), | 1646 | if (!ch) { |
1643 | GFP_KERNEL)) == NULL) { | ||
1644 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); | 1647 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); |
1645 | return -1; | 1648 | return -1; |
1646 | } | 1649 | } |
1647 | memset(ch, 0, sizeof (struct channel)); | 1650 | /* assure all flags and counters are reset */ |
1648 | if ((ch->ccw = kmalloc(8*sizeof(struct ccw1), | 1651 | ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); |
1649 | GFP_KERNEL | GFP_DMA)) == NULL) { | 1652 | if (!ch->ccw) { |
1650 | kfree(ch); | 1653 | kfree(ch); |
1651 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); | 1654 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); |
1652 | return -1; | 1655 | return -1; |
1653 | } | 1656 | } |
1654 | 1657 | ||
1655 | memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // assure all flags and counters are reset | ||
1656 | 1658 | ||
1657 | /** | 1659 | /** |
1658 | * "static" ccws are used in the following way: | 1660 | * "static" ccws are used in the following way: |
@@ -1692,15 +1694,14 @@ add_channel(struct ccw_device *cdev, enum channel_types type) | |||
1692 | return -1; | 1694 | return -1; |
1693 | } | 1695 | } |
1694 | fsm_newstate(ch->fsm, CH_STATE_IDLE); | 1696 | fsm_newstate(ch->fsm, CH_STATE_IDLE); |
1695 | if ((ch->irb = kmalloc(sizeof (struct irb), | 1697 | ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL); |
1696 | GFP_KERNEL)) == NULL) { | 1698 | if (!ch->irb) { |
1697 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); | 1699 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); |
1698 | kfree_fsm(ch->fsm); | 1700 | kfree_fsm(ch->fsm); |
1699 | kfree(ch->ccw); | 1701 | kfree(ch->ccw); |
1700 | kfree(ch); | 1702 | kfree(ch); |
1701 | return -1; | 1703 | return -1; |
1702 | } | 1704 | } |
1703 | memset(ch->irb, 0, sizeof (struct irb)); | ||
1704 | while (*c && less_than((*c)->id, ch->id)) | 1705 | while (*c && less_than((*c)->id, ch->id)) |
1705 | c = &(*c)->next; | 1706 | c = &(*c)->next; |
1706 | if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) { | 1707 | if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) { |
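Editor's note: several kmalloc()+memset(0) pairs in add_channel() collapse into kzalloc(), which allocates and zeroes in one call and drops the unneeded cast of the return value. The pattern in isolation (struct foo and its ccw member are illustrative):

struct foo *foo;

foo = kzalloc(sizeof(struct foo), GFP_KERNEL);
if (!foo)
	return -ENOMEM;		/* no separate memset() needed */

/* DMA-capable allocations keep their extra flag: */
foo->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
if (!foo->ccw) {
	kfree(foo);
	return -ENOMEM;
}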
@@ -2226,7 +2227,8 @@ transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
2226 | * IDAL support in CTC is broken, so we have to | 2227 | * IDAL support in CTC is broken, so we have to |
2227 | * care about skb's above 2G ourselves. | 2228 | * care about skb's above 2G ourselves. |
2228 | */ | 2229 | */ |
2229 | hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31; | 2230 | hi = ((unsigned long)skb_tail_pointer(skb) + |
2231 | LL_HEADER_LENGTH) >> 31; | ||
2230 | if (hi) { | 2232 | if (hi) { |
2231 | nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); | 2233 | nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); |
2232 | if (!nskb) { | 2234 | if (!nskb) { |
@@ -2262,11 +2264,12 @@ transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
2262 | return -EBUSY; | 2264 | return -EBUSY; |
2263 | } | 2265 | } |
2264 | 2266 | ||
2265 | ch->trans_skb->tail = ch->trans_skb->data; | 2267 | skb_reset_tail_pointer(ch->trans_skb); |
2266 | ch->trans_skb->len = 0; | 2268 | ch->trans_skb->len = 0; |
2267 | ch->ccw[1].count = skb->len; | 2269 | ch->ccw[1].count = skb->len; |
2268 | memcpy(skb_put(ch->trans_skb, skb->len), skb->data, | 2270 | skb_copy_from_linear_data(skb, skb_put(ch->trans_skb, |
2269 | skb->len); | 2271 | skb->len), |
2272 | skb->len); | ||
2270 | atomic_dec(&skb->users); | 2273 | atomic_dec(&skb->users); |
2271 | dev_kfree_skb_irq(skb); | 2274 | dev_kfree_skb_irq(skb); |
2272 | ccw_idx = 0; | 2275 | ccw_idx = 0; |
@@ -2745,14 +2748,13 @@ ctc_probe_device(struct ccwgroup_device *cgdev) | |||
2745 | if (!get_device(&cgdev->dev)) | 2748 | if (!get_device(&cgdev->dev)) |
2746 | return -ENODEV; | 2749 | return -ENODEV; |
2747 | 2750 | ||
2748 | priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL); | 2751 | priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL); |
2749 | if (!priv) { | 2752 | if (!priv) { |
2750 | ctc_pr_err("%s: Out of memory\n", __func__); | 2753 | ctc_pr_err("%s: Out of memory\n", __func__); |
2751 | put_device(&cgdev->dev); | 2754 | put_device(&cgdev->dev); |
2752 | return -ENOMEM; | 2755 | return -ENOMEM; |
2753 | } | 2756 | } |
2754 | 2757 | ||
2755 | memset(priv, 0, sizeof (struct ctc_priv)); | ||
2756 | rc = ctc_add_files(&cgdev->dev); | 2758 | rc = ctc_add_files(&cgdev->dev); |
2757 | if (rc) { | 2759 | if (rc) { |
2758 | kfree(priv); | 2760 | kfree(priv); |
@@ -2793,10 +2795,9 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device, | |||
2793 | DBF_TEXT(setup, 3, __FUNCTION__); | 2795 | DBF_TEXT(setup, 3, __FUNCTION__); |
2794 | 2796 | ||
2795 | if (alloc_device) { | 2797 | if (alloc_device) { |
2796 | dev = kmalloc(sizeof (struct net_device), GFP_KERNEL); | 2798 | dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); |
2797 | if (!dev) | 2799 | if (!dev) |
2798 | return NULL; | 2800 | return NULL; |
2799 | memset(dev, 0, sizeof (struct net_device)); | ||
2800 | } | 2801 | } |
2801 | 2802 | ||
2802 | dev->priv = privptr; | 2803 | dev->priv = privptr; |
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index ecca1046714e..08a994fdd1a4 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -1576,7 +1576,7 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, | |||
1576 | header->offset = card->tx_buffer->count; | 1576 | header->offset = card->tx_buffer->count; |
1577 | header->type = card->lan_type; | 1577 | header->type = card->lan_type; |
1578 | header->slot = card->portno; | 1578 | header->slot = card->portno; |
1579 | memcpy(header + 1, skb->data, skb->len); | 1579 | skb_copy_from_linear_data(skb, header + 1, skb->len); |
1580 | spin_unlock(&card->lock); | 1580 | spin_unlock(&card->lock); |
1581 | card->stats.tx_bytes += skb->len; | 1581 | card->stats.tx_bytes += skb->len; |
1582 | card->stats.tx_packets++; | 1582 | card->stats.tx_packets++; |
@@ -1784,7 +1784,6 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) | |||
1784 | card->stats.rx_dropped++; | 1784 | card->stats.rx_dropped++; |
1785 | return; | 1785 | return; |
1786 | } | 1786 | } |
1787 | skb->dev = card->dev; | ||
1788 | memcpy(skb_put(skb, skb_len), skb_data, skb_len); | 1787 | memcpy(skb_put(skb, skb_len), skb_data, skb_len); |
1789 | skb->protocol = card->lan_type_trans(skb, card->dev); | 1788 | skb->protocol = card->lan_type_trans(skb, card->dev); |
1790 | card->stats.rx_bytes += skb_len; | 1789 | card->stats.rx_bytes += skb_len; |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 594320ca1b7c..e10e85e85c84 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -635,7 +635,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, | |||
635 | return; | 635 | return; |
636 | } | 636 | } |
637 | skb_put(pskb, header->next); | 637 | skb_put(pskb, header->next); |
638 | pskb->mac.raw = pskb->data; | 638 | skb_reset_mac_header(pskb); |
639 | skb = dev_alloc_skb(pskb->len); | 639 | skb = dev_alloc_skb(pskb->len); |
640 | if (!skb) { | 640 | if (!skb) { |
641 | PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n", | 641 | PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n", |
@@ -645,8 +645,9 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, | |||
645 | privptr->stats.rx_dropped++; | 645 | privptr->stats.rx_dropped++; |
646 | return; | 646 | return; |
647 | } | 647 | } |
648 | memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len); | 648 | skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), |
649 | skb->mac.raw = skb->data; | 649 | pskb->len); |
650 | skb_reset_mac_header(skb); | ||
650 | skb->dev = pskb->dev; | 651 | skb->dev = pskb->dev; |
651 | skb->protocol = pskb->protocol; | 652 | skb->protocol = pskb->protocol; |
652 | pskb->ip_summed = CHECKSUM_UNNECESSARY; | 653 | pskb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -689,7 +690,8 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) | |||
689 | msg->length, conn->max_buffsize); | 690 | msg->length, conn->max_buffsize); |
690 | return; | 691 | return; |
691 | } | 692 | } |
692 | conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; | 693 | conn->rx_buff->data = conn->rx_buff->head; |
694 | skb_reset_tail_pointer(conn->rx_buff); | ||
693 | conn->rx_buff->len = 0; | 695 | conn->rx_buff->len = 0; |
694 | rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, | 696 | rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, |
695 | msg->length, NULL); | 697 | msg->length, NULL); |
@@ -735,14 +737,17 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) | |||
735 | } | 737 | } |
736 | } | 738 | } |
737 | } | 739 | } |
738 | conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head; | 740 | conn->tx_buff->data = conn->tx_buff->head; |
741 | skb_reset_tail_pointer(conn->tx_buff); | ||
739 | conn->tx_buff->len = 0; | 742 | conn->tx_buff->len = 0; |
740 | spin_lock_irqsave(&conn->collect_lock, saveflags); | 743 | spin_lock_irqsave(&conn->collect_lock, saveflags); |
741 | while ((skb = skb_dequeue(&conn->collect_queue))) { | 744 | while ((skb = skb_dequeue(&conn->collect_queue))) { |
742 | header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN; | 745 | header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN; |
743 | memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, | 746 | memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, |
744 | NETIUCV_HDRLEN); | 747 | NETIUCV_HDRLEN); |
745 | memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len); | 748 | skb_copy_from_linear_data(skb, |
749 | skb_put(conn->tx_buff, skb->len), | ||
750 | skb->len); | ||
746 | txbytes += skb->len; | 751 | txbytes += skb->len; |
747 | txpackets++; | 752 | txpackets++; |
748 | stat_maxcq++; | 753 | stat_maxcq++; |
@@ -1164,8 +1169,8 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, | |||
1164 | * Copy the skb to a new allocated skb in lowmem only if the | 1169 | * Copy the skb to a new allocated skb in lowmem only if the |
1165 | * data is located above 2G in memory or tailroom is < 2. | 1170 | * data is located above 2G in memory or tailroom is < 2. |
1166 | */ | 1171 | */ |
1167 | unsigned long hi = | 1172 | unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) + |
1168 | ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31; | 1173 | NETIUCV_HDRLEN)) >> 31; |
1169 | int copied = 0; | 1174 | int copied = 0; |
1170 | if (hi || (skb_tailroom(skb) < 2)) { | 1175 | if (hi || (skb_tailroom(skb) < 2)) { |
1171 | nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + | 1176 | nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + |
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 7c735e1fe063..dd7034fbfff8 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c | |||
@@ -267,7 +267,8 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, | |||
267 | 267 | ||
268 | QETH_DBF_TEXT(trace, 5, "eddpcdtc"); | 268 | QETH_DBF_TEXT(trace, 5, "eddpcdtc"); |
269 | if (skb_shinfo(eddp->skb)->nr_frags == 0) { | 269 | if (skb_shinfo(eddp->skb)->nr_frags == 0) { |
270 | memcpy(dst, eddp->skb->data + eddp->skb_offset, len); | 270 | skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset, |
271 | dst, len); | ||
271 | *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len, | 272 | *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len, |
272 | *hcsum); | 273 | *hcsum); |
273 | eddp->skb_offset += len; | 274 | eddp->skb_offset += len; |
@@ -416,7 +417,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
416 | eddp->skb_offset += VLAN_HLEN; | 417 | eddp->skb_offset += VLAN_HLEN; |
417 | #endif /* CONFIG_QETH_VLAN */ | 418 | #endif /* CONFIG_QETH_VLAN */ |
418 | } | 419 | } |
419 | tcph = eddp->skb->h.th; | 420 | tcph = tcp_hdr(eddp->skb); |
420 | while (eddp->skb_offset < eddp->skb->len) { | 421 | while (eddp->skb_offset < eddp->skb->len) { |
421 | data_len = min((int)skb_shinfo(eddp->skb)->gso_size, | 422 | data_len = min((int)skb_shinfo(eddp->skb)->gso_size, |
422 | (int)(eddp->skb->len - eddp->skb_offset)); | 423 | (int)(eddp->skb->len - eddp->skb_offset)); |
@@ -473,20 +474,24 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
473 | QETH_DBF_TEXT(trace, 5, "eddpficx"); | 474 | QETH_DBF_TEXT(trace, 5, "eddpficx"); |
474 | /* create our segmentation headers and copy original headers */ | 475 | /* create our segmentation headers and copy original headers */ |
475 | if (skb->protocol == htons(ETH_P_IP)) | 476 | if (skb->protocol == htons(ETH_P_IP)) |
476 | eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph, | 477 | eddp = qeth_eddp_create_eddp_data(qhdr, |
477 | skb->nh.iph->ihl*4, | 478 | skb_network_header(skb), |
478 | (u8 *)skb->h.th, skb->h.th->doff*4); | 479 | ip_hdrlen(skb), |
480 | skb_transport_header(skb), | ||
481 | tcp_hdrlen(skb)); | ||
479 | else | 482 | else |
480 | eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h, | 483 | eddp = qeth_eddp_create_eddp_data(qhdr, |
481 | sizeof(struct ipv6hdr), | 484 | skb_network_header(skb), |
482 | (u8 *)skb->h.th, skb->h.th->doff*4); | 485 | sizeof(struct ipv6hdr), |
486 | skb_transport_header(skb), | ||
487 | tcp_hdrlen(skb)); | ||
483 | 488 | ||
484 | if (eddp == NULL) { | 489 | if (eddp == NULL) { |
485 | QETH_DBF_TEXT(trace, 2, "eddpfcnm"); | 490 | QETH_DBF_TEXT(trace, 2, "eddpfcnm"); |
486 | return -ENOMEM; | 491 | return -ENOMEM; |
487 | } | 492 | } |
488 | if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { | 493 | if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { |
489 | skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr); | 494 | skb_set_mac_header(skb, sizeof(struct qeth_hdr)); |
490 | memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); | 495 | memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); |
491 | #ifdef CONFIG_QETH_VLAN | 496 | #ifdef CONFIG_QETH_VLAN |
492 | if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { | 497 | if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { |
@@ -590,12 +595,13 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, | |||
590 | QETH_DBF_TEXT(trace, 5, "creddpct"); | 595 | QETH_DBF_TEXT(trace, 5, "creddpct"); |
591 | if (skb->protocol == htons(ETH_P_IP)) | 596 | if (skb->protocol == htons(ETH_P_IP)) |
592 | ctx = qeth_eddp_create_context_generic(card, skb, | 597 | ctx = qeth_eddp_create_context_generic(card, skb, |
593 | sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 + | 598 | (sizeof(struct qeth_hdr) + |
594 | skb->h.th->doff*4); | 599 | ip_hdrlen(skb) + |
600 | tcp_hdrlen(skb))); | ||
595 | else if (skb->protocol == htons(ETH_P_IPV6)) | 601 | else if (skb->protocol == htons(ETH_P_IPV6)) |
596 | ctx = qeth_eddp_create_context_generic(card, skb, | 602 | ctx = qeth_eddp_create_context_generic(card, skb, |
597 | sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + | 603 | sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + |
598 | skb->h.th->doff*4); | 604 | tcp_hdrlen(skb)); |
599 | else | 605 | else |
600 | QETH_DBF_TEXT(trace, 2, "cetcpinv"); | 606 | QETH_DBF_TEXT(trace, 2, "cetcpinv"); |
601 | 607 | ||
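Editor's note: the qeth EDDP code switches from skb->nh.iph and skb->h.th to the accessor helpers skb_network_header(), skb_transport_header(), ip_hdrlen() (ihl * 4) and tcp_hdrlen() (doff * 4). A sketch computing the combined IPv4 plus TCP header length with the new helpers (illustrative; assumes a linear TCP/IPv4 skb):

#include <net/ip.h>
#include <linux/tcp.h>

static unsigned int l3l4_header_len(const struct sk_buff *skb)
{
	/* ip_hdrlen() and tcp_hdrlen() read ihl/doff from the headers
	 * located via skb_network_header()/skb_transport_header(). */
	return ip_hdrlen(skb) + tcp_hdrlen(skb);
}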
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index d8a86f5af379..ad7792dc1a04 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c | |||
@@ -2278,7 +2278,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
2278 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR)) | 2278 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR)) |
2279 | return tr_type_trans(skb,dev); | 2279 | return tr_type_trans(skb,dev); |
2280 | #endif /* CONFIG_TR */ | 2280 | #endif /* CONFIG_TR */ |
2281 | skb->mac.raw = skb->data; | 2281 | skb_reset_mac_header(skb); |
2282 | skb_pull(skb, ETH_HLEN ); | 2282 | skb_pull(skb, ETH_HLEN ); |
2283 | eth = eth_hdr(skb); | 2283 | eth = eth_hdr(skb); |
2284 | 2284 | ||
@@ -2306,9 +2306,9 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, | |||
2306 | struct iphdr *ip_hdr; | 2306 | struct iphdr *ip_hdr; |
2307 | 2307 | ||
2308 | QETH_DBF_TEXT(trace,5,"skbfktr"); | 2308 | QETH_DBF_TEXT(trace,5,"skbfktr"); |
2309 | skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_TR; | 2309 | skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_TR); |
2310 | /* this is a fake ethernet header */ | 2310 | /* this is a fake ethernet header */ |
2311 | fake_hdr = (struct trh_hdr *) skb->mac.raw; | 2311 | fake_hdr = tr_hdr(skb); |
2312 | 2312 | ||
2313 | /* the destination MAC address */ | 2313 | /* the destination MAC address */ |
2314 | switch (skb->pkt_type){ | 2314 | switch (skb->pkt_type){ |
@@ -2359,9 +2359,9 @@ qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, | |||
2359 | struct iphdr *ip_hdr; | 2359 | struct iphdr *ip_hdr; |
2360 | 2360 | ||
2361 | QETH_DBF_TEXT(trace,5,"skbfketh"); | 2361 | QETH_DBF_TEXT(trace,5,"skbfketh"); |
2362 | skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_ETH; | 2362 | skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_ETH); |
2363 | /* this is a fake ethernet header */ | 2363 | /* this is a fake ethernet header */ |
2364 | fake_hdr = (struct ethhdr *) skb->mac.raw; | 2364 | fake_hdr = eth_hdr(skb); |
2365 | 2365 | ||
2366 | /* the destination MAC address */ | 2366 | /* the destination MAC address */ |
2367 | switch (skb->pkt_type){ | 2367 | switch (skb->pkt_type){ |
@@ -2461,7 +2461,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | |||
2461 | if (card->options.fake_ll) | 2461 | if (card->options.fake_ll) |
2462 | qeth_rebuild_skb_fake_ll(card, skb, hdr); | 2462 | qeth_rebuild_skb_fake_ll(card, skb, hdr); |
2463 | else | 2463 | else |
2464 | skb->mac.raw = skb->data; | 2464 | skb_reset_mac_header(skb); |
2465 | skb->ip_summed = card->options.checksum_type; | 2465 | skb->ip_summed = card->options.checksum_type; |
2466 | if (card->options.checksum_type == HW_CHECKSUMMING){ | 2466 | if (card->options.checksum_type == HW_CHECKSUMMING){ |
2467 | if ( (hdr->hdr.l3.ext_flags & | 2467 | if ( (hdr->hdr.l3.ext_flags & |
@@ -2501,7 +2501,8 @@ qeth_process_inbound_buffer(struct qeth_card *card, | |||
2501 | vlan_tag = qeth_rebuild_skb(card, skb, hdr); | 2501 | vlan_tag = qeth_rebuild_skb(card, skb, hdr); |
2502 | else { /*in case of OSN*/ | 2502 | else { /*in case of OSN*/ |
2503 | skb_push(skb, sizeof(struct qeth_hdr)); | 2503 | skb_push(skb, sizeof(struct qeth_hdr)); |
2504 | memcpy(skb->data, hdr, sizeof(struct qeth_hdr)); | 2504 | skb_copy_to_linear_data(skb, hdr, |
2505 | sizeof(struct qeth_hdr)); | ||
2505 | } | 2506 | } |
2506 | /* is device UP ? */ | 2507 | /* is device UP ? */ |
2507 | if (!(card->dev->flags & IFF_UP)){ | 2508 | if (!(card->dev->flags & IFF_UP)){ |
@@ -3778,9 +3779,11 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) | |||
3778 | } | 3779 | } |
3779 | /* try something else */ | 3780 | /* try something else */ |
3780 | if (skb->protocol == ETH_P_IPV6) | 3781 | if (skb->protocol == ETH_P_IPV6) |
3781 | return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0; | 3782 | return (skb_network_header(skb)[24] == 0xff) ? |
3783 | RTN_MULTICAST : 0; | ||
3782 | else if (skb->protocol == ETH_P_IP) | 3784 | else if (skb->protocol == ETH_P_IP) |
3783 | return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0; | 3785 | return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ? |
3786 | RTN_MULTICAST : 0; | ||
3784 | /* ... */ | 3787 | /* ... */ |
3785 | if (!memcmp(skb->data, skb->dev->broadcast, 6)) | 3788 | if (!memcmp(skb->data, skb->dev->broadcast, 6)) |
3786 | return RTN_BROADCAST; | 3789 | return RTN_BROADCAST; |
@@ -3818,18 +3821,20 @@ qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, | |||
3818 | return card->info.is_multicast_different & | 3821 | return card->info.is_multicast_different & |
3819 | (card->qdio.no_out_queues - 1); | 3822 | (card->qdio.no_out_queues - 1); |
3820 | if (card->qdio.do_prio_queueing && (ipv == 4)) { | 3823 | if (card->qdio.do_prio_queueing && (ipv == 4)) { |
3824 | const u8 tos = ip_hdr(skb)->tos; | ||
3825 | |||
3821 | if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){ | 3826 | if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){ |
3822 | if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT) | 3827 | if (tos & IP_TOS_NOTIMPORTANT) |
3823 | return 3; | 3828 | return 3; |
3824 | if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY) | 3829 | if (tos & IP_TOS_HIGHRELIABILITY) |
3825 | return 2; | 3830 | return 2; |
3826 | if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT) | 3831 | if (tos & IP_TOS_HIGHTHROUGHPUT) |
3827 | return 1; | 3832 | return 1; |
3828 | if (skb->nh.iph->tos & IP_TOS_LOWDELAY) | 3833 | if (tos & IP_TOS_LOWDELAY) |
3829 | return 0; | 3834 | return 0; |
3830 | } | 3835 | } |
3831 | if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC) | 3836 | if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC) |
3832 | return 3 - (skb->nh.iph->tos >> 6); | 3837 | return 3 - (tos >> 6); |
3833 | } else if (card->qdio.do_prio_queueing && (ipv == 6)) { | 3838 | } else if (card->qdio.do_prio_queueing && (ipv == 6)) { |
3834 | /* TODO: IPv6!!! */ | 3839 | /* TODO: IPv6!!! */ |
3835 | } | 3840 | } |
@@ -3866,9 +3871,9 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) | |||
3866 | * memcpys instead of one memmove to save cycles. | 3871 | * memcpys instead of one memmove to save cycles. |
3867 | */ | 3872 | */ |
3868 | skb_push(skb, VLAN_HLEN); | 3873 | skb_push(skb, VLAN_HLEN); |
3869 | memcpy(skb->data, skb->data + 4, 4); | 3874 | skb_copy_to_linear_data(skb, skb->data + 4, 4); |
3870 | memcpy(skb->data + 4, skb->data + 8, 4); | 3875 | skb_copy_to_linear_data_offset(skb, 4, skb->data + 8, 4); |
3871 | memcpy(skb->data + 8, skb->data + 12, 4); | 3876 | skb_copy_to_linear_data_offset(skb, 8, skb->data + 12, 4); |
3872 | tag = (u16 *)(skb->data + 12); | 3877 | tag = (u16 *)(skb->data + 12); |
3873 | /* | 3878 | /* |
3874 | * first two bytes = ETH_P_8021Q (0x8100) | 3879 | * first two bytes = ETH_P_8021Q (0x8100) |
@@ -4039,7 +4044,8 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | |||
4039 | *((u32 *) skb->dst->neighbour->primary_key); | 4044 | *((u32 *) skb->dst->neighbour->primary_key); |
4040 | } else { | 4045 | } else { |
4041 | /* fill in destination address used in ip header */ | 4046 | /* fill in destination address used in ip header */ |
4042 | *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr; | 4047 | *((u32 *)(&hdr->hdr.l3.dest_addr[12])) = |
4048 | ip_hdr(skb)->daddr; | ||
4043 | } | 4049 | } |
4044 | } else if (ipv == 6) { /* IPv6 or passthru */ | 4050 | } else if (ipv == 6) { /* IPv6 or passthru */ |
4045 | hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type); | 4051 | hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type); |
@@ -4048,7 +4054,8 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | |||
4048 | skb->dst->neighbour->primary_key, 16); | 4054 | skb->dst->neighbour->primary_key, 16); |
4049 | } else { | 4055 | } else { |
4050 | /* fill in destination address used in ip header */ | 4056 | /* fill in destination address used in ip header */ |
4051 | memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16); | 4057 | memcpy(hdr->hdr.l3.dest_addr, |
4058 | &ipv6_hdr(skb)->daddr, 16); | ||
4052 | } | 4059 | } |
4053 | } else { /* passthrough */ | 4060 | } else { /* passthrough */ |
4054 | if((skb->dev->type == ARPHRD_IEEE802_TR) && | 4061 | if((skb->dev->type == ARPHRD_IEEE802_TR) && |
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h index 14504afb044e..c20e923cf9ad 100644 --- a/drivers/s390/net/qeth_tso.h +++ b/drivers/s390/net/qeth_tso.h | |||
@@ -40,8 +40,8 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb) | |||
40 | QETH_DBF_TEXT(trace, 5, "tsofhdr"); | 40 | QETH_DBF_TEXT(trace, 5, "tsofhdr"); |
41 | 41 | ||
42 | hdr = (struct qeth_hdr_tso *) skb->data; | 42 | hdr = (struct qeth_hdr_tso *) skb->data; |
43 | iph = skb->nh.iph; | 43 | iph = ip_hdr(skb); |
44 | tcph = skb->h.th; | 44 | tcph = tcp_hdr(skb); |
45 | /*fix header to TSO values ...*/ | 45 | /*fix header to TSO values ...*/ |
46 | hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; | 46 | hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; |
47 | /*set values which are fix for the first approach ...*/ | 47 | /*set values which are fix for the first approach ...*/ |
@@ -63,13 +63,9 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb) | |||
63 | static inline void | 63 | static inline void |
64 | qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb) | 64 | qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb) |
65 | { | 65 | { |
66 | struct iphdr *iph; | 66 | struct iphdr *iph = ip_hdr(skb); |
67 | struct ipv6hdr *ip6h; | 67 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
68 | struct tcphdr *tcph; | 68 | struct tcphdr *tcph = tcp_hdr(skb); |
69 | |||
70 | iph = skb->nh.iph; | ||
71 | ip6h = skb->nh.ipv6h; | ||
72 | tcph = skb->h.th; | ||
73 | 69 | ||
74 | tcph->check = 0; | 70 | tcph->check = 0; |
75 | if (skb->protocol == ETH_P_IPV6) { | 71 | if (skb->protocol == ETH_P_IPV6) { |
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 806bb1a921eb..644a06eba828 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include "cio/cio.h" | 21 | #include "cio/cio.h" |
22 | #include "cio/chsc.h" | 22 | #include "cio/chsc.h" |
23 | #include "cio/css.h" | 23 | #include "cio/css.h" |
24 | #include "cio/chp.h" | ||
24 | #include "s390mach.h" | 25 | #include "s390mach.h" |
25 | 26 | ||
26 | static struct semaphore m_sem; | 27 | static struct semaphore m_sem; |
@@ -44,14 +45,13 @@ static int | |||
44 | s390_collect_crw_info(void *param) | 45 | s390_collect_crw_info(void *param) |
45 | { | 46 | { |
46 | struct crw crw[2]; | 47 | struct crw crw[2]; |
47 | int ccode, ret, slow; | 48 | int ccode; |
48 | struct semaphore *sem; | 49 | struct semaphore *sem; |
49 | unsigned int chain; | 50 | unsigned int chain; |
50 | 51 | ||
51 | sem = (struct semaphore *)param; | 52 | sem = (struct semaphore *)param; |
52 | repeat: | 53 | repeat: |
53 | down_interruptible(sem); | 54 | down_interruptible(sem); |
54 | slow = 0; | ||
55 | chain = 0; | 55 | chain = 0; |
56 | while (1) { | 56 | while (1) { |
57 | if (unlikely(chain > 1)) { | 57 | if (unlikely(chain > 1)) { |
@@ -84,9 +84,8 @@ repeat: | |||
84 | /* Check for overflows. */ | 84 | /* Check for overflows. */ |
85 | if (crw[chain].oflw) { | 85 | if (crw[chain].oflw) { |
86 | pr_debug("%s: crw overflow detected!\n", __FUNCTION__); | 86 | pr_debug("%s: crw overflow detected!\n", __FUNCTION__); |
87 | css_reiterate_subchannels(); | 87 | css_schedule_eval_all(); |
88 | chain = 0; | 88 | chain = 0; |
89 | slow = 1; | ||
90 | continue; | 89 | continue; |
91 | } | 90 | } |
92 | switch (crw[chain].rsc) { | 91 | switch (crw[chain].rsc) { |
@@ -94,10 +93,7 @@ repeat: | |||
94 | if (crw[0].chn && !chain) | 93 | if (crw[0].chn && !chain) |
95 | break; | 94 | break; |
96 | pr_debug("source is subchannel %04X\n", crw[0].rsid); | 95 | pr_debug("source is subchannel %04X\n", crw[0].rsid); |
97 | ret = css_process_crw (crw[0].rsid, | 96 | css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0); |
98 | chain ? crw[1].rsid : 0); | ||
99 | if (ret == -EAGAIN) | ||
100 | slow = 1; | ||
101 | break; | 97 | break; |
102 | case CRW_RSC_MONITOR: | 98 | case CRW_RSC_MONITOR: |
103 | pr_debug("source is monitoring facility\n"); | 99 | pr_debug("source is monitoring facility\n"); |
@@ -116,28 +112,23 @@ repeat: | |||
116 | } | 112 | } |
117 | switch (crw[0].erc) { | 113 | switch (crw[0].erc) { |
118 | case CRW_ERC_IPARM: /* Path has come. */ | 114 | case CRW_ERC_IPARM: /* Path has come. */ |
119 | ret = chp_process_crw(crw[0].rsid, 1); | 115 | chp_process_crw(crw[0].rsid, 1); |
120 | break; | 116 | break; |
121 | case CRW_ERC_PERRI: /* Path has gone. */ | 117 | case CRW_ERC_PERRI: /* Path has gone. */ |
122 | case CRW_ERC_PERRN: | 118 | case CRW_ERC_PERRN: |
123 | ret = chp_process_crw(crw[0].rsid, 0); | 119 | chp_process_crw(crw[0].rsid, 0); |
124 | break; | 120 | break; |
125 | default: | 121 | default: |
126 | pr_debug("Don't know how to handle erc=%x\n", | 122 | pr_debug("Don't know how to handle erc=%x\n", |
127 | crw[0].erc); | 123 | crw[0].erc); |
128 | ret = 0; | ||
129 | } | 124 | } |
130 | if (ret == -EAGAIN) | ||
131 | slow = 1; | ||
132 | break; | 125 | break; |
133 | case CRW_RSC_CONFIG: | 126 | case CRW_RSC_CONFIG: |
134 | pr_debug("source is configuration-alert facility\n"); | 127 | pr_debug("source is configuration-alert facility\n"); |
135 | break; | 128 | break; |
136 | case CRW_RSC_CSS: | 129 | case CRW_RSC_CSS: |
137 | pr_debug("source is channel subsystem\n"); | 130 | pr_debug("source is channel subsystem\n"); |
138 | ret = chsc_process_crw(); | 131 | chsc_process_crw(); |
139 | if (ret == -EAGAIN) | ||
140 | slow = 1; | ||
141 | break; | 132 | break; |
142 | default: | 133 | default: |
143 | pr_debug("unknown source\n"); | 134 | pr_debug("unknown source\n"); |
@@ -146,8 +137,6 @@ repeat: | |||
146 | /* chain is always 0 or 1 here. */ | 137 | /* chain is always 0 or 1 here. */ |
147 | chain = crw[chain].chn ? chain + 1 : 0; | 138 | chain = crw[chain].chn ? chain + 1 : 0; |
148 | } | 139 | } |
149 | if (slow) | ||
150 | queue_work(slow_path_wq, &slow_path_work); | ||
151 | goto repeat; | 140 | goto repeat; |
152 | return 0; | 141 | return 0; |
153 | } | 142 | } |
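For context, the net behavioural change in the hunk above is that the collector no longer tracks a slow-path flag; overflow handling schedules the evaluation directly. A minimal, purely illustrative sketch (the helper name handle_crw_overflow() is made up; css_schedule_eval_all() comes from cio/css.h as used in the hunk):

    /* Illustrative only: how a CRW overflow is now dealt with inside
     * the collector loop, instead of setting "slow" and queueing
     * slow_path_work after the loop finished.
     */
    static void handle_crw_overflow(void)
    {
            css_schedule_eval_all();    /* schedule evaluation of all subchannels */
    }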
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index 090743d2f914..19343f9675c3 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c | |||
@@ -357,6 +357,24 @@ static __init int create_proc_sysinfo(void) | |||
357 | 357 | ||
358 | __initcall(create_proc_sysinfo); | 358 | __initcall(create_proc_sysinfo); |
359 | 359 | ||
360 | int get_cpu_capability(unsigned int *capability) | ||
361 | { | ||
362 | struct sysinfo_1_2_2 *info; | ||
363 | int rc; | ||
364 | |||
365 | info = (void *) get_zeroed_page(GFP_KERNEL); | ||
366 | if (!info) | ||
367 | return -ENOMEM; | ||
368 | rc = stsi(info, 1, 2, 2); | ||
369 | if (rc == -ENOSYS) | ||
370 | goto out; | ||
371 | rc = 0; | ||
372 | *capability = info->capability; | ||
373 | out: | ||
374 | free_page((unsigned long) info); | ||
375 | return rc; | ||
376 | } | ||
377 | |||
360 | /* | 378 | /* |
361 | * CPU capability might have changed. Therefore recalculate loops_per_jiffy. | 379 | * CPU capability might have changed. Therefore recalculate loops_per_jiffy. |
362 | */ | 380 | */ |
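The new get_cpu_capability() helper added above fills in *capability and returns 0 on success, or a negative errno (-ENOMEM if the page allocation fails, -ENOSYS if the stsi instruction is unavailable). A minimal caller might look like the sketch below; the function name and the "unknown means zero" fallback are illustrative assumptions, not part of the commit:

    /* Hypothetical caller, for illustration only. */
    static unsigned int read_capability_or_zero(void)
    {
            unsigned int cap;

            if (get_cpu_capability(&cap))
                    return 0;       /* treat an unreadable capability as unknown */
            return cap;
    }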
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 2cea4f5d2084..f2be2ead8742 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c | |||
@@ -726,7 +726,7 @@ static struct miscdevice envctrl_dev = { | |||
726 | * Return: None. | 726 | * Return: None. |
727 | */ | 727 | */ |
728 | static void envctrl_set_mon(struct i2c_child_t *pchild, | 728 | static void envctrl_set_mon(struct i2c_child_t *pchild, |
729 | char *chnl_desc, | 729 | const char *chnl_desc, |
730 | int chnl_no) | 730 | int chnl_no) |
731 | { | 731 | { |
732 | /* Firmware only has temperature type. It does not distinguish | 732 | /* Firmware only has temperature type. It does not distinguish |
@@ -763,8 +763,8 @@ static void envctrl_set_mon(struct i2c_child_t *pchild, | |||
763 | static void envctrl_init_adc(struct i2c_child_t *pchild, struct device_node *dp) | 763 | static void envctrl_init_adc(struct i2c_child_t *pchild, struct device_node *dp) |
764 | { | 764 | { |
765 | int i = 0, len; | 765 | int i = 0, len; |
766 | char *pos; | 766 | const char *pos; |
767 | unsigned int *pval; | 767 | const unsigned int *pval; |
768 | 768 | ||
769 | /* Firmware describes channels into a stream separated by a '\0'. */ | 769 | /* Firmware describes channels into a stream separated by a '\0'. */ |
770 | pos = of_get_property(dp, "channels-description", &len); | 770 | pos = of_get_property(dp, "channels-description", &len); |
@@ -859,7 +859,7 @@ static void envctrl_init_i2c_child(struct linux_ebus_child *edev_child, | |||
859 | { | 859 | { |
860 | int len, i, tbls_size = 0; | 860 | int len, i, tbls_size = 0; |
861 | struct device_node *dp = edev_child->prom_node; | 861 | struct device_node *dp = edev_child->prom_node; |
862 | void *pval; | 862 | const void *pval; |
863 | 863 | ||
864 | /* Get device address. */ | 864 | /* Get device address. */ |
865 | pval = of_get_property(dp, "reg", &len); | 865 | pval = of_get_property(dp, "reg", &len); |
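The const-ification in this hunk (and in the flash, openprom, and sbus hunks that follow) reflects that of_get_property() hands back a pointer into property data which callers must treat as read-only. A minimal sketch of the assumed calling pattern, for illustration only (the helper name and error policy are not from the commit):

    /* Illustrative only: read the first cell of a "reg" property
     * through a const pointer, as the updated drivers now do.
     */
    static int read_first_reg_cell(struct device_node *dp)
    {
            const unsigned int *reg;
            int len;

            reg = of_get_property(dp, "reg", &len);
            if (!reg || len < sizeof(*reg))
                    return -ENODEV;
            return *reg;
    }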
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c index 6e99507aeb12..262f01e68592 100644 --- a/drivers/sbus/char/flash.c +++ b/drivers/sbus/char/flash.c | |||
@@ -190,7 +190,7 @@ static int __init flash_init(void) | |||
190 | } | 190 | } |
191 | if (!sdev) { | 191 | if (!sdev) { |
192 | #ifdef CONFIG_PCI | 192 | #ifdef CONFIG_PCI |
193 | struct linux_prom_registers *ebus_regs; | 193 | const struct linux_prom_registers *ebus_regs; |
194 | 194 | ||
195 | for_each_ebus(ebus) { | 195 | for_each_ebus(ebus) { |
196 | for_each_ebusdev(edev, ebus) { | 196 | for_each_ebusdev(edev, ebus) { |
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index eec28c142a59..fbfeb89a6f32 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <asm/openpromio.h> | 44 | #include <asm/openpromio.h> |
45 | #ifdef CONFIG_PCI | 45 | #ifdef CONFIG_PCI |
46 | #include <linux/pci.h> | 46 | #include <linux/pci.h> |
47 | #include <asm/pbm.h> | ||
48 | #endif | 47 | #endif |
49 | 48 | ||
50 | MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)"); | 49 | MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)"); |
@@ -141,7 +140,7 @@ static int copyout(void __user *info, struct openpromio *opp, int len) | |||
141 | 140 | ||
142 | static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize) | 141 | static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize) |
143 | { | 142 | { |
144 | void *pval; | 143 | const void *pval; |
145 | int len; | 144 | int len; |
146 | 145 | ||
147 | if (!dp || | 146 | if (!dp || |
@@ -248,18 +247,18 @@ static int oprompci2node(void __user *argp, struct device_node *dp, struct openp | |||
248 | if (bufsize >= 2*sizeof(int)) { | 247 | if (bufsize >= 2*sizeof(int)) { |
249 | #ifdef CONFIG_PCI | 248 | #ifdef CONFIG_PCI |
250 | struct pci_dev *pdev; | 249 | struct pci_dev *pdev; |
251 | struct pcidev_cookie *pcp; | 250 | struct device_node *dp; |
252 | pdev = pci_find_slot (((int *) op->oprom_array)[0], | 251 | |
252 | pdev = pci_get_bus_and_slot (((int *) op->oprom_array)[0], | ||
253 | ((int *) op->oprom_array)[1]); | 253 | ((int *) op->oprom_array)[1]); |
254 | 254 | ||
255 | pcp = pdev->sysdata; | 255 | dp = pci_device_to_OF_node(pdev); |
256 | if (pcp != NULL) { | 256 | data->current_node = dp; |
257 | dp = pcp->prom_node; | 257 | *((int *)op->oprom_array) = dp->node; |
258 | data->current_node = dp; | 258 | op->oprom_size = sizeof(int); |
259 | *((int *)op->oprom_array) = dp->node; | 259 | err = copyout(argp, op, bufsize + sizeof(int)); |
260 | op->oprom_size = sizeof(int); | 260 | |
261 | err = copyout(argp, op, bufsize + sizeof(int)); | 261 | pci_dev_put(pdev); |
262 | } | ||
263 | #endif | 262 | #endif |
264 | } | 263 | } |
265 | 264 | ||
@@ -409,7 +408,7 @@ static int opiocget(void __user *argp, DATA *data) | |||
409 | struct opiocdesc op; | 408 | struct opiocdesc op; |
410 | struct device_node *dp; | 409 | struct device_node *dp; |
411 | char *str; | 410 | char *str; |
412 | void *pval; | 411 | const void *pval; |
413 | int err, len; | 412 | int err, len; |
414 | 413 | ||
415 | if (copy_from_user(&op, argp, sizeof(op))) | 414 | if (copy_from_user(&op, argp, sizeof(op))) |
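Note the switch above from pci_find_slot() to pci_get_bus_and_slot(): the _get_ variant returns a reference-counted device, which is why the hunk also adds a matching pci_dev_put(). A hedged sketch of the general pattern (everything except the two PCI calls and pci_device_to_OF_node() is an illustrative assumption):

    /* Illustrative only: every successful pci_get_bus_and_slot() lookup
     * must be balanced by pci_dev_put() once the device is no longer needed.
     */
    static struct device_node *lookup_of_node(unsigned int bus, unsigned int devfn)
    {
            struct pci_dev *pdev;
            struct device_node *dp = NULL;

            pdev = pci_get_bus_and_slot(bus, devfn);
            if (pdev) {
                    dp = pci_device_to_OF_node(pdev);
                    pci_dev_put(pdev);
            }
            return dp;
    }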
diff --git a/drivers/sbus/char/vfc_dev.c b/drivers/sbus/char/vfc_dev.c index 8bfb67ccdcd4..c3135e2fbd5a 100644 --- a/drivers/sbus/char/vfc_dev.c +++ b/drivers/sbus/char/vfc_dev.c | |||
@@ -259,11 +259,10 @@ static int vfc_debug(struct vfc_dev *dev, int cmd, void __user *argp) | |||
259 | if (copy_from_user(&inout, argp, sizeof(inout))) | 259 | if (copy_from_user(&inout, argp, sizeof(inout))) |
260 | return -EFAULT; | 260 | return -EFAULT; |
261 | 261 | ||
262 | buffer = kmalloc(inout.len, GFP_KERNEL); | 262 | buffer = kzalloc(inout.len, GFP_KERNEL); |
263 | if (buffer == NULL) | 263 | if (buffer == NULL) |
264 | return -ENOMEM; | 264 | return -ENOMEM; |
265 | 265 | ||
266 | memset(buffer,0,inout.len); | ||
267 | vfc_lock_device(dev); | 266 | vfc_lock_device(dev); |
268 | inout.ret= | 267 | inout.ret= |
269 | vfc_i2c_recvbuf(dev,inout.addr & 0xff | 268 | vfc_i2c_recvbuf(dev,inout.addr & 0xff |
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c index 6349dd617f85..eee590a51d8a 100644 --- a/drivers/sbus/sbus.c +++ b/drivers/sbus/sbus.c | |||
@@ -35,7 +35,7 @@ struct sbus_bus *sbus_root; | |||
35 | static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sdev) | 35 | static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sdev) |
36 | { | 36 | { |
37 | unsigned long base; | 37 | unsigned long base; |
38 | void *pval; | 38 | const void *pval; |
39 | int len, err; | 39 | int len, err; |
40 | 40 | ||
41 | sdev->prom_node = dp->node; | 41 | sdev->prom_node = dp->node; |
@@ -86,7 +86,7 @@ static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sde | |||
86 | 86 | ||
87 | static void __init sbus_bus_ranges_init(struct device_node *dp, struct sbus_bus *sbus) | 87 | static void __init sbus_bus_ranges_init(struct device_node *dp, struct sbus_bus *sbus) |
88 | { | 88 | { |
89 | void *pval; | 89 | const void *pval; |
90 | int len; | 90 | int len; |
91 | 91 | ||
92 | pval = of_get_property(dp, "ranges", &len); | 92 | pval = of_get_property(dp, "ranges", &len); |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 4cd280e86966..fcc4cb6c7f46 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -1763,9 +1763,15 @@ config SUN3X_ESP | |||
1763 | The ESP was an on-board SCSI controller used on Sun 3/80 | 1763 | The ESP was an on-board SCSI controller used on Sun 3/80 |
1764 | machines. Say Y here to compile in support for it. | 1764 | machines. Say Y here to compile in support for it. |
1765 | 1765 | ||
1766 | config SCSI_ESP_CORE | ||
1767 | tristate "ESP Scsi Driver Core" | ||
1768 | depends on SCSI | ||
1769 | select SCSI_SPI_ATTRS | ||
1770 | |||
1766 | config SCSI_SUNESP | 1771 | config SCSI_SUNESP |
1767 | tristate "Sparc ESP Scsi Driver" | 1772 | tristate "Sparc ESP Scsi Driver" |
1768 | depends on SBUS && SCSI | 1773 | depends on SBUS && SCSI |
1774 | select SCSI_ESP_CORE | ||
1769 | help | 1775 | help |
1770 | This is the driver for the Sun ESP SCSI host adapter. The ESP | 1776 | This is the driver for the Sun ESP SCSI host adapter. The ESP |
1771 | chipset is present in most SPARC SBUS-based computers. | 1777 | chipset is present in most SPARC SBUS-based computers. |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 79ecf4ebe6eb..70cff4c599d7 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -106,7 +106,8 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o | |||
106 | obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ | 106 | obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ |
107 | obj-$(CONFIG_MEGARAID_SAS) += megaraid/ | 107 | obj-$(CONFIG_MEGARAID_SAS) += megaraid/ |
108 | obj-$(CONFIG_SCSI_ACARD) += atp870u.o | 108 | obj-$(CONFIG_SCSI_ACARD) += atp870u.o |
109 | obj-$(CONFIG_SCSI_SUNESP) += esp.o | 109 | obj-$(CONFIG_SCSI_ESP_CORE) += esp_scsi.o |
110 | obj-$(CONFIG_SCSI_SUNESP) += sun_esp.o | ||
110 | obj-$(CONFIG_SCSI_GDTH) += gdth.o | 111 | obj-$(CONFIG_SCSI_GDTH) += gdth.o |
111 | obj-$(CONFIG_SCSI_INITIO) += initio.o | 112 | obj-$(CONFIG_SCSI_INITIO) += initio.o |
112 | obj-$(CONFIG_SCSI_INIA100) += a100u2w.o | 113 | obj-$(CONFIG_SCSI_INIA100) += a100u2w.o |
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c deleted file mode 100644 index 2c2fe80bc42a..000000000000 --- a/drivers/scsi/esp.c +++ /dev/null | |||
@@ -1,4394 +0,0 @@ | |||
1 | /* esp.c: ESP Sun SCSI driver. | ||
2 | * | ||
3 | * Copyright (C) 1995, 1998, 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | /* TODO: | ||
7 | * | ||
8 | * 1) Maybe disable parity checking in config register one for SCSI1 | ||
9 | * targets. (Gilmore says parity error on the SBus can lock up | ||
10 | * old sun4c's) | ||
11 | * 2) Add support for DMA2 pipelining. | ||
12 | * 3) Add tagged queueing. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/blkdev.h> | ||
21 | #include <linux/proc_fs.h> | ||
22 | #include <linux/stat.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/module.h> | ||
27 | |||
28 | #include "esp.h" | ||
29 | |||
30 | #include <asm/sbus.h> | ||
31 | #include <asm/dma.h> | ||
32 | #include <asm/system.h> | ||
33 | #include <asm/ptrace.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/oplib.h> | ||
36 | #include <asm/io.h> | ||
37 | #include <asm/irq.h> | ||
38 | #ifndef __sparc_v9__ | ||
39 | #include <asm/machines.h> | ||
40 | #include <asm/idprom.h> | ||
41 | #endif | ||
42 | |||
43 | #include <scsi/scsi.h> | ||
44 | #include <scsi/scsi_cmnd.h> | ||
45 | #include <scsi/scsi_device.h> | ||
46 | #include <scsi/scsi_eh.h> | ||
47 | #include <scsi/scsi_host.h> | ||
48 | #include <scsi/scsi_tcq.h> | ||
49 | |||
50 | #define DRV_VERSION "1.101" | ||
51 | |||
52 | #define DEBUG_ESP | ||
53 | /* #define DEBUG_ESP_HME */ | ||
54 | /* #define DEBUG_ESP_DATA */ | ||
55 | /* #define DEBUG_ESP_QUEUE */ | ||
56 | /* #define DEBUG_ESP_DISCONNECT */ | ||
57 | /* #define DEBUG_ESP_STATUS */ | ||
58 | /* #define DEBUG_ESP_PHASES */ | ||
59 | /* #define DEBUG_ESP_WORKBUS */ | ||
60 | /* #define DEBUG_STATE_MACHINE */ | ||
61 | /* #define DEBUG_ESP_CMDS */ | ||
62 | /* #define DEBUG_ESP_IRQS */ | ||
63 | /* #define DEBUG_SDTR */ | ||
64 | /* #define DEBUG_ESP_SG */ | ||
65 | |||
66 | /* Use the following to sprinkle debugging messages in a way which | ||
67 | * suits you if combinations of the above become too verbose when | ||
68 | * trying to track down a specific problem. | ||
69 | */ | ||
70 | /* #define DEBUG_ESP_MISC */ | ||
71 | |||
72 | #if defined(DEBUG_ESP) | ||
73 | #define ESPLOG(foo) printk foo | ||
74 | #else | ||
75 | #define ESPLOG(foo) | ||
76 | #endif /* (DEBUG_ESP) */ | ||
77 | |||
78 | #if defined(DEBUG_ESP_HME) | ||
79 | #define ESPHME(foo) printk foo | ||
80 | #else | ||
81 | #define ESPHME(foo) | ||
82 | #endif | ||
83 | |||
84 | #if defined(DEBUG_ESP_DATA) | ||
85 | #define ESPDATA(foo) printk foo | ||
86 | #else | ||
87 | #define ESPDATA(foo) | ||
88 | #endif | ||
89 | |||
90 | #if defined(DEBUG_ESP_QUEUE) | ||
91 | #define ESPQUEUE(foo) printk foo | ||
92 | #else | ||
93 | #define ESPQUEUE(foo) | ||
94 | #endif | ||
95 | |||
96 | #if defined(DEBUG_ESP_DISCONNECT) | ||
97 | #define ESPDISC(foo) printk foo | ||
98 | #else | ||
99 | #define ESPDISC(foo) | ||
100 | #endif | ||
101 | |||
102 | #if defined(DEBUG_ESP_STATUS) | ||
103 | #define ESPSTAT(foo) printk foo | ||
104 | #else | ||
105 | #define ESPSTAT(foo) | ||
106 | #endif | ||
107 | |||
108 | #if defined(DEBUG_ESP_PHASES) | ||
109 | #define ESPPHASE(foo) printk foo | ||
110 | #else | ||
111 | #define ESPPHASE(foo) | ||
112 | #endif | ||
113 | |||
114 | #if defined(DEBUG_ESP_WORKBUS) | ||
115 | #define ESPBUS(foo) printk foo | ||
116 | #else | ||
117 | #define ESPBUS(foo) | ||
118 | #endif | ||
119 | |||
120 | #if defined(DEBUG_ESP_IRQS) | ||
121 | #define ESPIRQ(foo) printk foo | ||
122 | #else | ||
123 | #define ESPIRQ(foo) | ||
124 | #endif | ||
125 | |||
126 | #if defined(DEBUG_SDTR) | ||
127 | #define ESPSDTR(foo) printk foo | ||
128 | #else | ||
129 | #define ESPSDTR(foo) | ||
130 | #endif | ||
131 | |||
132 | #if defined(DEBUG_ESP_MISC) | ||
133 | #define ESPMISC(foo) printk foo | ||
134 | #else | ||
135 | #define ESPMISC(foo) | ||
136 | #endif | ||
137 | |||
138 | /* Command phase enumeration. */ | ||
139 | enum { | ||
140 | not_issued = 0x00, /* Still in the issue_SC queue. */ | ||
141 | |||
142 | /* Various forms of selecting a target. */ | ||
143 | #define in_slct_mask 0x10 | ||
144 | in_slct_norm = 0x10, /* ESP is arbitrating, normal selection */ | ||
145 | in_slct_stop = 0x11, /* ESP will select, then stop with IRQ */ | ||
146 | in_slct_msg = 0x12, /* select, then send a message */ | ||
147 | in_slct_tag = 0x13, /* select and send tagged queue msg */ | ||
148 | in_slct_sneg = 0x14, /* select and acquire sync capabilities */ | ||
149 | |||
150 | /* Any post selection activity. */ | ||
151 | #define in_phases_mask 0x20 | ||
152 | in_datain = 0x20, /* Data is transferring from the bus */ | ||
153 | in_dataout = 0x21, /* Data is transferring to the bus */ | ||
154 | in_data_done = 0x22, /* Last DMA data operation done (maybe) */ | ||
155 | in_msgin = 0x23, /* Eating message from target */ | ||
156 | in_msgincont = 0x24, /* Eating more msg bytes from target */ | ||
157 | in_msgindone = 0x25, /* Decide what to do with what we got */ | ||
158 | in_msgout = 0x26, /* Sending message to target */ | ||
159 | in_msgoutdone = 0x27, /* Done sending msg out */ | ||
160 | in_cmdbegin = 0x28, /* Sending cmd after abnormal selection */ | ||
161 | in_cmdend = 0x29, /* Done sending slow cmd */ | ||
162 | in_status = 0x2a, /* Was in status phase, finishing cmd */ | ||
163 | in_freeing = 0x2b, /* freeing the bus for cmd cmplt or disc */ | ||
164 | in_the_dark = 0x2c, /* Don't know what bus phase we are in */ | ||
165 | |||
166 | /* Special states, ie. not normal bus transitions... */ | ||
167 | #define in_spec_mask 0x80 | ||
168 | in_abortone = 0x80, /* Aborting one command currently */ | ||
169 | in_abortall = 0x81, /* Blowing away all commands we have */ | ||
170 | in_resetdev = 0x82, /* SCSI target reset in progress */ | ||
171 | in_resetbus = 0x83, /* SCSI bus reset in progress */ | ||
172 | in_tgterror = 0x84, /* Target did something stupid */ | ||
173 | }; | ||
174 | |||
175 | enum { | ||
176 | /* Zero has special meaning, see skipahead[12]. */ | ||
177 | /*0*/ do_never, | ||
178 | |||
179 | /*1*/ do_phase_determine, | ||
180 | /*2*/ do_reset_bus, | ||
181 | /*3*/ do_reset_complete, | ||
182 | /*4*/ do_work_bus, | ||
183 | /*5*/ do_intr_end | ||
184 | }; | ||
185 | |||
186 | /* Forward declarations. */ | ||
187 | static irqreturn_t esp_intr(int irq, void *dev_id); | ||
188 | |||
189 | /* Debugging routines */ | ||
190 | struct esp_cmdstrings { | ||
191 | u8 cmdchar; | ||
192 | char *text; | ||
193 | } esp_cmd_strings[] = { | ||
194 | /* Miscellaneous */ | ||
195 | { ESP_CMD_NULL, "ESP_NOP", }, | ||
196 | { ESP_CMD_FLUSH, "FIFO_FLUSH", }, | ||
197 | { ESP_CMD_RC, "RSTESP", }, | ||
198 | { ESP_CMD_RS, "RSTSCSI", }, | ||
199 | /* Disconnected State Group */ | ||
200 | { ESP_CMD_RSEL, "RESLCTSEQ", }, | ||
201 | { ESP_CMD_SEL, "SLCTNATN", }, | ||
202 | { ESP_CMD_SELA, "SLCTATN", }, | ||
203 | { ESP_CMD_SELAS, "SLCTATNSTOP", }, | ||
204 | { ESP_CMD_ESEL, "ENSLCTRESEL", }, | ||
205 | { ESP_CMD_DSEL, "DISSELRESEL", }, | ||
206 | { ESP_CMD_SA3, "SLCTATN3", }, | ||
207 | { ESP_CMD_RSEL3, "RESLCTSEQ", }, | ||
208 | /* Target State Group */ | ||
209 | { ESP_CMD_SMSG, "SNDMSG", }, | ||
210 | { ESP_CMD_SSTAT, "SNDSTATUS", }, | ||
211 | { ESP_CMD_SDATA, "SNDDATA", }, | ||
212 | { ESP_CMD_DSEQ, "DISCSEQ", }, | ||
213 | { ESP_CMD_TSEQ, "TERMSEQ", }, | ||
214 | { ESP_CMD_TCCSEQ, "TRGTCMDCOMPSEQ", }, | ||
215 | { ESP_CMD_DCNCT, "DISC", }, | ||
216 | { ESP_CMD_RMSG, "RCVMSG", }, | ||
217 | { ESP_CMD_RCMD, "RCVCMD", }, | ||
218 | { ESP_CMD_RDATA, "RCVDATA", }, | ||
219 | { ESP_CMD_RCSEQ, "RCVCMDSEQ", }, | ||
220 | /* Initiator State Group */ | ||
221 | { ESP_CMD_TI, "TRANSINFO", }, | ||
222 | { ESP_CMD_ICCSEQ, "INICMDSEQCOMP", }, | ||
223 | { ESP_CMD_MOK, "MSGACCEPTED", }, | ||
224 | { ESP_CMD_TPAD, "TPAD", }, | ||
225 | { ESP_CMD_SATN, "SATN", }, | ||
226 | { ESP_CMD_RATN, "RATN", }, | ||
227 | }; | ||
228 | #define NUM_ESP_COMMANDS ((sizeof(esp_cmd_strings)) / (sizeof(struct esp_cmdstrings))) | ||
229 | |||
230 | /* Print textual representation of an ESP command */ | ||
231 | static inline void esp_print_cmd(u8 espcmd) | ||
232 | { | ||
233 | u8 dma_bit = espcmd & ESP_CMD_DMA; | ||
234 | int i; | ||
235 | |||
236 | espcmd &= ~dma_bit; | ||
237 | for (i = 0; i < NUM_ESP_COMMANDS; i++) | ||
238 | if (esp_cmd_strings[i].cmdchar == espcmd) | ||
239 | break; | ||
240 | if (i == NUM_ESP_COMMANDS) | ||
241 | printk("ESP_Unknown"); | ||
242 | else | ||
243 | printk("%s%s", esp_cmd_strings[i].text, | ||
244 | ((dma_bit) ? "+DMA" : "")); | ||
245 | } | ||
246 | |||
247 | /* Print the status register's value */ | ||
248 | static inline void esp_print_statreg(u8 statreg) | ||
249 | { | ||
250 | u8 phase; | ||
251 | |||
252 | printk("STATUS<"); | ||
253 | phase = statreg & ESP_STAT_PMASK; | ||
254 | printk("%s,", (phase == ESP_DOP ? "DATA-OUT" : | ||
255 | (phase == ESP_DIP ? "DATA-IN" : | ||
256 | (phase == ESP_CMDP ? "COMMAND" : | ||
257 | (phase == ESP_STATP ? "STATUS" : | ||
258 | (phase == ESP_MOP ? "MSG-OUT" : | ||
259 | (phase == ESP_MIP ? "MSG_IN" : | ||
260 | "unknown"))))))); | ||
261 | if (statreg & ESP_STAT_TDONE) | ||
262 | printk("TRANS_DONE,"); | ||
263 | if (statreg & ESP_STAT_TCNT) | ||
264 | printk("TCOUNT_ZERO,"); | ||
265 | if (statreg & ESP_STAT_PERR) | ||
266 | printk("P_ERROR,"); | ||
267 | if (statreg & ESP_STAT_SPAM) | ||
268 | printk("SPAM,"); | ||
269 | if (statreg & ESP_STAT_INTR) | ||
270 | printk("IRQ,"); | ||
271 | printk(">"); | ||
272 | } | ||
273 | |||
274 | /* Print the interrupt register's value */ | ||
275 | static inline void esp_print_ireg(u8 intreg) | ||
276 | { | ||
277 | printk("INTREG< "); | ||
278 | if (intreg & ESP_INTR_S) | ||
279 | printk("SLCT_NATN "); | ||
280 | if (intreg & ESP_INTR_SATN) | ||
281 | printk("SLCT_ATN "); | ||
282 | if (intreg & ESP_INTR_RSEL) | ||
283 | printk("RSLCT "); | ||
284 | if (intreg & ESP_INTR_FDONE) | ||
285 | printk("FDONE "); | ||
286 | if (intreg & ESP_INTR_BSERV) | ||
287 | printk("BSERV "); | ||
288 | if (intreg & ESP_INTR_DC) | ||
289 | printk("DISCNCT "); | ||
290 | if (intreg & ESP_INTR_IC) | ||
291 | printk("ILL_CMD "); | ||
292 | if (intreg & ESP_INTR_SR) | ||
293 | printk("SCSI_BUS_RESET "); | ||
294 | printk(">"); | ||
295 | } | ||
296 | |||
297 | /* Print the sequence step registers contents */ | ||
298 | static inline void esp_print_seqreg(u8 stepreg) | ||
299 | { | ||
300 | stepreg &= ESP_STEP_VBITS; | ||
301 | printk("STEP<%s>", | ||
302 | (stepreg == ESP_STEP_ASEL ? "SLCT_ARB_CMPLT" : | ||
303 | (stepreg == ESP_STEP_SID ? "1BYTE_MSG_SENT" : | ||
304 | (stepreg == ESP_STEP_NCMD ? "NOT_IN_CMD_PHASE" : | ||
305 | (stepreg == ESP_STEP_PPC ? "CMD_BYTES_LOST" : | ||
306 | (stepreg == ESP_STEP_FINI4 ? "CMD_SENT_OK" : | ||
307 | "UNKNOWN")))))); | ||
308 | } | ||
309 | |||
310 | static char *phase_string(int phase) | ||
311 | { | ||
312 | switch (phase) { | ||
313 | case not_issued: | ||
314 | return "UNISSUED"; | ||
315 | case in_slct_norm: | ||
316 | return "SLCTNORM"; | ||
317 | case in_slct_stop: | ||
318 | return "SLCTSTOP"; | ||
319 | case in_slct_msg: | ||
320 | return "SLCTMSG"; | ||
321 | case in_slct_tag: | ||
322 | return "SLCTTAG"; | ||
323 | case in_slct_sneg: | ||
324 | return "SLCTSNEG"; | ||
325 | case in_datain: | ||
326 | return "DATAIN"; | ||
327 | case in_dataout: | ||
328 | return "DATAOUT"; | ||
329 | case in_data_done: | ||
330 | return "DATADONE"; | ||
331 | case in_msgin: | ||
332 | return "MSGIN"; | ||
333 | case in_msgincont: | ||
334 | return "MSGINCONT"; | ||
335 | case in_msgindone: | ||
336 | return "MSGINDONE"; | ||
337 | case in_msgout: | ||
338 | return "MSGOUT"; | ||
339 | case in_msgoutdone: | ||
340 | return "MSGOUTDONE"; | ||
341 | case in_cmdbegin: | ||
342 | return "CMDBEGIN"; | ||
343 | case in_cmdend: | ||
344 | return "CMDEND"; | ||
345 | case in_status: | ||
346 | return "STATUS"; | ||
347 | case in_freeing: | ||
348 | return "FREEING"; | ||
349 | case in_the_dark: | ||
350 | return "CLUELESS"; | ||
351 | case in_abortone: | ||
352 | return "ABORTONE"; | ||
353 | case in_abortall: | ||
354 | return "ABORTALL"; | ||
355 | case in_resetdev: | ||
356 | return "RESETDEV"; | ||
357 | case in_resetbus: | ||
358 | return "RESETBUS"; | ||
359 | case in_tgterror: | ||
360 | return "TGTERROR"; | ||
361 | default: | ||
362 | return "UNKNOWN"; | ||
363 | }; | ||
364 | } | ||
365 | |||
366 | #ifdef DEBUG_STATE_MACHINE | ||
367 | static inline void esp_advance_phase(struct scsi_cmnd *s, int newphase) | ||
368 | { | ||
369 | ESPLOG(("<%s>", phase_string(newphase))); | ||
370 | s->SCp.sent_command = s->SCp.phase; | ||
371 | s->SCp.phase = newphase; | ||
372 | } | ||
373 | #else | ||
374 | #define esp_advance_phase(__s, __newphase) \ | ||
375 | (__s)->SCp.sent_command = (__s)->SCp.phase; \ | ||
376 | (__s)->SCp.phase = (__newphase); | ||
377 | #endif | ||
378 | |||
379 | #ifdef DEBUG_ESP_CMDS | ||
380 | static inline void esp_cmd(struct esp *esp, u8 cmd) | ||
381 | { | ||
382 | esp->espcmdlog[esp->espcmdent] = cmd; | ||
383 | esp->espcmdent = (esp->espcmdent + 1) & 31; | ||
384 | sbus_writeb(cmd, esp->eregs + ESP_CMD); | ||
385 | } | ||
386 | #else | ||
387 | #define esp_cmd(__esp, __cmd) \ | ||
388 | sbus_writeb((__cmd), ((__esp)->eregs) + ESP_CMD) | ||
389 | #endif | ||
390 | |||
391 | #define ESP_INTSOFF(__dregs) \ | ||
392 | sbus_writel(sbus_readl((__dregs)+DMA_CSR)&~(DMA_INT_ENAB), (__dregs)+DMA_CSR) | ||
393 | #define ESP_INTSON(__dregs) \ | ||
394 | sbus_writel(sbus_readl((__dregs)+DMA_CSR)|DMA_INT_ENAB, (__dregs)+DMA_CSR) | ||
395 | #define ESP_IRQ_P(__dregs) \ | ||
396 | (sbus_readl((__dregs)+DMA_CSR) & (DMA_HNDL_INTR|DMA_HNDL_ERROR)) | ||
397 | |||
398 | /* How we use the various Linux SCSI data structures for operation. | ||
399 | * | ||
400 | * struct scsi_cmnd: | ||
401 | * | ||
402 | * We keep track of the synchronous capabilities of a target | ||
403 | * in the device member, using sync_min_period and | ||
404 | * sync_max_offset. These are the values we directly write | ||
405 | * into the ESP registers while running a command. If offset | ||
406 | * is zero the ESP will use asynchronous transfers. | ||
407 | * If the borken flag is set we assume we shouldn't even bother | ||
408 | * trying to negotiate for synchronous transfer as this target | ||
409 | * is really stupid. If we notice the target is dropping the | ||
410 | * bus, and we have been allowing it to disconnect, we clear | ||
411 | * the disconnect flag. | ||
412 | */ | ||
413 | |||
414 | |||
415 | /* Manipulation of the ESP command queues. Thanks to the aha152x driver | ||
416 | * and its author, Juergen E. Fischer, for the methods used here. | ||
417 | * Note that these are per-ESP queues, not global queues like | ||
418 | * the aha152x driver uses. | ||
419 | */ | ||
420 | static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC) | ||
421 | { | ||
422 | struct scsi_cmnd *end; | ||
423 | |||
424 | new_SC->host_scribble = (unsigned char *) NULL; | ||
425 | if (!*SC) | ||
426 | *SC = new_SC; | ||
427 | else { | ||
428 | for (end=*SC;end->host_scribble;end=(struct scsi_cmnd *)end->host_scribble) | ||
429 | ; | ||
430 | end->host_scribble = (unsigned char *) new_SC; | ||
431 | } | ||
432 | } | ||
433 | |||
434 | static inline void prepend_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC) | ||
435 | { | ||
436 | new_SC->host_scribble = (unsigned char *) *SC; | ||
437 | *SC = new_SC; | ||
438 | } | ||
439 | |||
440 | static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd **SC) | ||
441 | { | ||
442 | struct scsi_cmnd *ptr; | ||
443 | ptr = *SC; | ||
444 | if (ptr) | ||
445 | *SC = (struct scsi_cmnd *) (*SC)->host_scribble; | ||
446 | return ptr; | ||
447 | } | ||
448 | |||
449 | static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC, int target, int lun) | ||
450 | { | ||
451 | struct scsi_cmnd *ptr, *prev; | ||
452 | |||
453 | for (ptr = *SC, prev = NULL; | ||
454 | ptr && ((ptr->device->id != target) || (ptr->device->lun != lun)); | ||
455 | prev = ptr, ptr = (struct scsi_cmnd *) ptr->host_scribble) | ||
456 | ; | ||
457 | if (ptr) { | ||
458 | if (prev) | ||
459 | prev->host_scribble=ptr->host_scribble; | ||
460 | else | ||
461 | *SC=(struct scsi_cmnd *)ptr->host_scribble; | ||
462 | } | ||
463 | return ptr; | ||
464 | } | ||
465 | |||
466 | /* Resetting various pieces of the ESP scsi driver chipset/buses. */ | ||
467 | static void esp_reset_dma(struct esp *esp) | ||
468 | { | ||
469 | int can_do_burst16, can_do_burst32, can_do_burst64; | ||
470 | int can_do_sbus64; | ||
471 | u32 tmp; | ||
472 | |||
473 | can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; | ||
474 | can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; | ||
475 | can_do_burst64 = 0; | ||
476 | can_do_sbus64 = 0; | ||
477 | if (sbus_can_dma_64bit(esp->sdev)) | ||
478 | can_do_sbus64 = 1; | ||
479 | if (sbus_can_burst64(esp->sdev)) | ||
480 | can_do_burst64 = (esp->bursts & DMA_BURST64) != 0; | ||
481 | |||
482 | /* Punt the DVMA into a known state. */ | ||
483 | if (esp->dma->revision != dvmahme) { | ||
484 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
485 | sbus_writel(tmp | DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
486 | sbus_writel(tmp & ~DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
487 | } | ||
488 | switch (esp->dma->revision) { | ||
489 | case dvmahme: | ||
490 | /* This is the HME DVMA gate array. */ | ||
491 | |||
492 | sbus_writel(DMA_RESET_FAS366, esp->dregs + DMA_CSR); | ||
493 | sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
494 | |||
495 | esp->prev_hme_dmacsr = (DMA_PARITY_OFF|DMA_2CLKS|DMA_SCSI_DISAB|DMA_INT_ENAB); | ||
496 | esp->prev_hme_dmacsr &= ~(DMA_ENABLE|DMA_ST_WRITE|DMA_BRST_SZ); | ||
497 | |||
498 | if (can_do_burst64) | ||
499 | esp->prev_hme_dmacsr |= DMA_BRST64; | ||
500 | else if (can_do_burst32) | ||
501 | esp->prev_hme_dmacsr |= DMA_BRST32; | ||
502 | |||
503 | if (can_do_sbus64) { | ||
504 | esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64; | ||
505 | sbus_set_sbus64(esp->sdev, esp->bursts); | ||
506 | } | ||
507 | |||
508 | /* This chip is horrible. */ | ||
509 | while (sbus_readl(esp->dregs + DMA_CSR) & DMA_PEND_READ) | ||
510 | udelay(1); | ||
511 | |||
512 | sbus_writel(0, esp->dregs + DMA_CSR); | ||
513 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
514 | |||
515 | /* This is necessary to avoid having the SCSI channel | ||
516 | * engine lock up on us. | ||
517 | */ | ||
518 | sbus_writel(0, esp->dregs + DMA_ADDR); | ||
519 | |||
520 | break; | ||
521 | case dvmarev2: | ||
522 | /* This is the gate array found in the sun4m | ||
523 | * NCR SBUS I/O subsystem. | ||
524 | */ | ||
525 | if (esp->erev != esp100) { | ||
526 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
527 | sbus_writel(tmp | DMA_3CLKS, esp->dregs + DMA_CSR); | ||
528 | } | ||
529 | break; | ||
530 | case dvmarev3: | ||
531 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
532 | tmp &= ~DMA_3CLKS; | ||
533 | tmp |= DMA_2CLKS; | ||
534 | if (can_do_burst32) { | ||
535 | tmp &= ~DMA_BRST_SZ; | ||
536 | tmp |= DMA_BRST32; | ||
537 | } | ||
538 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
539 | break; | ||
540 | case dvmaesc1: | ||
541 | /* This is the DMA unit found on SCSI/Ether cards. */ | ||
542 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
543 | tmp |= DMA_ADD_ENABLE; | ||
544 | tmp &= ~DMA_BCNT_ENAB; | ||
545 | if (!can_do_burst32 && can_do_burst16) { | ||
546 | tmp |= DMA_ESC_BURST; | ||
547 | } else { | ||
548 | tmp &= ~(DMA_ESC_BURST); | ||
549 | } | ||
550 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
551 | break; | ||
552 | default: | ||
553 | break; | ||
554 | }; | ||
555 | ESP_INTSON(esp->dregs); | ||
556 | } | ||
557 | |||
558 | /* Reset the ESP chip, _not_ the SCSI bus. */ | ||
559 | static void __init esp_reset_esp(struct esp *esp) | ||
560 | { | ||
561 | u8 family_code, version; | ||
562 | int i; | ||
563 | |||
564 | /* Now reset the ESP chip */ | ||
565 | esp_cmd(esp, ESP_CMD_RC); | ||
566 | esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
567 | esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
568 | |||
569 | /* Reload the configuration registers */ | ||
570 | sbus_writeb(esp->cfact, esp->eregs + ESP_CFACT); | ||
571 | esp->prev_stp = 0; | ||
572 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
573 | esp->prev_soff = 0; | ||
574 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
575 | sbus_writeb(esp->neg_defp, esp->eregs + ESP_TIMEO); | ||
576 | |||
577 | /* This is the only point at which it is reliable to read | ||
578 | * the ID-code for the fast ESP chip variants. | ||
579 | */ | ||
580 | esp->max_period = ((35 * esp->ccycle) / 1000); | ||
581 | if (esp->erev == fast) { | ||
582 | version = sbus_readb(esp->eregs + ESP_UID); | ||
583 | family_code = (version & 0xf8) >> 3; | ||
584 | if (family_code == 0x02) | ||
585 | esp->erev = fas236; | ||
586 | else if (family_code == 0x0a) | ||
587 | esp->erev = fashme; /* Version is usually '5'. */ | ||
588 | else | ||
589 | esp->erev = fas100a; | ||
590 | ESPMISC(("esp%d: FAST chip is %s (family=%d, version=%d)\n", | ||
591 | esp->esp_id, | ||
592 | (esp->erev == fas236) ? "fas236" : | ||
593 | ((esp->erev == fas100a) ? "fas100a" : | ||
594 | "fasHME"), family_code, (version & 7))); | ||
595 | |||
596 | esp->min_period = ((4 * esp->ccycle) / 1000); | ||
597 | } else { | ||
598 | esp->min_period = ((5 * esp->ccycle) / 1000); | ||
599 | } | ||
600 | esp->max_period = (esp->max_period + 3)>>2; | ||
601 | esp->min_period = (esp->min_period + 3)>>2; | ||
602 | |||
603 | sbus_writeb(esp->config1, esp->eregs + ESP_CFG1); | ||
604 | switch (esp->erev) { | ||
605 | case esp100: | ||
606 | /* nothing to do */ | ||
607 | break; | ||
608 | case esp100a: | ||
609 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
610 | break; | ||
611 | case esp236: | ||
612 | /* Slow 236 */ | ||
613 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
614 | esp->prev_cfg3 = esp->config3[0]; | ||
615 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
616 | break; | ||
617 | case fashme: | ||
618 | esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); | ||
619 | /* fallthrough... */ | ||
620 | case fas236: | ||
621 | /* Fast 236 or HME */ | ||
622 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
623 | for (i = 0; i < 16; i++) { | ||
624 | if (esp->erev == fashme) { | ||
625 | u8 cfg3; | ||
626 | |||
627 | cfg3 = ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH; | ||
628 | if (esp->scsi_id >= 8) | ||
629 | cfg3 |= ESP_CONFIG3_IDBIT3; | ||
630 | esp->config3[i] |= cfg3; | ||
631 | } else { | ||
632 | esp->config3[i] |= ESP_CONFIG3_FCLK; | ||
633 | } | ||
634 | } | ||
635 | esp->prev_cfg3 = esp->config3[0]; | ||
636 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
637 | if (esp->erev == fashme) { | ||
638 | esp->radelay = 80; | ||
639 | } else { | ||
640 | if (esp->diff) | ||
641 | esp->radelay = 0; | ||
642 | else | ||
643 | esp->radelay = 96; | ||
644 | } | ||
645 | break; | ||
646 | case fas100a: | ||
647 | /* Fast 100a */ | ||
648 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
649 | for (i = 0; i < 16; i++) | ||
650 | esp->config3[i] |= ESP_CONFIG3_FCLOCK; | ||
651 | esp->prev_cfg3 = esp->config3[0]; | ||
652 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
653 | esp->radelay = 32; | ||
654 | break; | ||
655 | default: | ||
656 | panic("esp: what could it be... I wonder..."); | ||
657 | break; | ||
658 | }; | ||
659 | |||
660 | /* Eat any bitrot in the chip */ | ||
661 | sbus_readb(esp->eregs + ESP_INTRPT); | ||
662 | udelay(100); | ||
663 | } | ||
664 | |||
665 | /* This places the ESP into a known state at boot time. */ | ||
666 | static void __init esp_bootup_reset(struct esp *esp) | ||
667 | { | ||
668 | u8 tmp; | ||
669 | |||
670 | /* Reset the DMA */ | ||
671 | esp_reset_dma(esp); | ||
672 | |||
673 | /* Reset the ESP */ | ||
674 | esp_reset_esp(esp); | ||
675 | |||
676 | /* Reset the SCSI bus, but tell ESP not to generate an irq */ | ||
677 | tmp = sbus_readb(esp->eregs + ESP_CFG1); | ||
678 | tmp |= ESP_CONFIG1_SRRDISAB; | ||
679 | sbus_writeb(tmp, esp->eregs + ESP_CFG1); | ||
680 | |||
681 | esp_cmd(esp, ESP_CMD_RS); | ||
682 | udelay(400); | ||
683 | |||
684 | sbus_writeb(esp->config1, esp->eregs + ESP_CFG1); | ||
685 | |||
686 | /* Eat any bitrot in the chip and we are done... */ | ||
687 | sbus_readb(esp->eregs + ESP_INTRPT); | ||
688 | } | ||
689 | |||
690 | static int __init esp_find_dvma(struct esp *esp, struct sbus_dev *dma_sdev) | ||
691 | { | ||
692 | struct sbus_dev *sdev = esp->sdev; | ||
693 | struct sbus_dma *dma; | ||
694 | |||
695 | if (dma_sdev != NULL) { | ||
696 | for_each_dvma(dma) { | ||
697 | if (dma->sdev == dma_sdev) | ||
698 | break; | ||
699 | } | ||
700 | } else { | ||
701 | for_each_dvma(dma) { | ||
702 | /* If allocated already, can't use it. */ | ||
703 | if (dma->allocated) | ||
704 | continue; | ||
705 | |||
706 | if (dma->sdev == NULL) | ||
707 | break; | ||
708 | |||
709 | /* If bus + slot are the same and it has the | ||
710 | * correct OBP name, it's ours. | ||
711 | */ | ||
712 | if (sdev->bus == dma->sdev->bus && | ||
713 | sdev->slot == dma->sdev->slot && | ||
714 | (!strcmp(dma->sdev->prom_name, "dma") || | ||
715 | !strcmp(dma->sdev->prom_name, "espdma"))) | ||
716 | break; | ||
717 | } | ||
718 | } | ||
719 | |||
720 | /* If we don't know how to handle the dvma, | ||
721 | * do not use this device. | ||
722 | */ | ||
723 | if (dma == NULL) { | ||
724 | printk("Cannot find dvma for ESP%d's SCSI\n", esp->esp_id); | ||
725 | return -1; | ||
726 | } | ||
727 | if (dma->allocated) { | ||
728 | printk("esp%d: can't use my espdma\n", esp->esp_id); | ||
729 | return -1; | ||
730 | } | ||
731 | dma->allocated = 1; | ||
732 | esp->dma = dma; | ||
733 | esp->dregs = dma->regs; | ||
734 | |||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | static int __init esp_map_regs(struct esp *esp, int hme) | ||
739 | { | ||
740 | struct sbus_dev *sdev = esp->sdev; | ||
741 | struct resource *res; | ||
742 | |||
743 | /* On HME, two reg sets exist, first is DVMA, | ||
744 | * second is ESP registers. | ||
745 | */ | ||
746 | if (hme) | ||
747 | res = &sdev->resource[1]; | ||
748 | else | ||
749 | res = &sdev->resource[0]; | ||
750 | |||
751 | esp->eregs = sbus_ioremap(res, 0, ESP_REG_SIZE, "ESP Registers"); | ||
752 | |||
753 | if (esp->eregs == 0) | ||
754 | return -1; | ||
755 | return 0; | ||
756 | } | ||
757 | |||
758 | static int __init esp_map_cmdarea(struct esp *esp) | ||
759 | { | ||
760 | struct sbus_dev *sdev = esp->sdev; | ||
761 | |||
762 | esp->esp_command = sbus_alloc_consistent(sdev, 16, | ||
763 | &esp->esp_command_dvma); | ||
764 | if (esp->esp_command == NULL || | ||
765 | esp->esp_command_dvma == 0) | ||
766 | return -1; | ||
767 | return 0; | ||
768 | } | ||
769 | |||
770 | static int __init esp_register_irq(struct esp *esp) | ||
771 | { | ||
772 | esp->ehost->irq = esp->irq = esp->sdev->irqs[0]; | ||
773 | |||
774 | /* We used to try various overly-clever things to | ||
775 | * reduce the interrupt processing overhead on | ||
776 | * sun4c/sun4m when multiple ESP's shared the | ||
777 | * same IRQ. It was too complex and messy to | ||
778 | * sanely maintain. | ||
779 | */ | ||
780 | if (request_irq(esp->ehost->irq, esp_intr, | ||
781 | IRQF_SHARED, "ESP SCSI", esp)) { | ||
782 | printk("esp%d: Cannot acquire irq line\n", | ||
783 | esp->esp_id); | ||
784 | return -1; | ||
785 | } | ||
786 | |||
787 | printk("esp%d: IRQ %d ", esp->esp_id, | ||
788 | esp->ehost->irq); | ||
789 | |||
790 | return 0; | ||
791 | } | ||
792 | |||
793 | static void __init esp_get_scsi_id(struct esp *esp) | ||
794 | { | ||
795 | struct sbus_dev *sdev = esp->sdev; | ||
796 | struct device_node *dp = sdev->ofdev.node; | ||
797 | |||
798 | esp->scsi_id = of_getintprop_default(dp, | ||
799 | "initiator-id", | ||
800 | -1); | ||
801 | if (esp->scsi_id == -1) | ||
802 | esp->scsi_id = of_getintprop_default(dp, | ||
803 | "scsi-initiator-id", | ||
804 | -1); | ||
805 | if (esp->scsi_id == -1) | ||
806 | esp->scsi_id = (sdev->bus == NULL) ? 7 : | ||
807 | of_getintprop_default(sdev->bus->ofdev.node, | ||
808 | "scsi-initiator-id", | ||
809 | 7); | ||
810 | esp->ehost->this_id = esp->scsi_id; | ||
811 | esp->scsi_id_mask = (1 << esp->scsi_id); | ||
812 | |||
813 | } | ||
814 | |||
815 | static void __init esp_get_clock_params(struct esp *esp) | ||
816 | { | ||
817 | struct sbus_dev *sdev = esp->sdev; | ||
818 | int prom_node = esp->prom_node; | ||
819 | int sbus_prom_node; | ||
820 | unsigned int fmhz; | ||
821 | u8 ccf; | ||
822 | |||
823 | if (sdev != NULL && sdev->bus != NULL) | ||
824 | sbus_prom_node = sdev->bus->prom_node; | ||
825 | else | ||
826 | sbus_prom_node = 0; | ||
827 | |||
828 | /* This is getting messy but it has to be done | ||
829 | * correctly or else you get weird behavior all | ||
830 | * over the place. We are trying to basically | ||
831 | * figure out three pieces of information. | ||
832 | * | ||
833 | * a) Clock Conversion Factor | ||
834 | * | ||
835 | * This is a representation of the input | ||
836 | * crystal clock frequency going into the | ||
837 | * ESP on this machine. Any operation whose | ||
838 | * timing is longer than 400ns depends on this | ||
839 | * value being correct. For example, you'll | ||
840 | * get blips for arbitration/selection during | ||
841 | * high load or with multiple targets if this | ||
842 | * is not set correctly. | ||
843 | * | ||
844 | * b) Selection Time-Out | ||
845 | * | ||
846 | * The ESP isn't very bright and will arbitrate | ||
847 | * for the bus and try to select a target | ||
848 | * forever if you let it. This value tells | ||
849 | * the ESP when it has taken too long to | ||
850 | * negotiate and that it should interrupt | ||
851 | * the CPU so we can see what happened. | ||
852 | * The value is computed as follows (from | ||
853 | * NCR/Symbios chip docs). | ||
854 | * | ||
855 | * (Time Out Period) * (Input Clock) | ||
856 | * STO = ---------------------------------- | ||
857 | * (8192) * (Clock Conversion Factor) | ||
858 | * | ||
859 | * You usually want the time out period to be | ||
860 | * around 250ms, I think we'll set it a little | ||
861 | * bit higher to account for fully loaded SCSI | ||
862 | * bus's and slow devices that don't respond so | ||
863 | * quickly to selection attempts. (yeah, I know | ||
864 | * this is out of spec. but there is a lot of | ||
865 | * buggy pieces of firmware out there so bite me) | ||
866 | * | ||
867 | * c) Empirical constants for synchronous offset | ||
868 | * and transfer period register values | ||
869 | * | ||
870 | * This entails the smallest and largest sync | ||
871 | * period we could ever handle on this ESP. | ||
872 | */ | ||
873 | |||
874 | fmhz = prom_getintdefault(prom_node, "clock-frequency", -1); | ||
875 | if (fmhz == -1) | ||
876 | fmhz = (!sbus_prom_node) ? 0 : | ||
877 | prom_getintdefault(sbus_prom_node, "clock-frequency", -1); | ||
878 | |||
879 | if (fmhz <= (5000000)) | ||
880 | ccf = 0; | ||
881 | else | ||
882 | ccf = (((5000000 - 1) + (fmhz))/(5000000)); | ||
883 | |||
884 | if (!ccf || ccf > 8) { | ||
885 | /* If we can't find anything reasonable, | ||
886 | * just assume 20MHZ. This is the clock | ||
887 | * frequency of the older sun4c's where I've | ||
888 | * been unable to find the clock-frequency | ||
889 | * PROM property. All other machines provide | ||
890 | * useful values it seems. | ||
891 | */ | ||
892 | ccf = ESP_CCF_F4; | ||
893 | fmhz = (20000000); | ||
894 | } | ||
895 | |||
896 | if (ccf == (ESP_CCF_F7 + 1)) | ||
897 | esp->cfact = ESP_CCF_F0; | ||
898 | else if (ccf == ESP_CCF_NEVER) | ||
899 | esp->cfact = ESP_CCF_F2; | ||
900 | else | ||
901 | esp->cfact = ccf; | ||
902 | esp->raw_cfact = ccf; | ||
903 | |||
904 | esp->cfreq = fmhz; | ||
905 | esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz); | ||
906 | esp->ctick = ESP_TICK(ccf, esp->ccycle); | ||
907 | esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf); | ||
908 | esp->sync_defp = SYNC_DEFP_SLOW; | ||
909 | |||
910 | printk("SCSI ID %d Clk %dMHz CCYC=%d CCF=%d TOut %d ", | ||
911 | esp->scsi_id, (fmhz / 1000000), | ||
912 | (int)esp->ccycle, (int)ccf, (int) esp->neg_defp); | ||
913 | } | ||
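As a worked example of the selection time-out formula quoted in the comment above (an editorial illustration, not a line of the deleted file): assuming a 20 MHz input clock, a clock conversion factor of 4, and the roughly 250 ms time-out the comment aims for,

    /* STO = (0.250 s * 20,000,000 Hz) / (8192 * 4)
     *     = 5,000,000 / 32,768
     *     ~ 152, which fits comfortably in the chip's 8-bit time-out register.
     */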
914 | |||
915 | static void __init esp_get_bursts(struct esp *esp, struct sbus_dev *dma) | ||
916 | { | ||
917 | struct sbus_dev *sdev = esp->sdev; | ||
918 | u8 bursts; | ||
919 | |||
920 | bursts = prom_getintdefault(esp->prom_node, "burst-sizes", 0xff); | ||
921 | |||
922 | if (dma) { | ||
923 | u8 tmp = prom_getintdefault(dma->prom_node, | ||
924 | "burst-sizes", 0xff); | ||
925 | if (tmp != 0xff) | ||
926 | bursts &= tmp; | ||
927 | } | ||
928 | |||
929 | if (sdev->bus) { | ||
930 | u8 tmp = prom_getintdefault(sdev->bus->prom_node, | ||
931 | "burst-sizes", 0xff); | ||
932 | if (tmp != 0xff) | ||
933 | bursts &= tmp; | ||
934 | } | ||
935 | |||
936 | if (bursts == 0xff || | ||
937 | (bursts & DMA_BURST16) == 0 || | ||
938 | (bursts & DMA_BURST32) == 0) | ||
939 | bursts = (DMA_BURST32 - 1); | ||
940 | |||
941 | esp->bursts = bursts; | ||
942 | } | ||
943 | |||
944 | static void __init esp_get_revision(struct esp *esp) | ||
945 | { | ||
946 | u8 tmp; | ||
947 | |||
948 | esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); | ||
949 | esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); | ||
950 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
951 | |||
952 | tmp = sbus_readb(esp->eregs + ESP_CFG2); | ||
953 | tmp &= ~ESP_CONFIG2_MAGIC; | ||
954 | if (tmp != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { | ||
955 | /* If what we write to cfg2 does not come back, cfg2 | ||
956 | * is not implemented, therefore this must be a plain | ||
957 | * esp100. | ||
958 | */ | ||
959 | esp->erev = esp100; | ||
960 | printk("NCR53C90(esp100)\n"); | ||
961 | } else { | ||
962 | esp->config2 = 0; | ||
963 | esp->prev_cfg3 = esp->config3[0] = 5; | ||
964 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
965 | sbus_writeb(0, esp->eregs + ESP_CFG3); | ||
966 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
967 | |||
968 | tmp = sbus_readb(esp->eregs + ESP_CFG3); | ||
969 | if (tmp != 5) { | ||
970 | /* The cfg2 register is implemented, however | ||
971 | * cfg3 is not, must be esp100a. | ||
972 | */ | ||
973 | esp->erev = esp100a; | ||
974 | printk("NCR53C90A(esp100a)\n"); | ||
975 | } else { | ||
976 | int target; | ||
977 | |||
978 | for (target = 0; target < 16; target++) | ||
979 | esp->config3[target] = 0; | ||
980 | esp->prev_cfg3 = 0; | ||
981 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
982 | |||
983 | /* All of cfg{1,2,3} implemented, must be one of | ||
984 | * the fas variants, figure out which one. | ||
985 | */ | ||
986 | if (esp->raw_cfact > ESP_CCF_F5) { | ||
987 | esp->erev = fast; | ||
988 | esp->sync_defp = SYNC_DEFP_FAST; | ||
989 | printk("NCR53C9XF(espfast)\n"); | ||
990 | } else { | ||
991 | esp->erev = esp236; | ||
992 | printk("NCR53C9x(esp236)\n"); | ||
993 | } | ||
994 | esp->config2 = 0; | ||
995 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
996 | } | ||
997 | } | ||
998 | } | ||
999 | |||
1000 | static void __init esp_init_swstate(struct esp *esp) | ||
1001 | { | ||
1002 | int i; | ||
1003 | |||
1004 | /* Command queues... */ | ||
1005 | esp->current_SC = NULL; | ||
1006 | esp->disconnected_SC = NULL; | ||
1007 | esp->issue_SC = NULL; | ||
1008 | |||
1009 | /* Target and current command state... */ | ||
1010 | esp->targets_present = 0; | ||
1011 | esp->resetting_bus = 0; | ||
1012 | esp->snip = 0; | ||
1013 | |||
1014 | init_waitqueue_head(&esp->reset_queue); | ||
1015 | |||
1016 | /* Debugging... */ | ||
1017 | for(i = 0; i < 32; i++) | ||
1018 | esp->espcmdlog[i] = 0; | ||
1019 | esp->espcmdent = 0; | ||
1020 | |||
1021 | /* MSG phase state... */ | ||
1022 | for(i = 0; i < 16; i++) { | ||
1023 | esp->cur_msgout[i] = 0; | ||
1024 | esp->cur_msgin[i] = 0; | ||
1025 | } | ||
1026 | esp->prevmsgout = esp->prevmsgin = 0; | ||
1027 | esp->msgout_len = esp->msgin_len = 0; | ||
1028 | |||
1029 | /* Clear the one behind caches to hold unmatchable values. */ | ||
1030 | esp->prev_soff = esp->prev_stp = esp->prev_cfg3 = 0xff; | ||
1031 | esp->prev_hme_dmacsr = 0xffffffff; | ||
1032 | } | ||
1033 | |||
1034 | static int __init detect_one_esp(struct scsi_host_template *tpnt, | ||
1035 | struct device *dev, | ||
1036 | struct sbus_dev *esp_dev, | ||
1037 | struct sbus_dev *espdma, | ||
1038 | struct sbus_bus *sbus, | ||
1039 | int hme) | ||
1040 | { | ||
1041 | static int instance; | ||
1042 | struct Scsi_Host *esp_host = scsi_host_alloc(tpnt, sizeof(struct esp)); | ||
1043 | struct esp *esp; | ||
1044 | |||
1045 | if (!esp_host) | ||
1046 | return -ENOMEM; | ||
1047 | |||
1048 | if (hme) | ||
1049 | esp_host->max_id = 16; | ||
1050 | esp = (struct esp *) esp_host->hostdata; | ||
1051 | esp->ehost = esp_host; | ||
1052 | esp->sdev = esp_dev; | ||
1053 | esp->esp_id = instance; | ||
1054 | esp->prom_node = esp_dev->prom_node; | ||
1055 | prom_getstring(esp->prom_node, "name", esp->prom_name, | ||
1056 | sizeof(esp->prom_name)); | ||
1057 | |||
1058 | if (esp_find_dvma(esp, espdma) < 0) | ||
1059 | goto fail_unlink; | ||
1060 | if (esp_map_regs(esp, hme) < 0) { | ||
1061 | printk("ESP registers unmappable"); | ||
1062 | goto fail_dvma_release; | ||
1063 | } | ||
1064 | if (esp_map_cmdarea(esp) < 0) { | ||
1065 | printk("ESP DVMA transport area unmappable"); | ||
1066 | goto fail_unmap_regs; | ||
1067 | } | ||
1068 | if (esp_register_irq(esp) < 0) | ||
1069 | goto fail_unmap_cmdarea; | ||
1070 | |||
1071 | esp_get_scsi_id(esp); | ||
1072 | |||
1073 | esp->diff = prom_getbool(esp->prom_node, "differential"); | ||
1074 | if (esp->diff) | ||
1075 | printk("Differential "); | ||
1076 | |||
1077 | esp_get_clock_params(esp); | ||
1078 | esp_get_bursts(esp, espdma); | ||
1079 | esp_get_revision(esp); | ||
1080 | esp_init_swstate(esp); | ||
1081 | |||
1082 | esp_bootup_reset(esp); | ||
1083 | |||
1084 | if (scsi_add_host(esp_host, dev)) | ||
1085 | goto fail_free_irq; | ||
1086 | |||
1087 | dev_set_drvdata(&esp_dev->ofdev.dev, esp); | ||
1088 | |||
1089 | scsi_scan_host(esp_host); | ||
1090 | instance++; | ||
1091 | |||
1092 | return 0; | ||
1093 | |||
1094 | fail_free_irq: | ||
1095 | free_irq(esp->ehost->irq, esp); | ||
1096 | |||
1097 | fail_unmap_cmdarea: | ||
1098 | sbus_free_consistent(esp->sdev, 16, | ||
1099 | (void *) esp->esp_command, | ||
1100 | esp->esp_command_dvma); | ||
1101 | |||
1102 | fail_unmap_regs: | ||
1103 | sbus_iounmap(esp->eregs, ESP_REG_SIZE); | ||
1104 | |||
1105 | fail_dvma_release: | ||
1106 | esp->dma->allocated = 0; | ||
1107 | |||
1108 | fail_unlink: | ||
1109 | scsi_host_put(esp_host); | ||
1110 | return -1; | ||
1111 | } | ||
1112 | |||
1113 | /* Detecting ESP chips on the machine. This is the simple and easy | ||
1114 | * version. | ||
1115 | */ | ||
1116 | static int __devexit esp_remove_common(struct esp *esp) | ||
1117 | { | ||
1118 | unsigned int irq = esp->ehost->irq; | ||
1119 | |||
1120 | scsi_remove_host(esp->ehost); | ||
1121 | |||
1122 | ESP_INTSOFF(esp->dregs); | ||
1123 | #if 0 | ||
1124 | esp_reset_dma(esp); | ||
1125 | esp_reset_esp(esp); | ||
1126 | #endif | ||
1127 | |||
1128 | free_irq(irq, esp); | ||
1129 | sbus_free_consistent(esp->sdev, 16, | ||
1130 | (void *) esp->esp_command, esp->esp_command_dvma); | ||
1131 | sbus_iounmap(esp->eregs, ESP_REG_SIZE); | ||
1132 | esp->dma->allocated = 0; | ||
1133 | |||
1134 | scsi_host_put(esp->ehost); | ||
1135 | |||
1136 | return 0; | ||
1137 | } | ||
1138 | |||
1139 | |||
1140 | #ifdef CONFIG_SUN4 | ||
1141 | |||
1142 | #include <asm/sun4paddr.h> | ||
1143 | |||
1144 | static struct sbus_dev sun4_esp_dev; | ||
1145 | |||
1146 | static int __init esp_sun4_probe(struct scsi_host_template *tpnt) | ||
1147 | { | ||
1148 | if (sun4_esp_physaddr) { | ||
1149 | memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev)); | ||
1150 | sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; | ||
1151 | sun4_esp_dev.irqs[0] = 4; | ||
1152 | sun4_esp_dev.resource[0].start = sun4_esp_physaddr; | ||
1153 | sun4_esp_dev.resource[0].end = | ||
1154 | sun4_esp_physaddr + ESP_REG_SIZE - 1; | ||
1155 | sun4_esp_dev.resource[0].flags = IORESOURCE_IO; | ||
1156 | |||
1157 | return detect_one_esp(tpnt, NULL, | ||
1158 | &sun4_esp_dev, NULL, NULL, 0); | ||
1159 | } | ||
1160 | return 0; | ||
1161 | } | ||
1162 | |||
1163 | static int __devexit esp_sun4_remove(void) | ||
1164 | { | ||
1165 | struct of_device *dev = &sun4_esp_dev.ofdev; | ||
1166 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
1167 | |||
1168 | return esp_remove_common(esp); | ||
1169 | } | ||
1170 | |||
1171 | #else /* !CONFIG_SUN4 */ | ||
1172 | |||
1173 | static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match) | ||
1174 | { | ||
1175 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); | ||
1176 | struct device_node *dp = dev->node; | ||
1177 | struct sbus_dev *dma_sdev = NULL; | ||
1178 | int hme = 0; | ||
1179 | |||
1180 | if (dp->parent && | ||
1181 | (!strcmp(dp->parent->name, "espdma") || | ||
1182 | !strcmp(dp->parent->name, "dma"))) | ||
1183 | dma_sdev = sdev->parent; | ||
1184 | else if (!strcmp(dp->name, "SUNW,fas")) { | ||
1185 | dma_sdev = sdev; | ||
1186 | hme = 1; | ||
1187 | } | ||
1188 | |||
1189 | return detect_one_esp(match->data, &dev->dev, | ||
1190 | sdev, dma_sdev, sdev->bus, hme); | ||
1191 | } | ||
1192 | |||
1193 | static int __devexit esp_sbus_remove(struct of_device *dev) | ||
1194 | { | ||
1195 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
1196 | |||
1197 | return esp_remove_common(esp); | ||
1198 | } | ||
1199 | |||
1200 | #endif /* !CONFIG_SUN4 */ | ||
1201 | |||
1202 | /* The info function will return whatever useful | ||
1203 | * information the developer sees fit. If not provided, then | ||
1204 | * the name field will be used instead. | ||
1205 | */ | ||
1206 | static const char *esp_info(struct Scsi_Host *host) | ||
1207 | { | ||
1208 | struct esp *esp; | ||
1209 | |||
1210 | esp = (struct esp *) host->hostdata; | ||
1211 | switch (esp->erev) { | ||
1212 | case esp100: | ||
1213 | return "Sparc ESP100 (NCR53C90)"; | ||
1214 | case esp100a: | ||
1215 | return "Sparc ESP100A (NCR53C90A)"; | ||
1216 | case esp236: | ||
1217 | return "Sparc ESP236"; | ||
1218 | case fas236: | ||
1219 | return "Sparc ESP236-FAST"; | ||
1220 | case fashme: | ||
1221 | return "Sparc ESP366-HME"; | ||
1222 | case fas100a: | ||
1223 | return "Sparc ESP100A-FAST"; | ||
1224 | default: | ||
1225 | return "Bogon ESP revision"; | ||
1226 | }; | ||
1227 | } | ||
1228 | |||
1229 | /* From Wolfgang Stanglmeier's NCR scsi driver. */ | ||
1230 | struct info_str | ||
1231 | { | ||
1232 | char *buffer; | ||
1233 | int length; | ||
1234 | int offset; | ||
1235 | int pos; | ||
1236 | }; | ||
1237 | |||
1238 | static void copy_mem_info(struct info_str *info, char *data, int len) | ||
1239 | { | ||
1240 | if (info->pos + len > info->length) | ||
1241 | len = info->length - info->pos; | ||
1242 | |||
1243 | if (info->pos + len < info->offset) { | ||
1244 | info->pos += len; | ||
1245 | return; | ||
1246 | } | ||
1247 | if (info->pos < info->offset) { | ||
1248 | data += (info->offset - info->pos); | ||
1249 | len -= (info->offset - info->pos); | ||
1250 | } | ||
1251 | |||
1252 | if (len > 0) { | ||
1253 | memcpy(info->buffer + info->pos, data, len); | ||
1254 | info->pos += len; | ||
1255 | } | ||
1256 | } | ||
1257 | |||
1258 | static int copy_info(struct info_str *info, char *fmt, ...) | ||
1259 | { | ||
1260 | va_list args; | ||
1261 | char buf[81]; | ||
1262 | int len; | ||
1263 | |||
1264 | va_start(args, fmt); | ||
1265 | len = vsprintf(buf, fmt, args); | ||
1266 | va_end(args); | ||
1267 | |||
1268 | copy_mem_info(info, buf, len); | ||
1269 | return len; | ||
1270 | } | ||
1271 | |||
1272 | static int esp_host_info(struct esp *esp, char *ptr, off_t offset, int len) | ||
1273 | { | ||
1274 | struct scsi_device *sdev; | ||
1275 | struct info_str info; | ||
1276 | int i; | ||
1277 | |||
1278 | info.buffer = ptr; | ||
1279 | info.length = len; | ||
1280 | info.offset = offset; | ||
1281 | info.pos = 0; | ||
1282 | |||
1283 | copy_info(&info, "Sparc ESP Host Adapter:\n"); | ||
1284 | copy_info(&info, "\tPROM node\t\t%08x\n", (unsigned int) esp->prom_node); | ||
1285 | copy_info(&info, "\tPROM name\t\t%s\n", esp->prom_name); | ||
1286 | copy_info(&info, "\tESP Model\t\t"); | ||
1287 | switch (esp->erev) { | ||
1288 | case esp100: | ||
1289 | copy_info(&info, "ESP100\n"); | ||
1290 | break; | ||
1291 | case esp100a: | ||
1292 | copy_info(&info, "ESP100A\n"); | ||
1293 | break; | ||
1294 | case esp236: | ||
1295 | copy_info(&info, "ESP236\n"); | ||
1296 | break; | ||
1297 | case fas236: | ||
1298 | copy_info(&info, "FAS236\n"); | ||
1299 | break; | ||
1300 | case fas100a: | ||
1301 | copy_info(&info, "FAS100A\n"); | ||
1302 | break; | ||
1303 | case fast: | ||
1304 | copy_info(&info, "FAST\n"); | ||
1305 | break; | ||
1306 | case fashme: | ||
1307 | copy_info(&info, "Happy Meal FAS\n"); | ||
1308 | break; | ||
1309 | case espunknown: | ||
1310 | default: | ||
1311 | copy_info(&info, "Unknown!\n"); | ||
1312 | break; | ||
1313 | }; | ||
1314 | copy_info(&info, "\tDMA Revision\t\t"); | ||
1315 | switch (esp->dma->revision) { | ||
1316 | case dvmarev0: | ||
1317 | copy_info(&info, "Rev 0\n"); | ||
1318 | break; | ||
1319 | case dvmaesc1: | ||
1320 | copy_info(&info, "ESC Rev 1\n"); | ||
1321 | break; | ||
1322 | case dvmarev1: | ||
1323 | copy_info(&info, "Rev 1\n"); | ||
1324 | break; | ||
1325 | case dvmarev2: | ||
1326 | copy_info(&info, "Rev 2\n"); | ||
1327 | break; | ||
1328 | case dvmarev3: | ||
1329 | copy_info(&info, "Rev 3\n"); | ||
1330 | break; | ||
1331 | case dvmarevplus: | ||
1332 | copy_info(&info, "Rev 1+\n"); | ||
1333 | break; | ||
1334 | case dvmahme: | ||
1335 | copy_info(&info, "Rev HME/FAS\n"); | ||
1336 | break; | ||
1337 | default: | ||
1338 | copy_info(&info, "Unknown!\n"); | ||
1339 | break; | ||
1340 | }; | ||
1341 | copy_info(&info, "\tLive Targets\t\t[ "); | ||
1342 | for (i = 0; i < 15; i++) { | ||
1343 | if (esp->targets_present & (1 << i)) | ||
1344 | copy_info(&info, "%d ", i); | ||
1345 | } | ||
1346 | copy_info(&info, "]\n\n"); | ||
1347 | |||
1348 | /* Now describe the state of each existing target. */ | ||
1349 | copy_info(&info, "Target #\tconfig3\t\tSync Capabilities\tDisconnect\tWide\n"); | ||
1350 | |||
1351 | shost_for_each_device(sdev, esp->ehost) { | ||
1352 | struct esp_device *esp_dev = sdev->hostdata; | ||
1353 | uint id = sdev->id; | ||
1354 | |||
1355 | if (!(esp->targets_present & (1 << id))) | ||
1356 | continue; | ||
1357 | |||
1358 | copy_info(&info, "%d\t\t", id); | ||
1359 | copy_info(&info, "%08lx\t", esp->config3[id]); | ||
1360 | copy_info(&info, "[%02lx,%02lx]\t\t\t", | ||
1361 | esp_dev->sync_max_offset, | ||
1362 | esp_dev->sync_min_period); | ||
1363 | copy_info(&info, "%s\t\t", | ||
1364 | esp_dev->disconnect ? "yes" : "no"); | ||
1365 | copy_info(&info, "%s\n", | ||
1366 | (esp->config3[id] & ESP_CONFIG3_EWIDE) ? "yes" : "no"); | ||
1367 | } | ||
1368 | return info.pos > info.offset? info.pos - info.offset : 0; | ||
1369 | } | ||
1370 | |||
1371 | /* ESP proc filesystem code. */ | ||
1372 | static int esp_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, | ||
1373 | int length, int inout) | ||
1374 | { | ||
1375 | struct esp *esp = (struct esp *) host->hostdata; | ||
1376 | |||
1377 | if (inout) | ||
1378 | return -EINVAL; /* not yet */ | ||
1379 | |||
1380 | if (start) | ||
1381 | *start = buffer; | ||
1382 | |||
1383 | return esp_host_info(esp, buffer, offset, length); | ||
1384 | } | ||
1385 | |||
1386 | static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp) | ||
1387 | { | ||
1388 | if (sp->use_sg == 0) { | ||
1389 | sp->SCp.this_residual = sp->request_bufflen; | ||
1390 | sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; | ||
1391 | sp->SCp.buffers_residual = 0; | ||
1392 | if (sp->request_bufflen) { | ||
1393 | sp->SCp.have_data_in = sbus_map_single(esp->sdev, sp->SCp.buffer, | ||
1394 | sp->SCp.this_residual, | ||
1395 | sp->sc_data_direction); | ||
1396 | sp->SCp.ptr = (char *) ((unsigned long)sp->SCp.have_data_in); | ||
1397 | } else { | ||
1398 | sp->SCp.ptr = NULL; | ||
1399 | } | ||
1400 | } else { | ||
1401 | sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; | ||
1402 | sp->SCp.buffers_residual = sbus_map_sg(esp->sdev, | ||
1403 | sp->SCp.buffer, | ||
1404 | sp->use_sg, | ||
1405 | sp->sc_data_direction); | ||
1406 | sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer); | ||
1407 | sp->SCp.ptr = (char *) ((unsigned long)sg_dma_address(sp->SCp.buffer)); | ||
1408 | } | ||
1409 | } | ||
1410 | |||
1411 | static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp) | ||
1412 | { | ||
1413 | if (sp->use_sg) { | ||
1414 | sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg, | ||
1415 | sp->sc_data_direction); | ||
1416 | } else if (sp->request_bufflen) { | ||
1417 | sbus_unmap_single(esp->sdev, | ||
1418 | sp->SCp.have_data_in, | ||
1419 | sp->request_bufflen, | ||
1420 | sp->sc_data_direction); | ||
1421 | } | ||
1422 | } | ||
1423 | |||
1424 | static void esp_restore_pointers(struct esp *esp, struct scsi_cmnd *sp) | ||
1425 | { | ||
1426 | struct esp_pointers *ep = &esp->data_pointers[sp->device->id]; | ||
1427 | |||
1428 | sp->SCp.ptr = ep->saved_ptr; | ||
1429 | sp->SCp.buffer = ep->saved_buffer; | ||
1430 | sp->SCp.this_residual = ep->saved_this_residual; | ||
1431 | sp->SCp.buffers_residual = ep->saved_buffers_residual; | ||
1432 | } | ||
1433 | |||
1434 | static void esp_save_pointers(struct esp *esp, struct scsi_cmnd *sp) | ||
1435 | { | ||
1436 | struct esp_pointers *ep = &esp->data_pointers[sp->device->id]; | ||
1437 | |||
1438 | ep->saved_ptr = sp->SCp.ptr; | ||
1439 | ep->saved_buffer = sp->SCp.buffer; | ||
1440 | ep->saved_this_residual = sp->SCp.this_residual; | ||
1441 | ep->saved_buffers_residual = sp->SCp.buffers_residual; | ||
1442 | } | ||
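| /* The save/restore helpers above keep a per-target copy of the active | ||
| * data pointer and scatterlist state, presumably so the driver can honor | ||
| * SCSI SAVE DATA POINTER / RESTORE POINTERS semantics around disconnects. | ||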
1443 | |||
1444 | /* Some rules: | ||
1445 | * | ||
1446 | * 1) Never ever panic while something is live on the bus. | ||
1447 | * If there is to be any chance of syncing the disks this | ||
1448 | * rule is to be obeyed. | ||
1449 | * | ||
1450 | * 2) Any target that causes a foul condition will no longer | ||
1451 | * have synchronous transfers done to it, no questions | ||
1452 | * asked. | ||
1453 | * | ||
1454 | * 3) Keep register accesses to a minimum. Think about the | ||
1455 | * day when this is running on Xbus machines and the ESP | ||
1456 | * chip sits at the other end of the machine, on a | ||
1457 | * different board from the CPU running this code. | ||
1458 | */ | ||
1459 | |||
1460 | /* Fire off a command. We assume the bus is free and that the only | ||
1461 | * case where we could see an interrupt is where we have disconnected | ||
1462 | * commands active and they are trying to reselect us. | ||
1463 | */ | ||
1464 | static inline void esp_check_cmd(struct esp *esp, struct scsi_cmnd *sp) | ||
1465 | { | ||
1466 | switch (sp->cmd_len) { | ||
1467 | case 6: | ||
1468 | case 10: | ||
1469 | case 12: | ||
1470 | esp->esp_slowcmd = 0; | ||
1471 | break; | ||
1472 | |||
1473 | default: | ||
1474 | esp->esp_slowcmd = 1; | ||
1475 | esp->esp_scmdleft = sp->cmd_len; | ||
1476 | esp->esp_scmdp = &sp->cmnd[0]; | ||
1477 | break; | ||
1478 | }; | ||
1479 | } | ||
1480 | |||
1481 | static inline void build_sync_nego_msg(struct esp *esp, int period, int offset) | ||
1482 | { | ||
1483 | esp->cur_msgout[0] = EXTENDED_MESSAGE; | ||
1484 | esp->cur_msgout[1] = 3; | ||
1485 | esp->cur_msgout[2] = EXTENDED_SDTR; | ||
1486 | esp->cur_msgout[3] = period; | ||
1487 | esp->cur_msgout[4] = offset; | ||
1488 | esp->msgout_len = 5; | ||
1489 | } | ||
1490 | |||
1491 | /* SIZE is in bits, currently HME only supports 16 bit wide transfers. */ | ||
1492 | static inline void build_wide_nego_msg(struct esp *esp, int size) | ||
1493 | { | ||
1494 | esp->cur_msgout[0] = EXTENDED_MESSAGE; | ||
1495 | esp->cur_msgout[1] = 2; | ||
1496 | esp->cur_msgout[2] = EXTENDED_WDTR; | ||
1497 | switch (size) { | ||
1498 | case 32: | ||
1499 | esp->cur_msgout[3] = 2; | ||
1500 | break; | ||
1501 | case 16: | ||
1502 | esp->cur_msgout[3] = 1; | ||
1503 | break; | ||
1504 | case 8: | ||
1505 | default: | ||
1506 | esp->cur_msgout[3] = 0; | ||
1507 | break; | ||
1508 | }; | ||
1509 | |||
1510 | esp->msgout_len = 4; | ||
1511 | } | ||
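| /* Both negotiation helpers build a standard SCSI extended message: | ||
| * EXTENDED_MESSAGE, a length byte, the SDTR or WDTR code, and then the | ||
| * period/offset pair (sync) or the transfer-width exponent (wide). | ||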
1512 | |||
1513 | static void esp_exec_cmd(struct esp *esp) | ||
1514 | { | ||
1515 | struct scsi_cmnd *SCptr; | ||
1516 | struct scsi_device *SDptr; | ||
1517 | struct esp_device *esp_dev; | ||
1518 | volatile u8 *cmdp = esp->esp_command; | ||
1519 | u8 the_esp_command; | ||
1520 | int lun, target; | ||
1521 | int i; | ||
1522 | |||
1523 | /* Hold off if we have disconnected commands and | ||
1524 | * an IRQ is showing... | ||
1525 | */ | ||
1526 | if (esp->disconnected_SC && ESP_IRQ_P(esp->dregs)) | ||
1527 | return; | ||
1528 | |||
1529 | /* Grab first member of the issue queue. */ | ||
1530 | SCptr = esp->current_SC = remove_first_SC(&esp->issue_SC); | ||
1531 | |||
1532 | /* Safe to panic here because current_SC is null. */ | ||
1533 | if (!SCptr) | ||
1534 | panic("esp: esp_exec_cmd and issue queue is NULL"); | ||
1535 | |||
1536 | SDptr = SCptr->device; | ||
1537 | esp_dev = SDptr->hostdata; | ||
1538 | lun = SCptr->device->lun; | ||
1539 | target = SCptr->device->id; | ||
1540 | |||
1541 | esp->snip = 0; | ||
1542 | esp->msgout_len = 0; | ||
1543 | |||
1544 | /* Send it out whole, or piece by piece? The ESP | ||
1545 | * only knows how to automatically send out 6, 10, | ||
1546 | * and 12 byte commands. I used to think that the | ||
1547 | * Linux SCSI code would never throw anything other | ||
1548 | * than that to us, but then again there is the | ||
1549 | * SCSI generic driver which can send us anything. | ||
1550 | */ | ||
1551 | esp_check_cmd(esp, SCptr); | ||
1552 | |||
1553 | /* If arbitration/selection is successful, the ESP will leave | ||
1554 | * ATN asserted, causing the target to go into message out | ||
1555 | * phase. The ESP will feed the target the identify and then | ||
1556 | * the target can only legally go to one of command, | ||
1557 | * datain/out, status, or message in phase, or stay in message | ||
1558 | * out phase (should we be trying to send a sync negotiation | ||
1559 | * message after the identify). It is not allowed to drop | ||
1560 | * BSY, but some buggy targets do and we check for this | ||
1561 | * condition in the selection complete code. Most of the time | ||
1562 | * we'll make the command bytes available to the ESP and it | ||
1563 | * will not interrupt us until it finishes command phase; we | ||
1564 | * cannot do this for command sizes the ESP does not | ||
1565 | * understand, and in that case we'll get interrupted right | ||
1566 | * when the target goes into command phase. | ||
1567 | * | ||
1568 | * It is absolutely _illegal_ in the presence of SCSI-2 devices | ||
1569 | * to use the ESP select w/o ATN command. When SCSI-2 devices are | ||
1570 | * present on the bus we _must_ always go straight to message out | ||
1571 | * phase with an identify message for the target. Being that | ||
1572 | * selection attempts in SCSI-1 w/o ATN were an option, doing SCSI-2 | ||
1573 | * selections should not confuse SCSI-1 devices, we hope. | ||
1574 | */ | ||
1575 | |||
1576 | if (esp_dev->sync) { | ||
1577 | /* this target's sync is known */ | ||
1578 | #ifndef __sparc_v9__ | ||
1579 | do_sync_known: | ||
1580 | #endif | ||
1581 | if (esp_dev->disconnect) | ||
1582 | *cmdp++ = IDENTIFY(1, lun); | ||
1583 | else | ||
1584 | *cmdp++ = IDENTIFY(0, lun); | ||
1585 | |||
1586 | if (esp->esp_slowcmd) { | ||
1587 | the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA); | ||
1588 | esp_advance_phase(SCptr, in_slct_stop); | ||
1589 | } else { | ||
1590 | the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA); | ||
1591 | esp_advance_phase(SCptr, in_slct_norm); | ||
1592 | } | ||
1593 | } else if (!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) { | ||
1594 | /* After the bootup SCSI code sends both the | ||
1595 | * TEST_UNIT_READY and INQUIRY commands, we want | ||
1596 | * to at least attempt allowing the device to | ||
1597 | * disconnect. | ||
1598 | */ | ||
1599 | ESPMISC(("esp: Selecting device for first time. target=%d " | ||
1600 | "lun=%d\n", target, SCptr->device->lun)); | ||
1601 | if (!SDptr->borken && !esp_dev->disconnect) | ||
1602 | esp_dev->disconnect = 1; | ||
1603 | |||
1604 | *cmdp++ = IDENTIFY(0, lun); | ||
1605 | esp->prevmsgout = NOP; | ||
1606 | esp_advance_phase(SCptr, in_slct_norm); | ||
1607 | the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA); | ||
1608 | |||
1609 | /* Take no chances... */ | ||
1610 | esp_dev->sync_max_offset = 0; | ||
1611 | esp_dev->sync_min_period = 0; | ||
1612 | } else { | ||
1613 | /* Sorry, I have had way too many problems with | ||
1614 | * various CDROM devices on ESP. -DaveM | ||
1615 | */ | ||
1616 | int cdrom_hwbug_wkaround = 0; | ||
1617 | |||
1618 | #ifndef __sparc_v9__ | ||
1619 | /* Never allow disconnects or synchronous transfers on | ||
1620 | * SparcStation1 and SparcStation1+. Allowing those | ||
1621 | * to be enabled seems to lockup the machine completely. | ||
1622 | */ | ||
1623 | if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || | ||
1624 | (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { | ||
1625 | /* But we are nice and allow tapes and removable | ||
1626 | * disks (but not CDROMs) to disconnect. | ||
1627 | */ | ||
1628 | if(SDptr->type == TYPE_TAPE || | ||
1629 | (SDptr->type != TYPE_ROM && SDptr->removable)) | ||
1630 | esp_dev->disconnect = 1; | ||
1631 | else | ||
1632 | esp_dev->disconnect = 0; | ||
1633 | esp_dev->sync_max_offset = 0; | ||
1634 | esp_dev->sync_min_period = 0; | ||
1635 | esp_dev->sync = 1; | ||
1636 | esp->snip = 0; | ||
1637 | goto do_sync_known; | ||
1638 | } | ||
1639 | #endif /* !(__sparc_v9__) */ | ||
1640 | |||
1641 | /* We've talked to this guy before, | ||
1642 | * but never negotiated. Let's try; we need | ||
1643 | * to attempt WIDE first, before sync nego, | ||
1644 | * as per the SCSI-2 standard. | ||
1645 | */ | ||
1646 | if (esp->erev == fashme && !esp_dev->wide) { | ||
1647 | if (!SDptr->borken && | ||
1648 | SDptr->type != TYPE_ROM && | ||
1649 | SDptr->removable == 0) { | ||
1650 | build_wide_nego_msg(esp, 16); | ||
1651 | esp_dev->wide = 1; | ||
1652 | esp->wnip = 1; | ||
1653 | goto after_nego_msg_built; | ||
1654 | } else { | ||
1655 | esp_dev->wide = 1; | ||
1656 | /* Fall through and try sync. */ | ||
1657 | } | ||
1658 | } | ||
1659 | |||
1660 | if (!SDptr->borken) { | ||
1661 | if ((SDptr->type == TYPE_ROM)) { | ||
1662 | /* Nice try sucker... */ | ||
1663 | ESPMISC(("esp%d: Disabling sync for buggy " | ||
1664 | "CDROM.\n", esp->esp_id)); | ||
1665 | cdrom_hwbug_wkaround = 1; | ||
1666 | build_sync_nego_msg(esp, 0, 0); | ||
1667 | } else if (SDptr->removable != 0) { | ||
1668 | ESPMISC(("esp%d: Not negotiating sync/wide but " | ||
1669 | "allowing disconnect for removable media.\n", | ||
1670 | esp->esp_id)); | ||
1671 | build_sync_nego_msg(esp, 0, 0); | ||
1672 | } else { | ||
1673 | build_sync_nego_msg(esp, esp->sync_defp, 15); | ||
1674 | } | ||
1675 | } else { | ||
1676 | build_sync_nego_msg(esp, 0, 0); | ||
1677 | } | ||
1678 | esp_dev->sync = 1; | ||
1679 | esp->snip = 1; | ||
1680 | |||
1681 | after_nego_msg_built: | ||
1682 | /* A fix for broken SCSI1 targets: when they disconnect | ||
1683 | * they lock up the bus and confuse ESP. So disallow | ||
1684 | * disconnects for SCSI1 targets for now until we | ||
1685 | * find a better fix. | ||
1686 | * | ||
1687 | * Addendum: This is funny, I figured out what was going | ||
1688 | * on. The blotzed SCSI1 target would disconnect, | ||
1689 | * one of the other SCSI2 targets or both would be | ||
1690 | * disconnected as well. The SCSI1 target would | ||
1691 | * stay disconnected long enough that we start | ||
1692 | * up a command on one of the SCSI2 targets. As | ||
1693 | * the ESP is arbitrating for the bus the SCSI1 | ||
1694 | * target begins to arbitrate as well to reselect | ||
1695 | * the ESP. The SCSI1 target refuses to drop its | ||
1696 | * ID bit on the data bus even though the ESP is | ||
1697 | * at ID 7 and is the obvious winner for any | ||
1698 | * arbitration. The ESP is a poor sport and refuses | ||
1699 | * to lose arbitration; it will continue indefinitely | ||
1700 | * trying to arbitrate for the bus and can only be | ||
1701 | * stopped via a chip reset or SCSI bus reset. | ||
1702 | * Therefore _no_ disconnects for SCSI1 targets | ||
1703 | * thank you very much. ;-) | ||
1704 | */ | ||
1705 | if(((SDptr->scsi_level < 3) && | ||
1706 | (SDptr->type != TYPE_TAPE) && | ||
1707 | SDptr->removable == 0) || | ||
1708 | cdrom_hwbug_wkaround || SDptr->borken) { | ||
1709 | ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d " | ||
1710 | "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun)); | ||
1711 | esp_dev->disconnect = 0; | ||
1712 | *cmdp++ = IDENTIFY(0, lun); | ||
1713 | } else { | ||
1714 | *cmdp++ = IDENTIFY(1, lun); | ||
1715 | } | ||
1716 | |||
1717 | /* ESP fifo is only so big... | ||
1718 | * Make this look like a slow command. | ||
1719 | */ | ||
1720 | esp->esp_slowcmd = 1; | ||
1721 | esp->esp_scmdleft = SCptr->cmd_len; | ||
1722 | esp->esp_scmdp = &SCptr->cmnd[0]; | ||
1723 | |||
1724 | the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA); | ||
1725 | esp_advance_phase(SCptr, in_slct_msg); | ||
1726 | } | ||
1727 | |||
1728 | if (!esp->esp_slowcmd) | ||
1729 | for (i = 0; i < SCptr->cmd_len; i++) | ||
1730 | *cmdp++ = SCptr->cmnd[i]; | ||
1731 | |||
1732 | /* HME sucks... */ | ||
1733 | if (esp->erev == fashme) | ||
1734 | sbus_writeb((target & 0xf) | (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT), | ||
1735 | esp->eregs + ESP_BUSID); | ||
1736 | else | ||
1737 | sbus_writeb(target & 7, esp->eregs + ESP_BUSID); | ||
1738 | if (esp->prev_soff != esp_dev->sync_max_offset || | ||
1739 | esp->prev_stp != esp_dev->sync_min_period || | ||
1740 | (esp->erev > esp100a && | ||
1741 | esp->prev_cfg3 != esp->config3[target])) { | ||
1742 | esp->prev_soff = esp_dev->sync_max_offset; | ||
1743 | esp->prev_stp = esp_dev->sync_min_period; | ||
1744 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
1745 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
1746 | if (esp->erev > esp100a) { | ||
1747 | esp->prev_cfg3 = esp->config3[target]; | ||
1748 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
1749 | } | ||
1750 | } | ||
1751 | i = (cmdp - esp->esp_command); | ||
1752 | |||
1753 | if (esp->erev == fashme) { | ||
1754 | esp_cmd(esp, ESP_CMD_FLUSH); /* Grrr! */ | ||
1755 | |||
1756 | /* Set up the DMA and HME counters */ | ||
1757 | sbus_writeb(i, esp->eregs + ESP_TCLOW); | ||
1758 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
1759 | sbus_writeb(0, esp->eregs + FAS_RLO); | ||
1760 | sbus_writeb(0, esp->eregs + FAS_RHI); | ||
1761 | esp_cmd(esp, the_esp_command); | ||
1762 | |||
1763 | /* Talk about touchy hardware... */ | ||
1764 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
1765 | (DMA_SCSI_DISAB | DMA_ENABLE)) & | ||
1766 | ~(DMA_ST_WRITE)); | ||
1767 | sbus_writel(16, esp->dregs + DMA_COUNT); | ||
1768 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
1769 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
1770 | } else { | ||
1771 | u32 tmp; | ||
1772 | |||
1773 | /* Set up the DMA and ESP counters */ | ||
1774 | sbus_writeb(i, esp->eregs + ESP_TCLOW); | ||
1775 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
1776 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
1777 | tmp &= ~DMA_ST_WRITE; | ||
1778 | tmp |= DMA_ENABLE; | ||
1779 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
1780 | if (esp->dma->revision == dvmaesc1) { | ||
1781 | if (i) /* Workaround ESC gate array SBUS rerun bug. */ | ||
1782 | sbus_writel(PAGE_SIZE, esp->dregs + DMA_COUNT); | ||
1783 | } | ||
1784 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
1785 | |||
1786 | /* Tell ESP to "go". */ | ||
1787 | esp_cmd(esp, the_esp_command); | ||
1788 | } | ||
1789 | } | ||
1790 | |||
1791 | /* Queue a SCSI command delivered from the mid-level Linux SCSI code. */ | ||
1792 | static int esp_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | ||
1793 | { | ||
1794 | struct esp *esp; | ||
1795 | |||
1796 | /* Set up func ptr and initial driver cmd-phase. */ | ||
1797 | SCpnt->scsi_done = done; | ||
1798 | SCpnt->SCp.phase = not_issued; | ||
1799 | |||
1800 | /* We use the scratch area. */ | ||
1801 | ESPQUEUE(("esp_queue: target=%d lun=%d ", SCpnt->device->id, SCpnt->device->lun)); | ||
1802 | ESPDISC(("N<%02x,%02x>", SCpnt->device->id, SCpnt->device->lun)); | ||
1803 | |||
1804 | esp = (struct esp *) SCpnt->device->host->hostdata; | ||
1805 | esp_get_dmabufs(esp, SCpnt); | ||
1806 | esp_save_pointers(esp, SCpnt); /* FIXME for tag queueing */ | ||
1807 | |||
1808 | SCpnt->SCp.Status = CHECK_CONDITION; | ||
1809 | SCpnt->SCp.Message = 0xff; | ||
1810 | SCpnt->SCp.sent_command = 0; | ||
1811 | |||
1812 | /* Place into our queue. */ | ||
1813 | if (SCpnt->cmnd[0] == REQUEST_SENSE) { | ||
1814 | ESPQUEUE(("RQSENSE\n")); | ||
1815 | prepend_SC(&esp->issue_SC, SCpnt); | ||
1816 | } else { | ||
1817 | ESPQUEUE(("\n")); | ||
1818 | append_SC(&esp->issue_SC, SCpnt); | ||
1819 | } | ||
1820 | |||
1821 | /* Run it now if we can. */ | ||
1822 | if (!esp->current_SC && !esp->resetting_bus) | ||
1823 | esp_exec_cmd(esp); | ||
1824 | |||
1825 | return 0; | ||
1826 | } | ||
1827 | |||
1828 | /* Dump driver state. */ | ||
1829 | static void esp_dump_cmd(struct scsi_cmnd *SCptr) | ||
1830 | { | ||
1831 | ESPLOG(("[tgt<%02x> lun<%02x> " | ||
1832 | "pphase<%s> cphase<%s>]", | ||
1833 | SCptr->device->id, SCptr->device->lun, | ||
1834 | phase_string(SCptr->SCp.sent_command), | ||
1835 | phase_string(SCptr->SCp.phase))); | ||
1836 | } | ||
1837 | |||
1838 | static void esp_dump_state(struct esp *esp) | ||
1839 | { | ||
1840 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
1841 | #ifdef DEBUG_ESP_CMDS | ||
1842 | int i; | ||
1843 | #endif | ||
1844 | |||
1845 | ESPLOG(("esp%d: dumping state\n", esp->esp_id)); | ||
1846 | ESPLOG(("esp%d: dma -- cond_reg<%08x> addr<%08x>\n", | ||
1847 | esp->esp_id, | ||
1848 | sbus_readl(esp->dregs + DMA_CSR), | ||
1849 | sbus_readl(esp->dregs + DMA_ADDR))); | ||
1850 | ESPLOG(("esp%d: SW [sreg<%02x> sstep<%02x> ireg<%02x>]\n", | ||
1851 | esp->esp_id, esp->sreg, esp->seqreg, esp->ireg)); | ||
1852 | ESPLOG(("esp%d: HW reread [sreg<%02x> sstep<%02x> ireg<%02x>]\n", | ||
1853 | esp->esp_id, | ||
1854 | sbus_readb(esp->eregs + ESP_STATUS), | ||
1855 | sbus_readb(esp->eregs + ESP_SSTEP), | ||
1856 | sbus_readb(esp->eregs + ESP_INTRPT))); | ||
1857 | #ifdef DEBUG_ESP_CMDS | ||
1858 | printk("esp%d: last ESP cmds [", esp->esp_id); | ||
1859 | i = (esp->espcmdent - 1) & 31; | ||
1860 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1861 | i = (i - 1) & 31; | ||
1862 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1863 | i = (i - 1) & 31; | ||
1864 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1865 | i = (i - 1) & 31; | ||
1866 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1867 | printk("]\n"); | ||
1868 | #endif /* (DEBUG_ESP_CMDS) */ | ||
1869 | |||
1870 | if (SCptr) { | ||
1871 | ESPLOG(("esp%d: current command ", esp->esp_id)); | ||
1872 | esp_dump_cmd(SCptr); | ||
1873 | } | ||
1874 | ESPLOG(("\n")); | ||
1875 | SCptr = esp->disconnected_SC; | ||
1876 | ESPLOG(("esp%d: disconnected ", esp->esp_id)); | ||
1877 | while (SCptr) { | ||
1878 | esp_dump_cmd(SCptr); | ||
1879 | SCptr = (struct scsi_cmnd *) SCptr->host_scribble; | ||
1880 | } | ||
1881 | ESPLOG(("\n")); | ||
1882 | } | ||
1883 | |||
1884 | /* Abort a command. The host_lock is acquired by caller. */ | ||
1885 | static int esp_abort(struct scsi_cmnd *SCptr) | ||
1886 | { | ||
1887 | struct esp *esp = (struct esp *) SCptr->device->host->hostdata; | ||
1888 | int don; | ||
1889 | |||
1890 | ESPLOG(("esp%d: Aborting command\n", esp->esp_id)); | ||
1891 | esp_dump_state(esp); | ||
1892 | |||
1893 | /* Wheee, if this is the current command on the bus, the | ||
1894 | * best we can do is assert ATN and wait for msgout phase. | ||
1895 | * This should even fix a hung SCSI bus when we lose state | ||
1896 | * in the driver and timeout because the eventual phase change | ||
1897 | * will cause the ESP to (eventually) give an interrupt. | ||
1898 | */ | ||
1899 | if (esp->current_SC == SCptr) { | ||
1900 | esp->cur_msgout[0] = ABORT; | ||
1901 | esp->msgout_len = 1; | ||
1902 | esp->msgout_ctr = 0; | ||
1903 | esp_cmd(esp, ESP_CMD_SATN); | ||
1904 | return SUCCESS; | ||
1905 | } | ||
1906 | |||
1907 | /* If it is still in the issue queue then we can safely | ||
1908 | * call the completion routine and report abort success. | ||
1909 | */ | ||
1910 | don = (sbus_readl(esp->dregs + DMA_CSR) & DMA_INT_ENAB); | ||
1911 | if (don) { | ||
1912 | ESP_INTSOFF(esp->dregs); | ||
1913 | } | ||
1914 | if (esp->issue_SC) { | ||
1915 | struct scsi_cmnd **prev, *this; | ||
1916 | for (prev = (&esp->issue_SC), this = esp->issue_SC; | ||
1917 | this != NULL; | ||
1918 | prev = (struct scsi_cmnd **) &(this->host_scribble), | ||
1919 | this = (struct scsi_cmnd *) this->host_scribble) { | ||
1920 | |||
1921 | if (this == SCptr) { | ||
1922 | *prev = (struct scsi_cmnd *) this->host_scribble; | ||
1923 | this->host_scribble = NULL; | ||
1924 | |||
1925 | esp_release_dmabufs(esp, this); | ||
1926 | this->result = DID_ABORT << 16; | ||
1927 | this->scsi_done(this); | ||
1928 | |||
1929 | if (don) | ||
1930 | ESP_INTSON(esp->dregs); | ||
1931 | |||
1932 | return SUCCESS; | ||
1933 | } | ||
1934 | } | ||
1935 | } | ||
1936 | |||
1937 | /* Yuck, the command to abort is disconnected; it is not | ||
1938 | * worth trying to abort it now if something else is live | ||
1939 | * on the bus at this time. So, we let the SCSI code wait | ||
1940 | * a little bit and try again later. | ||
1941 | */ | ||
1942 | if (esp->current_SC) { | ||
1943 | if (don) | ||
1944 | ESP_INTSON(esp->dregs); | ||
1945 | return FAILED; | ||
1946 | } | ||
1947 | |||
1948 | /* It's disconnected; we have to reconnect to re-establish | ||
1949 | * the nexus and tell the device to abort. However, we really | ||
1950 | * cannot 'reconnect' per se. Don't try to be fancy, just | ||
1951 | * indicate failure, which causes our caller to reset the whole | ||
1952 | * bus. | ||
1953 | */ | ||
1954 | |||
1955 | if (don) | ||
1956 | ESP_INTSON(esp->dregs); | ||
1957 | |||
1958 | return FAILED; | ||
1959 | } | ||
1960 | |||
1961 | /* We've sent ESP_CMD_RS to the ESP, the interrupt had just | ||
1962 | * arrived indicating the end of the SCSI bus reset. Our job | ||
1963 | * is to clean out the command queues and begin re-execution | ||
1964 | * of SCSI commands once more. | ||
1965 | */ | ||
1966 | static int esp_finish_reset(struct esp *esp) | ||
1967 | { | ||
1968 | struct scsi_cmnd *sp = esp->current_SC; | ||
1969 | |||
1970 | /* Clean up currently executing command, if any. */ | ||
1971 | if (sp != NULL) { | ||
1972 | esp->current_SC = NULL; | ||
1973 | |||
1974 | esp_release_dmabufs(esp, sp); | ||
1975 | sp->result = (DID_RESET << 16); | ||
1976 | |||
1977 | sp->scsi_done(sp); | ||
1978 | } | ||
1979 | |||
1980 | /* Clean up disconnected queue, they have been invalidated | ||
1981 | * by the bus reset. | ||
1982 | */ | ||
1983 | if (esp->disconnected_SC) { | ||
1984 | while ((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) { | ||
1985 | esp_release_dmabufs(esp, sp); | ||
1986 | sp->result = (DID_RESET << 16); | ||
1987 | |||
1988 | sp->scsi_done(sp); | ||
1989 | } | ||
1990 | } | ||
1991 | |||
1992 | /* SCSI bus reset is complete. */ | ||
1993 | esp->resetting_bus = 0; | ||
1994 | wake_up(&esp->reset_queue); | ||
1995 | |||
1996 | /* Ok, now it is safe to get commands going once more. */ | ||
1997 | if (esp->issue_SC) | ||
1998 | esp_exec_cmd(esp); | ||
1999 | |||
2000 | return do_intr_end; | ||
2001 | } | ||
2002 | |||
2003 | static int esp_do_resetbus(struct esp *esp) | ||
2004 | { | ||
2005 | ESPLOG(("esp%d: Resetting scsi bus\n", esp->esp_id)); | ||
2006 | esp->resetting_bus = 1; | ||
2007 | esp_cmd(esp, ESP_CMD_RS); | ||
2008 | |||
2009 | return do_intr_end; | ||
2010 | } | ||
2011 | |||
2012 | /* Reset ESP chip, reset hanging bus, then kill active and | ||
2013 | * disconnected commands for targets without soft reset. | ||
2014 | * | ||
2015 | * The host_lock is acquired by caller. | ||
2016 | */ | ||
2017 | static int esp_reset(struct scsi_cmnd *SCptr) | ||
2018 | { | ||
2019 | struct esp *esp = (struct esp *) SCptr->device->host->hostdata; | ||
2020 | |||
2021 | spin_lock_irq(esp->ehost->host_lock); | ||
2022 | (void) esp_do_resetbus(esp); | ||
2023 | spin_unlock_irq(esp->ehost->host_lock); | ||
2024 | |||
2025 | wait_event(esp->reset_queue, (esp->resetting_bus == 0)); | ||
2026 | |||
2027 | return SUCCESS; | ||
2028 | } | ||
2029 | |||
2030 | /* Internal ESP done function. */ | ||
2031 | static void esp_done(struct esp *esp, int error) | ||
2032 | { | ||
2033 | struct scsi_cmnd *done_SC = esp->current_SC; | ||
2034 | |||
2035 | esp->current_SC = NULL; | ||
2036 | |||
2037 | esp_release_dmabufs(esp, done_SC); | ||
2038 | done_SC->result = error; | ||
2039 | |||
2040 | done_SC->scsi_done(done_SC); | ||
2041 | |||
2042 | /* Bus is free, issue any commands in the queue. */ | ||
2043 | if (esp->issue_SC && !esp->current_SC) | ||
2044 | esp_exec_cmd(esp); | ||
2045 | |||
2046 | } | ||
2047 | |||
2048 | /* Wheee, ESP interrupt engine. */ | ||
2049 | |||
2050 | /* Forward declarations. */ | ||
2051 | static int esp_do_phase_determine(struct esp *esp); | ||
2052 | static int esp_do_data_finale(struct esp *esp); | ||
2053 | static int esp_select_complete(struct esp *esp); | ||
2054 | static int esp_do_status(struct esp *esp); | ||
2055 | static int esp_do_msgin(struct esp *esp); | ||
2056 | static int esp_do_msgindone(struct esp *esp); | ||
2057 | static int esp_do_msgout(struct esp *esp); | ||
2058 | static int esp_do_cmdbegin(struct esp *esp); | ||
2059 | |||
2060 | #define sreg_datainp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DIP) | ||
2061 | #define sreg_dataoutp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DOP) | ||
2062 | |||
2063 | /* Read any bytes found in the FAS366 fifo, storing them into | ||
2064 | * the ESP driver software state structure. | ||
2065 | */ | ||
2066 | static void hme_fifo_read(struct esp *esp) | ||
2067 | { | ||
2068 | u8 count = 0; | ||
2069 | u8 status = esp->sreg; | ||
2070 | |||
2071 | /* Cannot safely frob the fifo for the following cases, but | ||
2072 | * we must always read the fifo when the reselect interrupt | ||
2073 | * is pending. | ||
2074 | */ | ||
2075 | if (((esp->ireg & ESP_INTR_RSEL) == 0) && | ||
2076 | (sreg_datainp(status) || | ||
2077 | sreg_dataoutp(status) || | ||
2078 | (esp->current_SC && | ||
2079 | esp->current_SC->SCp.phase == in_data_done))) { | ||
2080 | ESPHME(("<wkaround_skipped>")); | ||
2081 | } else { | ||
2082 | unsigned long fcnt = sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES; | ||
2083 | |||
2084 | /* The HME stores bytes in multiples of 2 in the fifo. */ | ||
2085 | ESPHME(("hme_fifo[fcnt=%d", (int)fcnt)); | ||
2086 | while (fcnt) { | ||
2087 | esp->hme_fifo_workaround_buffer[count++] = | ||
2088 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2089 | esp->hme_fifo_workaround_buffer[count++] = | ||
2090 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2091 | ESPHME(("<%02x,%02x>", esp->hme_fifo_workaround_buffer[count-2], esp->hme_fifo_workaround_buffer[count-1])); | ||
2092 | fcnt--; | ||
2093 | } | ||
2094 | if (sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_F1BYTE) { | ||
2095 | ESPHME(("<poke_byte>")); | ||
2096 | sbus_writeb(0, esp->eregs + ESP_FDATA); | ||
2097 | esp->hme_fifo_workaround_buffer[count++] = | ||
2098 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2099 | ESPHME(("<%02x,0x00>", esp->hme_fifo_workaround_buffer[count-1])); | ||
2100 | ESPHME(("CMD_FLUSH")); | ||
2101 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2102 | } else { | ||
2103 | ESPHME(("no_xtra_byte")); | ||
2104 | } | ||
2105 | } | ||
2106 | ESPHME(("wkarnd_cnt=%d]", (int)count)); | ||
2107 | esp->hme_fifo_workaround_count = count; | ||
2108 | } | ||
2109 | |||
2110 | static inline void hme_fifo_push(struct esp *esp, u8 *bytes, u8 count) | ||
2111 | { | ||
2112 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2113 | while (count) { | ||
2114 | u8 tmp = *bytes++; | ||
2115 | sbus_writeb(tmp, esp->eregs + ESP_FDATA); | ||
2116 | sbus_writeb(0, esp->eregs + ESP_FDATA); | ||
2117 | count--; | ||
2118 | } | ||
2119 | } | ||
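| /* The push side mirrors hme_fifo_read(): the FAS366 fifo is effectively | ||
| * 16 bits wide, so every real byte written is followed by a zero pad | ||
| * byte to keep the fifo byte pairs aligned. | ||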
2120 | |||
2121 | /* We try to avoid some interrupts by jumping ahead and seeing if the ESP | ||
2122 | * has gotten far enough yet. Hence the following. | ||
2123 | */ | ||
2124 | static inline int skipahead1(struct esp *esp, struct scsi_cmnd *scp, | ||
2125 | int prev_phase, int new_phase) | ||
2126 | { | ||
2127 | if (scp->SCp.sent_command != prev_phase) | ||
2128 | return 0; | ||
2129 | if (ESP_IRQ_P(esp->dregs)) { | ||
2130 | /* Yes, we are able to save an interrupt. */ | ||
2131 | if (esp->erev == fashme) | ||
2132 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
2133 | esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR)); | ||
2134 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2135 | if (esp->erev == fashme) { | ||
2136 | /* This chip is really losing. */ | ||
2137 | ESPHME(("HME[")); | ||
2138 | /* Must latch fifo before reading the interrupt | ||
2139 | * register else garbage ends up in the FIFO | ||
2140 | * which confuses the driver utterly. | ||
2141 | * Happy Meal indeed.... | ||
2142 | */ | ||
2143 | ESPHME(("fifo_workaround]")); | ||
2144 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2145 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2146 | hme_fifo_read(esp); | ||
2147 | } | ||
2148 | if (!(esp->ireg & ESP_INTR_SR)) | ||
2149 | return 0; | ||
2150 | else | ||
2151 | return do_reset_complete; | ||
2152 | } | ||
2153 | /* Ho hum, target is taking forever... */ | ||
2154 | scp->SCp.sent_command = new_phase; /* so we don't recurse... */ | ||
2155 | return do_intr_end; | ||
2156 | } | ||
2157 | |||
2158 | static inline int skipahead2(struct esp *esp, struct scsi_cmnd *scp, | ||
2159 | int prev_phase1, int prev_phase2, int new_phase) | ||
2160 | { | ||
2161 | if (scp->SCp.sent_command != prev_phase1 && | ||
2162 | scp->SCp.sent_command != prev_phase2) | ||
2163 | return 0; | ||
2164 | if (ESP_IRQ_P(esp->dregs)) { | ||
2165 | /* Yes, we are able to save an interrupt. */ | ||
2166 | if (esp->erev == fashme) | ||
2167 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
2168 | esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR)); | ||
2169 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2170 | if (esp->erev == fashme) { | ||
2171 | /* This chip is really losing. */ | ||
2172 | ESPHME(("HME[")); | ||
2173 | |||
2174 | /* Must latch fifo before reading the interrupt | ||
2175 | * register else garbage ends up in the FIFO | ||
2176 | * which confuses the driver utterly. | ||
2177 | * Happy Meal indeed.... | ||
2178 | */ | ||
2179 | ESPHME(("fifo_workaround]")); | ||
2180 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2181 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2182 | hme_fifo_read(esp); | ||
2183 | } | ||
2184 | if (!(esp->ireg & ESP_INTR_SR)) | ||
2185 | return 0; | ||
2186 | else | ||
2187 | return do_reset_complete; | ||
2188 | } | ||
2189 | /* Ho hum, target is taking forever... */ | ||
2190 | scp->SCp.sent_command = new_phase; /* so we don't recurse... */ | ||
2191 | return do_intr_end; | ||
2192 | } | ||
2193 | |||
2194 | /* Now some dma helpers. */ | ||
2195 | static void dma_setup(struct esp *esp, __u32 addr, int count, int write) | ||
2196 | { | ||
2197 | u32 nreg = sbus_readl(esp->dregs + DMA_CSR); | ||
2198 | |||
2199 | if (write) | ||
2200 | nreg |= DMA_ST_WRITE; | ||
2201 | else | ||
2202 | nreg &= ~(DMA_ST_WRITE); | ||
2203 | nreg |= DMA_ENABLE; | ||
2204 | sbus_writel(nreg, esp->dregs + DMA_CSR); | ||
2205 | if (esp->dma->revision == dvmaesc1) { | ||
2206 | /* This ESC gate array sucks! */ | ||
2207 | __u32 src = addr; | ||
2208 | __u32 dest = src + count; | ||
2209 | |||
2210 | if (dest & (PAGE_SIZE - 1)) | ||
2211 | count = PAGE_ALIGN(count); | ||
2212 | sbus_writel(count, esp->dregs + DMA_COUNT); | ||
2213 | } | ||
2214 | sbus_writel(addr, esp->dregs + DMA_ADDR); | ||
2215 | } | ||
2216 | |||
2217 | static void dma_drain(struct esp *esp) | ||
2218 | { | ||
2219 | u32 tmp; | ||
2220 | |||
2221 | if (esp->dma->revision == dvmahme) | ||
2222 | return; | ||
2223 | if ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_FIFO_ISDRAIN) { | ||
2224 | switch (esp->dma->revision) { | ||
2225 | default: | ||
2226 | tmp |= DMA_FIFO_STDRAIN; | ||
2227 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2228 | |||
2229 | case dvmarev3: | ||
2230 | case dvmaesc1: | ||
2231 | while (sbus_readl(esp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN) | ||
2232 | udelay(1); | ||
2233 | }; | ||
2234 | } | ||
2235 | } | ||
2236 | |||
2237 | static void dma_invalidate(struct esp *esp) | ||
2238 | { | ||
2239 | u32 tmp; | ||
2240 | |||
2241 | if (esp->dma->revision == dvmahme) { | ||
2242 | sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
2243 | |||
2244 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
2245 | (DMA_PARITY_OFF | DMA_2CLKS | | ||
2246 | DMA_SCSI_DISAB | DMA_INT_ENAB)) & | ||
2247 | ~(DMA_ST_WRITE | DMA_ENABLE)); | ||
2248 | |||
2249 | sbus_writel(0, esp->dregs + DMA_CSR); | ||
2250 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
2251 | |||
2252 | /* This is necessary to avoid having the SCSI channel | ||
2253 | * engine lock up on us. | ||
2254 | */ | ||
2255 | sbus_writel(0, esp->dregs + DMA_ADDR); | ||
2256 | } else { | ||
2257 | while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ) | ||
2258 | udelay(1); | ||
2259 | |||
2260 | tmp &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); | ||
2261 | tmp |= DMA_FIFO_INV; | ||
2262 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2263 | tmp &= ~DMA_FIFO_INV; | ||
2264 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2265 | } | ||
2266 | } | ||
2267 | |||
2268 | static inline void dma_flashclear(struct esp *esp) | ||
2269 | { | ||
2270 | dma_drain(esp); | ||
2271 | dma_invalidate(esp); | ||
2272 | } | ||
2273 | |||
2274 | static int dma_can_transfer(struct esp *esp, struct scsi_cmnd *sp) | ||
2275 | { | ||
2276 | __u32 base, end, sz; | ||
2277 | |||
2278 | if (esp->dma->revision == dvmarev3) { | ||
2279 | sz = sp->SCp.this_residual; | ||
2280 | if (sz > 0x1000000) | ||
2281 | sz = 0x1000000; | ||
2282 | } else { | ||
2283 | base = ((__u32)((unsigned long)sp->SCp.ptr)); | ||
2284 | base &= (0x1000000 - 1); | ||
2285 | end = (base + sp->SCp.this_residual); | ||
2286 | if (end > 0x1000000) | ||
2287 | end = 0x1000000; | ||
2288 | sz = (end - base); | ||
2289 | } | ||
2290 | return sz; | ||
2291 | } | ||
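| /* The clipping above appears to work around a DVMA addressing limit: | ||
| * except on rev3, a single transfer is not allowed to cross a 16MB | ||
| * (0x1000000) boundary, so only the part of the buffer up to that | ||
| * boundary is handed to the chip in one go. | ||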
2292 | |||
2293 | /* Misc. esp helper macros. */ | ||
2294 | #define esp_setcount(__eregs, __cnt, __hme) \ | ||
2295 | sbus_writeb(((__cnt)&0xff), (__eregs) + ESP_TCLOW); \ | ||
2296 | sbus_writeb((((__cnt)>>8)&0xff), (__eregs) + ESP_TCMED); \ | ||
2297 | if (__hme) { \ | ||
2298 | sbus_writeb((((__cnt)>>16)&0xff), (__eregs) + FAS_RLO); \ | ||
2299 | sbus_writeb(0, (__eregs) + FAS_RHI); \ | ||
2300 | } | ||
2301 | |||
2302 | #define esp_getcount(__eregs, __hme) \ | ||
2303 | ((sbus_readb((__eregs) + ESP_TCLOW)&0xff) | \ | ||
2304 | ((sbus_readb((__eregs) + ESP_TCMED)&0xff) << 8) | \ | ||
2305 | ((__hme) ? sbus_readb((__eregs) + FAS_RLO) << 16 : 0)) | ||
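| /* esp_setcount/esp_getcount program and read back the chip's transfer | ||
| * counter: TCLOW/TCMED hold the low 16 bits, and on the HME/FAS366 the | ||
| * FAS_RLO/FAS_RHI registers extend the count beyond 64KB. | ||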
2306 | |||
2307 | #define fcount(__esp) \ | ||
2308 | (((__esp)->erev == fashme) ? \ | ||
2309 | (__esp)->hme_fifo_workaround_count : \ | ||
2310 | sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_FBYTES) | ||
2311 | |||
2312 | #define fnzero(__esp) \ | ||
2313 | (((__esp)->erev == fashme) ? 0 : \ | ||
2314 | sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_ONOTZERO) | ||
2315 | |||
2316 | /* XXX speculative nops unnecessary when continuing amidst a data phase | ||
2317 | * XXX even on esp100!!! another case of flooding the bus with I/O reg | ||
2318 | * XXX writes... | ||
2319 | */ | ||
2320 | #define esp_maybe_nop(__esp) \ | ||
2321 | if ((__esp)->erev == esp100) \ | ||
2322 | esp_cmd((__esp), ESP_CMD_NULL) | ||
2323 | |||
2324 | #define sreg_to_dataphase(__sreg) \ | ||
2325 | ((((__sreg) & ESP_STAT_PMASK) == ESP_DOP) ? in_dataout : in_datain) | ||
2326 | |||
2327 | /* The ESP100, when in synchronous data phase, can mistake a long final | ||
2328 | * REQ pulse from the target for an extra byte; it places whatever is on | ||
2329 | * the data lines into the fifo. For now, we will assume when this | ||
2330 | * happens that the target is a bit quirky and we don't want to | ||
2331 | * be talking synchronously to it anyways. Regardless, we need to | ||
2332 | * tell the ESP to eat the extraneous byte so that we can proceed | ||
2333 | * to the next phase. | ||
2334 | */ | ||
2335 | static int esp100_sync_hwbug(struct esp *esp, struct scsi_cmnd *sp, int fifocnt) | ||
2336 | { | ||
2337 | /* Do not touch this piece of code. */ | ||
2338 | if ((!(esp->erev == esp100)) || | ||
2339 | (!(sreg_datainp((esp->sreg = sbus_readb(esp->eregs + ESP_STATUS))) && | ||
2340 | !fifocnt) && | ||
2341 | !(sreg_dataoutp(esp->sreg) && !fnzero(esp)))) { | ||
2342 | if (sp->SCp.phase == in_dataout) | ||
2343 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2344 | return 0; | ||
2345 | } else { | ||
2346 | /* Async mode for this guy. */ | ||
2347 | build_sync_nego_msg(esp, 0, 0); | ||
2348 | |||
2349 | /* Ack the bogus byte, but set ATN first. */ | ||
2350 | esp_cmd(esp, ESP_CMD_SATN); | ||
2351 | esp_cmd(esp, ESP_CMD_MOK); | ||
2352 | return 1; | ||
2353 | } | ||
2354 | } | ||
2355 | |||
2356 | /* This closes the window during a selection with a reselect pending; because | ||
2357 | * we use DMA for the selection process, the FIFO should hold the correct | ||
2358 | * contents if we get reselected during this process. So we just need to | ||
2359 | * ack the possible illegal cmd interrupt pending on the esp100. | ||
2360 | */ | ||
2361 | static inline int esp100_reconnect_hwbug(struct esp *esp) | ||
2362 | { | ||
2363 | u8 tmp; | ||
2364 | |||
2365 | if (esp->erev != esp100) | ||
2366 | return 0; | ||
2367 | tmp = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2368 | if (tmp & ESP_INTR_SR) | ||
2369 | return 1; | ||
2370 | return 0; | ||
2371 | } | ||
2372 | |||
2373 | /* This verifies the BUSID bits during a reselection so that we know which | ||
2374 | * target is talking to us. | ||
2375 | */ | ||
2376 | static inline int reconnect_target(struct esp *esp) | ||
2377 | { | ||
2378 | int it, me = esp->scsi_id_mask, targ = 0; | ||
2379 | |||
2380 | if (2 != fcount(esp)) | ||
2381 | return -1; | ||
2382 | if (esp->erev == fashme) { | ||
2383 | /* HME does not latch its own BUS ID bits during | ||
2384 | * a reselection. Also the target number is given | ||
2385 | * as an unsigned char, not as a sole bit number | ||
2386 | * like the other ESPs do. | ||
2387 | * Happy Meal indeed.... | ||
2388 | */ | ||
2389 | targ = esp->hme_fifo_workaround_buffer[0]; | ||
2390 | } else { | ||
2391 | it = sbus_readb(esp->eregs + ESP_FDATA); | ||
2392 | if (!(it & me)) | ||
2393 | return -1; | ||
2394 | it &= ~me; | ||
2395 | if (it & (it - 1)) | ||
2396 | return -1; | ||
2397 | while (!(it & 1)) | ||
2398 | targ++, it >>= 1; | ||
2399 | } | ||
2400 | return targ; | ||
2401 | } | ||
2402 | |||
2403 | /* This verifies the identify from the target so that we know which lun is | ||
2404 | * being reconnected. | ||
2405 | */ | ||
2406 | static inline int reconnect_lun(struct esp *esp) | ||
2407 | { | ||
2408 | int lun; | ||
2409 | |||
2410 | if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) | ||
2411 | return -1; | ||
2412 | if (esp->erev == fashme) | ||
2413 | lun = esp->hme_fifo_workaround_buffer[1]; | ||
2414 | else | ||
2415 | lun = sbus_readb(esp->eregs + ESP_FDATA); | ||
2416 | |||
2417 | /* Yes, you read this correctly. We report lun of zero | ||
2418 | * if we see a parity error. The ESP reports a parity error for | ||
2419 | * the lun byte, and this is the only way to hope to recover | ||
2420 | * because the target is connected. | ||
2421 | */ | ||
2422 | if (esp->sreg & ESP_STAT_PERR) | ||
2423 | return 0; | ||
2424 | |||
2425 | /* Check for illegal bits being set in the lun. */ | ||
2426 | if ((lun & 0x40) || !(lun & 0x80)) | ||
2427 | return -1; | ||
2428 | |||
2429 | return lun & 7; | ||
2430 | } | ||
2431 | |||
2432 | /* This puts the driver in a state where it can revitalize a command that | ||
2433 | * is being continued due to reselection. | ||
2434 | */ | ||
2435 | static inline void esp_connect(struct esp *esp, struct scsi_cmnd *sp) | ||
2436 | { | ||
2437 | struct esp_device *esp_dev = sp->device->hostdata; | ||
2438 | |||
2439 | if (esp->prev_soff != esp_dev->sync_max_offset || | ||
2440 | esp->prev_stp != esp_dev->sync_min_period || | ||
2441 | (esp->erev > esp100a && | ||
2442 | esp->prev_cfg3 != esp->config3[sp->device->id])) { | ||
2443 | esp->prev_soff = esp_dev->sync_max_offset; | ||
2444 | esp->prev_stp = esp_dev->sync_min_period; | ||
2445 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
2446 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
2447 | if (esp->erev > esp100a) { | ||
2448 | esp->prev_cfg3 = esp->config3[sp->device->id]; | ||
2449 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
2450 | } | ||
2451 | } | ||
2452 | esp->current_SC = sp; | ||
2453 | } | ||
2454 | |||
2455 | /* This will place the current working command back into the issue queue | ||
2456 | * if we are to receive a reselection amidst a selection attempt. | ||
2457 | */ | ||
2458 | static inline void esp_reconnect(struct esp *esp, struct scsi_cmnd *sp) | ||
2459 | { | ||
2460 | if (!esp->disconnected_SC) | ||
2461 | ESPLOG(("esp%d: Weird, being reselected but disconnected " | ||
2462 | "command queue is empty.\n", esp->esp_id)); | ||
2463 | esp->snip = 0; | ||
2464 | esp->current_SC = NULL; | ||
2465 | sp->SCp.phase = not_issued; | ||
2466 | append_SC(&esp->issue_SC, sp); | ||
2467 | } | ||
2468 | |||
2469 | /* Begin message in phase. */ | ||
2470 | static int esp_do_msgin(struct esp *esp) | ||
2471 | { | ||
2472 | /* Must be very careful with the fifo on the HME */ | ||
2473 | if ((esp->erev != fashme) || | ||
2474 | !(sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_FEMPTY)) | ||
2475 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2476 | esp_maybe_nop(esp); | ||
2477 | esp_cmd(esp, ESP_CMD_TI); | ||
2478 | esp->msgin_len = 1; | ||
2479 | esp->msgin_ctr = 0; | ||
2480 | esp_advance_phase(esp->current_SC, in_msgindone); | ||
2481 | return do_work_bus; | ||
2482 | } | ||
2483 | |||
2484 | /* This uses various DMA csr fields and the fifo flags count value to | ||
2485 | * determine how many bytes were successfully sent/received by the ESP. | ||
2486 | */ | ||
2487 | static inline int esp_bytes_sent(struct esp *esp, int fifo_count) | ||
2488 | { | ||
2489 | int rval = sbus_readl(esp->dregs + DMA_ADDR) - esp->esp_command_dvma; | ||
2490 | |||
2491 | if (esp->dma->revision == dvmarev1) | ||
2492 | rval -= (4 - ((sbus_readl(esp->dregs + DMA_CSR) & DMA_READ_AHEAD)>>11)); | ||
2493 | return rval - fifo_count; | ||
2494 | } | ||
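| /* In other words: progress is measured as how far the DMA address pointer | ||
| * advanced from the start of the command dvma area, with a correction for | ||
| * the rev1 read-ahead window, minus whatever is still sitting in the fifo. | ||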
2495 | |||
2496 | static inline void advance_sg(struct scsi_cmnd *sp) | ||
2497 | { | ||
2498 | ++sp->SCp.buffer; | ||
2499 | --sp->SCp.buffers_residual; | ||
2500 | sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer); | ||
2501 | sp->SCp.ptr = (char *)((unsigned long)sg_dma_address(sp->SCp.buffer)); | ||
2502 | } | ||
2503 | |||
2504 | /* Please note that the way I've coded these routines is that I _always_ | ||
2505 | * check for a disconnect during any and all information transfer | ||
2506 | * phases. The SCSI standard states that the target _can_ cause a BUS | ||
2507 | * FREE condition by dropping all MSG/CD/IO/BSY signals. Also note | ||
2508 | * that during information transfer phases the target controls every | ||
2509 | * change in phase, the only thing the initiator can do is "ask" for | ||
2510 | * a message out phase by driving ATN true. The target can, and sometimes | ||
2511 | * will, completely ignore this request so we cannot assume anything when | ||
2512 | * we try to force a message out phase to abort/reset a target. Most of | ||
2513 | * the time the target will eventually be nice and go to message out, so | ||
2514 | * we may have to hold on to our state about what we want to tell the target | ||
2515 | * for some period of time. | ||
2516 | */ | ||
2517 | |||
2518 | /* I think I have things working here correctly. Even partial transfers | ||
2519 | * within a buffer or sub-buffer should not upset us at all no matter | ||
2520 | * how bad the target and/or ESP fucks things up. | ||
2521 | */ | ||
2522 | static int esp_do_data(struct esp *esp) | ||
2523 | { | ||
2524 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2525 | int thisphase, hmuch; | ||
2526 | |||
2527 | ESPDATA(("esp_do_data: ")); | ||
2528 | esp_maybe_nop(esp); | ||
2529 | thisphase = sreg_to_dataphase(esp->sreg); | ||
2530 | esp_advance_phase(SCptr, thisphase); | ||
2531 | ESPDATA(("newphase<%s> ", (thisphase == in_datain) ? "DATAIN" : "DATAOUT")); | ||
2532 | hmuch = dma_can_transfer(esp, SCptr); | ||
2533 | if (hmuch > (64 * 1024) && (esp->erev != fashme)) | ||
2534 | hmuch = (64 * 1024); | ||
2535 | ESPDATA(("hmuch<%d> ", hmuch)); | ||
2536 | esp->current_transfer_size = hmuch; | ||
2537 | |||
2538 | if (esp->erev == fashme) { | ||
2539 | u32 tmp = esp->prev_hme_dmacsr; | ||
2540 | |||
2541 | /* Always set the ESP count registers first. */ | ||
2542 | esp_setcount(esp->eregs, hmuch, 1); | ||
2543 | |||
2544 | /* Get the DMA csr computed. */ | ||
2545 | tmp |= (DMA_SCSI_DISAB | DMA_ENABLE); | ||
2546 | if (thisphase == in_datain) | ||
2547 | tmp |= DMA_ST_WRITE; | ||
2548 | else | ||
2549 | tmp &= ~(DMA_ST_WRITE); | ||
2550 | esp->prev_hme_dmacsr = tmp; | ||
2551 | |||
2552 | ESPDATA(("DMA|TI --> do_intr_end\n")); | ||
2553 | if (thisphase == in_datain) { | ||
2554 | sbus_writel(hmuch, esp->dregs + DMA_COUNT); | ||
2555 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2556 | } else { | ||
2557 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2558 | sbus_writel(hmuch, esp->dregs + DMA_COUNT); | ||
2559 | } | ||
2560 | sbus_writel((__u32)((unsigned long)SCptr->SCp.ptr), esp->dregs+DMA_ADDR); | ||
2561 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
2562 | } else { | ||
2563 | esp_setcount(esp->eregs, hmuch, 0); | ||
2564 | dma_setup(esp, ((__u32)((unsigned long)SCptr->SCp.ptr)), | ||
2565 | hmuch, (thisphase == in_datain)); | ||
2566 | ESPDATA(("DMA|TI --> do_intr_end\n")); | ||
2567 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2568 | } | ||
2569 | return do_intr_end; | ||
2570 | } | ||
2571 | |||
2572 | /* See how successful the data transfer was. */ | ||
2573 | static int esp_do_data_finale(struct esp *esp) | ||
2574 | { | ||
2575 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2576 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
2577 | int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0; | ||
2578 | |||
2579 | ESPDATA(("esp_do_data_finale: ")); | ||
2580 | |||
2581 | if (SCptr->SCp.phase == in_datain) { | ||
2582 | if (esp->sreg & ESP_STAT_PERR) { | ||
2583 | /* Yuck, parity error. The ESP asserts ATN | ||
2584 | * so that we can go to message out phase | ||
2585 | * immediately and inform the target that | ||
2586 | * something bad happened. | ||
2587 | */ | ||
2588 | ESPLOG(("esp%d: data bad parity detected.\n", | ||
2589 | esp->esp_id)); | ||
2590 | esp->cur_msgout[0] = INITIATOR_ERROR; | ||
2591 | esp->msgout_len = 1; | ||
2592 | } | ||
2593 | dma_drain(esp); | ||
2594 | } | ||
2595 | dma_invalidate(esp); | ||
2596 | |||
2597 | /* This could happen for the above parity error case. */ | ||
2598 | if (esp->ireg != ESP_INTR_BSERV) { | ||
2599 | /* Please go to msgout phase, please please please... */ | ||
2600 | ESPLOG(("esp%d: !BSERV after data, probably to msgout\n", | ||
2601 | esp->esp_id)); | ||
2602 | return esp_do_phase_determine(esp); | ||
2603 | } | ||
2604 | |||
2605 | /* Check for partial transfers and other horrible events. | ||
2606 | * Note, here we read the real fifo flags register even | ||
2607 | * on HME broken adapters because we skip the HME fifo | ||
2608 | * workaround code in esp_handle() if we are doing data | ||
2609 | * phase things. We don't want to fuck directly with | ||
2610 | * the fifo like that, especially if doing synchronous | ||
2611 | * transfers! Also, we will need to double the count on | ||
2612 | * HME if we are doing wide transfers, as the HME fifo | ||
2613 | * will move and count 16-bit quantities during wide data. | ||
2614 | * SMCC _and_ Qlogic can both bite me. | ||
2615 | */ | ||
2616 | fifocnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES); | ||
2617 | if (esp->erev != fashme) | ||
2618 | ecount = esp_getcount(esp->eregs, 0); | ||
2619 | bytes_sent = esp->current_transfer_size; | ||
2620 | |||
2621 | ESPDATA(("trans_sz(%d), ", bytes_sent)); | ||
2622 | if (esp->erev == fashme) { | ||
2623 | if (!(esp->sreg & ESP_STAT_TCNT)) { | ||
2624 | ecount = esp_getcount(esp->eregs, 1); | ||
2625 | bytes_sent -= ecount; | ||
2626 | } | ||
2627 | |||
2628 | /* Always subtract any cruft remaining in the FIFO. */ | ||
2629 | if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) | ||
2630 | fifocnt <<= 1; | ||
2631 | if (SCptr->SCp.phase == in_dataout) | ||
2632 | bytes_sent -= fifocnt; | ||
2633 | |||
2634 | /* I have an IBM disk which exhibits the following | ||
2635 | * behavior during writes to it. It disconnects in | ||
2636 | * the middle of a partial transfer, the current sglist | ||
2637 | * buffer is 1024 bytes, the disk stops data transfer | ||
2638 | * at 512 bytes. | ||
2639 | * | ||
2640 | * However the FAS366 reports that 32 more bytes were | ||
2641 | * transferred than really were. This is precisely | ||
2642 | * the size of a fully loaded FIFO in wide scsi mode. | ||
2643 | * The FIFO state recorded indicates that it is empty. | ||
2644 | * | ||
2645 | * I have no idea if this is a bug in the FAS366 chip | ||
2646 | * or a bug in the firmware on this IBM disk. In any | ||
2647 | * event the following seems to be a good workaround. -DaveM | ||
2648 | */ | ||
2649 | if (bytes_sent != esp->current_transfer_size && | ||
2650 | SCptr->SCp.phase == in_dataout) { | ||
2651 | int mask = (64 - 1); | ||
2652 | |||
2653 | if ((esp->prev_cfg3 & ESP_CONFIG3_EWIDE) == 0) | ||
2654 | mask >>= 1; | ||
2655 | |||
2656 | if (bytes_sent & mask) | ||
2657 | bytes_sent -= (bytes_sent & mask); | ||
2658 | } | ||
2659 | } else { | ||
2660 | if (!(esp->sreg & ESP_STAT_TCNT)) | ||
2661 | bytes_sent -= ecount; | ||
2662 | if (SCptr->SCp.phase == in_dataout) | ||
2663 | bytes_sent -= fifocnt; | ||
2664 | } | ||
2665 | |||
2666 | ESPDATA(("bytes_sent(%d), ", bytes_sent)); | ||
2667 | |||
2668 | /* If we were in synchronous mode, check for peculiarities. */ | ||
2669 | if (esp->erev == fashme) { | ||
2670 | if (esp_dev->sync_max_offset) { | ||
2671 | if (SCptr->SCp.phase == in_dataout) | ||
2672 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2673 | } else { | ||
2674 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2675 | } | ||
2676 | } else { | ||
2677 | if (esp_dev->sync_max_offset) | ||
2678 | bogus_data = esp100_sync_hwbug(esp, SCptr, fifocnt); | ||
2679 | else | ||
2680 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2681 | } | ||
2682 | |||
2683 | /* Until we are sure of what has happened, we are certainly | ||
2684 | * in the dark. | ||
2685 | */ | ||
2686 | esp_advance_phase(SCptr, in_the_dark); | ||
2687 | |||
2688 | if (bytes_sent < 0) { | ||
2689 | /* I've seen this happen due to lost state in this | ||
2690 | * driver. No idea why it happened, but allowing | ||
2691 | * this value to be negative caused things to | ||
2692 | * lock up. This allows greater chance of recovery. | ||
2693 | * In fact every time I've seen this, it has been | ||
2694 | * a driver bug without question. | ||
2695 | */ | ||
2696 | ESPLOG(("esp%d: yieee, bytes_sent < 0!\n", esp->esp_id)); | ||
2697 | ESPLOG(("esp%d: csz=%d fifocount=%d ecount=%d\n", | ||
2698 | esp->esp_id, | ||
2699 | esp->current_transfer_size, fifocnt, ecount)); | ||
2700 | ESPLOG(("esp%d: use_sg=%d ptr=%p this_residual=%d\n", | ||
2701 | esp->esp_id, | ||
2702 | SCptr->use_sg, SCptr->SCp.ptr, SCptr->SCp.this_residual)); | ||
2703 | ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id, | ||
2704 | SCptr->device->id)); | ||
2705 | SCptr->device->borken = 1; | ||
2706 | esp_dev->sync = 0; | ||
2707 | bytes_sent = 0; | ||
2708 | } | ||
2709 | |||
2710 | /* Update the state of our transfer. */ | ||
2711 | SCptr->SCp.ptr += bytes_sent; | ||
2712 | SCptr->SCp.this_residual -= bytes_sent; | ||
2713 | if (SCptr->SCp.this_residual < 0) { | ||
2714 | /* shit */ | ||
2715 | ESPLOG(("esp%d: Data transfer overrun.\n", esp->esp_id)); | ||
2716 | SCptr->SCp.this_residual = 0; | ||
2717 | } | ||
2718 | |||
2719 | /* Maybe continue. */ | ||
2720 | if (!bogus_data) { | ||
2721 | ESPDATA(("!bogus_data, ")); | ||
2722 | |||
2723 | /* NO MATTER WHAT, we advance the scatterlist; | ||
2724 | * if the target should decide to disconnect | ||
2725 | * in between scatter chunks (which is common), | ||
2726 | * we could die horribly! I used to have the sg | ||
2727 | * advance occur only if we are going back into | ||
2728 | * (or are staying in) a data phase, you can | ||
2729 | * imagine the hell I went through trying to | ||
2730 | * figure this out. | ||
2731 | */ | ||
2732 | if (SCptr->use_sg && !SCptr->SCp.this_residual) | ||
2733 | advance_sg(SCptr); | ||
2734 | if (sreg_datainp(esp->sreg) || sreg_dataoutp(esp->sreg)) { | ||
2735 | ESPDATA(("to more data\n")); | ||
2736 | return esp_do_data(esp); | ||
2737 | } | ||
2738 | ESPDATA(("to new phase\n")); | ||
2739 | return esp_do_phase_determine(esp); | ||
2740 | } | ||
2741 | /* Bogus data, just wait for next interrupt. */ | ||
2742 | ESPLOG(("esp%d: bogus_data during end of data phase\n", | ||
2743 | esp->esp_id)); | ||
2744 | return do_intr_end; | ||
2745 | } | ||
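The bookkeeping above boils down to advancing the active data pointer by however many bytes actually crossed the bus, clamping the residual at zero, and stepping to the next scatterlist entry once the current one drains. A minimal standalone sketch of that update follows; the struct and function names here are invented for illustration and are not the driver's own:

	#include <stdio.h>

	struct segment {
		unsigned char *ptr;	/* current position within this segment */
		int residual;		/* bytes still to transfer in this segment */
	};

	/* Fold a completed (possibly partial) transfer into the segment state,
	 * clamping as the driver does so a bad count cannot wedge the machine. */
	static void account_bytes_sent(struct segment *s, int bytes_sent)
	{
		if (bytes_sent < 0)	/* a negative count is always a bug */
			bytes_sent = 0;
		s->ptr += bytes_sent;
		s->residual -= bytes_sent;
		if (s->residual < 0)	/* overrun: clamp the residual at zero */
			s->residual = 0;
	}

	int main(void)
	{
		unsigned char buf[512];
		struct segment s = { buf, sizeof(buf) };

		account_bytes_sent(&s, 200);
		account_bytes_sent(&s, 312);
		printf("offset=%ld residual=%d\n", (long)(s.ptr - buf), s.residual);
		return 0;
	}

Once the residual hits zero, the real driver calls advance_sg() unconditionally, precisely because the target may disconnect between chunks.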
2746 | |||
2747 | /* We received a non-good status return at the end of | ||
2748 | * running a SCSI command. This is used to decide if | ||
2749 | * we should clear our synchronous transfer state for | ||
2750 | * such a device when that happens. | ||
2751 | * | ||
2752 | * The idea is that when spinning up a disk or rewinding | ||
2753 | * a tape, we don't want to go into a loop re-negotiating | ||
2754 | * synchronous capabilities over and over. | ||
2755 | */ | ||
2756 | static int esp_should_clear_sync(struct scsi_cmnd *sp) | ||
2757 | { | ||
2758 | u8 cmd = sp->cmnd[0]; | ||
2759 | |||
2760 | /* These cases are for spinning up a disk and | ||
2761 | * waiting for that spinup to complete. | ||
2762 | */ | ||
2763 | if (cmd == START_STOP) | ||
2764 | return 0; | ||
2765 | |||
2766 | if (cmd == TEST_UNIT_READY) | ||
2767 | return 0; | ||
2768 | |||
2769 | /* One more special case for SCSI tape drives; | ||
2770 | * this is what is used to probe the device for | ||
2771 | * completion of a rewind or tape load operation. | ||
2772 | */ | ||
2773 | if (sp->device->type == TYPE_TAPE) { | ||
2774 | if (cmd == MODE_SENSE) | ||
2775 | return 0; | ||
2776 | } | ||
2777 | |||
2778 | return 1; | ||
2779 | } | ||
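In other words, the helper above is a whitelist of "polling" commands whose failures should not cost us the negotiated sync parameters. A rough, self-contained sketch of how such a predicate gets used; the opcode values are the standard SCSI ones written out numerically, and the function and variable names are illustrative only:

	#include <stdio.h>

	/* Standard SCSI opcodes: */
	#define TEST_UNIT_READY	0x00
	#define MODE_SENSE_6	0x1a
	#define START_STOP	0x1b

	static int should_clear_sync(unsigned char opcode, int is_tape)
	{
		if (opcode == START_STOP || opcode == TEST_UNIT_READY)
			return 0;		/* spin-up polling */
		if (is_tape && opcode == MODE_SENSE_6)
			return 0;		/* rewind/load polling */
		return 1;			/* real failure, renegotiate later */
	}

	int main(void)
	{
		int sync_enabled = 1;

		/* A TEST UNIT READY failing during spin-up leaves sync alone... */
		if (should_clear_sync(TEST_UNIT_READY, 0))
			sync_enabled = 0;
		/* ...but a failing READ(10) (0x28) would clear it. */
		if (should_clear_sync(0x28, 0))
			sync_enabled = 0;

		printf("sync_enabled=%d\n", sync_enabled);
		return 0;
	}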
2780 | |||
2781 | /* Either a command is completing or a target is dropping off the bus | ||
2782 | * to continue the command in the background so we can do other work. | ||
2783 | */ | ||
2784 | static int esp_do_freebus(struct esp *esp) | ||
2785 | { | ||
2786 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2787 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
2788 | int rval; | ||
2789 | |||
2790 | rval = skipahead2(esp, SCptr, in_status, in_msgindone, in_freeing); | ||
2791 | if (rval) | ||
2792 | return rval; | ||
2793 | if (esp->ireg != ESP_INTR_DC) { | ||
2794 | ESPLOG(("esp%d: Target will not disconnect\n", esp->esp_id)); | ||
2795 | return do_reset_bus; /* target will not drop BSY... */ | ||
2796 | } | ||
2797 | esp->msgout_len = 0; | ||
2798 | esp->prevmsgout = NOP; | ||
2799 | if (esp->prevmsgin == COMMAND_COMPLETE) { | ||
2800 | /* Normal end of nexus. */ | ||
2801 | if (esp->disconnected_SC || (esp->erev == fashme)) | ||
2802 | esp_cmd(esp, ESP_CMD_ESEL); | ||
2803 | |||
2804 | if (SCptr->SCp.Status != GOOD && | ||
2805 | SCptr->SCp.Status != CONDITION_GOOD && | ||
2806 | ((1<<SCptr->device->id) & esp->targets_present) && | ||
2807 | esp_dev->sync && | ||
2808 | esp_dev->sync_max_offset) { | ||
2809 | /* The SCSI standard says that the synchronous capabilities | ||
2810 | * should be renegotiated at this point. Most likely | ||
2811 | * we are about to request sense from this target, | ||
2812 | * in which case we want to avoid using sync | ||
2813 | * transfers until we are sure of the current target | ||
2814 | * state. | ||
2815 | */ | ||
2816 | ESPMISC(("esp: Status <%d> for target %d lun %d\n", | ||
2817 | SCptr->SCp.Status, SCptr->device->id, SCptr->device->lun)); | ||
2818 | |||
2819 | /* But don't do this when spinning up a disk at | ||
2820 | * boot time while we poll for completion as it | ||
2821 | * fills up the console with messages. Also, tapes | ||
2822 | * can report not ready many times right after | ||
2823 | * loading up a tape. | ||
2824 | */ | ||
2825 | if (esp_should_clear_sync(SCptr) != 0) | ||
2826 | esp_dev->sync = 0; | ||
2827 | } | ||
2828 | ESPDISC(("F<%02x,%02x>", SCptr->device->id, SCptr->device->lun)); | ||
2829 | esp_done(esp, ((SCptr->SCp.Status & 0xff) | | ||
2830 | ((SCptr->SCp.Message & 0xff)<<8) | | ||
2831 | (DID_OK << 16))); | ||
2832 | } else if (esp->prevmsgin == DISCONNECT) { | ||
2833 | /* Normal disconnect. */ | ||
2834 | esp_cmd(esp, ESP_CMD_ESEL); | ||
2835 | ESPDISC(("D<%02x,%02x>", SCptr->device->id, SCptr->device->lun)); | ||
2836 | append_SC(&esp->disconnected_SC, SCptr); | ||
2837 | esp->current_SC = NULL; | ||
2838 | if (esp->issue_SC) | ||
2839 | esp_exec_cmd(esp); | ||
2840 | } else { | ||
2841 | /* Driver bug, we do not expect a disconnect here | ||
2842 | * and should not have advanced the state engine | ||
2843 | * to in_freeing. | ||
2844 | */ | ||
2845 | ESPLOG(("esp%d: last msg not disc and not cmd cmplt.\n", | ||
2846 | esp->esp_id)); | ||
2847 | return do_reset_bus; | ||
2848 | } | ||
2849 | return do_intr_end; | ||
2850 | } | ||
2851 | |||
2852 | /* When a reselect occurs, and we cannot find the command to | ||
2853 | * reconnect to in our queues, we do this. | ||
2854 | */ | ||
2855 | static int esp_bad_reconnect(struct esp *esp) | ||
2856 | { | ||
2857 | struct scsi_cmnd *sp; | ||
2858 | |||
2859 | ESPLOG(("esp%d: Eieeee, reconnecting unknown command!\n", | ||
2860 | esp->esp_id)); | ||
2861 | ESPLOG(("QUEUE DUMP\n")); | ||
2862 | sp = esp->issue_SC; | ||
2863 | ESPLOG(("esp%d: issue_SC[", esp->esp_id)); | ||
2864 | while (sp) { | ||
2865 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2866 | sp = (struct scsi_cmnd *) sp->host_scribble; | ||
2867 | } | ||
2868 | ESPLOG(("]\n")); | ||
2869 | sp = esp->current_SC; | ||
2870 | ESPLOG(("esp%d: current_SC[", esp->esp_id)); | ||
2871 | if (sp) | ||
2872 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2873 | else | ||
2874 | ESPLOG(("<NULL>")); | ||
2875 | ESPLOG(("]\n")); | ||
2876 | sp = esp->disconnected_SC; | ||
2877 | ESPLOG(("esp%d: disconnected_SC[", esp->esp_id)); | ||
2878 | while (sp) { | ||
2879 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2880 | sp = (struct scsi_cmnd *) sp->host_scribble; | ||
2881 | } | ||
2882 | ESPLOG(("]\n")); | ||
2883 | return do_reset_bus; | ||
2884 | } | ||
2885 | |||
2886 | /* Do what is needed when a target tries to reconnect to us. */ | ||
2887 | static int esp_do_reconnect(struct esp *esp) | ||
2888 | { | ||
2889 | int lun, target; | ||
2890 | struct scsi_cmnd *SCptr; | ||
2891 | |||
2892 | /* Check for all bogus conditions first. */ | ||
2893 | target = reconnect_target(esp); | ||
2894 | if (target < 0) { | ||
2895 | ESPDISC(("bad bus bits\n")); | ||
2896 | return do_reset_bus; | ||
2897 | } | ||
2898 | lun = reconnect_lun(esp); | ||
2899 | if (lun < 0) { | ||
2900 | ESPDISC(("target=%2x, bad identify msg\n", target)); | ||
2901 | return do_reset_bus; | ||
2902 | } | ||
2903 | |||
2904 | /* Things look ok... */ | ||
2905 | ESPDISC(("R<%02x,%02x>", target, lun)); | ||
2906 | |||
2907 | /* Must not flush FIFO or DVMA on HME. */ | ||
2908 | if (esp->erev != fashme) { | ||
2909 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2910 | if (esp100_reconnect_hwbug(esp)) | ||
2911 | return do_reset_bus; | ||
2912 | esp_cmd(esp, ESP_CMD_NULL); | ||
2913 | } | ||
2914 | |||
2915 | SCptr = remove_SC(&esp->disconnected_SC, (u8) target, (u8) lun); | ||
2916 | if (!SCptr) | ||
2917 | return esp_bad_reconnect(esp); | ||
2918 | |||
2919 | esp_connect(esp, SCptr); | ||
2920 | esp_cmd(esp, ESP_CMD_MOK); | ||
2921 | |||
2922 | if (esp->erev == fashme) | ||
2923 | sbus_writeb(((SCptr->device->id & 0xf) | | ||
2924 | (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT)), | ||
2925 | esp->eregs + ESP_BUSID); | ||
2926 | |||
2927 | /* Reconnect implies a restore pointers operation. */ | ||
2928 | esp_restore_pointers(esp, SCptr); | ||
2929 | |||
2930 | esp->snip = 0; | ||
2931 | esp_advance_phase(SCptr, in_the_dark); | ||
2932 | return do_intr_end; | ||
2933 | } | ||
2934 | |||
2935 | /* End of NEXUS (hopefully), pick up status + message byte then leave if | ||
2936 | * all goes well. | ||
2937 | */ | ||
2938 | static int esp_do_status(struct esp *esp) | ||
2939 | { | ||
2940 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2941 | int intr, rval; | ||
2942 | |||
2943 | rval = skipahead1(esp, SCptr, in_the_dark, in_status); | ||
2944 | if (rval) | ||
2945 | return rval; | ||
2946 | intr = esp->ireg; | ||
2947 | ESPSTAT(("esp_do_status: ")); | ||
2948 | if (intr != ESP_INTR_DC) { | ||
2949 | int message_out = 0; /* for parity problems */ | ||
2950 | |||
2951 | /* Ack the message. */ | ||
2952 | ESPSTAT(("ack msg, ")); | ||
2953 | esp_cmd(esp, ESP_CMD_MOK); | ||
2954 | |||
2955 | if (esp->erev != fashme) { | ||
2956 | dma_flashclear(esp); | ||
2957 | |||
2958 | /* Wait till the first bits settle. */ | ||
2959 | while (esp->esp_command[0] == 0xff) | ||
2960 | udelay(1); | ||
2961 | } else { | ||
2962 | esp->esp_command[0] = esp->hme_fifo_workaround_buffer[0]; | ||
2963 | esp->esp_command[1] = esp->hme_fifo_workaround_buffer[1]; | ||
2964 | } | ||
2965 | |||
2966 | ESPSTAT(("got something, ")); | ||
2967 | /* ESP chimes in with one of | ||
2968 | * | ||
2969 | * 1) function done interrupt: | ||
2970 | * both status and message in bytes | ||
2971 | * are available | ||
2972 | * | ||
2973 | * 2) bus service interrupt: | ||
2974 | * only status byte was acquired | ||
2975 | * | ||
2976 | * 3) Anything else: | ||
2977 | * can't happen, but we test for it | ||
2978 | * anyway | ||
2979 | * | ||
2980 | * ALSO: If bad parity was detected on either | ||
2981 | * the status _or_ the message byte then | ||
2982 | * the ESP has asserted ATN on the bus | ||
2983 | * and we must therefore wait for the | ||
2984 | * next phase change. | ||
2985 | */ | ||
2986 | if (intr & ESP_INTR_FDONE) { | ||
2987 | /* We got it all, hallelujah. */ | ||
2988 | ESPSTAT(("got both, ")); | ||
2989 | SCptr->SCp.Status = esp->esp_command[0]; | ||
2990 | SCptr->SCp.Message = esp->esp_command[1]; | ||
2991 | esp->prevmsgin = SCptr->SCp.Message; | ||
2992 | esp->cur_msgin[0] = SCptr->SCp.Message; | ||
2993 | if (esp->sreg & ESP_STAT_PERR) { | ||
2994 | /* There was bad parity for the | ||
2995 | * message byte, the status byte | ||
2996 | * was ok. | ||
2997 | */ | ||
2998 | message_out = MSG_PARITY_ERROR; | ||
2999 | } | ||
3000 | } else if (intr == ESP_INTR_BSERV) { | ||
3001 | /* Only got status byte. */ | ||
3002 | ESPLOG(("esp%d: got status only, ", esp->esp_id)); | ||
3003 | if (!(esp->sreg & ESP_STAT_PERR)) { | ||
3004 | SCptr->SCp.Status = esp->esp_command[0]; | ||
3005 | SCptr->SCp.Message = 0xff; | ||
3006 | } else { | ||
3007 | /* The status byte had bad parity. | ||
3008 | * We leave the scsi_pointer Status | ||
3009 | * field alone, as we set it to a default | ||
3010 | * of CHECK_CONDITION in esp_queue. | ||
3011 | */ | ||
3012 | message_out = INITIATOR_ERROR; | ||
3013 | } | ||
3014 | } else { | ||
3015 | /* This shouldn't happen ever. */ | ||
3016 | ESPSTAT(("got bolixed\n")); | ||
3017 | esp_advance_phase(SCptr, in_the_dark); | ||
3018 | return esp_do_phase_determine(esp); | ||
3019 | } | ||
3020 | |||
3021 | if (!message_out) { | ||
3022 | ESPSTAT(("status=%2x msg=%2x, ", SCptr->SCp.Status, | ||
3023 | SCptr->SCp.Message)); | ||
3024 | if (SCptr->SCp.Message == COMMAND_COMPLETE) { | ||
3025 | ESPSTAT(("and was COMMAND_COMPLETE\n")); | ||
3026 | esp_advance_phase(SCptr, in_freeing); | ||
3027 | return esp_do_freebus(esp); | ||
3028 | } else { | ||
3029 | ESPLOG(("esp%d: and _not_ COMMAND_COMPLETE\n", | ||
3030 | esp->esp_id)); | ||
3031 | esp->msgin_len = esp->msgin_ctr = 1; | ||
3032 | esp_advance_phase(SCptr, in_msgindone); | ||
3033 | return esp_do_msgindone(esp); | ||
3034 | } | ||
3035 | } else { | ||
3036 | /* With luck we'll be able to let the target | ||
3037 | * know that bad parity happened; it will know | ||
3038 | * which byte caused the problem and send it | ||
3039 | * again. For the case where the status byte | ||
3040 | * receives bad parity, I do not believe most | ||
3041 | * targets recover very well. We'll see. | ||
3042 | */ | ||
3043 | ESPLOG(("esp%d: bad parity somewhere mout=%2x\n", | ||
3044 | esp->esp_id, message_out)); | ||
3045 | esp->cur_msgout[0] = message_out; | ||
3046 | esp->msgout_len = esp->msgout_ctr = 1; | ||
3047 | esp_advance_phase(SCptr, in_the_dark); | ||
3048 | return esp_do_phase_determine(esp); | ||
3049 | } | ||
3050 | } else { | ||
3051 | /* If we disconnect now, all hell breaks loose. */ | ||
3052 | ESPLOG(("esp%d: whoops, disconnect\n", esp->esp_id)); | ||
3053 | esp_advance_phase(SCptr, in_the_dark); | ||
3054 | return esp_do_phase_determine(esp); | ||
3055 | } | ||
3056 | } | ||
3057 | |||
3058 | static int esp_enter_status(struct esp *esp) | ||
3059 | { | ||
3060 | u8 thecmd = ESP_CMD_ICCSEQ; | ||
3061 | |||
3062 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3063 | if (esp->erev != fashme) { | ||
3064 | u32 tmp; | ||
3065 | |||
3066 | esp->esp_command[0] = esp->esp_command[1] = 0xff; | ||
3067 | sbus_writeb(2, esp->eregs + ESP_TCLOW); | ||
3068 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
3069 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
3070 | tmp |= (DMA_ST_WRITE | DMA_ENABLE); | ||
3071 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3072 | if (esp->dma->revision == dvmaesc1) | ||
3073 | sbus_writel(0x100, esp->dregs + DMA_COUNT); | ||
3074 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
3075 | thecmd |= ESP_CMD_DMA; | ||
3076 | } | ||
3077 | esp_cmd(esp, thecmd); | ||
3078 | esp_advance_phase(esp->current_SC, in_status); | ||
3079 | |||
3080 | return esp_do_status(esp); | ||
3081 | } | ||
3082 | |||
3083 | static int esp_disconnect_amidst_phases(struct esp *esp) | ||
3084 | { | ||
3085 | struct scsi_cmnd *sp = esp->current_SC; | ||
3086 | struct esp_device *esp_dev = sp->device->hostdata; | ||
3087 | |||
3088 | /* This means real problems if we see this | ||
3089 | * here, unless we were actually trying | ||
3090 | * to force the device to abort/reset. | ||
3091 | */ | ||
3092 | ESPLOG(("esp%d Disconnect amidst phases, ", esp->esp_id)); | ||
3093 | ESPLOG(("pphase<%s> cphase<%s>, ", | ||
3094 | phase_string(sp->SCp.phase), | ||
3095 | phase_string(sp->SCp.sent_command))); | ||
3096 | |||
3097 | if (esp->disconnected_SC != NULL || (esp->erev == fashme)) | ||
3098 | esp_cmd(esp, ESP_CMD_ESEL); | ||
3099 | |||
3100 | switch (esp->cur_msgout[0]) { | ||
3101 | default: | ||
3102 | /* We didn't expect this to happen at all. */ | ||
3103 | ESPLOG(("device is bolixed\n")); | ||
3104 | esp_advance_phase(sp, in_tgterror); | ||
3105 | esp_done(esp, (DID_ERROR << 16)); | ||
3106 | break; | ||
3107 | |||
3108 | case BUS_DEVICE_RESET: | ||
3109 | ESPLOG(("device reset successful\n")); | ||
3110 | esp_dev->sync_max_offset = 0; | ||
3111 | esp_dev->sync_min_period = 0; | ||
3112 | esp_dev->sync = 0; | ||
3113 | esp_advance_phase(sp, in_resetdev); | ||
3114 | esp_done(esp, (DID_RESET << 16)); | ||
3115 | break; | ||
3116 | |||
3117 | case ABORT: | ||
3118 | ESPLOG(("device abort successful\n")); | ||
3119 | esp_advance_phase(sp, in_abortone); | ||
3120 | esp_done(esp, (DID_ABORT << 16)); | ||
3121 | break; | ||
3122 | |||
3123 | }; | ||
3124 | return do_intr_end; | ||
3125 | } | ||
3126 | |||
3127 | static int esp_enter_msgout(struct esp *esp) | ||
3128 | { | ||
3129 | esp_advance_phase(esp->current_SC, in_msgout); | ||
3130 | return esp_do_msgout(esp); | ||
3131 | } | ||
3132 | |||
3133 | static int esp_enter_msgin(struct esp *esp) | ||
3134 | { | ||
3135 | esp_advance_phase(esp->current_SC, in_msgin); | ||
3136 | return esp_do_msgin(esp); | ||
3137 | } | ||
3138 | |||
3139 | static int esp_enter_cmd(struct esp *esp) | ||
3140 | { | ||
3141 | esp_advance_phase(esp->current_SC, in_cmdbegin); | ||
3142 | return esp_do_cmdbegin(esp); | ||
3143 | } | ||
3144 | |||
3145 | static int esp_enter_badphase(struct esp *esp) | ||
3146 | { | ||
3147 | ESPLOG(("esp%d: Bizarre bus phase %2x.\n", esp->esp_id, | ||
3148 | esp->sreg & ESP_STAT_PMASK)); | ||
3149 | return do_reset_bus; | ||
3150 | } | ||
3151 | |||
3152 | typedef int (*espfunc_t)(struct esp *); | ||
3153 | |||
3154 | static espfunc_t phase_vector[] = { | ||
3155 | esp_do_data, /* ESP_DOP */ | ||
3156 | esp_do_data, /* ESP_DIP */ | ||
3157 | esp_enter_cmd, /* ESP_CMDP */ | ||
3158 | esp_enter_status, /* ESP_STATP */ | ||
3159 | esp_enter_badphase, /* ESP_STAT_PMSG */ | ||
3160 | esp_enter_badphase, /* ESP_STAT_PMSG | ESP_STAT_PIO */ | ||
3161 | esp_enter_msgout, /* ESP_MOP */ | ||
3162 | esp_enter_msgin, /* ESP_MIP */ | ||
3163 | }; | ||
3164 | |||
3165 | /* The target has control of the bus and we have to see where it has | ||
3166 | * taken us. | ||
3167 | */ | ||
3168 | static int esp_do_phase_determine(struct esp *esp) | ||
3169 | { | ||
3170 | if ((esp->ireg & ESP_INTR_DC) != 0) | ||
3171 | return esp_disconnect_amidst_phases(esp); | ||
3172 | return phase_vector[esp->sreg & ESP_STAT_PMASK](esp); | ||
3173 | } | ||
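The dispatch above works because the low three phase bits of the status register (masked by ESP_STAT_PMASK) enumerate the eight SCSI bus phases, so they index a handler table directly. A minimal standalone sketch of the same pattern; the handler names, PHASE_MASK value, and the pretend register value are all made up for illustration, not taken from the driver:

	#include <stdio.h>

	struct bus;				/* stand-in for the host adapter state */
	typedef int (*phase_fn)(struct bus *);

	/* One handler per 3-bit phase code: DATA OUT, DATA IN, COMMAND, STATUS,
	 * two reserved codes, MESSAGE OUT, MESSAGE IN. */
	static int handle_data(struct bus *b)    { (void)b; puts("data");    return 0; }
	static int handle_command(struct bus *b) { (void)b; puts("command"); return 0; }
	static int handle_status(struct bus *b)  { (void)b; puts("status");  return 0; }
	static int handle_bad(struct bus *b)     { (void)b; puts("bad");     return -1; }
	static int handle_msgout(struct bus *b)  { (void)b; puts("msgout");  return 0; }
	static int handle_msgin(struct bus *b)   { (void)b; puts("msgin");   return 0; }

	static phase_fn phase_table[8] = {
		handle_data, handle_data, handle_command, handle_status,
		handle_bad,  handle_bad,  handle_msgout,  handle_msgin,
	};

	#define PHASE_MASK 0x07		/* analogous to ESP_STAT_PMASK */

	int main(void)
	{
		unsigned char status_reg = 0x63;	/* pretend hardware value */

		return phase_table[status_reg & PHASE_MASK](NULL);
	}

Because the table is dense, no switch statement is needed; an unexpected phase simply lands in a "bad phase" handler that resets the bus.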
3174 | |||
3175 | /* First interrupt after exec'ing a cmd comes here. */ | ||
3176 | static int esp_select_complete(struct esp *esp) | ||
3177 | { | ||
3178 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3179 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
3180 | int cmd_bytes_sent, fcnt; | ||
3181 | |||
3182 | if (esp->erev != fashme) | ||
3183 | esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS); | ||
3184 | |||
3185 | if (esp->erev == fashme) | ||
3186 | fcnt = esp->hme_fifo_workaround_count; | ||
3187 | else | ||
3188 | fcnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES); | ||
3189 | |||
3190 | cmd_bytes_sent = esp_bytes_sent(esp, fcnt); | ||
3191 | dma_invalidate(esp); | ||
3192 | |||
3193 | /* Let's check to see if a reselect happened | ||
3194 | * while we were trying to select. This must | ||
3195 | * be checked first. | ||
3196 | */ | ||
3197 | if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { | ||
3198 | esp_reconnect(esp, SCptr); | ||
3199 | return esp_do_reconnect(esp); | ||
3200 | } | ||
3201 | |||
3202 | /* Looks like things worked; we should see a bus service & | ||
3203 | * a function complete interrupt at this point. Note we | ||
3204 | * are doing a direct comparison because we don't want to | ||
3205 | * be fooled into thinking selection was successful if | ||
3206 | * ESP_INTR_DC is set; see below. | ||
3207 | */ | ||
3208 | if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { | ||
3209 | /* target speaks... */ | ||
3210 | esp->targets_present |= (1<<SCptr->device->id); | ||
3211 | |||
3212 | /* What if the target ignores the sdtr? */ | ||
3213 | if (esp->snip) | ||
3214 | esp_dev->sync = 1; | ||
3215 | |||
3216 | /* See how far, if at all, we got in getting | ||
3217 | * the information out to the target. | ||
3218 | */ | ||
3219 | switch (esp->seqreg) { | ||
3220 | default: | ||
3221 | |||
3222 | case ESP_STEP_ASEL: | ||
3223 | /* Arbitration won, target selected, but | ||
3224 | * we are in some phase which is not command | ||
3225 | * phase nor is it message out phase. | ||
3226 | * | ||
3227 | * XXX We've confused the target, obviously. | ||
3228 | * XXX So clear its state, but we also end | ||
3229 | * XXX up clearing everyone else's. That isn't | ||
3230 | * XXX so nice. I'd like to just reset this | ||
3231 | * XXX target, but if I cannot even get its | ||
3232 | * XXX attention and finish selection to talk | ||
3233 | * XXX to it, there is not much more I can do. | ||
3234 | * XXX If we have a loaded bus we're going to | ||
3235 | * XXX spend the next second or so renegotiating | ||
3236 | * XXX for synchronous transfers. | ||
3237 | */ | ||
3238 | ESPLOG(("esp%d: STEP_ASEL for tgt %d\n", | ||
3239 | esp->esp_id, SCptr->device->id)); | ||
3240 | |||
3241 | case ESP_STEP_SID: | ||
3242 | /* Arbitration won, target selected, went | ||
3243 | * to message out phase, sent one message | ||
3244 | * byte, then we stopped. ATN is asserted | ||
3245 | * on the SCSI bus and the target is still | ||
3246 | * there hanging on. This is a legal | ||
3247 | * sequence step if we gave the ESP a select | ||
3248 | * and stop command. | ||
3249 | * | ||
3250 | * XXX See above, I could set the borken flag | ||
3251 | * XXX in the device struct and retry the | ||
3252 | * XXX command. But would that help for | ||
3253 | * XXX tagged capable targets? | ||
3254 | */ | ||
3255 | |||
3256 | case ESP_STEP_NCMD: | ||
3257 | /* Arbitration won, target selected, maybe | ||
3258 | * sent the one message byte in message out | ||
3259 | * phase, but we did not go to command phase | ||
3260 | * in the end. Actually, we could have sent | ||
3261 | * only some of the message bytes if we tried | ||
3262 | * to send out the entire identify and tag | ||
3263 | * message using ESP_CMD_SA3. | ||
3264 | */ | ||
3265 | cmd_bytes_sent = 0; | ||
3266 | break; | ||
3267 | |||
3268 | case ESP_STEP_PPC: | ||
3269 | /* No, not the PowerPC pinhead. Arbitration | ||
3270 | * won, all message bytes sent if we went to | ||
3271 | * message out phase, went to command phase | ||
3272 | * but only part of the command was sent. | ||
3273 | * | ||
3274 | * XXX I've seen this, but usually in conjunction | ||
3275 | * XXX with a gross error which appears to have | ||
3276 | * XXX occurred between the time I told the | ||
3277 | * XXX ESP to arbitrate and when I got the | ||
3278 | * XXX interrupt. Could I have misloaded the | ||
3279 | * XXX command bytes into the fifo? Actually, | ||
3280 | * XXX I most likely missed a phase, and therefore | ||
3281 | * XXX went into never-never land and didn't even | ||
3282 | * XXX know it. That was the old driver though. | ||
3283 | * XXX What is even more peculiar is that the ESP | ||
3284 | * XXX showed the proper function complete and | ||
3285 | * XXX bus service bits in the interrupt register. | ||
3286 | */ | ||
3287 | |||
3288 | case ESP_STEP_FINI4: | ||
3289 | case ESP_STEP_FINI5: | ||
3290 | case ESP_STEP_FINI6: | ||
3291 | case ESP_STEP_FINI7: | ||
3292 | /* Account for the identify message */ | ||
3293 | if (SCptr->SCp.phase == in_slct_norm) | ||
3294 | cmd_bytes_sent -= 1; | ||
3295 | }; | ||
3296 | |||
3297 | if (esp->erev != fashme) | ||
3298 | esp_cmd(esp, ESP_CMD_NULL); | ||
3299 | |||
3300 | /* Be careful, we could get badly burned during synchronous | ||
3301 | * data transfers if we try to flush the fifo now. | ||
3302 | */ | ||
3303 | if ((esp->erev != fashme) && /* not a Happy Meal and... */ | ||
3304 | !fcnt && /* Fifo is empty and... */ | ||
3305 | /* either we are not doing synchronous transfers or... */ | ||
3306 | (!esp_dev->sync_max_offset || | ||
3307 | /* We are not going into data in phase. */ | ||
3308 | ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) | ||
3309 | esp_cmd(esp, ESP_CMD_FLUSH); /* flush is safe */ | ||
3310 | |||
3311 | /* See how far we got if this is not a slow command. */ | ||
3312 | if (!esp->esp_slowcmd) { | ||
3313 | if (cmd_bytes_sent < 0) | ||
3314 | cmd_bytes_sent = 0; | ||
3315 | if (cmd_bytes_sent != SCptr->cmd_len) { | ||
3316 | /* Crapola, mark it as a slowcmd | ||
3317 | * so that, with luck, we have some | ||
3318 | * chance of keeping the command | ||
3319 | * alive. | ||
3320 | * | ||
3321 | * XXX Actually, if we didn't send it all | ||
3322 | * XXX this means either we didn't set things | ||
3323 | * XXX up properly (driver bug) or the target | ||
3324 | * XXX or the ESP detected parity on one of | ||
3325 | * XXX the command bytes. This makes much | ||
3326 | * XXX more sense, and therefore this code | ||
3327 | * XXX should be changed to send out a | ||
3328 | * XXX parity error message or if the status | ||
3329 | * XXX register shows no parity error then | ||
3330 | * XXX just expect the target to bring the | ||
3331 | * XXX bus into message in phase so that it | ||
3332 | * XXX can send us the parity error message. | ||
3333 | * XXX SCSI sucks... | ||
3334 | */ | ||
3335 | esp->esp_slowcmd = 1; | ||
3336 | esp->esp_scmdp = &(SCptr->cmnd[cmd_bytes_sent]); | ||
3337 | esp->esp_scmdleft = (SCptr->cmd_len - cmd_bytes_sent); | ||
3338 | } | ||
3339 | } | ||
3340 | |||
3341 | /* Now figure out where we went. */ | ||
3342 | esp_advance_phase(SCptr, in_the_dark); | ||
3343 | return esp_do_phase_determine(esp); | ||
3344 | } | ||
3345 | |||
3346 | /* Did the target even make it? */ | ||
3347 | if (esp->ireg == ESP_INTR_DC) { | ||
3348 | /* wheee... nobody there, or the target didn't like | ||
3349 | * what we told it to do; clean up. | ||
3350 | */ | ||
3351 | |||
3352 | /* If anyone is off the bus, but working on | ||
3353 | * a command in the background for us, tell | ||
3354 | * the ESP to listen for them. | ||
3355 | */ | ||
3356 | if (esp->disconnected_SC) | ||
3357 | esp_cmd(esp, ESP_CMD_ESEL); | ||
3358 | |||
3359 | if (((1<<SCptr->device->id) & esp->targets_present) && | ||
3360 | esp->seqreg != 0 && | ||
3361 | (esp->cur_msgout[0] == EXTENDED_MESSAGE) && | ||
3362 | (SCptr->SCp.phase == in_slct_msg || | ||
3363 | SCptr->SCp.phase == in_slct_stop)) { | ||
3364 | /* Sync negotiation failed; fall back to async. */ | ||
3365 | esp->snip = 0; | ||
3366 | ESPLOG(("esp%d: Failed synchronous negotiation for target %d " | ||
3367 | "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun)); | ||
3368 | esp_dev->sync_max_offset = 0; | ||
3369 | esp_dev->sync_min_period = 0; | ||
3370 | esp_dev->sync = 1; /* so we don't negotiate again */ | ||
3371 | |||
3372 | /* Run the command again, this time though we | ||
3373 | * won't try to negotiate for synchronous transfers. | ||
3374 | * | ||
3375 | * XXX I'd like to do something like send an | ||
3376 | * XXX INITIATOR_ERROR or ABORT message to the | ||
3377 | * XXX target to tell it, "Sorry I confused you, | ||
3378 | * XXX please come back and I will be nicer next | ||
3379 | * XXX time". But that requires having the target | ||
3380 | * XXX on the bus, and it has dropped BSY on us. | ||
3381 | */ | ||
3382 | esp->current_SC = NULL; | ||
3383 | esp_advance_phase(SCptr, not_issued); | ||
3384 | prepend_SC(&esp->issue_SC, SCptr); | ||
3385 | esp_exec_cmd(esp); | ||
3386 | return do_intr_end; | ||
3387 | } | ||
3388 | |||
3389 | /* Ok, this is normal; this is what we see during boot | ||
3390 | * or whenever we are scanning the bus for targets. | ||
3391 | * But first make sure that is really what is happening. | ||
3392 | */ | ||
3393 | if (((1<<SCptr->device->id) & esp->targets_present)) { | ||
3394 | ESPLOG(("esp%d: Warning, live target %d not responding to " | ||
3395 | "selection.\n", esp->esp_id, SCptr->device->id)); | ||
3396 | |||
3397 | /* This _CAN_ happen. The SCSI standard states that | ||
3398 | * the target is to _not_ respond to selection if | ||
3399 | * _it_ detects bad parity on the bus for any reason. | ||
3400 | * Therefore, we assume that if we've talked successfully | ||
3401 | * to this target before, bad parity is the problem. | ||
3402 | */ | ||
3403 | esp_done(esp, (DID_PARITY << 16)); | ||
3404 | } else { | ||
3405 | /* Else, there really isn't anyone there. */ | ||
3406 | ESPMISC(("esp: selection failure, maybe nobody there?\n")); | ||
3407 | ESPMISC(("esp: target %d lun %d\n", | ||
3408 | SCptr->device->id, SCptr->device->lun)); | ||
3409 | esp_done(esp, (DID_BAD_TARGET << 16)); | ||
3410 | } | ||
3411 | return do_intr_end; | ||
3412 | } | ||
3413 | |||
3414 | ESPLOG(("esp%d: Selection failure.\n", esp->esp_id)); | ||
3415 | printk("esp%d: Currently -- ", esp->esp_id); | ||
3416 | esp_print_ireg(esp->ireg); printk(" "); | ||
3417 | esp_print_statreg(esp->sreg); printk(" "); | ||
3418 | esp_print_seqreg(esp->seqreg); printk("\n"); | ||
3419 | printk("esp%d: New -- ", esp->esp_id); | ||
3420 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
3421 | esp->seqreg = sbus_readb(esp->eregs + ESP_SSTEP); | ||
3422 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
3423 | esp_print_ireg(esp->ireg); printk(" "); | ||
3424 | esp_print_statreg(esp->sreg); printk(" "); | ||
3425 | esp_print_seqreg(esp->seqreg); printk("\n"); | ||
3426 | ESPLOG(("esp%d: resetting bus\n", esp->esp_id)); | ||
3427 | return do_reset_bus; /* ugh... */ | ||
3428 | } | ||
3429 | |||
3430 | /* Continue reading bytes for msgin phase. */ | ||
3431 | static int esp_do_msgincont(struct esp *esp) | ||
3432 | { | ||
3433 | if (esp->ireg & ESP_INTR_BSERV) { | ||
3434 | /* in the right phase too? */ | ||
3435 | if ((esp->sreg & ESP_STAT_PMASK) == ESP_MIP) { | ||
3436 | /* phew... */ | ||
3437 | esp_cmd(esp, ESP_CMD_TI); | ||
3438 | esp_advance_phase(esp->current_SC, in_msgindone); | ||
3439 | return do_intr_end; | ||
3440 | } | ||
3441 | |||
3442 | /* We changed phase but the ESP shows bus service; | ||
3443 | * in this case it is most likely that we, the | ||
3444 | * hacker who has been up for 20hrs straight | ||
3445 | * staring at the screen, drowning in coffee and | ||
3446 | * smelling of wretched cigarette ashes, | ||
3447 | * have miscoded something... so, try to | ||
3448 | * recover as best we can. | ||
3449 | */ | ||
3450 | ESPLOG(("esp%d: message in mis-carriage.\n", esp->esp_id)); | ||
3451 | } | ||
3452 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3453 | return do_phase_determine; | ||
3454 | } | ||
3455 | |||
3456 | static int check_singlebyte_msg(struct esp *esp) | ||
3457 | { | ||
3458 | esp->prevmsgin = esp->cur_msgin[0]; | ||
3459 | if (esp->cur_msgin[0] & 0x80) { | ||
3460 | /* wheee... */ | ||
3461 | ESPLOG(("esp%d: target sends identify amidst phases\n", | ||
3462 | esp->esp_id)); | ||
3463 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3464 | return 0; | ||
3465 | } else if (((esp->cur_msgin[0] & 0xf0) == 0x20) || | ||
3466 | (esp->cur_msgin[0] == EXTENDED_MESSAGE)) { | ||
3467 | esp->msgin_len = 2; | ||
3468 | esp_advance_phase(esp->current_SC, in_msgincont); | ||
3469 | return 0; | ||
3470 | } | ||
3471 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3472 | switch (esp->cur_msgin[0]) { | ||
3473 | default: | ||
3474 | /* We don't want to hear about it. */ | ||
3475 | ESPLOG(("esp%d: msg %02x which we don't know about\n", esp->esp_id, | ||
3476 | esp->cur_msgin[0])); | ||
3477 | return MESSAGE_REJECT; | ||
3478 | |||
3479 | case NOP: | ||
3480 | ESPLOG(("esp%d: target %d sends a nop\n", esp->esp_id, | ||
3481 | esp->current_SC->device->id)); | ||
3482 | return 0; | ||
3483 | |||
3484 | case RESTORE_POINTERS: | ||
3485 | /* In this case we might also have to back up the | ||
3486 | * "slow command" pointer. It is rare to get such | ||
3487 | * a save/restore pointer sequence so early in the | ||
3488 | * bus transition sequences, but cover it. | ||
3489 | */ | ||
3490 | if (esp->esp_slowcmd) { | ||
3491 | esp->esp_scmdleft = esp->current_SC->cmd_len; | ||
3492 | esp->esp_scmdp = &esp->current_SC->cmnd[0]; | ||
3493 | } | ||
3494 | esp_restore_pointers(esp, esp->current_SC); | ||
3495 | return 0; | ||
3496 | |||
3497 | case SAVE_POINTERS: | ||
3498 | esp_save_pointers(esp, esp->current_SC); | ||
3499 | return 0; | ||
3500 | |||
3501 | case COMMAND_COMPLETE: | ||
3502 | case DISCONNECT: | ||
3503 | /* Freeing the bus, let it go. */ | ||
3504 | esp->current_SC->SCp.phase = in_freeing; | ||
3505 | return 0; | ||
3506 | |||
3507 | case MESSAGE_REJECT: | ||
3508 | ESPMISC(("msg reject, ")); | ||
3509 | if (esp->prevmsgout == EXTENDED_MESSAGE) { | ||
3510 | struct esp_device *esp_dev = esp->current_SC->device->hostdata; | ||
3511 | |||
3512 | /* Doesn't look like this target can | ||
3513 | * do synchronous or WIDE transfers. | ||
3514 | */ | ||
3515 | ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n")); | ||
3516 | esp_dev->sync = 1; | ||
3517 | esp_dev->wide = 1; | ||
3518 | esp_dev->sync_min_period = 0; | ||
3519 | esp_dev->sync_max_offset = 0; | ||
3520 | return 0; | ||
3521 | } else { | ||
3522 | ESPMISC(("not sync nego, sending ABORT\n")); | ||
3523 | return ABORT; | ||
3524 | } | ||
3525 | }; | ||
3526 | } | ||
3527 | |||
3528 | /* Target negotiates for synchronous transfers before we do; this | ||
3529 | * is legal although very strange. What is even funnier is that | ||
3530 | * the SCSI-2 standard specifically recommends against targets doing | ||
3531 | * this because so many initiators cannot cope with it. | ||
3532 | */ | ||
3533 | static int target_with_ants_in_pants(struct esp *esp, | ||
3534 | struct scsi_cmnd *SCptr, | ||
3535 | struct esp_device *esp_dev) | ||
3536 | { | ||
3537 | if (esp_dev->sync || SCptr->device->borken) { | ||
3538 | /* sorry, no can do */ | ||
3539 | ESPSDTR(("forcing to async, ")); | ||
3540 | build_sync_nego_msg(esp, 0, 0); | ||
3541 | esp_dev->sync = 1; | ||
3542 | esp->snip = 1; | ||
3543 | ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id)); | ||
3544 | esp_advance_phase(SCptr, in_the_dark); | ||
3545 | return EXTENDED_MESSAGE; | ||
3546 | } | ||
3547 | |||
3548 | /* Ok, we'll check them out... */ | ||
3549 | return 0; | ||
3550 | } | ||
3551 | |||
3552 | static void sync_report(struct esp *esp) | ||
3553 | { | ||
3554 | int msg3, msg4; | ||
3555 | char *type; | ||
3556 | |||
3557 | msg3 = esp->cur_msgin[3]; | ||
3558 | msg4 = esp->cur_msgin[4]; | ||
3559 | if (msg4) { | ||
3560 | int hz = 1000000000 / (msg3 * 4); | ||
3561 | int integer = hz / 1000000; | ||
3562 | int fraction = (hz - (integer * 1000000)) / 10000; | ||
3563 | if ((esp->erev == fashme) && | ||
3564 | (esp->config3[esp->current_SC->device->id] & ESP_CONFIG3_EWIDE)) { | ||
3565 | type = "FAST-WIDE"; | ||
3566 | integer <<= 1; | ||
3567 | fraction <<= 1; | ||
3568 | } else if ((msg3 * 4) < 200) { | ||
3569 | type = "FAST"; | ||
3570 | } else { | ||
3571 | type = "synchronous"; | ||
3572 | } | ||
3573 | |||
3574 | /* Do not transform this back into one big printk | ||
3575 | * again; it triggers a bug in our sparc64-gcc272 | ||
3576 | * sibling call optimization. -DaveM | ||
3577 | */ | ||
3578 | ESPLOG((KERN_INFO "esp%d: target %d ", | ||
3579 | esp->esp_id, esp->current_SC->device->id)); | ||
3580 | ESPLOG(("[period %dns offset %d %d.%02dMHz ", | ||
3581 | (int) msg3 * 4, (int) msg4, | ||
3582 | integer, fraction)); | ||
3583 | ESPLOG(("%s SCSI%s]\n", type, | ||
3584 | (((msg3 * 4) < 200) ? "-II" : ""))); | ||
3585 | } else { | ||
3586 | ESPLOG((KERN_INFO "esp%d: target %d asynchronous\n", | ||
3587 | esp->esp_id, esp->current_SC->device->id)); | ||
3588 | } | ||
3589 | } | ||
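sync_report() turns the two SDTR bytes back into human units: the period byte counts in steps of 4 ns, so the clock rate is 10^9 / (period * 4) Hz, which is split into integer MHz and two hundredths digits using only integer math. A small, self-contained illustration of that arithmetic follows; the example values (period byte 12, offset 15) are arbitrary and not taken from a real negotiation:

	#include <stdio.h>

	int main(void)
	{
		int period_byte = 12;	/* SDTR period in 4 ns units -> 48 ns */
		int offset = 15;	/* SDTR offset (0 would mean asynchronous) */

		if (offset) {
			int hz = 1000000000 / (period_byte * 4);
			int integer = hz / 1000000;
			int fraction = (hz - integer * 1000000) / 10000;

			printf("period %dns offset %d %d.%02dMHz\n",
			       period_byte * 4, offset, integer, fraction);
		} else {
			printf("asynchronous\n");
		}
		return 0;
	}

For these inputs the output is "period 48ns offset 15 20.83MHz", matching the form of the log line the driver prints.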
3590 | |||
3591 | static int check_multibyte_msg(struct esp *esp) | ||
3592 | { | ||
3593 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3594 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
3595 | u8 regval = 0; | ||
3596 | int message_out = 0; | ||
3597 | |||
3598 | ESPSDTR(("chk multibyte msg: ")); | ||
3599 | if (esp->cur_msgin[2] == EXTENDED_SDTR) { | ||
3600 | int period = esp->cur_msgin[3]; | ||
3601 | int offset = esp->cur_msgin[4]; | ||
3602 | |||
3603 | ESPSDTR(("is sync nego response, ")); | ||
3604 | if (!esp->snip) { | ||
3605 | int rval; | ||
3606 | |||
3607 | /* Target negotiates first! */ | ||
3608 | ESPSDTR(("target jumps the gun, ")); | ||
3609 | message_out = EXTENDED_MESSAGE; /* we must respond */ | ||
3610 | rval = target_with_ants_in_pants(esp, SCptr, esp_dev); | ||
3611 | if (rval) | ||
3612 | return rval; | ||
3613 | } | ||
3614 | |||
3615 | ESPSDTR(("examining sdtr, ")); | ||
3616 | |||
3617 | /* Offset cannot be larger than ESP fifo size. */ | ||
3618 | if (offset > 15) { | ||
3619 | ESPSDTR(("offset too big %2x, ", offset)); | ||
3620 | offset = 15; | ||
3621 | ESPSDTR(("sending back new offset\n")); | ||
3622 | build_sync_nego_msg(esp, period, offset); | ||
3623 | return EXTENDED_MESSAGE; | ||
3624 | } | ||
3625 | |||
3626 | if (offset && period > esp->max_period) { | ||
3627 | /* Yeee, async for this slow device. */ | ||
3628 | ESPSDTR(("period too long %2x, ", period)); | ||
3629 | build_sync_nego_msg(esp, 0, 0); | ||
3630 | ESPSDTR(("hoping for msgout\n")); | ||
3631 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3632 | return EXTENDED_MESSAGE; | ||
3633 | } else if (offset && period < esp->min_period) { | ||
3634 | ESPSDTR(("period too short %2x, ", period)); | ||
3635 | period = esp->min_period; | ||
3636 | if (esp->erev > esp236) | ||
3637 | regval = 4; | ||
3638 | else | ||
3639 | regval = 5; | ||
3640 | } else if (offset) { | ||
3641 | int tmp; | ||
3642 | |||
3643 | ESPSDTR(("period is ok, ")); | ||
3644 | tmp = esp->ccycle / 1000; | ||
3645 | regval = (((period << 2) + tmp - 1) / tmp); | ||
3646 | if (regval && ((esp->erev == fas100a || | ||
3647 | esp->erev == fas236 || | ||
3648 | esp->erev == fashme))) { | ||
3649 | if (period >= 50) | ||
3650 | regval--; | ||
3651 | } | ||
3652 | } | ||
3653 | |||
3654 | if (offset) { | ||
3655 | u8 bit; | ||
3656 | |||
3657 | esp_dev->sync_min_period = (regval & 0x1f); | ||
3658 | esp_dev->sync_max_offset = (offset | esp->radelay); | ||
3659 | if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) { | ||
3660 | if ((esp->erev == fas100a) || (esp->erev == fashme)) | ||
3661 | bit = ESP_CONFIG3_FAST; | ||
3662 | else | ||
3663 | bit = ESP_CONFIG3_FSCSI; | ||
3664 | if (period < 50) { | ||
3665 | /* On FAS366, if using fast-20 synchronous transfers | ||
3666 | * we need to make sure the REQ/ACK assert/deassert | ||
3667 | * control bits are clear. | ||
3668 | */ | ||
3669 | if (esp->erev == fashme) | ||
3670 | esp_dev->sync_max_offset &= ~esp->radelay; | ||
3671 | esp->config3[SCptr->device->id] |= bit; | ||
3672 | } else { | ||
3673 | esp->config3[SCptr->device->id] &= ~bit; | ||
3674 | } | ||
3675 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3676 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3677 | } | ||
3678 | esp->prev_soff = esp_dev->sync_max_offset; | ||
3679 | esp->prev_stp = esp_dev->sync_min_period; | ||
3680 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
3681 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
3682 | ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n", | ||
3683 | esp_dev->sync_max_offset, | ||
3684 | esp_dev->sync_min_period, | ||
3685 | esp->config3[SCptr->device->id])); | ||
3686 | |||
3687 | esp->snip = 0; | ||
3688 | } else if (esp_dev->sync_max_offset) { | ||
3689 | u8 bit; | ||
3690 | |||
3691 | /* back to async mode */ | ||
3692 | ESPSDTR(("unaccaptable sync nego, forcing async\n")); | ||
3693 | esp_dev->sync_max_offset = 0; | ||
3694 | esp_dev->sync_min_period = 0; | ||
3695 | esp->prev_soff = 0; | ||
3696 | esp->prev_stp = 0; | ||
3697 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
3698 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
3699 | if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) { | ||
3700 | if ((esp->erev == fas100a) || (esp->erev == fashme)) | ||
3701 | bit = ESP_CONFIG3_FAST; | ||
3702 | else | ||
3703 | bit = ESP_CONFIG3_FSCSI; | ||
3704 | esp->config3[SCptr->device->id] &= ~bit; | ||
3705 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3706 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3707 | } | ||
3708 | } | ||
3709 | |||
3710 | sync_report(esp); | ||
3711 | |||
3712 | ESPSDTR(("chk multibyte msg: sync is known, ")); | ||
3713 | esp_dev->sync = 1; | ||
3714 | |||
3715 | if (message_out) { | ||
3716 | ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n", | ||
3717 | esp->esp_id)); | ||
3718 | build_sync_nego_msg(esp, period, offset); | ||
3719 | esp_advance_phase(SCptr, in_the_dark); | ||
3720 | return EXTENDED_MESSAGE; | ||
3721 | } | ||
3722 | |||
3723 | ESPSDTR(("returning zero\n")); | ||
3724 | esp_advance_phase(SCptr, in_the_dark); /* ...or else! */ | ||
3725 | return 0; | ||
3726 | } else if (esp->cur_msgin[2] == EXTENDED_WDTR) { | ||
3727 | int size = 8 << esp->cur_msgin[3]; | ||
3728 | |||
3729 | esp->wnip = 0; | ||
3730 | if (esp->erev != fashme) { | ||
3731 | ESPLOG(("esp%d: AIEEE wide msg received and not HME.\n", | ||
3732 | esp->esp_id)); | ||
3733 | message_out = MESSAGE_REJECT; | ||
3734 | } else if (size > 16) { | ||
3735 | ESPLOG(("esp%d: AIEEE wide transfer for %d size " | ||
3736 | "not supported.\n", esp->esp_id, size)); | ||
3737 | message_out = MESSAGE_REJECT; | ||
3738 | } else { | ||
3739 | /* Things look good; let's see what we got. */ | ||
3740 | if (size == 16) { | ||
3741 | /* Set config 3 register for this target. */ | ||
3742 | esp->config3[SCptr->device->id] |= ESP_CONFIG3_EWIDE; | ||
3743 | } else { | ||
3744 | /* Just make sure it was one byte sized. */ | ||
3745 | if (size != 8) { | ||
3746 | ESPLOG(("esp%d: Aieee, wide nego of %d size.\n", | ||
3747 | esp->esp_id, size)); | ||
3748 | message_out = MESSAGE_REJECT; | ||
3749 | goto finish; | ||
3750 | } | ||
3751 | /* Pure paranoia. */ | ||
3752 | esp->config3[SCptr->device->id] &= ~(ESP_CONFIG3_EWIDE); | ||
3753 | } | ||
3754 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3755 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3756 | |||
3757 | /* Regardless, next try for sync transfers. */ | ||
3758 | build_sync_nego_msg(esp, esp->sync_defp, 15); | ||
3759 | esp_dev->sync = 1; | ||
3760 | esp->snip = 1; | ||
3761 | message_out = EXTENDED_MESSAGE; | ||
3762 | } | ||
3763 | } else if (esp->cur_msgin[2] == EXTENDED_MODIFY_DATA_POINTER) { | ||
3764 | ESPLOG(("esp%d: rejecting modify data ptr msg\n", esp->esp_id)); | ||
3765 | message_out = MESSAGE_REJECT; | ||
3766 | } | ||
3767 | finish: | ||
3768 | esp_advance_phase(SCptr, in_the_dark); | ||
3769 | return message_out; | ||
3770 | } | ||
3771 | |||
3772 | static int esp_do_msgindone(struct esp *esp) | ||
3773 | { | ||
3774 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3775 | int message_out = 0, it = 0, rval; | ||
3776 | |||
3777 | rval = skipahead1(esp, SCptr, in_msgin, in_msgindone); | ||
3778 | if (rval) | ||
3779 | return rval; | ||
3780 | if (SCptr->SCp.sent_command != in_status) { | ||
3781 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
3782 | if (esp->msgin_len && (esp->sreg & ESP_STAT_PERR)) { | ||
3783 | message_out = MSG_PARITY_ERROR; | ||
3784 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3785 | } else if (esp->erev != fashme && | ||
3786 | (it = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES)) != 1) { | ||
3787 | /* We certainly dropped the ball somewhere. */ | ||
3788 | message_out = INITIATOR_ERROR; | ||
3789 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3790 | } else if (!esp->msgin_len) { | ||
3791 | if (esp->erev == fashme) | ||
3792 | it = esp->hme_fifo_workaround_buffer[0]; | ||
3793 | else | ||
3794 | it = sbus_readb(esp->eregs + ESP_FDATA); | ||
3795 | esp_advance_phase(SCptr, in_msgincont); | ||
3796 | } else { | ||
3797 | /* it is ok and we want it */ | ||
3798 | if (esp->erev == fashme) | ||
3799 | it = esp->cur_msgin[esp->msgin_ctr] = | ||
3800 | esp->hme_fifo_workaround_buffer[0]; | ||
3801 | else | ||
3802 | it = esp->cur_msgin[esp->msgin_ctr] = | ||
3803 | sbus_readb(esp->eregs + ESP_FDATA); | ||
3804 | esp->msgin_ctr++; | ||
3805 | } | ||
3806 | } else { | ||
3807 | esp_advance_phase(SCptr, in_the_dark); | ||
3808 | return do_work_bus; | ||
3809 | } | ||
3810 | } else { | ||
3811 | it = esp->cur_msgin[0]; | ||
3812 | } | ||
3813 | if (!message_out && esp->msgin_len) { | ||
3814 | if (esp->msgin_ctr < esp->msgin_len) { | ||
3815 | esp_advance_phase(SCptr, in_msgincont); | ||
3816 | } else if (esp->msgin_len == 1) { | ||
3817 | message_out = check_singlebyte_msg(esp); | ||
3818 | } else if (esp->msgin_len == 2) { | ||
3819 | if (esp->cur_msgin[0] == EXTENDED_MESSAGE) { | ||
3820 | if ((it + 2) >= 15) { | ||
3821 | message_out = MESSAGE_REJECT; | ||
3822 | } else { | ||
3823 | esp->msgin_len = (it + 2); | ||
3824 | esp_advance_phase(SCptr, in_msgincont); | ||
3825 | } | ||
3826 | } else { | ||
3827 | message_out = MESSAGE_REJECT; /* foo on you */ | ||
3828 | } | ||
3829 | } else { | ||
3830 | message_out = check_multibyte_msg(esp); | ||
3831 | } | ||
3832 | } | ||
3833 | if (message_out < 0) { | ||
3834 | return -message_out; | ||
3835 | } else if (message_out) { | ||
3836 | if (((message_out != 1) && | ||
3837 | ((message_out < 0x20) || (message_out & 0x80)))) | ||
3838 | esp->msgout_len = 1; | ||
3839 | esp->cur_msgout[0] = message_out; | ||
3840 | esp_cmd(esp, ESP_CMD_SATN); | ||
3841 | esp_advance_phase(SCptr, in_the_dark); | ||
3842 | esp->msgin_len = 0; | ||
3843 | } | ||
3844 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
3845 | esp->sreg &= ~(ESP_STAT_INTR); | ||
3846 | if ((esp->sreg & (ESP_STAT_PMSG|ESP_STAT_PCD)) == (ESP_STAT_PMSG|ESP_STAT_PCD)) | ||
3847 | esp_cmd(esp, ESP_CMD_MOK); | ||
3848 | if ((SCptr->SCp.sent_command == in_msgindone) && | ||
3849 | (SCptr->SCp.phase == in_freeing)) | ||
3850 | return esp_do_freebus(esp); | ||
3851 | return do_intr_end; | ||
3852 | } | ||
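The length handling in esp_do_msgindone() follows the SCSI extended-message layout: byte 0 is the 0x01 EXTENDED_MESSAGE code, byte 1 is the count of bytes that follow, so the whole message occupies that count plus the two header bytes, and anything that would not fit the fifo is rejected. A minimal sketch of the same computation, with the limit chosen to mirror the driver's check and all names invented for illustration:

	#include <stdio.h>

	#define FIFO_LIMIT 15	/* mirrors the fifo-size check in the driver */

	/* Given the first two bytes of an incoming message, return how many
	 * bytes the complete extended message occupies, -1 if it is too big
	 * and must be rejected, or 0 if it is not an extended message. */
	static int extended_msg_total_len(unsigned char byte0, unsigned char byte1)
	{
		if (byte0 != 0x01)		/* 0x01 == EXTENDED_MESSAGE */
			return 0;
		if (byte1 + 2 >= FIFO_LIMIT)
			return -1;		/* answer with MESSAGE REJECT */
		return byte1 + 2;		/* code byte + length byte + payload */
	}

	int main(void)
	{
		/* An SDTR is 0x01 0x03 0x01 <period> <offset>: five bytes total. */
		printf("SDTR total length: %d\n", extended_msg_total_len(0x01, 0x03));
		return 0;
	}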
3853 | |||
3854 | static int esp_do_cmdbegin(struct esp *esp) | ||
3855 | { | ||
3856 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3857 | |||
3858 | esp_advance_phase(SCptr, in_cmdend); | ||
3859 | if (esp->erev == fashme) { | ||
3860 | u32 tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
3861 | int i; | ||
3862 | |||
3863 | for (i = 0; i < esp->esp_scmdleft; i++) | ||
3864 | esp->esp_command[i] = *esp->esp_scmdp++; | ||
3865 | esp->esp_scmdleft = 0; | ||
3866 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3867 | esp_setcount(esp->eregs, i, 1); | ||
3868 | esp_cmd(esp, (ESP_CMD_DMA | ESP_CMD_TI)); | ||
3869 | tmp |= (DMA_SCSI_DISAB | DMA_ENABLE); | ||
3870 | tmp &= ~(DMA_ST_WRITE); | ||
3871 | sbus_writel(i, esp->dregs + DMA_COUNT); | ||
3872 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
3873 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3874 | } else { | ||
3875 | u8 tmp; | ||
3876 | |||
3877 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3878 | tmp = *esp->esp_scmdp++; | ||
3879 | esp->esp_scmdleft--; | ||
3880 | sbus_writeb(tmp, esp->eregs + ESP_FDATA); | ||
3881 | esp_cmd(esp, ESP_CMD_TI); | ||
3882 | } | ||
3883 | return do_intr_end; | ||
3884 | } | ||
3885 | |||
3886 | static int esp_do_cmddone(struct esp *esp) | ||
3887 | { | ||
3888 | if (esp->erev == fashme) | ||
3889 | dma_invalidate(esp); | ||
3890 | else | ||
3891 | esp_cmd(esp, ESP_CMD_NULL); | ||
3892 | |||
3893 | if (esp->ireg & ESP_INTR_BSERV) { | ||
3894 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3895 | return esp_do_phase_determine(esp); | ||
3896 | } | ||
3897 | |||
3898 | ESPLOG(("esp%d: in do_cmddone() but didn't get BSERV interrupt.\n", | ||
3899 | esp->esp_id)); | ||
3900 | return do_reset_bus; | ||
3901 | } | ||
3902 | |||
3903 | static int esp_do_msgout(struct esp *esp) | ||
3904 | { | ||
3905 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3906 | switch (esp->msgout_len) { | ||
3907 | case 1: | ||
3908 | if (esp->erev == fashme) | ||
3909 | hme_fifo_push(esp, &esp->cur_msgout[0], 1); | ||
3910 | else | ||
3911 | sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA); | ||
3912 | |||
3913 | esp_cmd(esp, ESP_CMD_TI); | ||
3914 | break; | ||
3915 | |||
3916 | case 2: | ||
3917 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3918 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3919 | |||
3920 | if (esp->erev == fashme) { | ||
3921 | hme_fifo_push(esp, &esp->cur_msgout[0], 2); | ||
3922 | esp_cmd(esp, ESP_CMD_TI); | ||
3923 | } else { | ||
3924 | dma_setup(esp, esp->esp_command_dvma, 2, 0); | ||
3925 | esp_setcount(esp->eregs, 2, 0); | ||
3926 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3927 | } | ||
3928 | break; | ||
3929 | |||
3930 | case 4: | ||
3931 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3932 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3933 | esp->esp_command[2] = esp->cur_msgout[2]; | ||
3934 | esp->esp_command[3] = esp->cur_msgout[3]; | ||
3935 | esp->snip = 1; | ||
3936 | |||
3937 | if (esp->erev == fashme) { | ||
3938 | hme_fifo_push(esp, &esp->cur_msgout[0], 4); | ||
3939 | esp_cmd(esp, ESP_CMD_TI); | ||
3940 | } else { | ||
3941 | dma_setup(esp, esp->esp_command_dvma, 4, 0); | ||
3942 | esp_setcount(esp->eregs, 4, 0); | ||
3943 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3944 | } | ||
3945 | break; | ||
3946 | |||
3947 | case 5: | ||
3948 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3949 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3950 | esp->esp_command[2] = esp->cur_msgout[2]; | ||
3951 | esp->esp_command[3] = esp->cur_msgout[3]; | ||
3952 | esp->esp_command[4] = esp->cur_msgout[4]; | ||
3953 | esp->snip = 1; | ||
3954 | |||
3955 | if (esp->erev == fashme) { | ||
3956 | hme_fifo_push(esp, &esp->cur_msgout[0], 5); | ||
3957 | esp_cmd(esp, ESP_CMD_TI); | ||
3958 | } else { | ||
3959 | dma_setup(esp, esp->esp_command_dvma, 5, 0); | ||
3960 | esp_setcount(esp->eregs, 5, 0); | ||
3961 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3962 | } | ||
3963 | break; | ||
3964 | |||
3965 | default: | ||
3966 | /* whoops */ | ||
3967 | ESPMISC(("bogus msgout sending NOP\n")); | ||
3968 | esp->cur_msgout[0] = NOP; | ||
3969 | |||
3970 | if (esp->erev == fashme) { | ||
3971 | hme_fifo_push(esp, &esp->cur_msgout[0], 1); | ||
3972 | } else { | ||
3973 | sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA); | ||
3974 | } | ||
3975 | |||
3976 | esp->msgout_len = 1; | ||
3977 | esp_cmd(esp, ESP_CMD_TI); | ||
3978 | break; | ||
3979 | }; | ||
3980 | |||
3981 | esp_advance_phase(esp->current_SC, in_msgoutdone); | ||
3982 | return do_intr_end; | ||
3983 | } | ||
3984 | |||
3985 | static int esp_do_msgoutdone(struct esp *esp) | ||
3986 | { | ||
3987 | if (esp->msgout_len > 1) { | ||
3988 | /* XXX HME/FAS ATN deassert workaround required, | ||
3989 | * XXX no DMA flushing, only possible ESP_CMD_FLUSH | ||
3990 | * XXX to kill the fifo. | ||
3991 | */ | ||
3992 | if (esp->erev != fashme) { | ||
3993 | u32 tmp; | ||
3994 | |||
3995 | while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ) | ||
3996 | udelay(1); | ||
3997 | tmp &= ~DMA_ENABLE; | ||
3998 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3999 | dma_invalidate(esp); | ||
4000 | } else { | ||
4001 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
4002 | } | ||
4003 | } | ||
4004 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
4005 | if (esp->erev != fashme) | ||
4006 | esp_cmd(esp, ESP_CMD_NULL); | ||
4007 | switch (esp->sreg & ESP_STAT_PMASK) { | ||
4008 | case ESP_MOP: | ||
4009 | /* whoops, parity error */ | ||
4010 | ESPLOG(("esp%d: still in msgout, parity error assumed\n", | ||
4011 | esp->esp_id)); | ||
4012 | if (esp->msgout_len > 1) | ||
4013 | esp_cmd(esp, ESP_CMD_SATN); | ||
4014 | esp_advance_phase(esp->current_SC, in_msgout); | ||
4015 | return do_work_bus; | ||
4016 | |||
4017 | case ESP_DIP: | ||
4018 | break; | ||
4019 | |||
4020 | default: | ||
4021 | /* Happy Meal fifo is touchy... */ | ||
4022 | if ((esp->erev != fashme) && | ||
4023 | !fcount(esp) && | ||
4024 | !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset)) | ||
4025 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
4026 | break; | ||
4027 | |||
4028 | }; | ||
4029 | } else { | ||
4030 | ESPLOG(("esp%d: disconnect, resetting bus\n", esp->esp_id)); | ||
4031 | return do_reset_bus; | ||
4032 | } | ||
4033 | |||
4034 | /* If we sent out a synchronous negotiation message, update | ||
4035 | * our state. | ||
4036 | */ | ||
4037 | if (esp->cur_msgout[2] == EXTENDED_MESSAGE && | ||
4038 | esp->cur_msgout[4] == EXTENDED_SDTR) { | ||
4039 | esp->snip = 1; /* anal retentiveness... */ | ||
4040 | } | ||
4041 | |||
4042 | esp->prevmsgout = esp->cur_msgout[0]; | ||
4043 | esp->msgout_len = 0; | ||
4044 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
4045 | return esp_do_phase_determine(esp); | ||
4046 | } | ||
4047 | |||
4048 | static int esp_bus_unexpected(struct esp *esp) | ||
4049 | { | ||
4050 | ESPLOG(("esp%d: command in weird state %2x\n", | ||
4051 | esp->esp_id, esp->current_SC->SCp.phase)); | ||
4052 | return do_reset_bus; | ||
4053 | } | ||
4054 | |||
4055 | static espfunc_t bus_vector[] = { | ||
4056 | esp_do_data_finale, | ||
4057 | esp_do_data_finale, | ||
4058 | esp_bus_unexpected, | ||
4059 | esp_do_msgin, | ||
4060 | esp_do_msgincont, | ||
4061 | esp_do_msgindone, | ||
4062 | esp_do_msgout, | ||
4063 | esp_do_msgoutdone, | ||
4064 | esp_do_cmdbegin, | ||
4065 | esp_do_cmddone, | ||
4066 | esp_do_status, | ||
4067 | esp_do_freebus, | ||
4068 | esp_do_phase_determine, | ||
4069 | esp_bus_unexpected, | ||
4070 | esp_bus_unexpected, | ||
4071 | esp_bus_unexpected, | ||
4072 | }; | ||
4073 | |||
4074 | /* This is the second tier in our dual-level SCSI state machine. */ | ||
4075 | static int esp_work_bus(struct esp *esp) | ||
4076 | { | ||
4077 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
4078 | unsigned int phase; | ||
4079 | |||
4080 | ESPBUS(("esp_work_bus: ")); | ||
4081 | if (!SCptr) { | ||
4082 | ESPBUS(("reconnect\n")); | ||
4083 | return esp_do_reconnect(esp); | ||
4084 | } | ||
4085 | phase = SCptr->SCp.phase; | ||
4086 | if ((phase & 0xf0) == in_phases_mask) | ||
4087 | return bus_vector[(phase & 0x0f)](esp); | ||
4088 | else if ((phase & 0xf0) == in_slct_mask) | ||
4089 | return esp_select_complete(esp); | ||
4090 | else | ||
4091 | return esp_bus_unexpected(esp); | ||
4092 | } | ||
4093 | |||
4094 | static espfunc_t isvc_vector[] = { | ||
4095 | NULL, | ||
4096 | esp_do_phase_determine, | ||
4097 | esp_do_resetbus, | ||
4098 | esp_finish_reset, | ||
4099 | esp_work_bus | ||
4100 | }; | ||
4101 | |||
4102 | /* Main interrupt handler for an esp adapter. */ | ||
4103 | static void esp_handle(struct esp *esp) | ||
4104 | { | ||
4105 | struct scsi_cmnd *SCptr; | ||
4106 | int what_next = do_intr_end; | ||
4107 | |||
4108 | SCptr = esp->current_SC; | ||
4109 | |||
4110 | /* Check for errors. */ | ||
4111 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
4112 | esp->sreg &= (~ESP_STAT_INTR); | ||
4113 | if (esp->erev == fashme) { | ||
4114 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
4115 | esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS); | ||
4116 | } | ||
4117 | |||
4118 | if (esp->sreg & (ESP_STAT_SPAM)) { | ||
4119 | /* Gross error, could be due to one of: | ||
4120 | * | ||
4121 | * - top of fifo overwritten, could be because | ||
4122 | * we tried to do a synchronous transfer with | ||
4123 | * an offset greater than ESP fifo size | ||
4124 | * | ||
4125 | * - top of command register overwritten | ||
4126 | * | ||
4127 | * - DMA setup to go in one direction, SCSI | ||
4128 | * bus points in the other, whoops | ||
4129 | * | ||
4130 | * - weird phase change during asynchronous | ||
4131 | * data phase while we are initiator | ||
4132 | */ | ||
4133 | ESPLOG(("esp%d: Gross error sreg=%2x\n", esp->esp_id, esp->sreg)); | ||
4134 | |||
4135 | /* If a command is live on the bus we cannot safely | ||
4136 | * reset the bus, so we'll just let the pieces fall | ||
4137 | * where they may. Here we are hoping that the | ||
4138 | * target will be able to cleanly go away soon | ||
4139 | * so we can safely reset things. | ||
4140 | */ | ||
4141 | if (!SCptr) { | ||
4142 | ESPLOG(("esp%d: No current cmd during gross error, " | ||
4143 | "resetting bus\n", esp->esp_id)); | ||
4144 | what_next = do_reset_bus; | ||
4145 | goto state_machine; | ||
4146 | } | ||
4147 | } | ||
4148 | |||
4149 | if (sbus_readl(esp->dregs + DMA_CSR) & DMA_HNDL_ERROR) { | ||
4150 | /* A DMA gate array error. Here we must | ||
4151 | * be seeing one of two things. Either the | ||
4152 | * virtual to physical address translation | ||
4153 | * on the SBUS could not occur, or the | ||
4154 | * translation it did get pointed to a bogus | ||
4155 | * page. Ho hum... | ||
4156 | */ | ||
4157 | ESPLOG(("esp%d: DMA error %08x\n", esp->esp_id, | ||
4158 | sbus_readl(esp->dregs + DMA_CSR))); | ||
4159 | |||
4160 | /* DMA gate array itself must be reset to clear the | ||
4161 | * error condition. | ||
4162 | */ | ||
4163 | esp_reset_dma(esp); | ||
4164 | |||
4165 | what_next = do_reset_bus; | ||
4166 | goto state_machine; | ||
4167 | } | ||
4168 | |||
4169 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); /* Unlatch intr reg */ | ||
4170 | |||
4171 | if (esp->erev == fashme) { | ||
4172 | /* This chip is really losing. */ | ||
4173 | ESPHME(("HME[")); | ||
4174 | |||
4175 | ESPHME(("sreg2=%02x,", esp->sreg2)); | ||
4176 | /* Must latch the fifo before reading the interrupt | ||
4177 | * register, else garbage ends up in the FIFO, | ||
4178 | * which confuses the driver utterly. | ||
4179 | */ | ||
4180 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
4181 | (esp->sreg2 & ESP_STAT2_F1BYTE)) { | ||
4182 | ESPHME(("fifo_workaround]")); | ||
4183 | hme_fifo_read(esp); | ||
4184 | } else { | ||
4185 | ESPHME(("no_fifo_workaround]")); | ||
4186 | } | ||
4187 | } | ||
4188 | |||
4189 | /* A NULL current cmd is only valid at this point when there are | ||
4190 | * commands off the bus or we are trying a reset. | ||
4191 | */ | ||
4192 | if (!SCptr && !esp->disconnected_SC && !(esp->ireg & ESP_INTR_SR)) { | ||
4193 | /* Panic is safe, since current_SC is null. */ | ||
4194 | ESPLOG(("esp%d: no command in esp_handle()\n", esp->esp_id)); | ||
4195 | panic("esp_handle: current_SC == penguin within interrupt!"); | ||
4196 | } | ||
4197 | |||
4198 | if (esp->ireg & (ESP_INTR_IC)) { | ||
4199 | /* Illegal command fed to ESP. Outside of obvious | ||
4200 | * software bugs that could cause this, there is | ||
4201 | * a condition with esp100 where we can confuse the | ||
4202 | * ESP into an erroneous illegal command interrupt | ||
4203 | * because it does not scrape the FIFO properly | ||
4204 | * for reselection. See esp100_reconnect_hwbug() | ||
4205 | * to see how we try very hard to avoid this. | ||
4206 | */ | ||
4207 | ESPLOG(("esp%d: invalid command\n", esp->esp_id)); | ||
4208 | |||
4209 | esp_dump_state(esp); | ||
4210 | |||
4211 | if (SCptr != NULL) { | ||
4212 | /* Devices with very buggy firmware can drop BSY | ||
4213 | * during a scatter list interrupt when using sync | ||
4214 | * mode transfers. We continue the transfer as | ||
4215 | * expected, the target drops the bus, the ESP | ||
4216 | * gets confused, and we get an illegal command | ||
4217 | * interrupt because the bus is in the disconnected | ||
4218 | * state now and ESP_CMD_TI is only allowed when | ||
4219 | * a nexus is alive on the bus. | ||
4220 | */ | ||
4221 | ESPLOG(("esp%d: Forcing async and disabling disconnect for " | ||
4222 | "target %d\n", esp->esp_id, SCptr->device->id)); | ||
4223 | SCptr->device->borken = 1; /* foo on you */ | ||
4224 | } | ||
4225 | |||
4226 | what_next = do_reset_bus; | ||
4227 | } else if (!(esp->ireg & ~(ESP_INTR_FDONE | ESP_INTR_BSERV | ESP_INTR_DC))) { | ||
4228 | if (SCptr) { | ||
4229 | unsigned int phase = SCptr->SCp.phase; | ||
4230 | |||
4231 | if (phase & in_phases_mask) { | ||
4232 | what_next = esp_work_bus(esp); | ||
4233 | } else if (phase & in_slct_mask) { | ||
4234 | what_next = esp_select_complete(esp); | ||
4235 | } else { | ||
4236 | ESPLOG(("esp%d: interrupt for no good reason...\n", | ||
4237 | esp->esp_id)); | ||
4238 | what_next = do_intr_end; | ||
4239 | } | ||
4240 | } else { | ||
4241 | ESPLOG(("esp%d: BSERV or FDONE or DC while SCptr==NULL\n", | ||
4242 | esp->esp_id)); | ||
4243 | what_next = do_reset_bus; | ||
4244 | } | ||
4245 | } else if (esp->ireg & ESP_INTR_SR) { | ||
4246 | ESPLOG(("esp%d: SCSI bus reset interrupt\n", esp->esp_id)); | ||
4247 | what_next = do_reset_complete; | ||
4248 | } else if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN)) { | ||
4249 | ESPLOG(("esp%d: AIEEE we have been selected by another initiator!\n", | ||
4250 | esp->esp_id)); | ||
4251 | what_next = do_reset_bus; | ||
4252 | } else if (esp->ireg & ESP_INTR_RSEL) { | ||
4253 | if (SCptr == NULL) { | ||
4254 | /* This is ok. */ | ||
4255 | what_next = esp_do_reconnect(esp); | ||
4256 | } else if (SCptr->SCp.phase & in_slct_mask) { | ||
4257 | /* Only selection code knows how to clean | ||
4258 | * up properly. | ||
4259 | */ | ||
4260 | ESPDISC(("Reselected during selection attempt\n")); | ||
4261 | what_next = esp_select_complete(esp); | ||
4262 | } else { | ||
4263 | ESPLOG(("esp%d: Reselected while bus is busy\n", | ||
4264 | esp->esp_id)); | ||
4265 | what_next = do_reset_bus; | ||
4266 | } | ||
4267 | } | ||
4268 | |||
4269 | /* This is tier-one in our dual level SCSI state machine. */ | ||
4270 | state_machine: | ||
4271 | while (what_next != do_intr_end) { | ||
4272 | if (what_next >= do_phase_determine && | ||
4273 | what_next < do_intr_end) { | ||
4274 | what_next = isvc_vector[what_next](esp); | ||
4275 | } else { | ||
4276 | /* state is completely lost ;-( */ | ||
4277 | ESPLOG(("esp%d: interrupt engine loses state, resetting bus\n", | ||
4278 | esp->esp_id)); | ||
4279 | what_next = do_reset_bus; | ||
4280 | } | ||
4281 | } | ||
4282 | } | ||
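The loop above is a table-driven dispatcher: each handler in isvc_vector returns the index of the next handler to run, and the loop only exits when a handler hands back do_intr_end. A minimal sketch of the same idiom with hypothetical names (not the driver's actual handlers):

    /* Hypothetical states; the driver's real ones run from do_phase_determine to do_intr_end. */
    enum step { STEP_WORK = 0, STEP_DONE };
    typedef enum step (*step_fn)(struct esp *esp);

    static enum step step_work(struct esp *esp) { return STEP_DONE; }
    static step_fn steps[] = { [STEP_WORK] = step_work };

    static void run_steps(struct esp *esp, enum step next)
    {
            while (next != STEP_DONE)
                    next = steps[next](esp);   /* each handler names its successor */
    }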
4283 | |||
4284 | /* Service only the ESP described by dev_id. */ | ||
4285 | static irqreturn_t esp_intr(int irq, void *dev_id) | ||
4286 | { | ||
4287 | struct esp *esp = dev_id; | ||
4288 | unsigned long flags; | ||
4289 | |||
4290 | spin_lock_irqsave(esp->ehost->host_lock, flags); | ||
4291 | if (ESP_IRQ_P(esp->dregs)) { | ||
4292 | ESP_INTSOFF(esp->dregs); | ||
4293 | |||
4294 | ESPIRQ(("I[%d:%d](", smp_processor_id(), esp->esp_id)); | ||
4295 | esp_handle(esp); | ||
4296 | ESPIRQ((")")); | ||
4297 | |||
4298 | ESP_INTSON(esp->dregs); | ||
4299 | } | ||
4300 | spin_unlock_irqrestore(esp->ehost->host_lock, flags); | ||
4301 | |||
4302 | return IRQ_HANDLED; | ||
4303 | } | ||
4304 | |||
4305 | static int esp_slave_alloc(struct scsi_device *SDptr) | ||
4306 | { | ||
4307 | struct esp_device *esp_dev = | ||
4308 | kmalloc(sizeof(struct esp_device), GFP_ATOMIC); | ||
4309 | |||
4310 | if (!esp_dev) | ||
4311 | return -ENOMEM; | ||
4312 | memset(esp_dev, 0, sizeof(struct esp_device)); | ||
4313 | SDptr->hostdata = esp_dev; | ||
4314 | return 0; | ||
4315 | } | ||
4316 | |||
4317 | static void esp_slave_destroy(struct scsi_device *SDptr) | ||
4318 | { | ||
4319 | struct esp *esp = (struct esp *) SDptr->host->hostdata; | ||
4320 | |||
4321 | esp->targets_present &= ~(1 << SDptr->id); | ||
4322 | kfree(SDptr->hostdata); | ||
4323 | SDptr->hostdata = NULL; | ||
4324 | } | ||
4325 | |||
4326 | static struct scsi_host_template esp_template = { | ||
4327 | .module = THIS_MODULE, | ||
4328 | .name = "esp", | ||
4329 | .info = esp_info, | ||
4330 | .slave_alloc = esp_slave_alloc, | ||
4331 | .slave_destroy = esp_slave_destroy, | ||
4332 | .queuecommand = esp_queue, | ||
4333 | .eh_abort_handler = esp_abort, | ||
4334 | .eh_bus_reset_handler = esp_reset, | ||
4335 | .can_queue = 7, | ||
4336 | .this_id = 7, | ||
4337 | .sg_tablesize = SG_ALL, | ||
4338 | .cmd_per_lun = 1, | ||
4339 | .use_clustering = ENABLE_CLUSTERING, | ||
4340 | .proc_name = "esp", | ||
4341 | .proc_info = esp_proc_info, | ||
4342 | }; | ||
4343 | |||
4344 | #ifndef CONFIG_SUN4 | ||
4345 | static struct of_device_id esp_match[] = { | ||
4346 | { | ||
4347 | .name = "SUNW,esp", | ||
4348 | .data = &esp_template, | ||
4349 | }, | ||
4350 | { | ||
4351 | .name = "SUNW,fas", | ||
4352 | .data = &esp_template, | ||
4353 | }, | ||
4354 | { | ||
4355 | .name = "esp", | ||
4356 | .data = &esp_template, | ||
4357 | }, | ||
4358 | {}, | ||
4359 | }; | ||
4360 | MODULE_DEVICE_TABLE(of, esp_match); | ||
4361 | |||
4362 | static struct of_platform_driver esp_sbus_driver = { | ||
4363 | .name = "esp", | ||
4364 | .match_table = esp_match, | ||
4365 | .probe = esp_sbus_probe, | ||
4366 | .remove = __devexit_p(esp_sbus_remove), | ||
4367 | }; | ||
4368 | #endif | ||
4369 | |||
4370 | static int __init esp_init(void) | ||
4371 | { | ||
4372 | #ifdef CONFIG_SUN4 | ||
4373 | return esp_sun4_probe(&esp_template); | ||
4374 | #else | ||
4375 | return of_register_driver(&esp_sbus_driver, &sbus_bus_type); | ||
4376 | #endif | ||
4377 | } | ||
4378 | |||
4379 | static void __exit esp_exit(void) | ||
4380 | { | ||
4381 | #ifdef CONFIG_SUN4 | ||
4382 | esp_sun4_remove(); | ||
4383 | #else | ||
4384 | of_unregister_driver(&esp_sbus_driver); | ||
4385 | #endif | ||
4386 | } | ||
4387 | |||
4388 | MODULE_DESCRIPTION("ESP Sun SCSI driver"); | ||
4389 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
4390 | MODULE_LICENSE("GPL"); | ||
4391 | MODULE_VERSION(DRV_VERSION); | ||
4392 | |||
4393 | module_init(esp_init); | ||
4394 | module_exit(esp_exit); | ||
diff --git a/drivers/scsi/esp.h b/drivers/scsi/esp.h deleted file mode 100644 index a98cda9121fc..000000000000 --- a/drivers/scsi/esp.h +++ /dev/null | |||
@@ -1,406 +0,0 @@ | |||
1 | /* $Id: esp.h,v 1.29 2001/12/11 04:55:47 davem Exp $ | ||
2 | * esp.h: Defines and structures for the Sparc ESP (Enhanced SCSI | ||
3 | * Processor) driver under Linux. | ||
4 | * | ||
5 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
6 | */ | ||
7 | |||
8 | #ifndef _SPARC_ESP_H | ||
9 | #define _SPARC_ESP_H | ||
10 | |||
11 | /* For dvma controller register definitions. */ | ||
12 | #include <asm/dma.h> | ||
13 | |||
14 | /* The ESP SCSI controllers have their register sets in three | ||
15 | * "classes": | ||
16 | * | ||
17 | * 1) Registers which are both read and write. | ||
18 | * 2) Registers which are read only. | ||
19 | * 3) Registers which are write only. | ||
20 | * | ||
21 | * Yet, they all live within the same IO space. | ||
22 | */ | ||
23 | |||
24 | /* All the ESP registers are one byte each and are accessed longwords | ||
25 | * apart with a big-endian ordering to the bytes. | ||
26 | */ | ||
27 | /* Access Description Offset */ | ||
28 | #define ESP_TCLOW 0x00UL /* rw Low bits of the transfer count 0x00 */ | ||
29 | #define ESP_TCMED 0x04UL /* rw Mid bits of the transfer count 0x04 */ | ||
30 | #define ESP_FDATA 0x08UL /* rw FIFO data bits 0x08 */ | ||
31 | #define ESP_CMD 0x0cUL /* rw SCSI command bits 0x0c */ | ||
32 | #define ESP_STATUS 0x10UL /* ro ESP status register 0x10 */ | ||
33 | #define ESP_BUSID ESP_STATUS /* wo Bus ID for select/reselect 0x10 */ | ||
34 | #define ESP_INTRPT 0x14UL /* ro Kind of interrupt 0x14 */ | ||
35 | #define ESP_TIMEO ESP_INTRPT /* wo Timeout value for select/resel 0x14 */ | ||
36 | #define ESP_SSTEP 0x18UL /* ro Sequence step register 0x18 */ | ||
37 | #define ESP_STP ESP_SSTEP /* wo Transfer period per sync 0x18 */ | ||
38 | #define ESP_FFLAGS 0x1cUL /* ro Bits of current FIFO info 0x1c */ | ||
39 | #define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */ | ||
40 | #define ESP_CFG1 0x20UL /* rw First configuration register 0x20 */ | ||
41 | #define ESP_CFACT 0x24UL /* wo Clock conversion factor 0x24 */ | ||
42 | #define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */ | ||
43 | #define ESP_CTEST 0x28UL /* wo Chip test register 0x28 */ | ||
44 | #define ESP_CFG2 0x2cUL /* rw Second configuration register 0x2c */ | ||
45 | #define ESP_CFG3 0x30UL /* rw Third configuration register 0x30 */ | ||
46 | #define ESP_TCHI 0x38UL /* rw High bits of transfer count 0x38 */ | ||
47 | #define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ | ||
48 | #define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ | ||
49 | #define ESP_FGRND 0x3cUL /* rw Data base for fifo 0x3c */ | ||
50 | #define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */ | ||
51 | #define ESP_REG_SIZE 0x40UL | ||
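Because every register is one byte wide but the offsets above advance in longwords, a register access is simply a byte read or write at the mapped base plus the ESP_* offset; the driver open-codes this with sbus_readb()/sbus_writeb(). A minimal sketch, assuming a hypothetical helper name:

    /* Hypothetical wrapper; the driver calls sbus_readb(esp->eregs + ESP_xxx) directly. */
    static inline u8 esp_read_reg(struct esp *esp, unsigned long offset)
    {
            return sbus_readb(esp->eregs + offset);   /* e.g. offset == ESP_INTRPT (0x14) */
    }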
52 | |||
53 | /* Various revisions of the ESP board. */ | ||
54 | enum esp_rev { | ||
55 | esp100 = 0x00, /* NCR53C90 - very broken */ | ||
56 | esp100a = 0x01, /* NCR53C90A */ | ||
57 | esp236 = 0x02, | ||
58 | fas236 = 0x03, | ||
59 | fas100a = 0x04, | ||
60 | fast = 0x05, | ||
61 | fashme = 0x06, | ||
62 | espunknown = 0x07 | ||
63 | }; | ||
64 | |||
65 | /* We allocate one of these for each scsi device and attach it to | ||
66 | * SDptr->hostdata for use in the driver | ||
67 | */ | ||
68 | struct esp_device { | ||
69 | unsigned char sync_min_period; | ||
70 | unsigned char sync_max_offset; | ||
71 | unsigned sync:1; | ||
72 | unsigned wide:1; | ||
73 | unsigned disconnect:1; | ||
74 | }; | ||
75 | |||
76 | struct scsi_cmnd; | ||
77 | |||
78 | /* We get one of these for each ESP probed. */ | ||
79 | struct esp { | ||
80 | void __iomem *eregs; /* ESP controller registers */ | ||
81 | void __iomem *dregs; /* DMA controller registers */ | ||
82 | struct sbus_dma *dma; /* DMA controller sw state */ | ||
83 | struct Scsi_Host *ehost; /* Backpointer to SCSI Host */ | ||
84 | struct sbus_dev *sdev; /* Pointer to SBus entry */ | ||
85 | |||
86 | /* ESP Configuration Registers */ | ||
87 | u8 config1; /* Copy of the 1st config register */ | ||
88 | u8 config2; /* Copy of the 2nd config register */ | ||
89 | u8 config3[16]; /* Copy of the 3rd config register */ | ||
90 | |||
91 | /* The current command we are sending to the ESP chip. This esp_command | ||
92 | * ptr needs to be mapped in DVMA area so we can send commands and read | ||
93 | * from the ESP fifo without burning precious CPU cycles. Programmed I/O | ||
94 | * sucks when we have the DVMA to do it for us. The ESP is stupid and will | ||
95 | * only send out 6, 10, and 12 byte SCSI commands; other lengths we need to send | ||
96 | * one byte at a time. esp_slowcmd being set says that we are doing one | ||
97 | * of the command types ESP doesn't understand, esp_scmdp keeps track of | ||
98 | * which byte we are sending, esp_scmdleft says how many bytes to go. | ||
99 | */ | ||
100 | volatile u8 *esp_command; /* Location of command (CPU view) */ | ||
101 | __u32 esp_command_dvma;/* Location of command (DVMA view) */ | ||
102 | unsigned char esp_clen; /* Length of this command */ | ||
103 | unsigned char esp_slowcmd; | ||
104 | unsigned char *esp_scmdp; | ||
105 | unsigned char esp_scmdleft; | ||
106 | |||
107 | /* The following are used to determine the cause of an IRQ. Upon every | ||
108 | * IRQ entry we synchronize these with the hardware registers. | ||
109 | */ | ||
110 | u8 ireg; /* Copy of ESP interrupt register */ | ||
111 | u8 sreg; /* Copy of ESP status register */ | ||
112 | u8 seqreg; /* Copy of ESP sequence step register */ | ||
113 | u8 sreg2; /* Copy of HME status2 register */ | ||
114 | |||
115 | /* To save register writes to the ESP, which can be expensive, we | ||
116 | * keep track of the previous value that various registers had for | ||
117 | * the last target we connected to. If they are the same for the | ||
118 | * current target, we skip the register writes as they are not needed. | ||
119 | */ | ||
120 | u8 prev_soff, prev_stp; | ||
121 | u8 prev_cfg3, __cache_pad; | ||
122 | |||
123 | /* We also keep a cache of the previous FAS/HME DMA CSR register value. */ | ||
124 | u32 prev_hme_dmacsr; | ||
125 | |||
126 | /* The HME is the biggest piece of shit I have ever seen. */ | ||
127 | u8 hme_fifo_workaround_buffer[16 * 2]; | ||
128 | u8 hme_fifo_workaround_count; | ||
129 | |||
130 | /* For each target we keep track of save/restore data | ||
131 | * pointer information. This needs to be updated majorly | ||
132 | * when we add support for tagged queueing. -DaveM | ||
133 | */ | ||
134 | struct esp_pointers { | ||
135 | char *saved_ptr; | ||
136 | struct scatterlist *saved_buffer; | ||
137 | int saved_this_residual; | ||
138 | int saved_buffers_residual; | ||
139 | } data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/; | ||
140 | |||
141 | /* Clock periods, frequencies, synchronization, etc. */ | ||
142 | unsigned int cfreq; /* Clock frequency in HZ */ | ||
143 | unsigned int cfact; /* Clock conversion factor */ | ||
144 | unsigned int raw_cfact; /* Raw copy from probing */ | ||
145 | unsigned int ccycle; /* One ESP clock cycle */ | ||
146 | unsigned int ctick; /* One ESP clock time */ | ||
147 | unsigned int radelay; /* FAST chip req/ack delay */ | ||
148 | unsigned int neg_defp; /* Default negotiation period */ | ||
149 | unsigned int sync_defp; /* Default sync transfer period */ | ||
150 | unsigned int max_period; /* longest our period can be */ | ||
151 | unsigned int min_period; /* shortest period we can withstand */ | ||
152 | |||
153 | struct esp *next; /* Next ESP we probed or NULL */ | ||
154 | char prom_name[64]; /* Name of ESP device from prom */ | ||
155 | int prom_node; /* Prom node where ESP found */ | ||
156 | int esp_id; /* Unique per-ESP ID number */ | ||
157 | |||
158 | /* For slow to medium speed input clock rates we shoot for 5mb/s, | ||
159 | * but for high input clock rates we try to do 10mb/s although I | ||
160 | * don't think a transfer can even run that fast with an ESP even | ||
161 | * with DMA2 scatter gather pipelining. | ||
162 | */ | ||
163 | #define SYNC_DEFP_SLOW 0x32 /* 5mb/s */ | ||
164 | #define SYNC_DEFP_FAST 0x19 /* 10mb/s */ | ||
165 | |||
166 | unsigned int snip; /* Sync. negotiation in progress */ | ||
167 | unsigned int wnip; /* WIDE negotiation in progress */ | ||
168 | unsigned int targets_present;/* targets spoken to before */ | ||
169 | |||
170 | int current_transfer_size; /* Set at beginning of data dma */ | ||
171 | |||
172 | u8 espcmdlog[32]; /* Log of current esp cmds sent. */ | ||
173 | u8 espcmdent; /* Current entry in esp cmd log. */ | ||
174 | |||
175 | /* Misc. info about this ESP */ | ||
176 | enum esp_rev erev; /* ESP revision */ | ||
177 | int irq; /* SBus IRQ for this ESP */ | ||
178 | int scsi_id; /* Who am I as initiator? */ | ||
179 | int scsi_id_mask; /* Bitmask of 'me'. */ | ||
180 | int diff; /* Differential SCSI bus? */ | ||
181 | int bursts; /* Burst sizes our DVMA supports */ | ||
182 | |||
183 | /* Our command queues, only one cmd lives in the current_SC queue. */ | ||
184 | struct scsi_cmnd *issue_SC; /* Commands to be issued */ | ||
185 | struct scsi_cmnd *current_SC; /* Who is currently working the bus */ | ||
186 | struct scsi_cmnd *disconnected_SC;/* Commands disconnected from the bus */ | ||
187 | |||
188 | /* Message goo */ | ||
189 | u8 cur_msgout[16]; | ||
190 | u8 cur_msgin[16]; | ||
191 | u8 prevmsgout, prevmsgin; | ||
192 | u8 msgout_len, msgin_len; | ||
193 | u8 msgout_ctr, msgin_ctr; | ||
194 | |||
195 | /* States that we cannot keep in the per cmd structure because they | ||
196 | * cannot be associated with any specific command. | ||
197 | */ | ||
198 | u8 resetting_bus; | ||
199 | wait_queue_head_t reset_queue; | ||
200 | }; | ||
201 | |||
202 | /* Bitfield meanings for the above registers. */ | ||
203 | |||
204 | /* ESP config reg 1, read-write, found on all ESP chips */ | ||
205 | #define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */ | ||
206 | #define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */ | ||
207 | #define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */ | ||
208 | #define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */ | ||
209 | #define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */ | ||
210 | #define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */ | ||
211 | |||
212 | /* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */ | ||
213 | #define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */ | ||
214 | #define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */ | ||
215 | #define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */ | ||
216 | #define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tmode only) */ | ||
217 | #define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */ | ||
218 | #define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */ | ||
219 | #define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */ | ||
220 | #define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */ | ||
221 | #define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216) */ | ||
222 | #define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */ | ||
223 | #define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */ | ||
224 | #define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */ | ||
225 | #define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */ | ||
226 | |||
227 | /* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */ | ||
228 | #define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */ | ||
229 | #define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */ | ||
230 | #define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */ | ||
231 | #define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */ | ||
232 | #define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */ | ||
233 | #define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */ | ||
234 | #define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */ | ||
235 | #define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */ | ||
236 | #define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */ | ||
237 | #define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */ | ||
238 | #define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */ | ||
239 | #define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */ | ||
240 | #define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */ | ||
241 | #define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */ | ||
242 | #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ | ||
243 | #define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ | ||
244 | |||
245 | /* ESP command register read-write */ | ||
246 | /* Group 1 commands: These may be sent at any point in time to the ESP | ||
247 | * chip. None of them can generate interrupts 'cept | ||
248 | * the "SCSI bus reset" command if you have not disabled | ||
249 | * SCSI reset interrupts in the config1 ESP register. | ||
250 | */ | ||
251 | #define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */ | ||
252 | #define ESP_CMD_FLUSH 0x01 /* FIFO Flush */ | ||
253 | #define ESP_CMD_RC 0x02 /* Chip reset */ | ||
254 | #define ESP_CMD_RS 0x03 /* SCSI bus reset */ | ||
255 | |||
256 | /* Group 2 commands: ESP must be an initiator and connected to a target | ||
257 | * for these commands to work. | ||
258 | */ | ||
259 | #define ESP_CMD_TI 0x10 /* Transfer Information */ | ||
260 | #define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */ | ||
261 | #define ESP_CMD_MOK 0x12 /* Message okie-dokie */ | ||
262 | #define ESP_CMD_TPAD 0x18 /* Transfer Pad */ | ||
263 | #define ESP_CMD_SATN 0x1a /* Set ATN */ | ||
264 | #define ESP_CMD_RATN 0x1b /* De-assert ATN */ | ||
265 | |||
266 | /* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected | ||
267 | * to a target as the initiator for these commands to work. | ||
268 | */ | ||
269 | #define ESP_CMD_SMSG 0x20 /* Send message */ | ||
270 | #define ESP_CMD_SSTAT 0x21 /* Send status */ | ||
271 | #define ESP_CMD_SDATA 0x22 /* Send data */ | ||
272 | #define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */ | ||
273 | #define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */ | ||
274 | #define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */ | ||
275 | #define ESP_CMD_DCNCT 0x27 /* Disconnect */ | ||
276 | #define ESP_CMD_RMSG 0x28 /* Receive Message */ | ||
277 | #define ESP_CMD_RCMD 0x29 /* Receive Command */ | ||
278 | #define ESP_CMD_RDATA 0x2a /* Receive Data */ | ||
279 | #define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */ | ||
280 | |||
281 | /* Group 4 commands: The ESP must be in the disconnected state and must | ||
282 | * not be connected to any targets as initiator for | ||
283 | * these commands to work. | ||
284 | */ | ||
285 | #define ESP_CMD_RSEL 0x40 /* Reselect */ | ||
286 | #define ESP_CMD_SEL 0x41 /* Select w/o ATN */ | ||
287 | #define ESP_CMD_SELA 0x42 /* Select w/ATN */ | ||
288 | #define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */ | ||
289 | #define ESP_CMD_ESEL 0x44 /* Enable selection */ | ||
290 | #define ESP_CMD_DSEL 0x45 /* Disable selections */ | ||
291 | #define ESP_CMD_SA3 0x46 /* Select w/ATN3 */ | ||
292 | #define ESP_CMD_RSEL3 0x47 /* Reselect3 */ | ||
293 | |||
294 | /* This bit enables the ESP's DMA on the SBus */ | ||
295 | #define ESP_CMD_DMA 0x80 /* Do DMA? */ | ||
296 | |||
297 | |||
298 | /* ESP status register read-only */ | ||
299 | #define ESP_STAT_PIO 0x01 /* IO phase bit */ | ||
300 | #define ESP_STAT_PCD 0x02 /* CD phase bit */ | ||
301 | #define ESP_STAT_PMSG 0x04 /* MSG phase bit */ | ||
302 | #define ESP_STAT_PMASK 0x07 /* Mask of phase bits */ | ||
303 | #define ESP_STAT_TDONE 0x08 /* Transfer Completed */ | ||
304 | #define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */ | ||
305 | #define ESP_STAT_PERR 0x20 /* Parity error */ | ||
306 | #define ESP_STAT_SPAM 0x40 /* Real bad error */ | ||
307 | /* This indicates the 'interrupt pending' condition on esp236, it is a reserved | ||
308 | * bit on other revs of the ESP. | ||
309 | */ | ||
310 | #define ESP_STAT_INTR 0x80 /* Interrupt */ | ||
311 | |||
312 | /* HME only: status 2 register */ | ||
313 | #define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */ | ||
314 | #define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */ | ||
315 | #define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */ | ||
316 | #define ESP_STAT2_CREGA 0x08 /* The command reg is active now */ | ||
317 | #define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */ | ||
318 | #define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */ | ||
319 | #define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */ | ||
320 | #define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */ | ||
321 | |||
322 | /* The status register can be masked with ESP_STAT_PMASK and compared | ||
323 | * with the following values to determine the current phase the ESP | ||
324 | * (at least thinks it) is in. For our purposes we also add our own | ||
325 | * software 'done' bit for our phase management engine. | ||
326 | */ | ||
327 | #define ESP_DOP (0) /* Data Out */ | ||
328 | #define ESP_DIP (ESP_STAT_PIO) /* Data In */ | ||
329 | #define ESP_CMDP (ESP_STAT_PCD) /* Command */ | ||
330 | #define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */ | ||
331 | #define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */ | ||
332 | #define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */ | ||
333 | |||
334 | /* ESP interrupt register read-only */ | ||
335 | #define ESP_INTR_S 0x01 /* Select w/o ATN */ | ||
336 | #define ESP_INTR_SATN 0x02 /* Select w/ATN */ | ||
337 | #define ESP_INTR_RSEL 0x04 /* Reselected */ | ||
338 | #define ESP_INTR_FDONE 0x08 /* Function done */ | ||
339 | #define ESP_INTR_BSERV 0x10 /* Bus service */ | ||
340 | #define ESP_INTR_DC 0x20 /* Disconnect */ | ||
341 | #define ESP_INTR_IC 0x40 /* Illegal command given */ | ||
342 | #define ESP_INTR_SR 0x80 /* SCSI bus reset detected */ | ||
343 | |||
344 | /* Interrupt status macros */ | ||
345 | #define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR)) | ||
346 | #define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC)) | ||
347 | #define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN)) | ||
348 | #define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S)) | ||
349 | #define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \ | ||
350 | (ESP_SELECT_WITHOUT_ATN_IRQ(esp))) | ||
351 | #define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL)) | ||
352 | |||
353 | /* ESP sequence step register read-only */ | ||
354 | #define ESP_STEP_VBITS 0x07 /* Valid bits */ | ||
355 | #define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */ | ||
356 | #define ESP_STEP_SID 0x01 /* One msg byte sent */ | ||
357 | #define ESP_STEP_NCMD 0x02 /* Was not in command phase */ | ||
358 | #define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd | ||
359 | * bytes to be lost | ||
360 | */ | ||
361 | #define ESP_STEP_FINI4 0x04 /* Command was sent ok */ | ||
362 | |||
363 | /* Ho hum, some ESP's set the step register to this as well... */ | ||
364 | #define ESP_STEP_FINI5 0x05 | ||
365 | #define ESP_STEP_FINI6 0x06 | ||
366 | #define ESP_STEP_FINI7 0x07 | ||
367 | |||
368 | /* ESP chip-test register read-write */ | ||
369 | #define ESP_TEST_TARG 0x01 /* Target test mode */ | ||
370 | #define ESP_TEST_INI 0x02 /* Initiator test mode */ | ||
371 | #define ESP_TEST_TS 0x04 /* Tristate test mode */ | ||
372 | |||
373 | /* ESP unique ID register read-only, found on fas236+fas100a only */ | ||
374 | #define ESP_UID_F100A 0x00 /* ESP FAS100A */ | ||
375 | #define ESP_UID_F236 0x02 /* ESP FAS236 */ | ||
376 | #define ESP_UID_REV 0x07 /* ESP revision */ | ||
377 | #define ESP_UID_FAM 0xf8 /* ESP family */ | ||
378 | |||
379 | /* ESP fifo flags register read-only */ | ||
380 | /* Note that the following implies a 16 byte FIFO on the ESP. */ | ||
381 | #define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */ | ||
382 | #define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */ | ||
383 | #define ESP_FF_SSTEP 0xe0 /* Sequence step */ | ||
384 | |||
385 | /* ESP clock conversion factor register write-only */ | ||
386 | #define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */ | ||
387 | #define ESP_CCF_NEVER 0x01 /* Set it to this and die */ | ||
388 | #define ESP_CCF_F2 0x02 /* 10MHz */ | ||
389 | #define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */ | ||
390 | #define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */ | ||
391 | #define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */ | ||
392 | #define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */ | ||
393 | #define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */ | ||
394 | |||
395 | /* HME only... */ | ||
396 | #define ESP_BUSID_RESELID 0x10 | ||
397 | #define ESP_BUSID_CTR32BIT 0x40 | ||
398 | |||
399 | #define ESP_BUS_TIMEOUT 275 /* In milli-seconds */ | ||
400 | #define ESP_TIMEO_CONST 8192 | ||
401 | #define ESP_NEG_DEFP(mhz, cfact) \ | ||
402 | ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) | ||
403 | #define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000)) | ||
404 | #define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) | ||
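A worked example of the macros above, assuming a 25 MHz input clock (mhertz = 25000000) and the ESP_CCF_F5 conversion factor of 5; the numbers are illustrative, not taken from a probed board:

    /* ESP_MHZ_TO_CYCLE(25000000) = 1000000000 / (25000000 / 1000)
     *                            = 1000000000 / 25000 = 40000   (a 40 ns clock cycle, scaled by 1000)
     * ESP_NEG_DEFP(25000000, 5)  = (275 * (25000000 / 1000)) / (8192 * 5)
     *                            = 6875000 / 40960 = 167        (value programmed into ESP_TIMEO)
     */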
405 | |||
406 | #endif /* !(_SPARC_ESP_H) */ | ||
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c new file mode 100644 index 000000000000..3cd5bf723da4 --- /dev/null +++ b/drivers/scsi/esp_scsi.c | |||
@@ -0,0 +1,2710 @@ | |||
1 | /* esp_scsi.c: ESP SCSI driver. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <linux/list.h> | ||
11 | #include <linux/completion.h> | ||
12 | #include <linux/kallsyms.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/init.h> | ||
16 | |||
17 | #include <asm/irq.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <asm/dma.h> | ||
20 | |||
21 | #include <scsi/scsi.h> | ||
22 | #include <scsi/scsi_host.h> | ||
23 | #include <scsi/scsi_cmnd.h> | ||
24 | #include <scsi/scsi_device.h> | ||
25 | #include <scsi/scsi_tcq.h> | ||
26 | #include <scsi/scsi_dbg.h> | ||
27 | #include <scsi/scsi_transport_spi.h> | ||
28 | |||
29 | #include "esp_scsi.h" | ||
30 | |||
31 | #define DRV_MODULE_NAME "esp" | ||
32 | #define PFX DRV_MODULE_NAME ": " | ||
33 | #define DRV_VERSION "2.000" | ||
34 | #define DRV_MODULE_RELDATE "April 19, 2007" | ||
35 | |||
36 | /* SCSI bus reset settle time in seconds. */ | ||
37 | static int esp_bus_reset_settle = 3; | ||
38 | |||
39 | static u32 esp_debug; | ||
40 | #define ESP_DEBUG_INTR 0x00000001 | ||
41 | #define ESP_DEBUG_SCSICMD 0x00000002 | ||
42 | #define ESP_DEBUG_RESET 0x00000004 | ||
43 | #define ESP_DEBUG_MSGIN 0x00000008 | ||
44 | #define ESP_DEBUG_MSGOUT 0x00000010 | ||
45 | #define ESP_DEBUG_CMDDONE 0x00000020 | ||
46 | #define ESP_DEBUG_DISCONNECT 0x00000040 | ||
47 | #define ESP_DEBUG_DATASTART 0x00000080 | ||
48 | #define ESP_DEBUG_DATADONE 0x00000100 | ||
49 | #define ESP_DEBUG_RECONNECT 0x00000200 | ||
50 | #define ESP_DEBUG_AUTOSENSE 0x00000400 | ||
51 | |||
52 | #define esp_log_intr(f, a...) \ | ||
53 | do { if (esp_debug & ESP_DEBUG_INTR) \ | ||
54 | printk(f, ## a); \ | ||
55 | } while (0) | ||
56 | |||
57 | #define esp_log_reset(f, a...) \ | ||
58 | do { if (esp_debug & ESP_DEBUG_RESET) \ | ||
59 | printk(f, ## a); \ | ||
60 | } while (0) | ||
61 | |||
62 | #define esp_log_msgin(f, a...) \ | ||
63 | do { if (esp_debug & ESP_DEBUG_MSGIN) \ | ||
64 | printk(f, ## a); \ | ||
65 | } while (0) | ||
66 | |||
67 | #define esp_log_msgout(f, a...) \ | ||
68 | do { if (esp_debug & ESP_DEBUG_MSGOUT) \ | ||
69 | printk(f, ## a); \ | ||
70 | } while (0) | ||
71 | |||
72 | #define esp_log_cmddone(f, a...) \ | ||
73 | do { if (esp_debug & ESP_DEBUG_CMDDONE) \ | ||
74 | printk(f, ## a); \ | ||
75 | } while (0) | ||
76 | |||
77 | #define esp_log_disconnect(f, a...) \ | ||
78 | do { if (esp_debug & ESP_DEBUG_DISCONNECT) \ | ||
79 | printk(f, ## a); \ | ||
80 | } while (0) | ||
81 | |||
82 | #define esp_log_datastart(f, a...) \ | ||
83 | do { if (esp_debug & ESP_DEBUG_DATASTART) \ | ||
84 | printk(f, ## a); \ | ||
85 | } while (0) | ||
86 | |||
87 | #define esp_log_datadone(f, a...) \ | ||
88 | do { if (esp_debug & ESP_DEBUG_DATADONE) \ | ||
89 | printk(f, ## a); \ | ||
90 | } while (0) | ||
91 | |||
92 | #define esp_log_reconnect(f, a...) \ | ||
93 | do { if (esp_debug & ESP_DEBUG_RECONNECT) \ | ||
94 | printk(f, ## a); \ | ||
95 | } while (0) | ||
96 | |||
97 | #define esp_log_autosense(f, a...) \ | ||
98 | do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \ | ||
99 | printk(f, ## a); \ | ||
100 | } while (0) | ||
101 | |||
102 | #define esp_read8(REG) esp->ops->esp_read8(esp, REG) | ||
103 | #define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG) | ||
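Both the logging macros and the register accessors above are written against a call-site convention: a local variable named esp must be in scope, and the accessors dispatch through the bus-specific ops vector. A small illustrative sketch (the message text is made up):

    static void example_unlatch(struct esp *esp)
    {
            esp->ireg = esp_read8(ESP_INTRPT);                      /* expands to esp->ops->esp_read8(esp, ESP_INTRPT) */
            esp_log_intr("esp: unlatched ireg[%02x]\n", esp->ireg); /* prints only if ESP_DEBUG_INTR is set in esp_debug */
    }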
104 | |||
105 | static void esp_log_fill_regs(struct esp *esp, | ||
106 | struct esp_event_ent *p) | ||
107 | { | ||
108 | p->sreg = esp->sreg; | ||
109 | p->seqreg = esp->seqreg; | ||
110 | p->sreg2 = esp->sreg2; | ||
111 | p->ireg = esp->ireg; | ||
112 | p->select_state = esp->select_state; | ||
113 | p->event = esp->event; | ||
114 | } | ||
115 | |||
116 | void scsi_esp_cmd(struct esp *esp, u8 val) | ||
117 | { | ||
118 | struct esp_event_ent *p; | ||
119 | int idx = esp->esp_event_cur; | ||
120 | |||
121 | p = &esp->esp_event_log[idx]; | ||
122 | p->type = ESP_EVENT_TYPE_CMD; | ||
123 | p->val = val; | ||
124 | esp_log_fill_regs(esp, p); | ||
125 | |||
126 | esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
127 | |||
128 | esp_write8(val, ESP_CMD); | ||
129 | } | ||
130 | EXPORT_SYMBOL(scsi_esp_cmd); | ||
131 | |||
132 | static void esp_event(struct esp *esp, u8 val) | ||
133 | { | ||
134 | struct esp_event_ent *p; | ||
135 | int idx = esp->esp_event_cur; | ||
136 | |||
137 | p = &esp->esp_event_log[idx]; | ||
138 | p->type = ESP_EVENT_TYPE_EVENT; | ||
139 | p->val = val; | ||
140 | esp_log_fill_regs(esp, p); | ||
141 | |||
142 | esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
143 | |||
144 | esp->event = val; | ||
145 | } | ||
146 | |||
147 | static void esp_dump_cmd_log(struct esp *esp) | ||
148 | { | ||
149 | int idx = esp->esp_event_cur; | ||
150 | int stop = idx; | ||
151 | |||
152 | printk(KERN_INFO PFX "esp%d: Dumping command log\n", | ||
153 | esp->host->unique_id); | ||
154 | do { | ||
155 | struct esp_event_ent *p = &esp->esp_event_log[idx]; | ||
156 | |||
157 | printk(KERN_INFO PFX "esp%d: ent[%d] %s ", | ||
158 | esp->host->unique_id, idx, | ||
159 | p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT"); | ||
160 | |||
161 | printk("val[%02x] sreg[%02x] seqreg[%02x] " | ||
162 | "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n", | ||
163 | p->val, p->sreg, p->seqreg, | ||
164 | p->sreg2, p->ireg, p->select_state, p->event); | ||
165 | |||
166 | idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
167 | } while (idx != stop); | ||
168 | } | ||
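The trace above is a fixed-size ring buffer: both writers advance the index with a mask, which relies on ESP_EVENT_LOG_SZ being a power of two, and the dump starts at esp_event_cur so entries print oldest-first. The wrap idiom in isolation, with an assumed size:

    #define RING_SZ 32                               /* assumed; must be a power of two */
    static unsigned int ring_next(unsigned int idx)
    {
            return (idx + 1) & (RING_SZ - 1);        /* 31 wraps to 0 without a branch */
    }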
169 | |||
170 | static void esp_flush_fifo(struct esp *esp) | ||
171 | { | ||
172 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
173 | if (esp->rev == ESP236) { | ||
174 | int lim = 1000; | ||
175 | |||
176 | while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) { | ||
177 | if (--lim == 0) { | ||
178 | printk(KERN_ALERT PFX "esp%d: ESP_FF_FBYTES " | ||
179 | "will not clear!\n", | ||
180 | esp->host->unique_id); | ||
181 | break; | ||
182 | } | ||
183 | udelay(1); | ||
184 | } | ||
185 | } | ||
186 | } | ||
187 | |||
188 | static void hme_read_fifo(struct esp *esp) | ||
189 | { | ||
190 | int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
191 | int idx = 0; | ||
192 | |||
193 | while (fcnt--) { | ||
194 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
195 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
196 | } | ||
197 | if (esp->sreg2 & ESP_STAT2_F1BYTE) { | ||
198 | esp_write8(0, ESP_FDATA); | ||
199 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
200 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
201 | } | ||
202 | esp->fifo_cnt = idx; | ||
203 | } | ||
204 | |||
205 | static void esp_set_all_config3(struct esp *esp, u8 val) | ||
206 | { | ||
207 | int i; | ||
208 | |||
209 | for (i = 0; i < ESP_MAX_TARGET; i++) | ||
210 | esp->target[i].esp_config3 = val; | ||
211 | } | ||
212 | |||
213 | /* Reset the ESP chip, _not_ the SCSI bus. */ | ||
214 | static void esp_reset_esp(struct esp *esp) | ||
215 | { | ||
216 | u8 family_code, version; | ||
217 | |||
218 | /* Now reset the ESP chip */ | ||
219 | scsi_esp_cmd(esp, ESP_CMD_RC); | ||
220 | scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
221 | scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
222 | |||
223 | /* Reload the configuration registers */ | ||
224 | esp_write8(esp->cfact, ESP_CFACT); | ||
225 | |||
226 | esp->prev_stp = 0; | ||
227 | esp_write8(esp->prev_stp, ESP_STP); | ||
228 | |||
229 | esp->prev_soff = 0; | ||
230 | esp_write8(esp->prev_soff, ESP_SOFF); | ||
231 | |||
232 | esp_write8(esp->neg_defp, ESP_TIMEO); | ||
233 | |||
234 | /* This is the only point at which it is reliable to read | ||
235 | * the ID-code for the fast ESP chip variants. | ||
236 | */ | ||
237 | esp->max_period = ((35 * esp->ccycle) / 1000); | ||
238 | if (esp->rev == FAST) { | ||
239 | version = esp_read8(ESP_UID); | ||
240 | family_code = (version & 0xf8) >> 3; | ||
241 | if (family_code == 0x02) | ||
242 | esp->rev = FAS236; | ||
243 | else if (family_code == 0x0a) | ||
244 | esp->rev = FASHME; /* Version is usually '5'. */ | ||
245 | else | ||
246 | esp->rev = FAS100A; | ||
247 | esp->min_period = ((4 * esp->ccycle) / 1000); | ||
248 | } else { | ||
249 | esp->min_period = ((5 * esp->ccycle) / 1000); | ||
250 | } | ||
251 | esp->max_period = (esp->max_period + 3)>>2; | ||
252 | esp->min_period = (esp->min_period + 3)>>2; | ||
253 | |||
254 | esp_write8(esp->config1, ESP_CFG1); | ||
255 | switch (esp->rev) { | ||
256 | case ESP100: | ||
257 | /* nothing to do */ | ||
258 | break; | ||
259 | |||
260 | case ESP100A: | ||
261 | esp_write8(esp->config2, ESP_CFG2); | ||
262 | break; | ||
263 | |||
264 | case ESP236: | ||
265 | /* Slow 236 */ | ||
266 | esp_write8(esp->config2, ESP_CFG2); | ||
267 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
268 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
269 | break; | ||
270 | |||
271 | case FASHME: | ||
272 | esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); | ||
273 | /* fallthrough... */ | ||
274 | |||
275 | case FAS236: | ||
276 | /* Fast 236 or HME */ | ||
277 | esp_write8(esp->config2, ESP_CFG2); | ||
278 | if (esp->rev == FASHME) { | ||
279 | u8 cfg3 = esp->target[0].esp_config3; | ||
280 | |||
281 | cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH; | ||
282 | if (esp->scsi_id >= 8) | ||
283 | cfg3 |= ESP_CONFIG3_IDBIT3; | ||
284 | esp_set_all_config3(esp, cfg3); | ||
285 | } else { | ||
286 | u32 cfg3 = esp->target[0].esp_config3; | ||
287 | |||
288 | cfg3 |= ESP_CONFIG3_FCLK; | ||
289 | esp_set_all_config3(esp, cfg3); | ||
290 | } | ||
291 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
292 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
293 | if (esp->rev == FASHME) { | ||
294 | esp->radelay = 80; | ||
295 | } else { | ||
296 | if (esp->flags & ESP_FLAG_DIFFERENTIAL) | ||
297 | esp->radelay = 0; | ||
298 | else | ||
299 | esp->radelay = 96; | ||
300 | } | ||
301 | break; | ||
302 | |||
303 | case FAS100A: | ||
304 | /* Fast 100a */ | ||
305 | esp_write8(esp->config2, ESP_CFG2); | ||
306 | esp_set_all_config3(esp, | ||
307 | (esp->target[0].esp_config3 | | ||
308 | ESP_CONFIG3_FCLOCK)); | ||
309 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
310 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
311 | esp->radelay = 32; | ||
312 | break; | ||
313 | |||
314 | default: | ||
315 | break; | ||
316 | } | ||
317 | |||
318 | /* Eat any bitrot in the chip */ | ||
319 | esp_read8(ESP_INTRPT); | ||
320 | udelay(100); | ||
321 | } | ||
322 | |||
323 | static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd) | ||
324 | { | ||
325 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
326 | struct scatterlist *sg = cmd->request_buffer; | ||
327 | int dir = cmd->sc_data_direction; | ||
328 | int total, i; | ||
329 | |||
330 | if (dir == DMA_NONE) | ||
331 | return; | ||
332 | |||
333 | BUG_ON(cmd->use_sg == 0); | ||
334 | |||
335 | spriv->u.num_sg = esp->ops->map_sg(esp, sg, | ||
336 | cmd->use_sg, dir); | ||
337 | spriv->cur_residue = sg_dma_len(sg); | ||
338 | spriv->cur_sg = sg; | ||
339 | |||
340 | total = 0; | ||
341 | for (i = 0; i < spriv->u.num_sg; i++) | ||
342 | total += sg_dma_len(&sg[i]); | ||
343 | spriv->tot_residue = total; | ||
344 | } | ||
345 | |||
346 | static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent, | ||
347 | struct scsi_cmnd *cmd) | ||
348 | { | ||
349 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
350 | |||
351 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
352 | return ent->sense_dma + | ||
353 | (ent->sense_ptr - cmd->sense_buffer); | ||
354 | } | ||
355 | |||
356 | return sg_dma_address(p->cur_sg) + | ||
357 | (sg_dma_len(p->cur_sg) - | ||
358 | p->cur_residue); | ||
359 | } | ||
360 | |||
361 | static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent, | ||
362 | struct scsi_cmnd *cmd) | ||
363 | { | ||
364 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
365 | |||
366 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
367 | return SCSI_SENSE_BUFFERSIZE - | ||
368 | (ent->sense_ptr - cmd->sense_buffer); | ||
369 | } | ||
370 | return p->cur_residue; | ||
371 | } | ||
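cur_residue counts down within the current scatterlist entry, so the live DMA address is that entry's base plus the bytes already consumed from it. A hedged numeric example (addresses and lengths assumed):

    /* One S/G entry mapped at dma address 0x10000 with length 0x2000.
     * After 0x800 bytes have moved, cur_residue == 0x1800, so:
     *
     *   addr = sg_dma_address(sg) + (sg_dma_len(sg) - cur_residue)
     *        = 0x10000 + (0x2000 - 0x1800) = 0x10800
     *   len  = cur_residue = 0x1800
     */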
372 | |||
373 | static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent, | ||
374 | struct scsi_cmnd *cmd, unsigned int len) | ||
375 | { | ||
376 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
377 | |||
378 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
379 | ent->sense_ptr += len; | ||
380 | return; | ||
381 | } | ||
382 | |||
383 | p->cur_residue -= len; | ||
384 | p->tot_residue -= len; | ||
385 | if (p->cur_residue < 0 || p->tot_residue < 0) { | ||
386 | printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n", | ||
387 | esp->host->unique_id); | ||
388 | printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] " | ||
389 | "len[%u]\n", | ||
390 | esp->host->unique_id, | ||
391 | p->cur_residue, p->tot_residue, len); | ||
392 | p->cur_residue = 0; | ||
393 | p->tot_residue = 0; | ||
394 | } | ||
395 | if (!p->cur_residue && p->tot_residue) { | ||
396 | p->cur_sg++; | ||
397 | p->cur_residue = sg_dma_len(p->cur_sg); | ||
398 | } | ||
399 | } | ||
400 | |||
401 | static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd) | ||
402 | { | ||
403 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
404 | int dir = cmd->sc_data_direction; | ||
405 | |||
406 | if (dir == DMA_NONE) | ||
407 | return; | ||
408 | |||
409 | esp->ops->unmap_sg(esp, cmd->request_buffer, | ||
410 | spriv->u.num_sg, dir); | ||
411 | } | ||
412 | |||
413 | static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent) | ||
414 | { | ||
415 | struct scsi_cmnd *cmd = ent->cmd; | ||
416 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
417 | |||
418 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
419 | ent->saved_sense_ptr = ent->sense_ptr; | ||
420 | return; | ||
421 | } | ||
422 | ent->saved_cur_residue = spriv->cur_residue; | ||
423 | ent->saved_cur_sg = spriv->cur_sg; | ||
424 | ent->saved_tot_residue = spriv->tot_residue; | ||
425 | } | ||
426 | |||
427 | static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent) | ||
428 | { | ||
429 | struct scsi_cmnd *cmd = ent->cmd; | ||
430 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
431 | |||
432 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
433 | ent->sense_ptr = ent->saved_sense_ptr; | ||
434 | return; | ||
435 | } | ||
436 | spriv->cur_residue = ent->saved_cur_residue; | ||
437 | spriv->cur_sg = ent->saved_cur_sg; | ||
438 | spriv->tot_residue = ent->saved_tot_residue; | ||
439 | } | ||
440 | |||
441 | static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd) | ||
442 | { | ||
443 | if (cmd->cmd_len == 6 || | ||
444 | cmd->cmd_len == 10 || | ||
445 | cmd->cmd_len == 12) { | ||
446 | esp->flags &= ~ESP_FLAG_DOING_SLOWCMD; | ||
447 | } else { | ||
448 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
449 | } | ||
450 | } | ||
451 | |||
452 | static void esp_write_tgt_config3(struct esp *esp, int tgt) | ||
453 | { | ||
454 | if (esp->rev > ESP100A) { | ||
455 | u8 val = esp->target[tgt].esp_config3; | ||
456 | |||
457 | if (val != esp->prev_cfg3) { | ||
458 | esp->prev_cfg3 = val; | ||
459 | esp_write8(val, ESP_CFG3); | ||
460 | } | ||
461 | } | ||
462 | } | ||
463 | |||
464 | static void esp_write_tgt_sync(struct esp *esp, int tgt) | ||
465 | { | ||
466 | u8 off = esp->target[tgt].esp_offset; | ||
467 | u8 per = esp->target[tgt].esp_period; | ||
468 | |||
469 | if (off != esp->prev_soff) { | ||
470 | esp->prev_soff = off; | ||
471 | esp_write8(off, ESP_SOFF); | ||
472 | } | ||
473 | if (per != esp->prev_stp) { | ||
474 | esp->prev_stp = per; | ||
475 | esp_write8(per, ESP_STP); | ||
476 | } | ||
477 | } | ||
478 | |||
479 | static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) | ||
480 | { | ||
481 | if (esp->rev == FASHME) { | ||
482 | /* Arbitrary segment boundaries, 24-bit counts. */ | ||
483 | if (dma_len > (1U << 24)) | ||
484 | dma_len = (1U << 24); | ||
485 | } else { | ||
486 | u32 base, end; | ||
487 | |||
488 | /* The ESP chip limits the other variants to 16 bits of transfer | ||
489 | * count. Actually on FAS100A and FAS236 we could get | ||
490 | * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB | ||
491 | * in the ESP_CFG2 register but that causes other unwanted | ||
492 | * changes so we don't use it currently. | ||
493 | */ | ||
494 | if (dma_len > (1U << 16)) | ||
495 | dma_len = (1U << 16); | ||
496 | |||
497 | /* All of the DMA variants hooked up to these chips | ||
498 | * cannot handle crossing a 24-bit address boundary. | ||
499 | */ | ||
500 | base = dma_addr & ((1U << 24) - 1U); | ||
501 | end = base + dma_len; | ||
502 | if (end > (1U << 24)) | ||
503 | end = (1U << 24); | ||
504 | dma_len = end - base; | ||
505 | } | ||
506 | return dma_len; | ||
507 | } | ||
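A hedged example of the clamping above for a non-FASHME chip, with made-up numbers:

    /* dma_addr = 0x00fff000, dma_len = 0x2000: the length fits in 16 bits,
     * but the span would cross a 16 MB (1 << 24) boundary, so:
     *
     *   base    = 0x00fff000 & 0x00ffffff = 0x00fff000
     *   end     = base + 0x2000 = 0x01001000  ->  clamped to 0x01000000
     *   dma_len = end - base = 0x1000
     *
     * The remaining 0x1000 bytes are presumably issued as a follow-up
     * transfer by the data-phase code.
     */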
508 | |||
509 | static int esp_need_to_nego_wide(struct esp_target_data *tp) | ||
510 | { | ||
511 | struct scsi_target *target = tp->starget; | ||
512 | |||
513 | return spi_width(target) != tp->nego_goal_width; | ||
514 | } | ||
515 | |||
516 | static int esp_need_to_nego_sync(struct esp_target_data *tp) | ||
517 | { | ||
518 | struct scsi_target *target = tp->starget; | ||
519 | |||
520 | /* When offset is zero, period is "don't care". */ | ||
521 | if (!spi_offset(target) && !tp->nego_goal_offset) | ||
522 | return 0; | ||
523 | |||
524 | if (spi_offset(target) == tp->nego_goal_offset && | ||
525 | spi_period(target) == tp->nego_goal_period) | ||
526 | return 0; | ||
527 | |||
528 | return 1; | ||
529 | } | ||
530 | |||
531 | static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, | ||
532 | struct esp_lun_data *lp) | ||
533 | { | ||
534 | if (!ent->tag[0]) { | ||
535 | /* Non-tagged, slot already taken? */ | ||
536 | if (lp->non_tagged_cmd) | ||
537 | return -EBUSY; | ||
538 | |||
539 | if (lp->hold) { | ||
540 | /* We are being held by active tagged | ||
541 | * commands. | ||
542 | */ | ||
543 | if (lp->num_tagged) | ||
544 | return -EBUSY; | ||
545 | |||
546 | /* Tagged commands completed, we can unplug | ||
547 | * the queue and run this untagged command. | ||
548 | */ | ||
549 | lp->hold = 0; | ||
550 | } else if (lp->num_tagged) { | ||
551 | /* Plug the queue until num_tagged decreases | ||
552 | * to zero in esp_free_lun_tag. | ||
553 | */ | ||
554 | lp->hold = 1; | ||
555 | return -EBUSY; | ||
556 | } | ||
557 | |||
558 | lp->non_tagged_cmd = ent; | ||
559 | return 0; | ||
560 | } else { | ||
561 | /* Tagged command, see if blocked by a | ||
562 | * non-tagged one. | ||
563 | */ | ||
564 | if (lp->non_tagged_cmd || lp->hold) | ||
565 | return -EBUSY; | ||
566 | } | ||
567 | |||
568 | BUG_ON(lp->tagged_cmds[ent->tag[1]]); | ||
569 | |||
570 | lp->tagged_cmds[ent->tag[1]] = ent; | ||
571 | lp->num_tagged++; | ||
572 | |||
573 | return 0; | ||
574 | } | ||
575 | |||
576 | static void esp_free_lun_tag(struct esp_cmd_entry *ent, | ||
577 | struct esp_lun_data *lp) | ||
578 | { | ||
579 | if (ent->tag[0]) { | ||
580 | BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent); | ||
581 | lp->tagged_cmds[ent->tag[1]] = NULL; | ||
582 | lp->num_tagged--; | ||
583 | } else { | ||
584 | BUG_ON(lp->non_tagged_cmd != ent); | ||
585 | lp->non_tagged_cmd = NULL; | ||
586 | } | ||
587 | } | ||
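The hold flag acts as a queue plug so an untagged command never runs alongside tagged ones on the same LUN. A walk-through of the intended sequence, written against the two helpers above:

    /* 1. Tagged commands are outstanding (num_tagged > 0) and an untagged one
     *    arrives: esp_alloc_lun_tag() sets lp->hold = 1 and returns -EBUSY, so
     *    the untagged command stays on the queued list.
     * 2. Each tagged completion calls esp_free_lun_tag(), decrementing num_tagged.
     * 3. When num_tagged reaches zero, the next esp_alloc_lun_tag() attempt for
     *    the untagged command clears lp->hold and claims lp->non_tagged_cmd.
     * 4. While hold or non_tagged_cmd is set, new tagged commands also get
     *    -EBUSY, keeping the LUN single-threaded until the untagged command
     *    completes and releases the slot.
     */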
588 | |||
589 | /* When a contingent allegiance condition is created, we force feed a | ||
590 | * REQUEST_SENSE command to the device to fetch the sense data. I | ||
591 | * tried many other schemes, relying on the scsi error handling layer | ||
592 | * to send out the REQUEST_SENSE automatically, but this was difficult | ||
593 | * to get right especially in the presence of applications like smartd | ||
594 | * which use SG_IO to send out their own REQUEST_SENSE commands. | ||
595 | */ | ||
596 | static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent) | ||
597 | { | ||
598 | struct scsi_cmnd *cmd = ent->cmd; | ||
599 | struct scsi_device *dev = cmd->device; | ||
600 | int tgt, lun; | ||
601 | u8 *p, val; | ||
602 | |||
603 | tgt = dev->id; | ||
604 | lun = dev->lun; | ||
605 | |||
606 | |||
607 | if (!ent->sense_ptr) { | ||
608 | esp_log_autosense("esp%d: Doing auto-sense for " | ||
609 | "tgt[%d] lun[%d]\n", | ||
610 | esp->host->unique_id, tgt, lun); | ||
611 | |||
612 | ent->sense_ptr = cmd->sense_buffer; | ||
613 | ent->sense_dma = esp->ops->map_single(esp, | ||
614 | ent->sense_ptr, | ||
615 | SCSI_SENSE_BUFFERSIZE, | ||
616 | DMA_FROM_DEVICE); | ||
617 | } | ||
618 | ent->saved_sense_ptr = ent->sense_ptr; | ||
619 | |||
620 | esp->active_cmd = ent; | ||
621 | |||
622 | p = esp->command_block; | ||
623 | esp->msg_out_len = 0; | ||
624 | |||
625 | *p++ = IDENTIFY(0, lun); | ||
626 | *p++ = REQUEST_SENSE; | ||
627 | *p++ = ((dev->scsi_level <= SCSI_2) ? | ||
628 | (lun << 5) : 0); | ||
629 | *p++ = 0; | ||
630 | *p++ = 0; | ||
631 | *p++ = SCSI_SENSE_BUFFERSIZE; | ||
632 | *p++ = 0; | ||
633 | |||
634 | esp->select_state = ESP_SELECT_BASIC; | ||
635 | |||
636 | val = tgt; | ||
637 | if (esp->rev == FASHME) | ||
638 | val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; | ||
639 | esp_write8(val, ESP_BUSID); | ||
640 | |||
641 | esp_write_tgt_sync(esp, tgt); | ||
642 | esp_write_tgt_config3(esp, tgt); | ||
643 | |||
644 | val = (p - esp->command_block); | ||
645 | |||
646 | if (esp->rev == FASHME) | ||
647 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
648 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
649 | val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA); | ||
650 | } | ||
651 | |||
652 | static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) | ||
653 | { | ||
654 | struct esp_cmd_entry *ent; | ||
655 | |||
656 | list_for_each_entry(ent, &esp->queued_cmds, list) { | ||
657 | struct scsi_cmnd *cmd = ent->cmd; | ||
658 | struct scsi_device *dev = cmd->device; | ||
659 | struct esp_lun_data *lp = dev->hostdata; | ||
660 | |||
661 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
662 | ent->tag[0] = 0; | ||
663 | ent->tag[1] = 0; | ||
664 | return ent; | ||
665 | } | ||
666 | |||
667 | if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) { | ||
668 | ent->tag[0] = 0; | ||
669 | ent->tag[1] = 0; | ||
670 | } | ||
671 | |||
672 | if (esp_alloc_lun_tag(ent, lp) < 0) | ||
673 | continue; | ||
674 | |||
675 | return ent; | ||
676 | } | ||
677 | |||
678 | return NULL; | ||
679 | } | ||
680 | |||
681 | static void esp_maybe_execute_command(struct esp *esp) | ||
682 | { | ||
683 | struct esp_target_data *tp; | ||
684 | struct esp_lun_data *lp; | ||
685 | struct scsi_device *dev; | ||
686 | struct scsi_cmnd *cmd; | ||
687 | struct esp_cmd_entry *ent; | ||
688 | int tgt, lun, i; | ||
689 | u32 val, start_cmd; | ||
690 | u8 *p; | ||
691 | |||
692 | if (esp->active_cmd || | ||
693 | (esp->flags & ESP_FLAG_RESETTING)) | ||
694 | return; | ||
695 | |||
696 | ent = find_and_prep_issuable_command(esp); | ||
697 | if (!ent) | ||
698 | return; | ||
699 | |||
700 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
701 | esp_autosense(esp, ent); | ||
702 | return; | ||
703 | } | ||
704 | |||
705 | cmd = ent->cmd; | ||
706 | dev = cmd->device; | ||
707 | tgt = dev->id; | ||
708 | lun = dev->lun; | ||
709 | tp = &esp->target[tgt]; | ||
710 | lp = dev->hostdata; | ||
711 | |||
712 | list_del(&ent->list); | ||
713 | list_add(&ent->list, &esp->active_cmds); | ||
714 | |||
715 | esp->active_cmd = ent; | ||
716 | |||
717 | esp_map_dma(esp, cmd); | ||
718 | esp_save_pointers(esp, ent); | ||
719 | |||
720 | esp_check_command_len(esp, cmd); | ||
721 | |||
722 | p = esp->command_block; | ||
723 | |||
724 | esp->msg_out_len = 0; | ||
725 | if (tp->flags & ESP_TGT_CHECK_NEGO) { | ||
726 | /* Need to negotiate. If the target is broken | ||
727 | * go for synchronous transfers and non-wide. | ||
728 | */ | ||
729 | if (tp->flags & ESP_TGT_BROKEN) { | ||
730 | tp->flags &= ~ESP_TGT_DISCONNECT; | ||
731 | tp->nego_goal_period = 0; | ||
732 | tp->nego_goal_offset = 0; | ||
733 | tp->nego_goal_width = 0; | ||
734 | tp->nego_goal_tags = 0; | ||
735 | } | ||
736 | |||
737 | /* If the settings are not changing, skip this. */ | ||
738 | if (spi_width(tp->starget) == tp->nego_goal_width && | ||
739 | spi_period(tp->starget) == tp->nego_goal_period && | ||
740 | spi_offset(tp->starget) == tp->nego_goal_offset) { | ||
741 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
742 | goto build_identify; | ||
743 | } | ||
744 | |||
745 | if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) { | ||
746 | esp->msg_out_len = | ||
747 | spi_populate_width_msg(&esp->msg_out[0], | ||
748 | (tp->nego_goal_width ? | ||
749 | 1 : 0)); | ||
750 | tp->flags |= ESP_TGT_NEGO_WIDE; | ||
751 | } else if (esp_need_to_nego_sync(tp)) { | ||
752 | esp->msg_out_len = | ||
753 | spi_populate_sync_msg(&esp->msg_out[0], | ||
754 | tp->nego_goal_period, | ||
755 | tp->nego_goal_offset); | ||
756 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
757 | } else { | ||
758 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
759 | } | ||
760 | |||
761 | /* Process it like a slow command. */ | ||
762 | if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC)) | ||
763 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
764 | } | ||
765 | |||
766 | build_identify: | ||
767 | /* If we don't have a lun-data struct yet, we're probing | ||
768 | * so do not disconnect. Also, do not disconnect unless | ||
769 | * we have a tag on this command. | ||
770 | */ | ||
771 | if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0]) | ||
772 | *p++ = IDENTIFY(1, lun); | ||
773 | else | ||
774 | *p++ = IDENTIFY(0, lun); | ||
775 | |||
776 | if (ent->tag[0] && esp->rev == ESP100) { | ||
777 | /* ESP100 lacks select w/atn3 command, use select | ||
778 | * and stop instead. | ||
779 | */ | ||
780 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
781 | } | ||
782 | |||
783 | if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) { | ||
784 | start_cmd = ESP_CMD_DMA | ESP_CMD_SELA; | ||
785 | if (ent->tag[0]) { | ||
786 | *p++ = ent->tag[0]; | ||
787 | *p++ = ent->tag[1]; | ||
788 | |||
789 | start_cmd = ESP_CMD_DMA | ESP_CMD_SA3; | ||
790 | } | ||
791 | |||
792 | for (i = 0; i < cmd->cmd_len; i++) | ||
793 | *p++ = cmd->cmnd[i]; | ||
794 | |||
795 | esp->select_state = ESP_SELECT_BASIC; | ||
796 | } else { | ||
797 | esp->cmd_bytes_left = cmd->cmd_len; | ||
798 | esp->cmd_bytes_ptr = &cmd->cmnd[0]; | ||
799 | |||
800 | if (ent->tag[0]) { | ||
801 | for (i = esp->msg_out_len - 1; | ||
802 | i >= 0; i--) | ||
803 | esp->msg_out[i + 2] = esp->msg_out[i]; | ||
804 | esp->msg_out[0] = ent->tag[0]; | ||
805 | esp->msg_out[1] = ent->tag[1]; | ||
806 | esp->msg_out_len += 2; | ||
807 | } | ||
808 | |||
809 | start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS; | ||
810 | esp->select_state = ESP_SELECT_MSGOUT; | ||
811 | } | ||
812 | val = tgt; | ||
813 | if (esp->rev == FASHME) | ||
814 | val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; | ||
815 | esp_write8(val, ESP_BUSID); | ||
816 | |||
817 | esp_write_tgt_sync(esp, tgt); | ||
818 | esp_write_tgt_config3(esp, tgt); | ||
819 | |||
820 | val = (p - esp->command_block); | ||
821 | |||
822 | if (esp_debug & ESP_DEBUG_SCSICMD) { | ||
823 | printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun); | ||
824 | for (i = 0; i < cmd->cmd_len; i++) | ||
825 | printk("%02x ", cmd->cmnd[i]); | ||
826 | printk("]\n"); | ||
827 | } | ||
828 | |||
829 | if (esp->rev == FASHME) | ||
830 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
831 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
832 | val, 16, 0, start_cmd); | ||
833 | } | ||
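For the common (non-slow) path, the command block assembled above is a contiguous byte string that the chip shifts out during one selection. A hedged example of its contents for a tagged 6-byte INQUIRY to lun 0 with disconnect permitted (tag number chosen arbitrarily):

    /* p[0]    = IDENTIFY(1, 0)   = 0xc0
     * p[1]    = ent->tag[0]      = 0x20   (SIMPLE_QUEUE_TAG, the usual case)
     * p[2]    = ent->tag[1]      = 0x02   (assumed tag number)
     * p[3..8] = 12 00 00 00 24 00         (the INQUIRY CDB)
     *
     * start_cmd is ESP_CMD_DMA | ESP_CMD_SA3 (select with ATN3), so identify,
     * tag and CDB all go out in a single selection sequence.
     */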
834 | |||
835 | static struct esp_cmd_entry *esp_get_ent(struct esp *esp) | ||
836 | { | ||
837 | struct list_head *head = &esp->esp_cmd_pool; | ||
838 | struct esp_cmd_entry *ret; | ||
839 | |||
840 | if (list_empty(head)) { | ||
841 | ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC); | ||
842 | } else { | ||
843 | ret = list_entry(head->next, struct esp_cmd_entry, list); | ||
844 | list_del(&ret->list); | ||
845 | memset(ret, 0, sizeof(*ret)); | ||
846 | } | ||
847 | return ret; | ||
848 | } | ||
849 | |||
850 | static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent) | ||
851 | { | ||
852 | list_add(&ent->list, &esp->esp_cmd_pool); | ||
853 | } | ||
854 | |||
855 | static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent, | ||
856 | struct scsi_cmnd *cmd, unsigned int result) | ||
857 | { | ||
858 | struct scsi_device *dev = cmd->device; | ||
859 | int tgt = dev->id; | ||
860 | int lun = dev->lun; | ||
861 | |||
862 | esp->active_cmd = NULL; | ||
863 | esp_unmap_dma(esp, cmd); | ||
864 | esp_free_lun_tag(ent, dev->hostdata); | ||
865 | cmd->result = result; | ||
866 | |||
867 | if (ent->eh_done) { | ||
868 | complete(ent->eh_done); | ||
869 | ent->eh_done = NULL; | ||
870 | } | ||
871 | |||
872 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
873 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
874 | SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); | ||
875 | ent->sense_ptr = NULL; | ||
876 | |||
877 | /* Restore the message/status bytes to what we actually | ||
878 | * saw originally. Also, report that we are providing | ||
879 | * the sense data. | ||
880 | */ | ||
881 | cmd->result = ((DRIVER_SENSE << 24) | | ||
882 | (DID_OK << 16) | | ||
883 | (COMMAND_COMPLETE << 8) | | ||
884 | (SAM_STAT_CHECK_CONDITION << 0)); | ||
885 | |||
886 | ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE; | ||
887 | if (esp_debug & ESP_DEBUG_AUTOSENSE) { | ||
888 | int i; | ||
889 | |||
890 | printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ", | ||
891 | esp->host->unique_id, tgt, lun); | ||
892 | for (i = 0; i < 18; i++) | ||
893 | printk("%02x ", cmd->sense_buffer[i]); | ||
894 | printk("]\n"); | ||
895 | } | ||
896 | } | ||
897 | |||
898 | cmd->scsi_done(cmd); | ||
899 | |||
900 | list_del(&ent->list); | ||
901 | esp_put_ent(esp, ent); | ||
902 | |||
903 | esp_maybe_execute_command(esp); | ||
904 | } | ||
905 | |||
906 | static unsigned int compose_result(unsigned int status, unsigned int message, | ||
907 | unsigned int driver_code) | ||
908 | { | ||
909 | return (status | (message << 8) | (driver_code << 16)); | ||
910 | } | ||
911 | |||
912 | static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) | ||
913 | { | ||
914 | struct scsi_device *dev = ent->cmd->device; | ||
915 | struct esp_lun_data *lp = dev->hostdata; | ||
916 | |||
917 | scsi_track_queue_full(dev, lp->num_tagged - 1); | ||
918 | } | ||
919 | |||
920 | static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | ||
921 | { | ||
922 | struct scsi_device *dev = cmd->device; | ||
923 | struct esp *esp = host_to_esp(dev->host); | ||
924 | struct esp_cmd_priv *spriv; | ||
925 | struct esp_cmd_entry *ent; | ||
926 | |||
927 | ent = esp_get_ent(esp); | ||
928 | if (!ent) | ||
929 | return SCSI_MLQUEUE_HOST_BUSY; | ||
930 | |||
931 | ent->cmd = cmd; | ||
932 | |||
933 | cmd->scsi_done = done; | ||
934 | |||
935 | spriv = ESP_CMD_PRIV(cmd); | ||
936 | spriv->u.dma_addr = ~(dma_addr_t)0x0; | ||
937 | |||
938 | list_add_tail(&ent->list, &esp->queued_cmds); | ||
939 | |||
940 | esp_maybe_execute_command(esp); | ||
941 | |||
942 | return 0; | ||
943 | } | ||
944 | |||
945 | static int esp_check_gross_error(struct esp *esp) | ||
946 | { | ||
947 | if (esp->sreg & ESP_STAT_SPAM) { | ||
948 | /* Gross Error, could be one of: | ||
949 | * - top of fifo overwritten | ||
950 | * - top of command register overwritten | ||
951 | * - DMA programmed with wrong direction | ||
952 | * - improper phase change | ||
953 | */ | ||
954 | printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n", | ||
955 | esp->host->unique_id, esp->sreg); | ||
956 | /* XXX Reset the chip. XXX */ | ||
957 | return 1; | ||
958 | } | ||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | static int esp_check_spur_intr(struct esp *esp) | ||
963 | { | ||
964 | switch (esp->rev) { | ||
965 | case ESP100: | ||
966 | case ESP100A: | ||
967 | /* The interrupt pending bit of the status register cannot | ||
968 | * be trusted on these revisions. | ||
969 | */ | ||
970 | esp->sreg &= ~ESP_STAT_INTR; | ||
971 | break; | ||
972 | |||
973 | default: | ||
974 | if (!(esp->sreg & ESP_STAT_INTR)) { | ||
975 | esp->ireg = esp_read8(ESP_INTRPT); | ||
976 | if (esp->ireg & ESP_INTR_SR) | ||
977 | return 1; | ||
978 | |||
979 | /* If the DMA is indicating interrupt pending and the | ||
980 | * ESP is not, the only possibility is a DMA error. | ||
981 | */ | ||
982 | if (!esp->ops->dma_error(esp)) { | ||
983 | printk(KERN_ERR PFX "esp%d: Spurious irq, " | ||
984 | "sreg=%x.\n", | ||
985 | esp->host->unique_id, esp->sreg); | ||
986 | return -1; | ||
987 | } | ||
988 | |||
989 | printk(KERN_ERR PFX "esp%d: DMA error\n", | ||
990 | esp->host->unique_id); | ||
991 | |||
992 | /* XXX Reset the chip. XXX */ | ||
993 | return -1; | ||
994 | } | ||
995 | break; | ||
996 | } | ||
997 | |||
998 | return 0; | ||
999 | } | ||
1000 | |||
1001 | static void esp_schedule_reset(struct esp *esp) | ||
1002 | { | ||
1003 | esp_log_reset("ESP: esp_schedule_reset() from %p\n", | ||
1004 | __builtin_return_address(0)); | ||
1005 | esp->flags |= ESP_FLAG_RESETTING; | ||
1006 | esp_event(esp, ESP_EVENT_RESET); | ||
1007 | } | ||
1008 | |||
1009 | /* In order to avoid having to add a special half-reconnected state | ||
1010 | * into the driver, we just sit here and poll through the rest of | ||
1011 | * the reselection process to get the tag message bytes. | ||
1012 | */ | ||
1013 | static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, | ||
1014 | struct esp_lun_data *lp) | ||
1015 | { | ||
1016 | struct esp_cmd_entry *ent; | ||
1017 | int i; | ||
1018 | |||
1019 | if (!lp->num_tagged) { | ||
1020 | printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n", | ||
1021 | esp->host->unique_id); | ||
1022 | return NULL; | ||
1023 | } | ||
1024 | |||
1025 | esp_log_reconnect("ESP: reconnect tag, "); | ||
1026 | |||
1027 | for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { | ||
1028 | if (esp->ops->irq_pending(esp)) | ||
1029 | break; | ||
1030 | } | ||
1031 | if (i == ESP_QUICKIRQ_LIMIT) { | ||
1032 | printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n", | ||
1033 | esp->host->unique_id); | ||
1034 | return NULL; | ||
1035 | } | ||
1036 | |||
1037 | esp->sreg = esp_read8(ESP_STATUS); | ||
1038 | esp->ireg = esp_read8(ESP_INTRPT); | ||
1039 | |||
1040 | esp_log_reconnect("IRQ(%d:%x:%x), ", | ||
1041 | i, esp->ireg, esp->sreg); | ||
1042 | |||
1043 | if (esp->ireg & ESP_INTR_DC) { | ||
1044 | printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n", | ||
1045 | esp->host->unique_id); | ||
1046 | return NULL; | ||
1047 | } | ||
1048 | |||
1049 | if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) { | ||
1050 | printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n", | ||
1051 | esp->host->unique_id, esp->sreg); | ||
1052 | return NULL; | ||
1053 | } | ||
1054 | |||
1055 | /* DMA in the tag bytes... */ | ||
1056 | esp->command_block[0] = 0xff; | ||
1057 | esp->command_block[1] = 0xff; | ||
1058 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
1059 | 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI); | ||
1060 | |||
1061 | /* ACK the message. */ | ||
1062 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1063 | |||
1064 | for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) { | ||
1065 | if (esp->ops->irq_pending(esp)) { | ||
1066 | esp->sreg = esp_read8(ESP_STATUS); | ||
1067 | esp->ireg = esp_read8(ESP_INTRPT); | ||
1068 | if (esp->ireg & ESP_INTR_FDONE) | ||
1069 | break; | ||
1070 | } | ||
1071 | udelay(1); | ||
1072 | } | ||
1073 | if (i == ESP_RESELECT_TAG_LIMIT) { | ||
1074 | printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n", | ||
1075 | esp->host->unique_id); | ||
1076 | return NULL; | ||
1077 | } | ||
1078 | esp->ops->dma_drain(esp); | ||
1079 | esp->ops->dma_invalidate(esp); | ||
1080 | |||
1081 | esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n", | ||
1082 | i, esp->ireg, esp->sreg, | ||
1083 | esp->command_block[0], | ||
1084 | esp->command_block[1]); | ||
1085 | |||
1086 | if (esp->command_block[0] < SIMPLE_QUEUE_TAG || | ||
1087 | esp->command_block[0] > ORDERED_QUEUE_TAG) { | ||
1088 | printk(KERN_ERR PFX "esp%d: Reconnect, bad tag " | ||
1089 | "type %02x.\n", | ||
1090 | esp->host->unique_id, esp->command_block[0]); | ||
1091 | return NULL; | ||
1092 | } | ||
1093 | |||
1094 | ent = lp->tagged_cmds[esp->command_block[1]]; | ||
1095 | if (!ent) { | ||
1096 | printk(KERN_ERR PFX "esp%d: Reconnect, no entry for " | ||
1097 | "tag %02x.\n", | ||
1098 | esp->host->unique_id, esp->command_block[1]); | ||
1099 | return NULL; | ||
1100 | } | ||
1101 | |||
1102 | return ent; | ||
1103 | } | ||
1104 | |||
1105 | static int esp_reconnect(struct esp *esp) | ||
1106 | { | ||
1107 | struct esp_cmd_entry *ent; | ||
1108 | struct esp_target_data *tp; | ||
1109 | struct esp_lun_data *lp; | ||
1110 | struct scsi_device *dev; | ||
1111 | int target, lun; | ||
1112 | |||
1113 | BUG_ON(esp->active_cmd); | ||
1114 | if (esp->rev == FASHME) { | ||
1115 | /* FASHME puts the target and lun numbers directly | ||
1116 | * into the fifo. | ||
1117 | */ | ||
1118 | target = esp->fifo[0]; | ||
1119 | lun = esp->fifo[1] & 0x7; | ||
1120 | } else { | ||
1121 | u8 bits = esp_read8(ESP_FDATA); | ||
1122 | |||
1123 | /* Older chips put the lun directly into the fifo, but | ||
1124 | * the target is given as a sample of the arbitration | ||
1125 | * lines on the bus at reselection time. So we should | ||
1126 | * see the ID of the ESP and the one reconnecting target | ||
1127 | * set in the bitmap. | ||
1128 | */ | ||
1129 | if (!(bits & esp->scsi_id_mask)) | ||
1130 | goto do_reset; | ||
1131 | bits &= ~esp->scsi_id_mask; | ||
1132 | if (!bits || (bits & (bits - 1))) | ||
1133 | goto do_reset; | ||
1134 | |||
1135 | target = ffs(bits) - 1; | ||
1136 | lun = (esp_read8(ESP_FDATA) & 0x7); | ||
1137 | |||
1138 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1139 | if (esp->rev == ESP100) { | ||
1140 | u8 ireg = esp_read8(ESP_INTRPT); | ||
1141 | /* This chip has a bug during reselection that can | ||
1142 | * cause a spurious illegal-command interrupt, which | ||
1143 | * we simply ACK here. Another possibility is a bus | ||
1144 | * reset so we must check for that. | ||
1145 | */ | ||
1146 | if (ireg & ESP_INTR_SR) | ||
1147 | goto do_reset; | ||
1148 | } | ||
1149 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1150 | } | ||
1151 | |||
1152 | esp_write_tgt_sync(esp, target); | ||
1153 | esp_write_tgt_config3(esp, target); | ||
1154 | |||
1155 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1156 | |||
1157 | if (esp->rev == FASHME) | ||
1158 | esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT, | ||
1159 | ESP_BUSID); | ||
1160 | |||
1161 | tp = &esp->target[target]; | ||
1162 | dev = __scsi_device_lookup_by_target(tp->starget, lun); | ||
1163 | if (!dev) { | ||
1164 | printk(KERN_ERR PFX "esp%d: Reconnect, no lp " | ||
1165 | "tgt[%u] lun[%u]\n", | ||
1166 | esp->host->unique_id, target, lun); | ||
1167 | goto do_reset; | ||
1168 | } | ||
1169 | lp = dev->hostdata; | ||
1170 | |||
1171 | ent = lp->non_tagged_cmd; | ||
1172 | if (!ent) { | ||
1173 | ent = esp_reconnect_with_tag(esp, lp); | ||
1174 | if (!ent) | ||
1175 | goto do_reset; | ||
1176 | } | ||
1177 | |||
1178 | esp->active_cmd = ent; | ||
1179 | |||
1180 | if (ent->flags & ESP_CMD_FLAG_ABORT) { | ||
1181 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1182 | esp->msg_out_len = 1; | ||
1183 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1184 | } | ||
1185 | |||
1186 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1187 | esp_restore_pointers(esp, ent); | ||
1188 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1189 | return 1; | ||
1190 | |||
1191 | do_reset: | ||
1192 | esp_schedule_reset(esp); | ||
1193 | return 0; | ||
1194 | } | ||
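The bitmap decode in the non-FASHME branch of esp_reconnect() above is easier to follow with concrete numbers. A minimal standalone sketch, using hypothetical IDs and the ordinary ffs() find-first-set (the kernel provides its own):

#include <strings.h>	/* ffs() */

typedef unsigned char u8;

/* Decode a reselection sample: the host's own ID bit must be present,
 * and after clearing it exactly one other bit (the reconnecting target)
 * may remain.  Returns the target ID, or -1 if the sample is bogus.
 */
static int decode_reselect_bits(u8 bits, u8 scsi_id_mask)
{
	if (!(bits & scsi_id_mask))
		return -1;
	bits &= ~scsi_id_mask;
	if (!bits || (bits & (bits - 1)))	/* zero bits, or more than one */
		return -1;
	return ffs(bits) - 1;
}

/* e.g. decode_reselect_bits(0x88, 0x80) == 3: host ID 7, target ID 3. */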
1195 | |||
1196 | static int esp_finish_select(struct esp *esp) | ||
1197 | { | ||
1198 | struct esp_cmd_entry *ent; | ||
1199 | struct scsi_cmnd *cmd; | ||
1200 | u8 orig_select_state; | ||
1201 | |||
1202 | orig_select_state = esp->select_state; | ||
1203 | |||
1204 | /* No longer selecting. */ | ||
1205 | esp->select_state = ESP_SELECT_NONE; | ||
1206 | |||
1207 | esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS; | ||
1208 | ent = esp->active_cmd; | ||
1209 | cmd = ent->cmd; | ||
1210 | |||
1211 | if (esp->ops->dma_error(esp)) { | ||
1212 | /* If we see a DMA error during or as a result of selection, | ||
1213 | * all bets are off. | ||
1214 | */ | ||
1215 | esp_schedule_reset(esp); | ||
1216 | esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16)); | ||
1217 | return 0; | ||
1218 | } | ||
1219 | |||
1220 | esp->ops->dma_invalidate(esp); | ||
1221 | |||
1222 | if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { | ||
1223 | struct esp_target_data *tp = &esp->target[cmd->device->id]; | ||
1224 | |||
1225 | /* Carefully back out of the selection attempt. Release | ||
1226 | * resources (such as DMA mapping & TAG) and reset state (such | ||
1227 | * as message out and command delivery variables). | ||
1228 | */ | ||
1229 | if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { | ||
1230 | esp_unmap_dma(esp, cmd); | ||
1231 | esp_free_lun_tag(ent, cmd->device->hostdata); | ||
1232 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE); | ||
1233 | esp->flags &= ~ESP_FLAG_DOING_SLOWCMD; | ||
1234 | esp->cmd_bytes_ptr = NULL; | ||
1235 | esp->cmd_bytes_left = 0; | ||
1236 | } else { | ||
1237 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
1238 | SCSI_SENSE_BUFFERSIZE, | ||
1239 | DMA_FROM_DEVICE); | ||
1240 | ent->sense_ptr = NULL; | ||
1241 | } | ||
1242 | |||
1243 | /* Now that the state is unwound properly, put back onto | ||
1244 | * the issue queue. This command is no longer active. | ||
1245 | */ | ||
1246 | list_del(&ent->list); | ||
1247 | list_add(&ent->list, &esp->queued_cmds); | ||
1248 | esp->active_cmd = NULL; | ||
1249 | |||
1250 | /* The return value is ignored by the caller, which directly | ||
1251 | * invokes esp_reconnect(). | ||
1252 | */ | ||
1253 | return 0; | ||
1254 | } | ||
1255 | |||
1256 | if (esp->ireg == ESP_INTR_DC) { | ||
1257 | struct scsi_device *dev = cmd->device; | ||
1258 | |||
1259 | /* Disconnect. Make sure we re-negotiate sync and | ||
1260 | * wide parameters if this target starts responding | ||
1261 | * again in the future. | ||
1262 | */ | ||
1263 | esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO; | ||
1264 | |||
1265 | scsi_esp_cmd(esp, ESP_CMD_ESEL); | ||
1266 | esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16)); | ||
1267 | return 1; | ||
1268 | } | ||
1269 | |||
1270 | if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { | ||
1271 | /* Selection successful. On pre-FAST chips we have | ||
1272 | * to do a NOP and possibly clean out the FIFO. | ||
1273 | */ | ||
1274 | if (esp->rev <= ESP236) { | ||
1275 | int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
1276 | |||
1277 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1278 | |||
1279 | if (!fcnt && | ||
1280 | (!esp->prev_soff || | ||
1281 | ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) | ||
1282 | esp_flush_fifo(esp); | ||
1283 | } | ||
1284 | |||
1285 | /* If we are doing a slow command, negotiation, etc. | ||
1286 | * we'll do the right thing as we transition to the | ||
1287 | * next phase. | ||
1288 | */ | ||
1289 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1290 | return 0; | ||
1291 | } | ||
1292 | |||
1293 | printk("ESP: Unexpected selection completion ireg[%x].\n", | ||
1294 | esp->ireg); | ||
1295 | esp_schedule_reset(esp); | ||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
1299 | static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, | ||
1300 | struct scsi_cmnd *cmd) | ||
1301 | { | ||
1302 | int fifo_cnt, ecount, bytes_sent, flush_fifo; | ||
1303 | |||
1304 | fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
1305 | if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) | ||
1306 | fifo_cnt <<= 1; | ||
1307 | |||
1308 | ecount = 0; | ||
1309 | if (!(esp->sreg & ESP_STAT_TCNT)) { | ||
1310 | ecount = ((unsigned int)esp_read8(ESP_TCLOW) | | ||
1311 | (((unsigned int)esp_read8(ESP_TCMED)) << 8)); | ||
1312 | if (esp->rev == FASHME) | ||
1313 | ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16; | ||
1314 | } | ||
1315 | |||
1316 | bytes_sent = esp->data_dma_len; | ||
1317 | bytes_sent -= ecount; | ||
1318 | |||
1319 | if (!(ent->flags & ESP_CMD_FLAG_WRITE)) | ||
1320 | bytes_sent -= fifo_cnt; | ||
1321 | |||
1322 | flush_fifo = 0; | ||
1323 | if (!esp->prev_soff) { | ||
1324 | /* Synchronous data transfer, always flush fifo. */ | ||
1325 | flush_fifo = 1; | ||
1326 | } else { | ||
1327 | if (esp->rev == ESP100) { | ||
1328 | u32 fflags, phase; | ||
1329 | |||
1330 | /* ESP100 has a chip bug where in the synchronous data | ||
1331 | * phase it can mistake a final long REQ pulse from the | ||
1332 | * target for an extra data byte. Fun. | ||
1333 | * | ||
1334 | * To detect this case we resample the status register | ||
1335 | * and fifo flags. If we're still in a data phase and | ||
1336 | * we see spurious chunks in the fifo, we return error | ||
1337 | * to the caller which should reset and set things up | ||
1338 | * such that we only try future transfers to this | ||
1339 | * target in synchronous mode. | ||
1340 | */ | ||
1341 | esp->sreg = esp_read8(ESP_STATUS); | ||
1342 | phase = esp->sreg & ESP_STAT_PMASK; | ||
1343 | fflags = esp_read8(ESP_FFLAGS); | ||
1344 | |||
1345 | if ((phase == ESP_DOP && | ||
1346 | (fflags & ESP_FF_ONOTZERO)) || | ||
1347 | (phase == ESP_DIP && | ||
1348 | (fflags & ESP_FF_FBYTES))) | ||
1349 | return -1; | ||
1350 | } | ||
1351 | if (!(ent->flags & ESP_CMD_FLAG_WRITE)) | ||
1352 | flush_fifo = 1; | ||
1353 | } | ||
1354 | |||
1355 | if (flush_fifo) | ||
1356 | esp_flush_fifo(esp); | ||
1357 | |||
1358 | return bytes_sent; | ||
1359 | } | ||
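A small worked example of the residual arithmetic in esp_data_bytes_sent() above, with made-up counter values; the point is that on a DATA OUT any bytes still sitting in the FIFO never reached the target, so they count against bytes_sent as well:

/* Hypothetical numbers: a 4096-byte DMA where the transfer counter
 * still holds 512 and 8 bytes remain in the FIFO on a DATA OUT phase.
 */
static unsigned int residual_example(void)
{
	unsigned int data_dma_len = 4096;
	unsigned int ecount = 512;	/* bytes the chip never counted out */
	unsigned int fifo_cnt = 8;	/* bytes stuck in the FIFO */
	unsigned int bytes_sent = data_dma_len - ecount;	/* 3584 */

	bytes_sent -= fifo_cnt;	/* DATA OUT: 3576 actually reached the target */
	return bytes_sent;
}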
1360 | |||
1361 | static void esp_setsync(struct esp *esp, struct esp_target_data *tp, | ||
1362 | u8 scsi_period, u8 scsi_offset, | ||
1363 | u8 esp_stp, u8 esp_soff) | ||
1364 | { | ||
1365 | spi_period(tp->starget) = scsi_period; | ||
1366 | spi_offset(tp->starget) = scsi_offset; | ||
1367 | spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0; | ||
1368 | |||
1369 | if (esp_soff) { | ||
1370 | esp_stp &= 0x1f; | ||
1371 | esp_soff |= esp->radelay; | ||
1372 | if (esp->rev >= FAS236) { | ||
1373 | u8 bit = ESP_CONFIG3_FSCSI; | ||
1374 | if (esp->rev >= FAS100A) | ||
1375 | bit = ESP_CONFIG3_FAST; | ||
1376 | |||
1377 | if (scsi_period < 50) { | ||
1378 | if (esp->rev == FASHME) | ||
1379 | esp_soff &= ~esp->radelay; | ||
1380 | tp->esp_config3 |= bit; | ||
1381 | } else { | ||
1382 | tp->esp_config3 &= ~bit; | ||
1383 | } | ||
1384 | esp->prev_cfg3 = tp->esp_config3; | ||
1385 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
1386 | } | ||
1387 | } | ||
1388 | |||
1389 | tp->esp_period = esp->prev_stp = esp_stp; | ||
1390 | tp->esp_offset = esp->prev_soff = esp_soff; | ||
1391 | |||
1392 | esp_write8(esp_soff, ESP_SOFF); | ||
1393 | esp_write8(esp_stp, ESP_STP); | ||
1394 | |||
1395 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); | ||
1396 | |||
1397 | spi_display_xfer_agreement(tp->starget); | ||
1398 | } | ||
1399 | |||
1400 | static void esp_msgin_reject(struct esp *esp) | ||
1401 | { | ||
1402 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1403 | struct scsi_cmnd *cmd = ent->cmd; | ||
1404 | struct esp_target_data *tp; | ||
1405 | int tgt; | ||
1406 | |||
1407 | tgt = cmd->device->id; | ||
1408 | tp = &esp->target[tgt]; | ||
1409 | |||
1410 | if (tp->flags & ESP_TGT_NEGO_WIDE) { | ||
1411 | tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE); | ||
1412 | |||
1413 | if (!esp_need_to_nego_sync(tp)) { | ||
1414 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
1415 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1416 | } else { | ||
1417 | esp->msg_out_len = | ||
1418 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1419 | tp->nego_goal_period, | ||
1420 | tp->nego_goal_offset); | ||
1421 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
1422 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1423 | } | ||
1424 | return; | ||
1425 | } | ||
1426 | |||
1427 | if (tp->flags & ESP_TGT_NEGO_SYNC) { | ||
1428 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); | ||
1429 | tp->esp_period = 0; | ||
1430 | tp->esp_offset = 0; | ||
1431 | esp_setsync(esp, tp, 0, 0, 0, 0); | ||
1432 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1433 | return; | ||
1434 | } | ||
1435 | |||
1436 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1437 | esp->msg_out_len = 1; | ||
1438 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1439 | } | ||
1440 | |||
1441 | static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp) | ||
1442 | { | ||
1443 | u8 period = esp->msg_in[3]; | ||
1444 | u8 offset = esp->msg_in[4]; | ||
1445 | u8 stp; | ||
1446 | |||
1447 | if (!(tp->flags & ESP_TGT_NEGO_SYNC)) | ||
1448 | goto do_reject; | ||
1449 | |||
1450 | if (offset > 15) | ||
1451 | goto do_reject; | ||
1452 | |||
1453 | if (offset) { | ||
1454 | int rounded_up, one_clock; | ||
1455 | |||
1456 | if (period > esp->max_period) { | ||
1457 | period = offset = 0; | ||
1458 | goto do_sdtr; | ||
1459 | } | ||
1460 | if (period < esp->min_period) | ||
1461 | goto do_reject; | ||
1462 | |||
1463 | one_clock = esp->ccycle / 1000; | ||
1464 | rounded_up = (period << 2); | ||
1465 | rounded_up = (rounded_up + one_clock - 1) / one_clock; | ||
1466 | stp = rounded_up; | ||
1467 | if (stp && esp->rev >= FAS236) { | ||
1468 | if (stp >= 50) | ||
1469 | stp--; | ||
1470 | } | ||
1471 | } else { | ||
1472 | stp = 0; | ||
1473 | } | ||
1474 | |||
1475 | esp_setsync(esp, tp, period, offset, stp, offset); | ||
1476 | return; | ||
1477 | |||
1478 | do_reject: | ||
1479 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1480 | esp->msg_out_len = 1; | ||
1481 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1482 | return; | ||
1483 | |||
1484 | do_sdtr: | ||
1485 | tp->nego_goal_period = period; | ||
1486 | tp->nego_goal_offset = offset; | ||
1487 | esp->msg_out_len = | ||
1488 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1489 | tp->nego_goal_period, | ||
1490 | tp->nego_goal_offset); | ||
1491 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1492 | } | ||
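The period-to-STP conversion in esp_msgin_sdtr() above, sketched as a standalone helper. This assumes one_clock works out to the chip clock period in nanoseconds (the units of esp->ccycle are defined in the header, which is not shown here), and the example values are illustrative only:

/* Convert an SDTR transfer period (msg_in[3], in 4ns units) into a sync
 * transfer period register value: the number of chip clocks per period,
 * rounded up.
 */
static unsigned int sdtr_period_to_stp(unsigned int period_4ns,
				       unsigned int one_clock_ns)
{
	unsigned int ns = period_4ns << 2;		/* SDTR units -> ns */

	return (ns + one_clock_ns - 1) / one_clock_ns;	/* round up to clocks */
}

/* e.g. sdtr_period_to_stp(50, 50) == 4: a 200ns period needs 4 clocks
 * of a 20MHz (50ns) chip.
 */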
1493 | |||
1494 | static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp) | ||
1495 | { | ||
1496 | int size = 8 << esp->msg_in[3]; | ||
1497 | u8 cfg3; | ||
1498 | |||
1499 | if (esp->rev != FASHME) | ||
1500 | goto do_reject; | ||
1501 | |||
1502 | if (size != 8 && size != 16) | ||
1503 | goto do_reject; | ||
1504 | |||
1505 | if (!(tp->flags & ESP_TGT_NEGO_WIDE)) | ||
1506 | goto do_reject; | ||
1507 | |||
1508 | cfg3 = tp->esp_config3; | ||
1509 | if (size == 16) { | ||
1510 | tp->flags |= ESP_TGT_WIDE; | ||
1511 | cfg3 |= ESP_CONFIG3_EWIDE; | ||
1512 | } else { | ||
1513 | tp->flags &= ~ESP_TGT_WIDE; | ||
1514 | cfg3 &= ~ESP_CONFIG3_EWIDE; | ||
1515 | } | ||
1516 | tp->esp_config3 = cfg3; | ||
1517 | esp->prev_cfg3 = cfg3; | ||
1518 | esp_write8(cfg3, ESP_CFG3); | ||
1519 | |||
1520 | tp->flags &= ~ESP_TGT_NEGO_WIDE; | ||
1521 | |||
1522 | spi_period(tp->starget) = 0; | ||
1523 | spi_offset(tp->starget) = 0; | ||
1524 | if (!esp_need_to_nego_sync(tp)) { | ||
1525 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
1526 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1527 | } else { | ||
1528 | esp->msg_out_len = | ||
1529 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1530 | tp->nego_goal_period, | ||
1531 | tp->nego_goal_offset); | ||
1532 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
1533 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1534 | } | ||
1535 | return; | ||
1536 | |||
1537 | do_reject: | ||
1538 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1539 | esp->msg_out_len = 1; | ||
1540 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1541 | } | ||
1542 | |||
1543 | static void esp_msgin_extended(struct esp *esp) | ||
1544 | { | ||
1545 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1546 | struct scsi_cmnd *cmd = ent->cmd; | ||
1547 | struct esp_target_data *tp; | ||
1548 | int tgt = cmd->device->id; | ||
1549 | |||
1550 | tp = &esp->target[tgt]; | ||
1551 | if (esp->msg_in[2] == EXTENDED_SDTR) { | ||
1552 | esp_msgin_sdtr(esp, tp); | ||
1553 | return; | ||
1554 | } | ||
1555 | if (esp->msg_in[2] == EXTENDED_WDTR) { | ||
1556 | esp_msgin_wdtr(esp, tp); | ||
1557 | return; | ||
1558 | } | ||
1559 | |||
1560 | printk("ESP: Unexpected extended msg type %x\n", | ||
1561 | esp->msg_in[2]); | ||
1562 | |||
1563 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1564 | esp->msg_out_len = 1; | ||
1565 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1566 | } | ||
1567 | |||
1568 | /* Analyze msgin bytes received from target so far. Return non-zero | ||
1569 | * if there are more bytes needed to complete the message. | ||
1570 | */ | ||
1571 | static int esp_msgin_process(struct esp *esp) | ||
1572 | { | ||
1573 | u8 msg0 = esp->msg_in[0]; | ||
1574 | int len = esp->msg_in_len; | ||
1575 | |||
1576 | if (msg0 & 0x80) { | ||
1577 | /* Identify */ | ||
1578 | printk("ESP: Unexpected msgin identify\n"); | ||
1579 | return 0; | ||
1580 | } | ||
1581 | |||
1582 | switch (msg0) { | ||
1583 | case EXTENDED_MESSAGE: | ||
1584 | if (len == 1) | ||
1585 | return 1; | ||
1586 | if (len < esp->msg_in[1] + 2) | ||
1587 | return 1; | ||
1588 | esp_msgin_extended(esp); | ||
1589 | return 0; | ||
1590 | |||
1591 | case IGNORE_WIDE_RESIDUE: { | ||
1592 | struct esp_cmd_entry *ent; | ||
1593 | struct esp_cmd_priv *spriv; | ||
1594 | if (len == 1) | ||
1595 | return 1; | ||
1596 | |||
1597 | if (esp->msg_in[1] != 1) | ||
1598 | goto do_reject; | ||
1599 | |||
1600 | ent = esp->active_cmd; | ||
1601 | spriv = ESP_CMD_PRIV(ent->cmd); | ||
1602 | |||
1603 | if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) { | ||
1604 | spriv->cur_sg--; | ||
1605 | spriv->cur_residue = 1; | ||
1606 | } else | ||
1607 | spriv->cur_residue++; | ||
1608 | spriv->tot_residue++; | ||
1609 | return 0; | ||
1610 | } | ||
1611 | case NOP: | ||
1612 | return 0; | ||
1613 | case RESTORE_POINTERS: | ||
1614 | esp_restore_pointers(esp, esp->active_cmd); | ||
1615 | return 0; | ||
1616 | case SAVE_POINTERS: | ||
1617 | esp_save_pointers(esp, esp->active_cmd); | ||
1618 | return 0; | ||
1619 | |||
1620 | case COMMAND_COMPLETE: | ||
1621 | case DISCONNECT: { | ||
1622 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1623 | |||
1624 | ent->message = msg0; | ||
1625 | esp_event(esp, ESP_EVENT_FREE_BUS); | ||
1626 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1627 | return 0; | ||
1628 | } | ||
1629 | case MESSAGE_REJECT: | ||
1630 | esp_msgin_reject(esp); | ||
1631 | return 0; | ||
1632 | |||
1633 | default: | ||
1634 | do_reject: | ||
1635 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1636 | esp->msg_out_len = 1; | ||
1637 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1638 | return 0; | ||
1639 | } | ||
1640 | } | ||
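For the EXTENDED_MESSAGE case in esp_msgin_process() above: byte 1 of an extended message is the length of the remaining payload, so the message is complete once msg_in_len reaches msg_in[1] + 2. An SDTR is the classic five-byte example (the period/offset values below are illustrative):

/* EXTENDED_MESSAGE (0x01), length (0x03), EXTENDED_SDTR (0x01),
 * period (0x19 = 25 -> 100ns in 4ns units), offset (0x0f = 15).
 * Complete once 0x03 + 2 == 5 bytes have been collected.
 */
static const unsigned char example_sdtr_msg[] = {
	0x01, 0x03, 0x01, 0x19, 0x0f
};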
1641 | |||
1642 | static int esp_process_event(struct esp *esp) | ||
1643 | { | ||
1644 | int write; | ||
1645 | |||
1646 | again: | ||
1647 | write = 0; | ||
1648 | switch (esp->event) { | ||
1649 | case ESP_EVENT_CHECK_PHASE: | ||
1650 | switch (esp->sreg & ESP_STAT_PMASK) { | ||
1651 | case ESP_DOP: | ||
1652 | esp_event(esp, ESP_EVENT_DATA_OUT); | ||
1653 | break; | ||
1654 | case ESP_DIP: | ||
1655 | esp_event(esp, ESP_EVENT_DATA_IN); | ||
1656 | break; | ||
1657 | case ESP_STATP: | ||
1658 | esp_flush_fifo(esp); | ||
1659 | scsi_esp_cmd(esp, ESP_CMD_ICCSEQ); | ||
1660 | esp_event(esp, ESP_EVENT_STATUS); | ||
1661 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1662 | return 1; | ||
1663 | |||
1664 | case ESP_MOP: | ||
1665 | esp_event(esp, ESP_EVENT_MSGOUT); | ||
1666 | break; | ||
1667 | |||
1668 | case ESP_MIP: | ||
1669 | esp_event(esp, ESP_EVENT_MSGIN); | ||
1670 | break; | ||
1671 | |||
1672 | case ESP_CMDP: | ||
1673 | esp_event(esp, ESP_EVENT_CMD_START); | ||
1674 | break; | ||
1675 | |||
1676 | default: | ||
1677 | printk("ESP: Unexpected phase, sreg=%02x\n", | ||
1678 | esp->sreg); | ||
1679 | esp_schedule_reset(esp); | ||
1680 | return 0; | ||
1681 | } | ||
1682 | goto again; | ||
1683 | break; | ||
1684 | |||
1685 | case ESP_EVENT_DATA_IN: | ||
1686 | write = 1; | ||
1687 | /* fallthru */ | ||
1688 | |||
1689 | case ESP_EVENT_DATA_OUT: { | ||
1690 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1691 | struct scsi_cmnd *cmd = ent->cmd; | ||
1692 | dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd); | ||
1693 | unsigned int dma_len = esp_cur_dma_len(ent, cmd); | ||
1694 | |||
1695 | if (esp->rev == ESP100) | ||
1696 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1697 | |||
1698 | if (write) | ||
1699 | ent->flags |= ESP_CMD_FLAG_WRITE; | ||
1700 | else | ||
1701 | ent->flags &= ~ESP_CMD_FLAG_WRITE; | ||
1702 | |||
1703 | dma_len = esp_dma_length_limit(esp, dma_addr, dma_len); | ||
1704 | esp->data_dma_len = dma_len; | ||
1705 | |||
1706 | if (!dma_len) { | ||
1707 | printk(KERN_ERR PFX "esp%d: DMA length is zero!\n", | ||
1708 | esp->host->unique_id); | ||
1709 | printk(KERN_ERR PFX "esp%d: cur adr[%08x] len[%08x]\n", | ||
1710 | esp->host->unique_id, | ||
1711 | esp_cur_dma_addr(ent, cmd), | ||
1712 | esp_cur_dma_len(ent, cmd)); | ||
1713 | esp_schedule_reset(esp); | ||
1714 | return 0; | ||
1715 | } | ||
1716 | |||
1717 | esp_log_datastart("ESP: start data addr[%08x] len[%u] " | ||
1718 | "write(%d)\n", | ||
1719 | dma_addr, dma_len, write); | ||
1720 | |||
1721 | esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len, | ||
1722 | write, ESP_CMD_DMA | ESP_CMD_TI); | ||
1723 | esp_event(esp, ESP_EVENT_DATA_DONE); | ||
1724 | break; | ||
1725 | } | ||
1726 | case ESP_EVENT_DATA_DONE: { | ||
1727 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1728 | struct scsi_cmnd *cmd = ent->cmd; | ||
1729 | int bytes_sent; | ||
1730 | |||
1731 | if (esp->ops->dma_error(esp)) { | ||
1732 | printk("ESP: data done, DMA error, resetting\n"); | ||
1733 | esp_schedule_reset(esp); | ||
1734 | return 0; | ||
1735 | } | ||
1736 | |||
1737 | if (ent->flags & ESP_CMD_FLAG_WRITE) { | ||
1738 | /* XXX parity errors, etc. XXX */ | ||
1739 | |||
1740 | esp->ops->dma_drain(esp); | ||
1741 | } | ||
1742 | esp->ops->dma_invalidate(esp); | ||
1743 | |||
1744 | if (esp->ireg != ESP_INTR_BSERV) { | ||
1745 | /* We should always see exactly a bus-service | ||
1746 | * interrupt at the end of a successful transfer. | ||
1747 | */ | ||
1748 | printk("ESP: data done, not BSERV, resetting\n"); | ||
1749 | esp_schedule_reset(esp); | ||
1750 | return 0; | ||
1751 | } | ||
1752 | |||
1753 | bytes_sent = esp_data_bytes_sent(esp, ent, cmd); | ||
1754 | |||
1755 | esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n", | ||
1756 | ent->flags, bytes_sent); | ||
1757 | |||
1758 | if (bytes_sent < 0) { | ||
1759 | /* XXX force sync mode for this target XXX */ | ||
1760 | esp_schedule_reset(esp); | ||
1761 | return 0; | ||
1762 | } | ||
1763 | |||
1764 | esp_advance_dma(esp, ent, cmd, bytes_sent); | ||
1765 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1766 | goto again; | ||
1767 | break; | ||
1768 | } | ||
1769 | |||
1770 | case ESP_EVENT_STATUS: { | ||
1771 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1772 | |||
1773 | if (esp->ireg & ESP_INTR_FDONE) { | ||
1774 | ent->status = esp_read8(ESP_FDATA); | ||
1775 | ent->message = esp_read8(ESP_FDATA); | ||
1776 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1777 | } else if (esp->ireg == ESP_INTR_BSERV) { | ||
1778 | ent->status = esp_read8(ESP_FDATA); | ||
1779 | ent->message = 0xff; | ||
1780 | esp_event(esp, ESP_EVENT_MSGIN); | ||
1781 | return 0; | ||
1782 | } | ||
1783 | |||
1784 | if (ent->message != COMMAND_COMPLETE) { | ||
1785 | printk("ESP: Unexpected message %x in status\n", | ||
1786 | ent->message); | ||
1787 | esp_schedule_reset(esp); | ||
1788 | return 0; | ||
1789 | } | ||
1790 | |||
1791 | esp_event(esp, ESP_EVENT_FREE_BUS); | ||
1792 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1793 | break; | ||
1794 | } | ||
1795 | case ESP_EVENT_FREE_BUS: { | ||
1796 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1797 | struct scsi_cmnd *cmd = ent->cmd; | ||
1798 | |||
1799 | if (ent->message == COMMAND_COMPLETE || | ||
1800 | ent->message == DISCONNECT) | ||
1801 | scsi_esp_cmd(esp, ESP_CMD_ESEL); | ||
1802 | |||
1803 | if (ent->message == COMMAND_COMPLETE) { | ||
1804 | esp_log_cmddone("ESP: Command done status[%x] " | ||
1805 | "message[%x]\n", | ||
1806 | ent->status, ent->message); | ||
1807 | if (ent->status == SAM_STAT_TASK_SET_FULL) | ||
1808 | esp_event_queue_full(esp, ent); | ||
1809 | |||
1810 | if (ent->status == SAM_STAT_CHECK_CONDITION && | ||
1811 | !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { | ||
1812 | ent->flags |= ESP_CMD_FLAG_AUTOSENSE; | ||
1813 | esp_autosense(esp, ent); | ||
1814 | } else { | ||
1815 | esp_cmd_is_done(esp, ent, cmd, | ||
1816 | compose_result(ent->status, | ||
1817 | ent->message, | ||
1818 | DID_OK)); | ||
1819 | } | ||
1820 | } else if (ent->message == DISCONNECT) { | ||
1821 | esp_log_disconnect("ESP: Disconnecting tgt[%d] " | ||
1822 | "tag[%x:%x]\n", | ||
1823 | cmd->device->id, | ||
1824 | ent->tag[0], ent->tag[1]); | ||
1825 | |||
1826 | esp->active_cmd = NULL; | ||
1827 | esp_maybe_execute_command(esp); | ||
1828 | } else { | ||
1829 | printk("ESP: Unexpected message %x in freebus\n", | ||
1830 | ent->message); | ||
1831 | esp_schedule_reset(esp); | ||
1832 | return 0; | ||
1833 | } | ||
1834 | if (esp->active_cmd) | ||
1835 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1836 | break; | ||
1837 | } | ||
1838 | case ESP_EVENT_MSGOUT: { | ||
1839 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1840 | |||
1841 | if (esp_debug & ESP_DEBUG_MSGOUT) { | ||
1842 | int i; | ||
1843 | printk("ESP: Sending message [ "); | ||
1844 | for (i = 0; i < esp->msg_out_len; i++) | ||
1845 | printk("%02x ", esp->msg_out[i]); | ||
1846 | printk("]\n"); | ||
1847 | } | ||
1848 | |||
1849 | if (esp->rev == FASHME) { | ||
1850 | int i; | ||
1851 | |||
1852 | /* Always use the fifo. */ | ||
1853 | for (i = 0; i < esp->msg_out_len; i++) { | ||
1854 | esp_write8(esp->msg_out[i], ESP_FDATA); | ||
1855 | esp_write8(0, ESP_FDATA); | ||
1856 | } | ||
1857 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1858 | } else { | ||
1859 | if (esp->msg_out_len == 1) { | ||
1860 | esp_write8(esp->msg_out[0], ESP_FDATA); | ||
1861 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1862 | } else { | ||
1863 | /* Use DMA. */ | ||
1864 | memcpy(esp->command_block, | ||
1865 | esp->msg_out, | ||
1866 | esp->msg_out_len); | ||
1867 | |||
1868 | esp->ops->send_dma_cmd(esp, | ||
1869 | esp->command_block_dma, | ||
1870 | esp->msg_out_len, | ||
1871 | esp->msg_out_len, | ||
1872 | 0, | ||
1873 | ESP_CMD_DMA|ESP_CMD_TI); | ||
1874 | } | ||
1875 | } | ||
1876 | esp_event(esp, ESP_EVENT_MSGOUT_DONE); | ||
1877 | break; | ||
1878 | } | ||
1879 | case ESP_EVENT_MSGOUT_DONE: | ||
1880 | if (esp->rev == FASHME) { | ||
1881 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1882 | } else { | ||
1883 | if (esp->msg_out_len > 1) | ||
1884 | esp->ops->dma_invalidate(esp); | ||
1885 | } | ||
1886 | |||
1887 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
1888 | if (esp->rev != FASHME) | ||
1889 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1890 | } | ||
1891 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1892 | goto again; | ||
1893 | case ESP_EVENT_MSGIN: | ||
1894 | if (esp->ireg & ESP_INTR_BSERV) { | ||
1895 | if (esp->rev == FASHME) { | ||
1896 | if (!(esp_read8(ESP_STATUS2) & | ||
1897 | ESP_STAT2_FEMPTY)) | ||
1898 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1899 | } else { | ||
1900 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1901 | if (esp->rev == ESP100) | ||
1902 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1903 | } | ||
1904 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1905 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1906 | return 1; | ||
1907 | } | ||
1908 | if (esp->ireg & ESP_INTR_FDONE) { | ||
1909 | u8 val; | ||
1910 | |||
1911 | if (esp->rev == FASHME) | ||
1912 | val = esp->fifo[0]; | ||
1913 | else | ||
1914 | val = esp_read8(ESP_FDATA); | ||
1915 | esp->msg_in[esp->msg_in_len++] = val; | ||
1916 | |||
1917 | esp_log_msgin("ESP: Got msgin byte %x\n", val); | ||
1918 | |||
1919 | if (!esp_msgin_process(esp)) | ||
1920 | esp->msg_in_len = 0; | ||
1921 | |||
1922 | if (esp->rev == FASHME) | ||
1923 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1924 | |||
1925 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1926 | |||
1927 | if (esp->event != ESP_EVENT_FREE_BUS) | ||
1928 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1929 | } else { | ||
1930 | printk("ESP: MSGIN neither BSERV not FDON, resetting"); | ||
1931 | esp_schedule_reset(esp); | ||
1932 | return 0; | ||
1933 | } | ||
1934 | break; | ||
1935 | case ESP_EVENT_CMD_START: | ||
1936 | memcpy(esp->command_block, esp->cmd_bytes_ptr, | ||
1937 | esp->cmd_bytes_left); | ||
1938 | if (esp->rev == FASHME) | ||
1939 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1940 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
1941 | esp->cmd_bytes_left, 16, 0, | ||
1942 | ESP_CMD_DMA | ESP_CMD_TI); | ||
1943 | esp_event(esp, ESP_EVENT_CMD_DONE); | ||
1944 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1945 | break; | ||
1946 | case ESP_EVENT_CMD_DONE: | ||
1947 | esp->ops->dma_invalidate(esp); | ||
1948 | if (esp->ireg & ESP_INTR_BSERV) { | ||
1949 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1950 | goto again; | ||
1951 | } | ||
1952 | esp_schedule_reset(esp); | ||
1953 | return 0; | ||
1954 | break; | ||
1955 | |||
1956 | case ESP_EVENT_RESET: | ||
1957 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
1958 | break; | ||
1959 | |||
1960 | default: | ||
1961 | printk("ESP: Unexpected event %x, resetting\n", | ||
1962 | esp->event); | ||
1963 | esp_schedule_reset(esp); | ||
1964 | return 0; | ||
1965 | break; | ||
1966 | } | ||
1967 | return 1; | ||
1968 | } | ||
1969 | |||
1970 | static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent) | ||
1971 | { | ||
1972 | struct scsi_cmnd *cmd = ent->cmd; | ||
1973 | |||
1974 | esp_unmap_dma(esp, cmd); | ||
1975 | esp_free_lun_tag(ent, cmd->device->hostdata); | ||
1976 | cmd->result = DID_RESET << 16; | ||
1977 | |||
1978 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
1979 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
1980 | SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); | ||
1981 | ent->sense_ptr = NULL; | ||
1982 | } | ||
1983 | |||
1984 | cmd->scsi_done(cmd); | ||
1985 | list_del(&ent->list); | ||
1986 | esp_put_ent(esp, ent); | ||
1987 | } | ||
1988 | |||
1989 | static void esp_clear_hold(struct scsi_device *dev, void *data) | ||
1990 | { | ||
1991 | struct esp_lun_data *lp = dev->hostdata; | ||
1992 | |||
1993 | BUG_ON(lp->num_tagged); | ||
1994 | lp->hold = 0; | ||
1995 | } | ||
1996 | |||
1997 | static void esp_reset_cleanup(struct esp *esp) | ||
1998 | { | ||
1999 | struct esp_cmd_entry *ent, *tmp; | ||
2000 | int i; | ||
2001 | |||
2002 | list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) { | ||
2003 | struct scsi_cmnd *cmd = ent->cmd; | ||
2004 | |||
2005 | list_del(&ent->list); | ||
2006 | cmd->result = DID_RESET << 16; | ||
2007 | cmd->scsi_done(cmd); | ||
2008 | esp_put_ent(esp, ent); | ||
2009 | } | ||
2010 | |||
2011 | list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) { | ||
2012 | if (ent == esp->active_cmd) | ||
2013 | esp->active_cmd = NULL; | ||
2014 | esp_reset_cleanup_one(esp, ent); | ||
2015 | } | ||
2016 | |||
2017 | BUG_ON(esp->active_cmd != NULL); | ||
2018 | |||
2019 | /* Force renegotiation of sync/wide transfers. */ | ||
2020 | for (i = 0; i < ESP_MAX_TARGET; i++) { | ||
2021 | struct esp_target_data *tp = &esp->target[i]; | ||
2022 | |||
2023 | tp->esp_period = 0; | ||
2024 | tp->esp_offset = 0; | ||
2025 | tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE | | ||
2026 | ESP_CONFIG3_FSCSI | | ||
2027 | ESP_CONFIG3_FAST); | ||
2028 | tp->flags &= ~ESP_TGT_WIDE; | ||
2029 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2030 | |||
2031 | if (tp->starget) | ||
2032 | starget_for_each_device(tp->starget, NULL, | ||
2033 | esp_clear_hold); | ||
2034 | } | ||
2035 | } | ||
2036 | |||
2037 | /* Runs under host->lock */ | ||
2038 | static void __esp_interrupt(struct esp *esp) | ||
2039 | { | ||
2040 | int finish_reset, intr_done; | ||
2041 | u8 phase; | ||
2042 | |||
2043 | esp->sreg = esp_read8(ESP_STATUS); | ||
2044 | |||
2045 | if (esp->flags & ESP_FLAG_RESETTING) { | ||
2046 | finish_reset = 1; | ||
2047 | } else { | ||
2048 | if (esp_check_gross_error(esp)) | ||
2049 | return; | ||
2050 | |||
2051 | finish_reset = esp_check_spur_intr(esp); | ||
2052 | if (finish_reset < 0) | ||
2053 | return; | ||
2054 | } | ||
2055 | |||
2056 | esp->ireg = esp_read8(ESP_INTRPT); | ||
2057 | |||
2058 | if (esp->ireg & ESP_INTR_SR) | ||
2059 | finish_reset = 1; | ||
2060 | |||
2061 | if (finish_reset) { | ||
2062 | esp_reset_cleanup(esp); | ||
2063 | if (esp->eh_reset) { | ||
2064 | complete(esp->eh_reset); | ||
2065 | esp->eh_reset = NULL; | ||
2066 | } | ||
2067 | return; | ||
2068 | } | ||
2069 | |||
2070 | phase = (esp->sreg & ESP_STAT_PMASK); | ||
2071 | if (esp->rev == FASHME) { | ||
2072 | if (((phase != ESP_DIP && phase != ESP_DOP) && | ||
2073 | esp->select_state == ESP_SELECT_NONE && | ||
2074 | esp->event != ESP_EVENT_STATUS && | ||
2075 | esp->event != ESP_EVENT_DATA_DONE) || | ||
2076 | (esp->ireg & ESP_INTR_RSEL)) { | ||
2077 | esp->sreg2 = esp_read8(ESP_STATUS2); | ||
2078 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2079 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2080 | hme_read_fifo(esp); | ||
2081 | } | ||
2082 | } | ||
2083 | |||
2084 | esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] " | ||
2085 | "sreg2[%02x] ireg[%02x]\n", | ||
2086 | esp->sreg, esp->seqreg, esp->sreg2, esp->ireg); | ||
2087 | |||
2088 | intr_done = 0; | ||
2089 | |||
2090 | if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) { | ||
2091 | printk("ESP: unexpected IREG %02x\n", esp->ireg); | ||
2092 | if (esp->ireg & ESP_INTR_IC) | ||
2093 | esp_dump_cmd_log(esp); | ||
2094 | |||
2095 | esp_schedule_reset(esp); | ||
2096 | } else { | ||
2097 | if (!(esp->ireg & ESP_INTR_RSEL)) { | ||
2098 | /* Some combination of FDONE, BSERV, DC. */ | ||
2099 | if (esp->select_state != ESP_SELECT_NONE) | ||
2100 | intr_done = esp_finish_select(esp); | ||
2101 | } else if (esp->ireg & ESP_INTR_RSEL) { | ||
2102 | if (esp->active_cmd) | ||
2103 | (void) esp_finish_select(esp); | ||
2104 | intr_done = esp_reconnect(esp); | ||
2105 | } | ||
2106 | } | ||
2107 | while (!intr_done) | ||
2108 | intr_done = esp_process_event(esp); | ||
2109 | } | ||
2110 | |||
2111 | irqreturn_t scsi_esp_intr(int irq, void *dev_id) | ||
2112 | { | ||
2113 | struct esp *esp = dev_id; | ||
2114 | unsigned long flags; | ||
2115 | irqreturn_t ret; | ||
2116 | |||
2117 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2118 | ret = IRQ_NONE; | ||
2119 | if (esp->ops->irq_pending(esp)) { | ||
2120 | ret = IRQ_HANDLED; | ||
2121 | for (;;) { | ||
2122 | int i; | ||
2123 | |||
2124 | __esp_interrupt(esp); | ||
2125 | if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK)) | ||
2126 | break; | ||
2127 | esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK; | ||
2128 | |||
2129 | for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { | ||
2130 | if (esp->ops->irq_pending(esp)) | ||
2131 | break; | ||
2132 | } | ||
2133 | if (i == ESP_QUICKIRQ_LIMIT) | ||
2134 | break; | ||
2135 | } | ||
2136 | } | ||
2137 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2138 | |||
2139 | return ret; | ||
2140 | } | ||
2141 | EXPORT_SYMBOL(scsi_esp_intr); | ||
2142 | |||
2143 | static void __devinit esp_get_revision(struct esp *esp) | ||
2144 | { | ||
2145 | u8 val; | ||
2146 | |||
2147 | esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); | ||
2148 | esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); | ||
2149 | esp_write8(esp->config2, ESP_CFG2); | ||
2150 | |||
2151 | val = esp_read8(ESP_CFG2); | ||
2152 | val &= ~ESP_CONFIG2_MAGIC; | ||
2153 | if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { | ||
2154 | /* If what we write to cfg2 does not come back, cfg2 is not | ||
2155 | * implemented; therefore this must be a plain esp100. | ||
2156 | */ | ||
2157 | esp->rev = ESP100; | ||
2158 | } else { | ||
2159 | esp->config2 = 0; | ||
2160 | esp_set_all_config3(esp, 5); | ||
2161 | esp->prev_cfg3 = 5; | ||
2162 | esp_write8(esp->config2, ESP_CFG2); | ||
2163 | esp_write8(0, ESP_CFG3); | ||
2164 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
2165 | |||
2166 | val = esp_read8(ESP_CFG3); | ||
2167 | if (val != 5) { | ||
2168 | /* The cfg2 register is implemented but cfg3 | ||
2169 | * is not, so this must be an esp100a. | ||
2170 | */ | ||
2171 | esp->rev = ESP100A; | ||
2172 | } else { | ||
2173 | esp_set_all_config3(esp, 0); | ||
2174 | esp->prev_cfg3 = 0; | ||
2175 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
2176 | |||
2177 | /* All of cfg{1,2,3} are implemented, so this must be | ||
2178 | * one of the fas variants; figure out which one. | ||
2179 | */ | ||
2180 | if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) { | ||
2181 | esp->rev = FAST; | ||
2182 | esp->sync_defp = SYNC_DEFP_FAST; | ||
2183 | } else { | ||
2184 | esp->rev = ESP236; | ||
2185 | } | ||
2186 | esp->config2 = 0; | ||
2187 | esp_write8(esp->config2, ESP_CFG2); | ||
2188 | } | ||
2189 | } | ||
2190 | } | ||
2191 | |||
2192 | static void __devinit esp_init_swstate(struct esp *esp) | ||
2193 | { | ||
2194 | int i; | ||
2195 | |||
2196 | INIT_LIST_HEAD(&esp->queued_cmds); | ||
2197 | INIT_LIST_HEAD(&esp->active_cmds); | ||
2198 | INIT_LIST_HEAD(&esp->esp_cmd_pool); | ||
2199 | |||
2200 | /* Start with a clear state; domain validation (via ->slave_configure, | ||
2201 | * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged | ||
2202 | * commands. | ||
2203 | */ | ||
2204 | for (i = 0 ; i < ESP_MAX_TARGET; i++) { | ||
2205 | esp->target[i].flags = 0; | ||
2206 | esp->target[i].nego_goal_period = 0; | ||
2207 | esp->target[i].nego_goal_offset = 0; | ||
2208 | esp->target[i].nego_goal_width = 0; | ||
2209 | esp->target[i].nego_goal_tags = 0; | ||
2210 | } | ||
2211 | } | ||
2212 | |||
2213 | /* This places the ESP into a known state at boot time. */ | ||
2214 | static void __devinit esp_bootup_reset(struct esp *esp) | ||
2215 | { | ||
2216 | u8 val; | ||
2217 | |||
2218 | /* Reset the DMA */ | ||
2219 | esp->ops->reset_dma(esp); | ||
2220 | |||
2221 | /* Reset the ESP */ | ||
2222 | esp_reset_esp(esp); | ||
2223 | |||
2224 | /* Reset the SCSI bus, but tell ESP not to generate an irq */ | ||
2225 | val = esp_read8(ESP_CFG1); | ||
2226 | val |= ESP_CONFIG1_SRRDISAB; | ||
2227 | esp_write8(val, ESP_CFG1); | ||
2228 | |||
2229 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
2230 | udelay(400); | ||
2231 | |||
2232 | esp_write8(esp->config1, ESP_CFG1); | ||
2233 | |||
2234 | /* Eat any bitrot in the chip and we are done... */ | ||
2235 | esp_read8(ESP_INTRPT); | ||
2236 | } | ||
2237 | |||
2238 | static void __devinit esp_set_clock_params(struct esp *esp) | ||
2239 | { | ||
2240 | int fmhz; | ||
2241 | u8 ccf; | ||
2242 | |||
2243 | /* This is getting messy but it has to be done correctly or else | ||
2244 | * you get weird behavior all over the place. We are trying to | ||
2245 | * basically figure out three pieces of information. | ||
2246 | * | ||
2247 | * a) Clock Conversion Factor | ||
2248 | * | ||
2249 | * This is a representation of the input crystal clock frequency | ||
2250 | * going into the ESP on this machine. Any operation whose timing | ||
2251 | * is longer than 400ns depends on this value being correct. For | ||
2252 | * example, you'll get blips for arbitration/selection during high | ||
2253 | * load or with multiple targets if this is not set correctly. | ||
2254 | * | ||
2255 | * b) Selection Time-Out | ||
2256 | * | ||
2257 | * The ESP isn't very bright and will arbitrate for the bus and try | ||
2258 | * to select a target forever if you let it. This value tells the | ||
2259 | * ESP when it has taken too long to negotiate and that it should | ||
2260 | * interrupt the CPU so we can see what happened. The value is | ||
2261 | * computed as follows (from NCR/Symbios chip docs). | ||
2262 | * | ||
2263 | * (Time Out Period) * (Input Clock) | ||
2264 | * STO = ---------------------------------- | ||
2265 | * (8192) * (Clock Conversion Factor) | ||
2266 | * | ||
2267 | * We use a time out period of 250ms (ESP_BUS_TIMEOUT). | ||
2268 | * | ||
2269 | * c) Empirical constants for synchronous offset and transfer period | ||
2270 | * register values | ||
2271 | * | ||
2272 | * This entails the smallest and largest sync period we could ever | ||
2273 | * handle on this ESP. | ||
2274 | */ | ||
2275 | fmhz = esp->cfreq; | ||
2276 | |||
2277 | ccf = ((fmhz / 1000000) + 4) / 5; | ||
2278 | if (ccf == 1) | ||
2279 | ccf = 2; | ||
2280 | |||
2281 | /* If we can't find anything reasonable, just assume 20MHZ. | ||
2282 | * This is the clock frequency of the older sun4c's where I've | ||
2283 | * been unable to find the clock-frequency PROM property. All | ||
2284 | * other machines provide useful values it seems. | ||
2285 | */ | ||
2286 | if (fmhz <= 5000000 || ccf < 1 || ccf > 8) { | ||
2287 | fmhz = 20000000; | ||
2288 | ccf = 4; | ||
2289 | } | ||
2290 | |||
2291 | esp->cfact = (ccf == 8 ? 0 : ccf); | ||
2292 | esp->cfreq = fmhz; | ||
2293 | esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz); | ||
2294 | esp->ctick = ESP_TICK(ccf, esp->ccycle); | ||
2295 | esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf); | ||
2296 | esp->sync_defp = SYNC_DEFP_SLOW; | ||
2297 | } | ||
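The arithmetic described in the comment above, worked through once with illustrative numbers (a 20MHz crystal and the 250ms ESP_BUS_TIMEOUT; the actual selection time-out register programming happens elsewhere in the driver):

#include <stdio.h>

/* Standalone sketch: derive the clock conversion factor from the input
 * clock, then the selection time-out value for a 250ms time-out period.
 */
int main(void)
{
	unsigned int fmhz = 20000000;			/* 20MHz input clock */
	unsigned int ccf = ((fmhz / 1000000) + 4) / 5;	/* -> 4 */
	/* STO = (time_out * clock) / (8192 * ccf) with time_out = 0.250s */
	unsigned int sto = (unsigned int)((0.250 * fmhz) / (8192 * ccf));

	printf("ccf=%u sto=%u\n", ccf, sto);		/* ccf=4 sto=152 */
	return 0;
}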
2298 | |||
2299 | static const char *esp_chip_names[] = { | ||
2300 | "ESP100", | ||
2301 | "ESP100A", | ||
2302 | "ESP236", | ||
2303 | "FAS236", | ||
2304 | "FAS100A", | ||
2305 | "FAST", | ||
2306 | "FASHME", | ||
2307 | }; | ||
2308 | |||
2309 | static struct scsi_transport_template *esp_transport_template; | ||
2310 | |||
2311 | int __devinit scsi_esp_register(struct esp *esp, struct device *dev) | ||
2312 | { | ||
2313 | static int instance; | ||
2314 | int err; | ||
2315 | |||
2316 | esp->host->transportt = esp_transport_template; | ||
2317 | esp->host->max_lun = ESP_MAX_LUN; | ||
2318 | esp->host->cmd_per_lun = 2; | ||
2319 | |||
2320 | esp_set_clock_params(esp); | ||
2321 | |||
2322 | esp_get_revision(esp); | ||
2323 | |||
2324 | esp_init_swstate(esp); | ||
2325 | |||
2326 | esp_bootup_reset(esp); | ||
2327 | |||
2328 | printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n", | ||
2329 | esp->host->unique_id, esp->regs, esp->dma_regs, | ||
2330 | esp->host->irq); | ||
2331 | printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n", | ||
2332 | esp->host->unique_id, esp_chip_names[esp->rev], | ||
2333 | esp->cfreq / 1000000, esp->cfact, esp->scsi_id); | ||
2334 | |||
2335 | /* Let the SCSI bus reset settle. */ | ||
2336 | ssleep(esp_bus_reset_settle); | ||
2337 | |||
2338 | err = scsi_add_host(esp->host, dev); | ||
2339 | if (err) | ||
2340 | return err; | ||
2341 | |||
2342 | esp->host->unique_id = instance++; | ||
2343 | |||
2344 | scsi_scan_host(esp->host); | ||
2345 | |||
2346 | return 0; | ||
2347 | } | ||
2348 | EXPORT_SYMBOL(scsi_esp_register); | ||
2349 | |||
2350 | void __devexit scsi_esp_unregister(struct esp *esp) | ||
2351 | { | ||
2352 | scsi_remove_host(esp->host); | ||
2353 | } | ||
2354 | EXPORT_SYMBOL(scsi_esp_unregister); | ||
2355 | |||
2356 | static int esp_slave_alloc(struct scsi_device *dev) | ||
2357 | { | ||
2358 | struct esp *esp = host_to_esp(dev->host); | ||
2359 | struct esp_target_data *tp = &esp->target[dev->id]; | ||
2360 | struct esp_lun_data *lp; | ||
2361 | |||
2362 | lp = kzalloc(sizeof(*lp), GFP_KERNEL); | ||
2363 | if (!lp) | ||
2364 | return -ENOMEM; | ||
2365 | dev->hostdata = lp; | ||
2366 | |||
2367 | tp->starget = dev->sdev_target; | ||
2368 | |||
2369 | spi_min_period(tp->starget) = esp->min_period; | ||
2370 | spi_max_offset(tp->starget) = 15; | ||
2371 | |||
2372 | if (esp->flags & ESP_FLAG_WIDE_CAPABLE) | ||
2373 | spi_max_width(tp->starget) = 1; | ||
2374 | else | ||
2375 | spi_max_width(tp->starget) = 0; | ||
2376 | |||
2377 | return 0; | ||
2378 | } | ||
2379 | |||
2380 | static int esp_slave_configure(struct scsi_device *dev) | ||
2381 | { | ||
2382 | struct esp *esp = host_to_esp(dev->host); | ||
2383 | struct esp_target_data *tp = &esp->target[dev->id]; | ||
2384 | int goal_tags, queue_depth; | ||
2385 | |||
2386 | goal_tags = 0; | ||
2387 | |||
2388 | if (dev->tagged_supported) { | ||
2389 | /* XXX make this configurable somehow XXX */ | ||
2390 | goal_tags = ESP_DEFAULT_TAGS; | ||
2391 | |||
2392 | if (goal_tags > ESP_MAX_TAG) | ||
2393 | goal_tags = ESP_MAX_TAG; | ||
2394 | } | ||
2395 | |||
2396 | queue_depth = goal_tags; | ||
2397 | if (queue_depth < dev->host->cmd_per_lun) | ||
2398 | queue_depth = dev->host->cmd_per_lun; | ||
2399 | |||
2400 | if (goal_tags) { | ||
2401 | scsi_set_tag_type(dev, MSG_ORDERED_TAG); | ||
2402 | scsi_activate_tcq(dev, queue_depth); | ||
2403 | } else { | ||
2404 | scsi_deactivate_tcq(dev, queue_depth); | ||
2405 | } | ||
2406 | tp->flags |= ESP_TGT_DISCONNECT; | ||
2407 | |||
2408 | if (!spi_initial_dv(dev->sdev_target)) | ||
2409 | spi_dv_device(dev); | ||
2410 | |||
2411 | return 0; | ||
2412 | } | ||
2413 | |||
2414 | static void esp_slave_destroy(struct scsi_device *dev) | ||
2415 | { | ||
2416 | struct esp_lun_data *lp = dev->hostdata; | ||
2417 | |||
2418 | kfree(lp); | ||
2419 | dev->hostdata = NULL; | ||
2420 | } | ||
2421 | |||
2422 | static int esp_eh_abort_handler(struct scsi_cmnd *cmd) | ||
2423 | { | ||
2424 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2425 | struct esp_cmd_entry *ent, *tmp; | ||
2426 | struct completion eh_done; | ||
2427 | unsigned long flags; | ||
2428 | |||
2429 | /* XXX This helps a lot with debugging but might be a bit | ||
2430 | * XXX much for the final driver. | ||
2431 | */ | ||
2432 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2433 | printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n", | ||
2434 | esp->host->unique_id, cmd, cmd->cmnd[0]); | ||
2435 | ent = esp->active_cmd; | ||
2436 | if (ent) | ||
2437 | printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n", | ||
2438 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2439 | list_for_each_entry(ent, &esp->queued_cmds, list) { | ||
2440 | printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n", | ||
2441 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2442 | } | ||
2443 | list_for_each_entry(ent, &esp->active_cmds, list) { | ||
2444 | printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n", | ||
2445 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2446 | } | ||
2447 | esp_dump_cmd_log(esp); | ||
2448 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2449 | |||
2450 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2451 | |||
2452 | ent = NULL; | ||
2453 | list_for_each_entry(tmp, &esp->queued_cmds, list) { | ||
2454 | if (tmp->cmd == cmd) { | ||
2455 | ent = tmp; | ||
2456 | break; | ||
2457 | } | ||
2458 | } | ||
2459 | |||
2460 | if (ent) { | ||
2461 | /* Easiest case: we haven't even issued the command | ||
2462 | * yet, so it is trivial to abort. | ||
2463 | */ | ||
2464 | list_del(&ent->list); | ||
2465 | |||
2466 | cmd->result = DID_ABORT << 16; | ||
2467 | cmd->scsi_done(cmd); | ||
2468 | |||
2469 | esp_put_ent(esp, ent); | ||
2470 | |||
2471 | goto out_success; | ||
2472 | } | ||
2473 | |||
2474 | init_completion(&eh_done); | ||
2475 | |||
2476 | ent = esp->active_cmd; | ||
2477 | if (ent && ent->cmd == cmd) { | ||
2478 | /* Command is the currently active command on | ||
2479 | * the bus. If we already have an output message | ||
2480 | * pending, no dice. | ||
2481 | */ | ||
2482 | if (esp->msg_out_len) | ||
2483 | goto out_failure; | ||
2484 | |||
2485 | /* Send out an abort, encouraging the target to | ||
2486 | * go to MSGOUT phase by asserting ATN. | ||
2487 | */ | ||
2488 | esp->msg_out[0] = ABORT_TASK_SET; | ||
2489 | esp->msg_out_len = 1; | ||
2490 | ent->eh_done = &eh_done; | ||
2491 | |||
2492 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
2493 | } else { | ||
2494 | /* The command is disconnected. This is not easy to | ||
2495 | * abort. For now we fail and let the scsi error | ||
2496 | * handling layer go try a scsi bus reset or host | ||
2497 | * reset. | ||
2498 | * | ||
2499 | * What we could do is put together a scsi command | ||
2500 | * solely for the purpose of sending an abort message | ||
2501 | * to the target. Coming up with all the code to | ||
2502 | * cook up scsi commands, special case them everywhere, | ||
2503 | * etc. is for questionable gain and it would be better | ||
2504 | * if the generic scsi error handling layer could do at | ||
2505 | * least some of that for us. | ||
2506 | * | ||
2507 | * Anyway, this is an area for potential future improvement | ||
2508 | * in this driver. | ||
2509 | */ | ||
2510 | goto out_failure; | ||
2511 | } | ||
2512 | |||
2513 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2514 | |||
2515 | if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) { | ||
2516 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2517 | ent->eh_done = NULL; | ||
2518 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2519 | |||
2520 | return FAILED; | ||
2521 | } | ||
2522 | |||
2523 | return SUCCESS; | ||
2524 | |||
2525 | out_success: | ||
2526 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2527 | return SUCCESS; | ||
2528 | |||
2529 | out_failure: | ||
2530 | /* XXX This might be a good location to set ESP_TGT_BROKEN | ||
2531 | * XXX since we know which target/lun in particular is | ||
2532 | * XXX causing trouble. | ||
2533 | */ | ||
2534 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2535 | return FAILED; | ||
2536 | } | ||
2537 | |||
2538 | static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd) | ||
2539 | { | ||
2540 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2541 | struct completion eh_reset; | ||
2542 | unsigned long flags; | ||
2543 | |||
2544 | init_completion(&eh_reset); | ||
2545 | |||
2546 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2547 | |||
2548 | esp->eh_reset = &eh_reset; | ||
2549 | |||
2550 | /* XXX This is too simple... We should add lots of | ||
2551 | * XXX checks here so that if we find that the chip is | ||
2552 | * XXX very wedged we return failure immediately so | ||
2553 | * XXX that we can perform a full chip reset. | ||
2554 | */ | ||
2555 | esp->flags |= ESP_FLAG_RESETTING; | ||
2556 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
2557 | |||
2558 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2559 | |||
2560 | ssleep(esp_bus_reset_settle); | ||
2561 | |||
2562 | if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) { | ||
2563 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2564 | esp->eh_reset = NULL; | ||
2565 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2566 | |||
2567 | return FAILED; | ||
2568 | } | ||
2569 | |||
2570 | return SUCCESS; | ||
2571 | } | ||
2572 | |||
2573 | /* All bets are off, reset the entire device. */ | ||
2574 | static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd) | ||
2575 | { | ||
2576 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2577 | unsigned long flags; | ||
2578 | |||
2579 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2580 | esp_bootup_reset(esp); | ||
2581 | esp_reset_cleanup(esp); | ||
2582 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2583 | |||
2584 | ssleep(esp_bus_reset_settle); | ||
2585 | |||
2586 | return SUCCESS; | ||
2587 | } | ||
2588 | |||
2589 | static const char *esp_info(struct Scsi_Host *host) | ||
2590 | { | ||
2591 | return "esp"; | ||
2592 | } | ||
2593 | |||
2594 | struct scsi_host_template scsi_esp_template = { | ||
2595 | .module = THIS_MODULE, | ||
2596 | .name = "esp", | ||
2597 | .info = esp_info, | ||
2598 | .queuecommand = esp_queuecommand, | ||
2599 | .slave_alloc = esp_slave_alloc, | ||
2600 | .slave_configure = esp_slave_configure, | ||
2601 | .slave_destroy = esp_slave_destroy, | ||
2602 | .eh_abort_handler = esp_eh_abort_handler, | ||
2603 | .eh_bus_reset_handler = esp_eh_bus_reset_handler, | ||
2604 | .eh_host_reset_handler = esp_eh_host_reset_handler, | ||
2605 | .can_queue = 7, | ||
2606 | .this_id = 7, | ||
2607 | .sg_tablesize = SG_ALL, | ||
2608 | .use_clustering = ENABLE_CLUSTERING, | ||
2609 | .max_sectors = 0xffff, | ||
2610 | .skip_settle_delay = 1, | ||
2611 | }; | ||
2612 | EXPORT_SYMBOL(scsi_esp_template); | ||
2613 | |||
2614 | static void esp_get_signalling(struct Scsi_Host *host) | ||
2615 | { | ||
2616 | struct esp *esp = host_to_esp(host); | ||
2617 | enum spi_signal_type type; | ||
2618 | |||
2619 | if (esp->flags & ESP_FLAG_DIFFERENTIAL) | ||
2620 | type = SPI_SIGNAL_HVD; | ||
2621 | else | ||
2622 | type = SPI_SIGNAL_SE; | ||
2623 | |||
2624 | spi_signalling(host) = type; | ||
2625 | } | ||
2626 | |||
2627 | static void esp_set_offset(struct scsi_target *target, int offset) | ||
2628 | { | ||
2629 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2630 | struct esp *esp = host_to_esp(host); | ||
2631 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2632 | |||
2633 | tp->nego_goal_offset = offset; | ||
2634 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2635 | } | ||
2636 | |||
2637 | static void esp_set_period(struct scsi_target *target, int period) | ||
2638 | { | ||
2639 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2640 | struct esp *esp = host_to_esp(host); | ||
2641 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2642 | |||
2643 | tp->nego_goal_period = period; | ||
2644 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2645 | } | ||
2646 | |||
2647 | static void esp_set_width(struct scsi_target *target, int width) | ||
2648 | { | ||
2649 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2650 | struct esp *esp = host_to_esp(host); | ||
2651 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2652 | |||
2653 | tp->nego_goal_width = (width ? 1 : 0); | ||
2654 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2655 | } | ||
2656 | |||
2657 | static struct spi_function_template esp_transport_ops = { | ||
2658 | .set_offset = esp_set_offset, | ||
2659 | .show_offset = 1, | ||
2660 | .set_period = esp_set_period, | ||
2661 | .show_period = 1, | ||
2662 | .set_width = esp_set_width, | ||
2663 | .show_width = 1, | ||
2664 | .get_signalling = esp_get_signalling, | ||
2665 | }; | ||
2666 | |||
2667 | static int __init esp_init(void) | ||
2668 | { | ||
2669 | BUILD_BUG_ON(sizeof(struct scsi_pointer) < | ||
2670 | sizeof(struct esp_cmd_priv)); | ||
2671 | |||
2672 | esp_transport_template = spi_attach_transport(&esp_transport_ops); | ||
2673 | if (!esp_transport_template) | ||
2674 | return -ENODEV; | ||
2675 | |||
2676 | return 0; | ||
2677 | } | ||
2678 | |||
2679 | static void __exit esp_exit(void) | ||
2680 | { | ||
2681 | spi_release_transport(esp_transport_template); | ||
2682 | } | ||
2683 | |||
2684 | MODULE_DESCRIPTION("ESP SCSI driver core"); | ||
2685 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
2686 | MODULE_LICENSE("GPL"); | ||
2687 | MODULE_VERSION(DRV_VERSION); | ||
2688 | |||
2689 | module_param(esp_bus_reset_settle, int, 0); | ||
2690 | MODULE_PARM_DESC(esp_bus_reset_settle, | ||
2691 | "ESP scsi bus reset delay in seconds"); | ||
2692 | |||
2693 | module_param(esp_debug, int, 0); | ||
2694 | MODULE_PARM_DESC(esp_debug, | ||
2695 | "ESP bitmapped debugging message enable value:\n" | ||
2696 | " 0x00000001 Log interrupt events\n" | ||
2697 | " 0x00000002 Log scsi commands\n" | ||
2698 | " 0x00000004 Log resets\n" | ||
2699 | " 0x00000008 Log message in events\n" | ||
2700 | " 0x00000010 Log message out events\n" | ||
2701 | " 0x00000020 Log command completion\n" | ||
2702 | " 0x00000040 Log disconnects\n" | ||
2703 | " 0x00000080 Log data start\n" | ||
2704 | " 0x00000100 Log data done\n" | ||
2705 | " 0x00000200 Log reconnects\n" | ||
2706 | " 0x00000400 Log auto-sense data\n" | ||
2707 | ); | ||
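Since these are plain bitmask values, they can be OR'd together; a small illustrative sketch follows (the variable below is not part of the driver, and the exact module name that accepts the esp_debug parameter depends on how the core is built):

	/* Illustrative only: log interrupt events, scsi commands and resets. */
	unsigned int dbg = 0x00000001	/* interrupt events */
			 | 0x00000002	/* scsi commands    */
			 | 0x00000004;	/* resets           */
	/* dbg == 0x00000007, the value to pass as esp_debug at load time. */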
2708 | |||
2709 | module_init(esp_init); | ||
2710 | module_exit(esp_exit); | ||
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h new file mode 100644 index 000000000000..8d4a6690401f --- /dev/null +++ b/drivers/scsi/esp_scsi.h | |||
@@ -0,0 +1,560 @@ | |||
1 | /* esp_scsi.h: Defines and structures for the ESP driver. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #ifndef _ESP_SCSI_H | ||
7 | #define _ESP_SCSI_H | ||
8 | |||
9 | /* Access Description Offset */ | ||
10 | #define ESP_TCLOW 0x00UL /* rw Low bits transfer count 0x00 */ | ||
11 | #define ESP_TCMED 0x01UL /* rw Mid bits transfer count 0x04 */ | ||
12 | #define ESP_FDATA 0x02UL /* rw FIFO data bits 0x08 */ | ||
13 | #define ESP_CMD 0x03UL /* rw SCSI command bits 0x0c */ | ||
14 | #define ESP_STATUS 0x04UL /* ro ESP status register 0x10 */ | ||
15 | #define ESP_BUSID ESP_STATUS /* wo BusID for sel/resel 0x10 */ | ||
16 | #define ESP_INTRPT 0x05UL /* ro Kind of interrupt 0x14 */ | ||
17 | #define ESP_TIMEO ESP_INTRPT /* wo Timeout for sel/resel 0x14 */ | ||
18 | #define ESP_SSTEP 0x06UL /* ro Sequence step register 0x18 */ | ||
19 | #define ESP_STP ESP_SSTEP /* wo Transfer period/sync 0x18 */ | ||
20 | #define ESP_FFLAGS 0x07UL /* ro Bits current FIFO info 0x1c */ | ||
21 | #define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */ | ||
22 | #define ESP_CFG1 0x08UL /* rw First cfg register 0x20 */ | ||
23 | #define ESP_CFACT 0x09UL /* wo Clock conv factor 0x24 */ | ||
24 | #define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */ | ||
25 | #define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */ | ||
26 | #define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */ | ||
27 | #define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */ | ||
28 | #define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */ | ||
29 | #define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ | ||
30 | #define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ | ||
31 | #define ESP_FGRND 0x0fUL /* rw Data base for fifo 0x3c */ | ||
32 | #define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */ | ||
33 | |||
34 | #define SBUS_ESP_REG_SIZE 0x40UL | ||
35 | |||
36 | /* Bitfield meanings for the above registers. */ | ||
37 | |||
38 | /* ESP config reg 1, read-write, found on all ESP chips */ | ||
39 | #define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */ | ||
40 | #define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */ | ||
41 | #define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */ | ||
42 | #define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */ | ||
43 | #define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */ | ||
44 | #define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */ | ||
45 | |||
46 | /* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */ | ||
47 | #define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */ | ||
48 | #define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */ | ||
49 | #define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */ | ||
50 | #define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tgtmode) */ | ||
51 | #define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */ | ||
52 | #define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */ | ||
53 | #define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */ | ||
54 | #define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */ | ||
55 | #define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,216) */ | ||
56 | #define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (236) */ | ||
57 | #define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */ | ||
58 | #define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */ | ||
59 | #define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */ | ||
60 | |||
61 | /* ESP config register 3 read-write, found only on esp236+fas236+fas100a+hme chips */ | ||
62 | #define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */ | ||
63 | #define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */ | ||
64 | #define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */ | ||
65 | #define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */ | ||
66 | #define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */ | ||
67 | #define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */ | ||
68 | #define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */ | ||
69 | #define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */ | ||
70 | #define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */ | ||
71 | #define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */ | ||
72 | #define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */ | ||
73 | #define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */ | ||
74 | #define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */ | ||
75 | #define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */ | ||
76 | #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ | ||
77 | #define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ | ||
78 | |||
79 | /* ESP command register read-write */ | ||
80 | /* Group 1 commands: These may be sent at any point in time to the ESP | ||
81 | * chip. None of them can generate interrupts 'cept | ||
82 | * the "SCSI bus reset" command if you have not disabled | ||
83 | * SCSI reset interrupts in the config1 ESP register. | ||
84 | */ | ||
85 | #define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */ | ||
86 | #define ESP_CMD_FLUSH 0x01 /* FIFO Flush */ | ||
87 | #define ESP_CMD_RC 0x02 /* Chip reset */ | ||
88 | #define ESP_CMD_RS 0x03 /* SCSI bus reset */ | ||
89 | |||
90 | /* Group 2 commands: ESP must be an initiator and connected to a target | ||
91 | * for these commands to work. | ||
92 | */ | ||
93 | #define ESP_CMD_TI 0x10 /* Transfer Information */ | ||
94 | #define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */ | ||
95 | #define ESP_CMD_MOK 0x12 /* Message okie-dokie */ | ||
96 | #define ESP_CMD_TPAD 0x18 /* Transfer Pad */ | ||
97 | #define ESP_CMD_SATN 0x1a /* Set ATN */ | ||
98 | #define ESP_CMD_RATN 0x1b /* De-assert ATN */ | ||
99 | |||
100 | /* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected | ||
101 | * to a target as the initiator for these commands to work. | ||
102 | */ | ||
103 | #define ESP_CMD_SMSG 0x20 /* Send message */ | ||
104 | #define ESP_CMD_SSTAT 0x21 /* Send status */ | ||
105 | #define ESP_CMD_SDATA 0x22 /* Send data */ | ||
106 | #define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */ | ||
107 | #define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */ | ||
108 | #define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */ | ||
109 | #define ESP_CMD_DCNCT 0x27 /* Disconnect */ | ||
110 | #define ESP_CMD_RMSG 0x28 /* Receive Message */ | ||
111 | #define ESP_CMD_RCMD 0x29 /* Receive Command */ | ||
112 | #define ESP_CMD_RDATA 0x2a /* Receive Data */ | ||
113 | #define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */ | ||
114 | |||
115 | /* Group 4 commands: The ESP must be in the disconnected state and must | ||
116 | * not be connected to any targets as initiator for | ||
117 | * these commands to work. | ||
118 | */ | ||
119 | #define ESP_CMD_RSEL 0x40 /* Reselect */ | ||
120 | #define ESP_CMD_SEL 0x41 /* Select w/o ATN */ | ||
121 | #define ESP_CMD_SELA 0x42 /* Select w/ATN */ | ||
122 | #define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */ | ||
123 | #define ESP_CMD_ESEL 0x44 /* Enable selection */ | ||
124 | #define ESP_CMD_DSEL 0x45 /* Disable selections */ | ||
125 | #define ESP_CMD_SA3 0x46 /* Select w/ATN3 */ | ||
126 | #define ESP_CMD_RSEL3 0x47 /* Reselect3 */ | ||
127 | |||
128 | /* This bit enables the ESP's DMA on the SBus */ | ||
129 | #define ESP_CMD_DMA 0x80 /* Do DMA? */ | ||
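As a hedged illustration of how this bit is meant to be combined with the opcodes above (the real call sites are in esp_scsi.c and are not quoted here):

	/* Sketch: a DMA-driven Transfer Information command, as a front-end's
	 * send_dma_cmd() hook would receive it in 'cmd'; it must carry
	 * ESP_CMD_DMA (see the BUG_ON in sun_esp.c further below).
	 */
	u8 cmd = ESP_CMD_TI | ESP_CMD_DMA;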
130 | |||
131 | /* ESP status register read-only */ | ||
132 | #define ESP_STAT_PIO 0x01 /* IO phase bit */ | ||
133 | #define ESP_STAT_PCD 0x02 /* CD phase bit */ | ||
134 | #define ESP_STAT_PMSG 0x04 /* MSG phase bit */ | ||
135 | #define ESP_STAT_PMASK 0x07 /* Mask of phase bits */ | ||
136 | #define ESP_STAT_TDONE 0x08 /* Transfer Completed */ | ||
137 | #define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */ | ||
138 | #define ESP_STAT_PERR 0x20 /* Parity error */ | ||
139 | #define ESP_STAT_SPAM 0x40 /* Real bad error */ | ||
140 | /* This indicates the 'interrupt pending' condition on esp236, it is a reserved | ||
141 | * bit on other revs of the ESP. | ||
142 | */ | ||
143 | #define ESP_STAT_INTR 0x80 /* Interrupt */ | ||
144 | |||
145 | /* The status register can be masked with ESP_STAT_PMASK and compared | ||
146 | * with the following values to determine the current phase the ESP | ||
147 | * (at least thinks it) is in. For our purposes we also add our own | ||
148 | * software 'done' bit for our phase management engine. | ||
149 | */ | ||
150 | #define ESP_DOP (0) /* Data Out */ | ||
151 | #define ESP_DIP (ESP_STAT_PIO) /* Data In */ | ||
152 | #define ESP_CMDP (ESP_STAT_PCD) /* Command */ | ||
153 | #define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */ | ||
154 | #define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */ | ||
155 | #define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */ | ||
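A minimal sketch of the intended use, not a quote from the driver: mask the latched status register with ESP_STAT_PMASK and compare against the phase constants above.

	switch (esp->sreg & ESP_STAT_PMASK) {
	case ESP_DOP:	/* data out phase */
		break;
	case ESP_DIP:	/* data in phase */
		break;
	case ESP_CMDP:	/* command phase */
		break;
	case ESP_STATP:	/* status phase */
		break;
	case ESP_MOP:	/* message out phase */
		break;
	case ESP_MIP:	/* message in phase */
		break;
	}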
156 | |||
157 | /* HME only: status 2 register */ | ||
158 | #define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */ | ||
159 | #define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */ | ||
160 | #define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */ | ||
161 | #define ESP_STAT2_CREGA 0x08 /* The command reg is active now */ | ||
162 | #define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */ | ||
163 | #define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */ | ||
164 | #define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */ | ||
165 | #define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */ | ||
166 | |||
167 | /* ESP interrupt register read-only */ | ||
168 | #define ESP_INTR_S 0x01 /* Select w/o ATN */ | ||
169 | #define ESP_INTR_SATN 0x02 /* Select w/ATN */ | ||
170 | #define ESP_INTR_RSEL 0x04 /* Reselected */ | ||
171 | #define ESP_INTR_FDONE 0x08 /* Function done */ | ||
172 | #define ESP_INTR_BSERV 0x10 /* Bus service */ | ||
173 | #define ESP_INTR_DC 0x20 /* Disconnect */ | ||
174 | #define ESP_INTR_IC 0x40 /* Illegal command given */ | ||
175 | #define ESP_INTR_SR 0x80 /* SCSI bus reset detected */ | ||
176 | |||
177 | /* ESP sequence step register read-only */ | ||
178 | #define ESP_STEP_VBITS 0x07 /* Valid bits */ | ||
179 | #define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */ | ||
180 | #define ESP_STEP_SID 0x01 /* One msg byte sent */ | ||
181 | #define ESP_STEP_NCMD 0x02 /* Was not in command phase */ | ||
182 | #define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd | ||
183 | * bytes to be lost | ||
184 | */ | ||
185 | #define ESP_STEP_FINI4 0x04 /* Command was sent ok */ | ||
186 | |||
187 | /* Ho hum, some ESP's set the step register to this as well... */ | ||
188 | #define ESP_STEP_FINI5 0x05 | ||
189 | #define ESP_STEP_FINI6 0x06 | ||
190 | #define ESP_STEP_FINI7 0x07 | ||
191 | |||
192 | /* ESP chip-test register read-write */ | ||
193 | #define ESP_TEST_TARG 0x01 /* Target test mode */ | ||
194 | #define ESP_TEST_INI 0x02 /* Initiator test mode */ | ||
195 | #define ESP_TEST_TS 0x04 /* Tristate test mode */ | ||
196 | |||
197 | /* ESP unique ID register read-only, found on fas236+fas100a only */ | ||
198 | #define ESP_UID_F100A 0x00 /* ESP FAS100A */ | ||
199 | #define ESP_UID_F236 0x02 /* ESP FAS236 */ | ||
200 | #define ESP_UID_REV 0x07 /* ESP revision */ | ||
201 | #define ESP_UID_FAM 0xf8 /* ESP family */ | ||
202 | |||
203 | /* ESP fifo flags register read-only */ | ||
204 | /* Note that the following implies a 16 byte FIFO on the ESP. */ | ||
205 | #define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */ | ||
206 | #define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */ | ||
207 | #define ESP_FF_SSTEP 0xe0 /* Sequence step */ | ||
208 | |||
209 | /* ESP clock conversion factor register write-only */ | ||
210 | #define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */ | ||
211 | #define ESP_CCF_NEVER 0x01 /* Set it to this and die */ | ||
212 | #define ESP_CCF_F2 0x02 /* 10MHz */ | ||
213 | #define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */ | ||
214 | #define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */ | ||
215 | #define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */ | ||
216 | #define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */ | ||
217 | #define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */ | ||
218 | |||
219 | /* HME only... */ | ||
220 | #define ESP_BUSID_RESELID 0x10 | ||
221 | #define ESP_BUSID_CTR32BIT 0x40 | ||
222 | |||
223 | #define ESP_BUS_TIMEOUT 250 /* In milli-seconds */ | ||
224 | #define ESP_TIMEO_CONST 8192 | ||
225 | #define ESP_NEG_DEFP(mhz, cfact) \ | ||
226 | ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) | ||
227 | #define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000)) | ||
228 | #define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) | ||
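A worked example of the macros above, assuming the 'mhz'/'mhertz' arguments are raw clock rates in Hz as probed from the "clock-frequency" property (see sun_esp.c below):

	/* For a 25 MHz input clock and a clock conversion factor of 5:
	 *   ESP_MHZ_TO_CYCLE(25000000) = 1000000000 / 25000      = 40000
	 *   ESP_NEG_DEFP(25000000, 5)  = (250 * 25000) / (8192*5) = 152
	 * The ESP_NEG_DEFP result is presumably the selection timeout value
	 * that gets programmed into ESP_TIMEO.
	 */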
229 | |||
230 | /* For slow to medium speed input clock rates we shoot for 5mb/s, but for high | ||
231 | * input clock rates we try to do 10mb/s although I don't think a transfer can | ||
232 | * even run that fast with an ESP even with DMA2 scatter gather pipelining. | ||
233 | */ | ||
234 | #define SYNC_DEFP_SLOW 0x32 /* 5mb/s */ | ||
235 | #define SYNC_DEFP_FAST 0x19 /* 10mb/s */ | ||
236 | |||
237 | struct esp_cmd_priv { | ||
238 | union { | ||
239 | dma_addr_t dma_addr; | ||
240 | int num_sg; | ||
241 | } u; | ||
242 | |||
243 | unsigned int cur_residue; | ||
244 | struct scatterlist *cur_sg; | ||
245 | unsigned int tot_residue; | ||
246 | }; | ||
247 | #define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp)) | ||
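A hedged illustration of the intent behind this macro: the per-command DMA bookkeeping is overlaid on the scsi_cmnd's SCp area, which is why esp_init() in esp_scsi.c above carries the BUILD_BUG_ON size check. Given a struct scsi_cmnd *cmd:

	/* Sketch only, not quoted from the core. */
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	unsigned int bytes_left = spriv->cur_residue;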
248 | |||
249 | enum esp_rev { | ||
250 | ESP100 = 0x00, /* NCR53C90 - very broken */ | ||
251 | ESP100A = 0x01, /* NCR53C90A */ | ||
252 | ESP236 = 0x02, | ||
253 | FAS236 = 0x03, | ||
254 | FAS100A = 0x04, | ||
255 | FAST = 0x05, | ||
256 | FASHME = 0x06, | ||
257 | }; | ||
258 | |||
259 | struct esp_cmd_entry { | ||
260 | struct list_head list; | ||
261 | |||
262 | struct scsi_cmnd *cmd; | ||
263 | |||
264 | unsigned int saved_cur_residue; | ||
265 | struct scatterlist *saved_cur_sg; | ||
266 | unsigned int saved_tot_residue; | ||
267 | |||
268 | u8 flags; | ||
269 | #define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */ | ||
270 | #define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */ | ||
271 | #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ | ||
272 | |||
273 | u8 tag[2]; | ||
274 | |||
275 | u8 status; | ||
276 | u8 message; | ||
277 | |||
278 | unsigned char *sense_ptr; | ||
279 | unsigned char *saved_sense_ptr; | ||
280 | dma_addr_t sense_dma; | ||
281 | |||
282 | struct completion *eh_done; | ||
283 | }; | ||
284 | |||
285 | /* XXX make this configurable somehow XXX */ | ||
286 | #define ESP_DEFAULT_TAGS 16 | ||
287 | |||
288 | #define ESP_MAX_TARGET 16 | ||
289 | #define ESP_MAX_LUN 8 | ||
290 | #define ESP_MAX_TAG 256 | ||
291 | |||
292 | struct esp_lun_data { | ||
293 | struct esp_cmd_entry *non_tagged_cmd; | ||
294 | int num_tagged; | ||
295 | int hold; | ||
296 | struct esp_cmd_entry *tagged_cmds[ESP_MAX_TAG]; | ||
297 | }; | ||
298 | |||
299 | struct esp_target_data { | ||
300 | /* These are the ESP_STP, ESP_SOFF, and ESP_CFG3 register values which | ||
301 | * match the currently negotiated settings for this target. The SCSI | ||
302 | * protocol values are maintained in spi_{offset,period,wide}(starget). | ||
303 | */ | ||
304 | u8 esp_period; | ||
305 | u8 esp_offset; | ||
306 | u8 esp_config3; | ||
307 | |||
308 | u8 flags; | ||
309 | #define ESP_TGT_WIDE 0x01 | ||
310 | #define ESP_TGT_DISCONNECT 0x02 | ||
311 | #define ESP_TGT_NEGO_WIDE 0x04 | ||
312 | #define ESP_TGT_NEGO_SYNC 0x08 | ||
313 | #define ESP_TGT_CHECK_NEGO 0x40 | ||
314 | #define ESP_TGT_BROKEN 0x80 | ||
315 | |||
316 | /* When ESP_TGT_CHECK_NEGO is set, on the next scsi command to this | ||
317 | * device we will try to negotiate the following parameters. | ||
318 | */ | ||
319 | u8 nego_goal_period; | ||
320 | u8 nego_goal_offset; | ||
321 | u8 nego_goal_width; | ||
322 | u8 nego_goal_tags; | ||
323 | |||
324 | struct scsi_target *starget; | ||
325 | }; | ||
326 | |||
327 | struct esp_event_ent { | ||
328 | u8 type; | ||
329 | #define ESP_EVENT_TYPE_EVENT 0x01 | ||
330 | #define ESP_EVENT_TYPE_CMD 0x02 | ||
331 | u8 val; | ||
332 | |||
333 | u8 sreg; | ||
334 | u8 seqreg; | ||
335 | u8 sreg2; | ||
336 | u8 ireg; | ||
337 | u8 select_state; | ||
338 | u8 event; | ||
339 | u8 __pad; | ||
340 | }; | ||
341 | |||
342 | struct esp; | ||
343 | struct esp_driver_ops { | ||
344 | /* Read and write the ESP 8-bit registers. On some | ||
345 | * applications of the ESP chip the registers are at 4-byte | ||
346 | * instead of 1-byte intervals. | ||
347 | */ | ||
348 | void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg); | ||
349 | u8 (*esp_read8)(struct esp *esp, unsigned long reg); | ||
350 | |||
351 | /* Map and unmap DMA memory. Eventually the driver will be | ||
352 | * converted to the generic DMA API as soon as SBUS is able to | ||
353 | * cope with that. At such time we can remove this. | ||
354 | */ | ||
355 | dma_addr_t (*map_single)(struct esp *esp, void *buf, | ||
356 | size_t sz, int dir); | ||
357 | int (*map_sg)(struct esp *esp, struct scatterlist *sg, | ||
358 | int num_sg, int dir); | ||
359 | void (*unmap_single)(struct esp *esp, dma_addr_t addr, | ||
360 | size_t sz, int dir); | ||
361 | void (*unmap_sg)(struct esp *esp, struct scatterlist *sg, | ||
362 | int num_sg, int dir); | ||
363 | |||
364 | /* Return non-zero if there is an IRQ pending. Usually this | ||
365 | * status bit lives in the DMA controller sitting in front of | ||
366 | * the ESP. This has to be accurate or else the ESP interrupt | ||
367 | * handler will not run. | ||
368 | */ | ||
369 | int (*irq_pending)(struct esp *esp); | ||
370 | |||
371 | /* Reset the DMA engine entirely. On return, ESP interrupts | ||
372 | * should be enabled. Often the interrupt enabling is | ||
373 | * controlled in the DMA engine. | ||
374 | */ | ||
375 | void (*reset_dma)(struct esp *esp); | ||
376 | |||
377 | /* Drain any pending DMA in the DMA engine after a transfer. | ||
378 | * This is for writes to memory. | ||
379 | */ | ||
380 | void (*dma_drain)(struct esp *esp); | ||
381 | |||
382 | /* Invalidate the DMA engine after a DMA transfer. */ | ||
383 | void (*dma_invalidate)(struct esp *esp); | ||
384 | |||
385 | /* Setup an ESP command that will use a DMA transfer. | ||
386 | * The 'esp_count' specifies what transfer length should be | ||
387 | * programmed into the ESP transfer counter registers, whereas | ||
388 | * the 'dma_count' is the length that should be programmed into | ||
389 | * the DMA controller. Usually they are the same. If 'write' | ||
390 | * is non-zero, this transfer is a write into memory. 'cmd' | ||
391 | * holds the ESP command that should be issued by calling | ||
392 | * scsi_esp_cmd() at the appropriate time while programming | ||
393 | * the DMA hardware. | ||
394 | */ | ||
395 | void (*send_dma_cmd)(struct esp *esp, u32 dma_addr, u32 esp_count, | ||
396 | u32 dma_count, int write, u8 cmd); | ||
397 | |||
398 | /* Return non-zero if the DMA engine is reporting an error | ||
399 | * currently. | ||
400 | */ | ||
401 | int (*dma_error)(struct esp *esp); | ||
402 | }; | ||
403 | |||
404 | #define ESP_MAX_MSG_SZ 8 | ||
405 | #define ESP_EVENT_LOG_SZ 32 | ||
406 | |||
407 | #define ESP_QUICKIRQ_LIMIT 100 | ||
408 | #define ESP_RESELECT_TAG_LIMIT 2500 | ||
409 | |||
410 | struct esp { | ||
411 | void __iomem *regs; | ||
412 | void __iomem *dma_regs; | ||
413 | |||
414 | const struct esp_driver_ops *ops; | ||
415 | |||
416 | struct Scsi_Host *host; | ||
417 | void *dev; | ||
418 | |||
419 | struct esp_cmd_entry *active_cmd; | ||
420 | |||
421 | struct list_head queued_cmds; | ||
422 | struct list_head active_cmds; | ||
423 | |||
424 | u8 *command_block; | ||
425 | dma_addr_t command_block_dma; | ||
426 | |||
427 | unsigned int data_dma_len; | ||
428 | |||
429 | /* The following are used to determine the cause of an IRQ. Upon every | ||
430 | * IRQ entry we synchronize these with the hardware registers. | ||
431 | */ | ||
432 | u8 sreg; | ||
433 | u8 seqreg; | ||
434 | u8 sreg2; | ||
435 | u8 ireg; | ||
436 | |||
437 | u32 prev_hme_dmacsr; | ||
438 | u8 prev_soff; | ||
439 | u8 prev_stp; | ||
440 | u8 prev_cfg3; | ||
441 | u8 __pad; | ||
442 | |||
443 | struct list_head esp_cmd_pool; | ||
444 | |||
445 | struct esp_target_data target[ESP_MAX_TARGET]; | ||
446 | |||
447 | int fifo_cnt; | ||
448 | u8 fifo[16]; | ||
449 | |||
450 | struct esp_event_ent esp_event_log[ESP_EVENT_LOG_SZ]; | ||
451 | int esp_event_cur; | ||
452 | |||
453 | u8 msg_out[ESP_MAX_MSG_SZ]; | ||
454 | int msg_out_len; | ||
455 | |||
456 | u8 msg_in[ESP_MAX_MSG_SZ]; | ||
457 | int msg_in_len; | ||
458 | |||
459 | u8 bursts; | ||
460 | u8 config1; | ||
461 | u8 config2; | ||
462 | |||
463 | u8 scsi_id; | ||
464 | u32 scsi_id_mask; | ||
465 | |||
466 | enum esp_rev rev; | ||
467 | |||
468 | u32 flags; | ||
469 | #define ESP_FLAG_DIFFERENTIAL 0x00000001 | ||
470 | #define ESP_FLAG_RESETTING 0x00000002 | ||
471 | #define ESP_FLAG_DOING_SLOWCMD 0x00000004 | ||
472 | #define ESP_FLAG_WIDE_CAPABLE 0x00000008 | ||
473 | #define ESP_FLAG_QUICKIRQ_CHECK 0x00000010 | ||
474 | |||
475 | u8 select_state; | ||
476 | #define ESP_SELECT_NONE 0x00 /* Not selecting */ | ||
477 | #define ESP_SELECT_BASIC 0x01 /* Select w/o MSGOUT phase */ | ||
478 | #define ESP_SELECT_MSGOUT 0x02 /* Select with MSGOUT */ | ||
479 | |||
480 | /* When we are not selecting, we are expecting an event. */ | ||
481 | u8 event; | ||
482 | #define ESP_EVENT_NONE 0x00 | ||
483 | #define ESP_EVENT_CMD_START 0x01 | ||
484 | #define ESP_EVENT_CMD_DONE 0x02 | ||
485 | #define ESP_EVENT_DATA_IN 0x03 | ||
486 | #define ESP_EVENT_DATA_OUT 0x04 | ||
487 | #define ESP_EVENT_DATA_DONE 0x05 | ||
488 | #define ESP_EVENT_MSGIN 0x06 | ||
489 | #define ESP_EVENT_MSGIN_MORE 0x07 | ||
490 | #define ESP_EVENT_MSGIN_DONE 0x08 | ||
491 | #define ESP_EVENT_MSGOUT 0x09 | ||
492 | #define ESP_EVENT_MSGOUT_DONE 0x0a | ||
493 | #define ESP_EVENT_STATUS 0x0b | ||
494 | #define ESP_EVENT_FREE_BUS 0x0c | ||
495 | #define ESP_EVENT_CHECK_PHASE 0x0d | ||
496 | #define ESP_EVENT_RESET 0x10 | ||
497 | |||
498 | /* Probed in esp_get_clock_params() */ | ||
499 | u32 cfact; | ||
500 | u32 cfreq; | ||
501 | u32 ccycle; | ||
502 | u32 ctick; | ||
503 | u32 neg_defp; | ||
504 | u32 sync_defp; | ||
505 | |||
506 | /* Computed in esp_reset_esp() */ | ||
507 | u32 max_period; | ||
508 | u32 min_period; | ||
509 | u32 radelay; | ||
510 | |||
511 | /* Slow command state. */ | ||
512 | u8 *cmd_bytes_ptr; | ||
513 | int cmd_bytes_left; | ||
514 | |||
515 | struct completion *eh_reset; | ||
516 | |||
517 | struct sbus_dma *dma; | ||
518 | }; | ||
519 | |||
520 | #define host_to_esp(host) ((struct esp *)(host)->hostdata) | ||
521 | |||
522 | /* A front-end driver for the ESP chip should do the following in | ||
523 | * its device probe routine (a hedged example sketch follows after this header): | ||
524 | * 1) Allocate the host and private area using scsi_host_alloc() | ||
525 | * with size 'sizeof(struct esp)'. The first argument to | ||
526 | * scsi_host_alloc() should be &scsi_esp_template. | ||
527 | * 2) Set host->max_id as appropriate. | ||
528 | * 3) Set esp->host to the scsi_host itself, and esp->dev | ||
529 | * to the device object pointer. | ||
530 | * 4) Hook up esp->ops to the front-end implementation. | ||
531 | * 5) If the ESP chip supports wide transfers, set ESP_FLAG_WIDE_CAPABLE | ||
532 | * in esp->flags. | ||
533 | * 6) Map the DMA and ESP chip registers. | ||
534 | * 7) DMA map the ESP command block, store the DMA address | ||
535 | * in esp->command_block_dma. | ||
536 | * 8) Register the scsi_esp_intr() interrupt handler. | ||
537 | * 9) Probe for and provide the following chip properties: | ||
538 | * esp->scsi_id (assign to esp->host->this_id too) | ||
539 | * esp->scsi_id_mask | ||
540 | * If ESP bus is differential, set ESP_FLAG_DIFFERENTIAL | ||
541 | * esp->cfreq | ||
542 | * DMA burst bit mask in esp->bursts, if necessary | ||
543 | * 10) Perform any actions necessary before the ESP device can | ||
544 | * be programmed for the first time. On some configs, for | ||
545 | * example, the DMA engine has to be reset before ESP can | ||
546 | * be programmed. | ||
547 | * 11) If necessary, call dev_set_drvdata() as needed. | ||
548 | * 12) Call scsi_esp_register() with prepared 'esp' structure | ||
549 | * and a device pointer if possible. | ||
550 | * 13) Check scsi_esp_register() return value, release all resources | ||
551 | * if an error was returned. | ||
552 | */ | ||
553 | extern struct scsi_host_template scsi_esp_template; | ||
554 | extern int scsi_esp_register(struct esp *, struct device *); | ||
555 | |||
556 | extern void scsi_esp_unregister(struct esp *); | ||
557 | extern irqreturn_t scsi_esp_intr(int, void *); | ||
558 | extern void scsi_esp_cmd(struct esp *, u8); | ||
559 | |||
560 | #endif /* !(_ESP_SCSI_H) */ | ||
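To make the registration steps above concrete, here is a minimal, purely hypothetical front-end probe sketch; struct my_platform_dev and the example_* helpers are placeholders that do not exist anywhere, and sun_esp.c further below is the real reference implementation.

	#include <linux/interrupt.h>
	#include <scsi/scsi_host.h>
	#include "esp_scsi.h"

	static int example_esp_probe(struct my_platform_dev *pdev)
	{
		struct Scsi_Host *host;
		struct esp *esp;
		int err;

		host = scsi_host_alloc(&scsi_esp_template, sizeof(struct esp)); /* step 1 */
		if (!host)
			return -ENOMEM;
		host->max_id = 8;					/* step 2 */

		esp = host_to_esp(host);
		esp->host = host;					/* step 3 */
		esp->dev = pdev;
		esp->ops = &example_esp_ops;				/* step 4 */

		esp->regs = example_map_registers(pdev);		/* step 6 */
		esp->command_block =
			example_alloc_block(pdev, &esp->command_block_dma); /* step 7 */

		err = request_irq(pdev->irq, scsi_esp_intr, IRQF_SHARED,
				  "ESP", esp);				/* step 8 */
		if (err)
			goto out_put;

		esp->scsi_id = 7;					/* step 9 */
		esp->host->this_id = esp->scsi_id;
		esp->scsi_id_mask = 1 << esp->scsi_id;
		esp->cfreq = 25000000;

		err = scsi_esp_register(esp, &pdev->dev);		/* steps 12-13 */
		if (err)
			goto out_free_irq;
		return 0;

	out_free_irq:
		free_irq(pdev->irq, esp);
	out_put:
		scsi_host_put(host);
		return err;
	}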
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 9f10689905a8..c4195ea869e9 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -1403,7 +1403,7 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi | |||
1403 | struct scsi_host_template *tpnt = match->data; | 1403 | struct scsi_host_template *tpnt = match->data; |
1404 | struct Scsi_Host *host; | 1404 | struct Scsi_Host *host; |
1405 | struct qlogicpti *qpti; | 1405 | struct qlogicpti *qpti; |
1406 | char *fcode; | 1406 | const char *fcode; |
1407 | 1407 | ||
1408 | /* Sometimes Antares cards come up not completely | 1408 | /* Sometimes Antares cards come up not completely |
1409 | * setup, and we get a report of a zero IRQ. | 1409 | * setup, and we get a report of a zero IRQ. |
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index 1b59b27e887f..4bf9aa547c78 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c | |||
@@ -50,7 +50,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb) | |||
50 | while (skb->len >= NLMSG_SPACE(0)) { | 50 | while (skb->len >= NLMSG_SPACE(0)) { |
51 | err = 0; | 51 | err = 0; |
52 | 52 | ||
53 | nlh = (struct nlmsghdr *) skb->data; | 53 | nlh = nlmsg_hdr(skb); |
54 | if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || | 54 | if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || |
55 | (skb->len < nlh->nlmsg_len)) { | 55 | (skb->len < nlh->nlmsg_len)) { |
56 | printk(KERN_WARNING "%s: discarding partial skb\n", | 56 | printk(KERN_WARNING "%s: discarding partial skb\n", |
@@ -168,7 +168,8 @@ scsi_netlink_init(void) | |||
168 | } | 168 | } |
169 | 169 | ||
170 | scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT, | 170 | scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT, |
171 | SCSI_NL_GRP_CNT, scsi_nl_rcv, THIS_MODULE); | 171 | SCSI_NL_GRP_CNT, scsi_nl_rcv, NULL, |
172 | THIS_MODULE); | ||
172 | if (!scsi_nl_sock) { | 173 | if (!scsi_nl_sock) { |
173 | printk(KERN_ERR "%s: register of recieve handler failed\n", | 174 | printk(KERN_ERR "%s: register of recieve handler failed\n", |
174 | __FUNCTION__); | 175 | __FUNCTION__); |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index ce0d14af33c8..aabaa0576ab4 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -1081,7 +1081,7 @@ iscsi_if_rx(struct sock *sk, int len) | |||
1081 | struct nlmsghdr *nlh; | 1081 | struct nlmsghdr *nlh; |
1082 | struct iscsi_uevent *ev; | 1082 | struct iscsi_uevent *ev; |
1083 | 1083 | ||
1084 | nlh = (struct nlmsghdr *)skb->data; | 1084 | nlh = nlmsg_hdr(skb); |
1085 | if (nlh->nlmsg_len < sizeof(*nlh) || | 1085 | if (nlh->nlmsg_len < sizeof(*nlh) || |
1086 | skb->len < nlh->nlmsg_len) { | 1086 | skb->len < nlh->nlmsg_len) { |
1087 | break; | 1087 | break; |
@@ -1435,7 +1435,7 @@ static __init int iscsi_transport_init(void) | |||
1435 | if (err) | 1435 | if (err) |
1436 | goto unregister_conn_class; | 1436 | goto unregister_conn_class; |
1437 | 1437 | ||
1438 | nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, | 1438 | nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL, |
1439 | THIS_MODULE); | 1439 | THIS_MODULE); |
1440 | if (!nls) { | 1440 | if (!nls) { |
1441 | err = -ENOBUFS; | 1441 | err = -ENOBUFS; |
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c new file mode 100644 index 000000000000..8c766bcd1095 --- /dev/null +++ b/drivers/scsi/sun_esp.c | |||
@@ -0,0 +1,634 @@ | |||
1 | /* sun_esp.c: ESP front-end for Sparc SBUS systems. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/init.h> | ||
10 | |||
11 | #include <asm/irq.h> | ||
12 | #include <asm/io.h> | ||
13 | #include <asm/dma.h> | ||
14 | |||
15 | #include <asm/sbus.h> | ||
16 | |||
17 | #include <scsi/scsi_host.h> | ||
18 | |||
19 | #include "esp_scsi.h" | ||
20 | |||
21 | #define DRV_MODULE_NAME "sun_esp" | ||
22 | #define PFX DRV_MODULE_NAME ": " | ||
23 | #define DRV_VERSION "1.000" | ||
24 | #define DRV_MODULE_RELDATE "April 19, 2007" | ||
25 | |||
26 | #define dma_read32(REG) \ | ||
27 | sbus_readl(esp->dma_regs + (REG)) | ||
28 | #define dma_write32(VAL, REG) \ | ||
29 | sbus_writel((VAL), esp->dma_regs + (REG)) | ||
30 | |||
31 | static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev) | ||
32 | { | ||
33 | struct sbus_dev *sdev = esp->dev; | ||
34 | struct sbus_dma *dma; | ||
35 | |||
36 | if (dma_sdev != NULL) { | ||
37 | for_each_dvma(dma) { | ||
38 | if (dma->sdev == dma_sdev) | ||
39 | break; | ||
40 | } | ||
41 | } else { | ||
42 | for_each_dvma(dma) { | ||
43 | if (dma->sdev == NULL) | ||
44 | break; | ||
45 | |||
46 | /* If bus + slot are the same and it has the | ||
47 | * correct OBP name, it's ours. | ||
48 | */ | ||
49 | if (sdev->bus == dma->sdev->bus && | ||
50 | sdev->slot == dma->sdev->slot && | ||
51 | (!strcmp(dma->sdev->prom_name, "dma") || | ||
52 | !strcmp(dma->sdev->prom_name, "espdma"))) | ||
53 | break; | ||
54 | } | ||
55 | } | ||
56 | |||
57 | if (dma == NULL) { | ||
58 | printk(KERN_ERR PFX "[%s] Cannot find dma.\n", | ||
59 | sdev->ofdev.node->full_name); | ||
60 | return -ENODEV; | ||
61 | } | ||
62 | esp->dma = dma; | ||
63 | esp->dma_regs = dma->regs; | ||
64 | |||
65 | return 0; | ||
66 | |||
67 | } | ||
68 | |||
69 | static int __devinit esp_sbus_map_regs(struct esp *esp, int hme) | ||
70 | { | ||
71 | struct sbus_dev *sdev = esp->dev; | ||
72 | struct resource *res; | ||
73 | |||
74 | /* On HME, two reg sets exist, first is DVMA, | ||
75 | * second is ESP registers. | ||
76 | */ | ||
77 | if (hme) | ||
78 | res = &sdev->resource[1]; | ||
79 | else | ||
80 | res = &sdev->resource[0]; | ||
81 | |||
82 | esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP"); | ||
83 | if (!esp->regs) | ||
84 | return -ENOMEM; | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | static int __devinit esp_sbus_map_command_block(struct esp *esp) | ||
90 | { | ||
91 | struct sbus_dev *sdev = esp->dev; | ||
92 | |||
93 | esp->command_block = sbus_alloc_consistent(sdev, 16, | ||
94 | &esp->command_block_dma); | ||
95 | if (!esp->command_block) | ||
96 | return -ENOMEM; | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static int __devinit esp_sbus_register_irq(struct esp *esp) | ||
101 | { | ||
102 | struct Scsi_Host *host = esp->host; | ||
103 | struct sbus_dev *sdev = esp->dev; | ||
104 | |||
105 | host->irq = sdev->irqs[0]; | ||
106 | return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); | ||
107 | } | ||
108 | |||
109 | static void __devinit esp_get_scsi_id(struct esp *esp) | ||
110 | { | ||
111 | struct sbus_dev *sdev = esp->dev; | ||
112 | struct device_node *dp = sdev->ofdev.node; | ||
113 | |||
114 | esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff); | ||
115 | if (esp->scsi_id != 0xff) | ||
116 | goto done; | ||
117 | |||
118 | esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff); | ||
119 | if (esp->scsi_id != 0xff) | ||
120 | goto done; | ||
121 | |||
122 | if (!sdev->bus) { | ||
123 | /* SUN4 */ | ||
124 | esp->scsi_id = 7; | ||
125 | goto done; | ||
126 | } | ||
127 | |||
128 | esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node, | ||
129 | "scsi-initiator-id", 7); | ||
130 | |||
131 | done: | ||
132 | esp->host->this_id = esp->scsi_id; | ||
133 | esp->scsi_id_mask = (1 << esp->scsi_id); | ||
134 | } | ||
135 | |||
136 | static void __devinit esp_get_differential(struct esp *esp) | ||
137 | { | ||
138 | struct sbus_dev *sdev = esp->dev; | ||
139 | struct device_node *dp = sdev->ofdev.node; | ||
140 | |||
141 | if (of_find_property(dp, "differential", NULL)) | ||
142 | esp->flags |= ESP_FLAG_DIFFERENTIAL; | ||
143 | else | ||
144 | esp->flags &= ~ESP_FLAG_DIFFERENTIAL; | ||
145 | } | ||
146 | |||
147 | static void __devinit esp_get_clock_params(struct esp *esp) | ||
148 | { | ||
149 | struct sbus_dev *sdev = esp->dev; | ||
150 | struct device_node *dp = sdev->ofdev.node; | ||
151 | struct device_node *bus_dp; | ||
152 | int fmhz; | ||
153 | |||
154 | bus_dp = NULL; | ||
155 | if (sdev != NULL && sdev->bus != NULL) | ||
156 | bus_dp = sdev->bus->ofdev.node; | ||
157 | |||
158 | fmhz = of_getintprop_default(dp, "clock-frequency", 0); | ||
159 | if (fmhz == 0) | ||
160 | fmhz = (!bus_dp) ? 0 : | ||
161 | of_getintprop_default(bus_dp, "clock-frequency", 0); | ||
162 | |||
163 | esp->cfreq = fmhz; | ||
164 | } | ||
165 | |||
166 | static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma) | ||
167 | { | ||
168 | struct sbus_dev *sdev = esp->dev; | ||
169 | struct device_node *dp = sdev->ofdev.node; | ||
170 | u8 bursts; | ||
171 | |||
172 | bursts = of_getintprop_default(dp, "burst-sizes", 0xff); | ||
173 | if (dma) { | ||
174 | struct device_node *dma_dp = dma->ofdev.node; | ||
175 | u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff); | ||
176 | if (val != 0xff) | ||
177 | bursts &= val; | ||
178 | } | ||
179 | |||
180 | if (sdev->bus) { | ||
181 | u8 val = of_getintprop_default(sdev->bus->ofdev.node, | ||
182 | "burst-sizes", 0xff); | ||
183 | if (val != 0xff) | ||
184 | bursts &= val; | ||
185 | } | ||
186 | |||
187 | if (bursts == 0xff || | ||
188 | (bursts & DMA_BURST16) == 0 || | ||
189 | (bursts & DMA_BURST32) == 0) | ||
190 | bursts = (DMA_BURST32 - 1); | ||
191 | |||
192 | esp->bursts = bursts; | ||
193 | } | ||
194 | |||
195 | static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma) | ||
196 | { | ||
197 | esp_get_scsi_id(esp); | ||
198 | esp_get_differential(esp); | ||
199 | esp_get_clock_params(esp); | ||
200 | esp_get_bursts(esp, espdma); | ||
201 | } | ||
202 | |||
203 | static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg) | ||
204 | { | ||
205 | sbus_writeb(val, esp->regs + (reg * 4UL)); | ||
206 | } | ||
207 | |||
208 | static u8 sbus_esp_read8(struct esp *esp, unsigned long reg) | ||
209 | { | ||
210 | return sbus_readb(esp->regs + (reg * 4UL)); | ||
211 | } | ||
212 | |||
213 | static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf, | ||
214 | size_t sz, int dir) | ||
215 | { | ||
216 | return sbus_map_single(esp->dev, buf, sz, dir); | ||
217 | } | ||
218 | |||
219 | static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg, | ||
220 | int num_sg, int dir) | ||
221 | { | ||
222 | return sbus_map_sg(esp->dev, sg, num_sg, dir); | ||
223 | } | ||
224 | |||
225 | static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr, | ||
226 | size_t sz, int dir) | ||
227 | { | ||
228 | sbus_unmap_single(esp->dev, addr, sz, dir); | ||
229 | } | ||
230 | |||
231 | static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, | ||
232 | int num_sg, int dir) | ||
233 | { | ||
234 | sbus_unmap_sg(esp->dev, sg, num_sg, dir); | ||
235 | } | ||
236 | |||
237 | static int sbus_esp_irq_pending(struct esp *esp) | ||
238 | { | ||
239 | if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)) | ||
240 | return 1; | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static void sbus_esp_reset_dma(struct esp *esp) | ||
245 | { | ||
246 | int can_do_burst16, can_do_burst32, can_do_burst64; | ||
247 | int can_do_sbus64, lim; | ||
248 | u32 val; | ||
249 | |||
250 | can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; | ||
251 | can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; | ||
252 | can_do_burst64 = 0; | ||
253 | can_do_sbus64 = 0; | ||
254 | if (sbus_can_dma_64bit(esp->dev)) | ||
255 | can_do_sbus64 = 1; | ||
256 | if (sbus_can_burst64(esp->dev)) | ||
257 | can_do_burst64 = (esp->bursts & DMA_BURST64) != 0; | ||
258 | |||
259 | /* Put the DVMA into a known state. */ | ||
260 | if (esp->dma->revision != dvmahme) { | ||
261 | val = dma_read32(DMA_CSR); | ||
262 | dma_write32(val | DMA_RST_SCSI, DMA_CSR); | ||
263 | dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); | ||
264 | } | ||
265 | switch (esp->dma->revision) { | ||
266 | case dvmahme: | ||
267 | dma_write32(DMA_RESET_FAS366, DMA_CSR); | ||
268 | dma_write32(DMA_RST_SCSI, DMA_CSR); | ||
269 | |||
270 | esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS | | ||
271 | DMA_SCSI_DISAB | DMA_INT_ENAB); | ||
272 | |||
273 | esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE | | ||
274 | DMA_BRST_SZ); | ||
275 | |||
276 | if (can_do_burst64) | ||
277 | esp->prev_hme_dmacsr |= DMA_BRST64; | ||
278 | else if (can_do_burst32) | ||
279 | esp->prev_hme_dmacsr |= DMA_BRST32; | ||
280 | |||
281 | if (can_do_sbus64) { | ||
282 | esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64; | ||
283 | sbus_set_sbus64(esp->dev, esp->bursts); | ||
284 | } | ||
285 | |||
286 | lim = 1000; | ||
287 | while (dma_read32(DMA_CSR) & DMA_PEND_READ) { | ||
288 | if (--lim == 0) { | ||
289 | printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ " | ||
290 | "will not clear!\n", | ||
291 | esp->host->unique_id); | ||
292 | break; | ||
293 | } | ||
294 | udelay(1); | ||
295 | } | ||
296 | |||
297 | dma_write32(0, DMA_CSR); | ||
298 | dma_write32(esp->prev_hme_dmacsr, DMA_CSR); | ||
299 | |||
300 | dma_write32(0, DMA_ADDR); | ||
301 | break; | ||
302 | |||
303 | case dvmarev2: | ||
304 | if (esp->rev != ESP100) { | ||
305 | val = dma_read32(DMA_CSR); | ||
306 | dma_write32(val | DMA_3CLKS, DMA_CSR); | ||
307 | } | ||
308 | break; | ||
309 | |||
310 | case dvmarev3: | ||
311 | val = dma_read32(DMA_CSR); | ||
312 | val &= ~DMA_3CLKS; | ||
313 | val |= DMA_2CLKS; | ||
314 | if (can_do_burst32) { | ||
315 | val &= ~DMA_BRST_SZ; | ||
316 | val |= DMA_BRST32; | ||
317 | } | ||
318 | dma_write32(val, DMA_CSR); | ||
319 | break; | ||
320 | |||
321 | case dvmaesc1: | ||
322 | val = dma_read32(DMA_CSR); | ||
323 | val |= DMA_ADD_ENABLE; | ||
324 | val &= ~DMA_BCNT_ENAB; | ||
325 | if (!can_do_burst32 && can_do_burst16) { | ||
326 | val |= DMA_ESC_BURST; | ||
327 | } else { | ||
328 | val &= ~(DMA_ESC_BURST); | ||
329 | } | ||
330 | dma_write32(val, DMA_CSR); | ||
331 | break; | ||
332 | |||
333 | default: | ||
334 | break; | ||
335 | } | ||
336 | |||
337 | /* Enable interrupts. */ | ||
338 | val = dma_read32(DMA_CSR); | ||
339 | dma_write32(val | DMA_INT_ENAB, DMA_CSR); | ||
340 | } | ||
341 | |||
342 | static void sbus_esp_dma_drain(struct esp *esp) | ||
343 | { | ||
344 | u32 csr; | ||
345 | int lim; | ||
346 | |||
347 | if (esp->dma->revision == dvmahme) | ||
348 | return; | ||
349 | |||
350 | csr = dma_read32(DMA_CSR); | ||
351 | if (!(csr & DMA_FIFO_ISDRAIN)) | ||
352 | return; | ||
353 | |||
354 | if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1) | ||
355 | dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR); | ||
356 | |||
357 | lim = 1000; | ||
358 | while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) { | ||
359 | if (--lim == 0) { | ||
360 | printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n", | ||
361 | esp->host->unique_id); | ||
362 | break; | ||
363 | } | ||
364 | udelay(1); | ||
365 | } | ||
366 | } | ||
367 | |||
368 | static void sbus_esp_dma_invalidate(struct esp *esp) | ||
369 | { | ||
370 | if (esp->dma->revision == dvmahme) { | ||
371 | dma_write32(DMA_RST_SCSI, DMA_CSR); | ||
372 | |||
373 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
374 | (DMA_PARITY_OFF | DMA_2CLKS | | ||
375 | DMA_SCSI_DISAB | DMA_INT_ENAB)) & | ||
376 | ~(DMA_ST_WRITE | DMA_ENABLE)); | ||
377 | |||
378 | dma_write32(0, DMA_CSR); | ||
379 | dma_write32(esp->prev_hme_dmacsr, DMA_CSR); | ||
380 | |||
381 | /* This is necessary to avoid having the SCSI channel | ||
382 | * engine lock up on us. | ||
383 | */ | ||
384 | dma_write32(0, DMA_ADDR); | ||
385 | } else { | ||
386 | u32 val; | ||
387 | int lim; | ||
388 | |||
389 | lim = 1000; | ||
390 | while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) { | ||
391 | if (--lim == 0) { | ||
392 | printk(KERN_ALERT PFX "esp%d: DMA will not " | ||
393 | "invalidate!\n", esp->host->unique_id); | ||
394 | break; | ||
395 | } | ||
396 | udelay(1); | ||
397 | } | ||
398 | |||
399 | val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); | ||
400 | val |= DMA_FIFO_INV; | ||
401 | dma_write32(val, DMA_CSR); | ||
402 | val &= ~DMA_FIFO_INV; | ||
403 | dma_write32(val, DMA_CSR); | ||
404 | } | ||
405 | } | ||
406 | |||
407 | static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, | ||
408 | u32 dma_count, int write, u8 cmd) | ||
409 | { | ||
410 | u32 csr; | ||
411 | |||
412 | BUG_ON(!(cmd & ESP_CMD_DMA)); | ||
413 | |||
414 | sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); | ||
415 | sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); | ||
416 | if (esp->rev == FASHME) { | ||
417 | sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO); | ||
418 | sbus_esp_write8(esp, 0, FAS_RHI); | ||
419 | |||
420 | scsi_esp_cmd(esp, cmd); | ||
421 | |||
422 | csr = esp->prev_hme_dmacsr; | ||
423 | csr |= DMA_SCSI_DISAB | DMA_ENABLE; | ||
424 | if (write) | ||
425 | csr |= DMA_ST_WRITE; | ||
426 | else | ||
427 | csr &= ~DMA_ST_WRITE; | ||
428 | esp->prev_hme_dmacsr = csr; | ||
429 | |||
430 | dma_write32(dma_count, DMA_COUNT); | ||
431 | dma_write32(addr, DMA_ADDR); | ||
432 | dma_write32(csr, DMA_CSR); | ||
433 | } else { | ||
434 | csr = dma_read32(DMA_CSR); | ||
435 | csr |= DMA_ENABLE; | ||
436 | if (write) | ||
437 | csr |= DMA_ST_WRITE; | ||
438 | else | ||
439 | csr &= ~DMA_ST_WRITE; | ||
440 | dma_write32(csr, DMA_CSR); | ||
441 | if (esp->dma->revision == dvmaesc1) { | ||
442 | u32 end = PAGE_ALIGN(addr + dma_count + 16U); | ||
443 | dma_write32(end - addr, DMA_COUNT); | ||
444 | } | ||
445 | dma_write32(addr, DMA_ADDR); | ||
446 | |||
447 | scsi_esp_cmd(esp, cmd); | ||
448 | } | ||
449 | |||
450 | } | ||
451 | |||
452 | static int sbus_esp_dma_error(struct esp *esp) | ||
453 | { | ||
454 | u32 csr = dma_read32(DMA_CSR); | ||
455 | |||
456 | if (csr & DMA_HNDL_ERROR) | ||
457 | return 1; | ||
458 | |||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | static const struct esp_driver_ops sbus_esp_ops = { | ||
463 | .esp_write8 = sbus_esp_write8, | ||
464 | .esp_read8 = sbus_esp_read8, | ||
465 | .map_single = sbus_esp_map_single, | ||
466 | .map_sg = sbus_esp_map_sg, | ||
467 | .unmap_single = sbus_esp_unmap_single, | ||
468 | .unmap_sg = sbus_esp_unmap_sg, | ||
469 | .irq_pending = sbus_esp_irq_pending, | ||
470 | .reset_dma = sbus_esp_reset_dma, | ||
471 | .dma_drain = sbus_esp_dma_drain, | ||
472 | .dma_invalidate = sbus_esp_dma_invalidate, | ||
473 | .send_dma_cmd = sbus_esp_send_dma_cmd, | ||
474 | .dma_error = sbus_esp_dma_error, | ||
475 | }; | ||
476 | |||
477 | static int __devinit esp_sbus_probe_one(struct device *dev, | ||
478 | struct sbus_dev *esp_dev, | ||
479 | struct sbus_dev *espdma, | ||
480 | struct sbus_bus *sbus, | ||
481 | int hme) | ||
482 | { | ||
483 | struct scsi_host_template *tpnt = &scsi_esp_template; | ||
484 | struct Scsi_Host *host; | ||
485 | struct esp *esp; | ||
486 | int err; | ||
487 | |||
488 | host = scsi_host_alloc(tpnt, sizeof(struct esp)); | ||
489 | |||
490 | err = -ENOMEM; | ||
491 | if (!host) | ||
492 | goto fail; | ||
493 | |||
494 | host->max_id = (hme ? 16 : 8); | ||
495 | esp = host_to_esp(host); | ||
496 | |||
497 | esp->host = host; | ||
498 | esp->dev = esp_dev; | ||
499 | esp->ops = &sbus_esp_ops; | ||
500 | |||
501 | if (hme) | ||
502 | esp->flags |= ESP_FLAG_WIDE_CAPABLE; | ||
503 | |||
504 | err = esp_sbus_find_dma(esp, espdma); | ||
505 | if (err < 0) | ||
506 | goto fail_unlink; | ||
507 | |||
508 | err = esp_sbus_map_regs(esp, hme); | ||
509 | if (err < 0) | ||
510 | goto fail_unlink; | ||
511 | |||
512 | err = esp_sbus_map_command_block(esp); | ||
513 | if (err < 0) | ||
514 | goto fail_unmap_regs; | ||
515 | |||
516 | err = esp_sbus_register_irq(esp); | ||
517 | if (err < 0) | ||
518 | goto fail_unmap_command_block; | ||
519 | |||
520 | esp_sbus_get_props(esp, espdma); | ||
521 | |||
522 | /* Before we try to touch the ESP chip, ESC1 dma can | ||
523 | * come up with the reset bit set, so make sure that | ||
524 | * is clear first. | ||
525 | */ | ||
526 | if (esp->dma->revision == dvmaesc1) { | ||
527 | u32 val = dma_read32(DMA_CSR); | ||
528 | |||
529 | dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); | ||
530 | } | ||
531 | |||
532 | dev_set_drvdata(&esp_dev->ofdev.dev, esp); | ||
533 | |||
534 | err = scsi_esp_register(esp, dev); | ||
535 | if (err) | ||
536 | goto fail_free_irq; | ||
537 | |||
538 | return 0; | ||
539 | |||
540 | fail_free_irq: | ||
541 | free_irq(host->irq, esp); | ||
542 | fail_unmap_command_block: | ||
543 | sbus_free_consistent(esp->dev, 16, | ||
544 | esp->command_block, | ||
545 | esp->command_block_dma); | ||
546 | fail_unmap_regs: | ||
547 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); | ||
548 | fail_unlink: | ||
549 | scsi_host_put(host); | ||
550 | fail: | ||
551 | return err; | ||
552 | } | ||
553 | |||
554 | static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match) | ||
555 | { | ||
556 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); | ||
557 | struct device_node *dp = dev->node; | ||
558 | struct sbus_dev *dma_sdev = NULL; | ||
559 | int hme = 0; | ||
560 | |||
561 | if (dp->parent && | ||
562 | (!strcmp(dp->parent->name, "espdma") || | ||
563 | !strcmp(dp->parent->name, "dma"))) | ||
564 | dma_sdev = sdev->parent; | ||
565 | else if (!strcmp(dp->name, "SUNW,fas")) { | ||
566 | dma_sdev = sdev; | ||
567 | hme = 1; | ||
568 | } | ||
569 | |||
570 | return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev, | ||
571 | sdev->bus, hme); | ||
572 | } | ||
573 | |||
574 | static int __devexit esp_sbus_remove(struct of_device *dev) | ||
575 | { | ||
576 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
577 | unsigned int irq = esp->host->irq; | ||
578 | u32 val; | ||
579 | |||
580 | scsi_esp_unregister(esp); | ||
581 | |||
582 | /* Disable interrupts. */ | ||
583 | val = dma_read32(DMA_CSR); | ||
584 | dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); | ||
585 | |||
586 | free_irq(irq, esp); | ||
587 | sbus_free_consistent(esp->dev, 16, | ||
588 | esp->command_block, | ||
589 | esp->command_block_dma); | ||
590 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); | ||
591 | |||
592 | scsi_host_put(esp->host); | ||
593 | |||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | static struct of_device_id esp_match[] = { | ||
598 | { | ||
599 | .name = "SUNW,esp", | ||
600 | }, | ||
601 | { | ||
602 | .name = "SUNW,fas", | ||
603 | }, | ||
604 | { | ||
605 | .name = "esp", | ||
606 | }, | ||
607 | {}, | ||
608 | }; | ||
609 | MODULE_DEVICE_TABLE(of, esp_match); | ||
610 | |||
611 | static struct of_platform_driver esp_sbus_driver = { | ||
612 | .name = "esp", | ||
613 | .match_table = esp_match, | ||
614 | .probe = esp_sbus_probe, | ||
615 | .remove = __devexit_p(esp_sbus_remove), | ||
616 | }; | ||
617 | |||
618 | static int __init sunesp_init(void) | ||
619 | { | ||
620 | return of_register_driver(&esp_sbus_driver, &sbus_bus_type); | ||
621 | } | ||
622 | |||
623 | static void __exit sunesp_exit(void) | ||
624 | { | ||
625 | of_unregister_driver(&esp_sbus_driver); | ||
626 | } | ||
627 | |||
628 | MODULE_DESCRIPTION("Sun ESP SCSI driver"); | ||
629 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
630 | MODULE_LICENSE("GPL"); | ||
631 | MODULE_VERSION(DRV_VERSION); | ||
632 | |||
633 | module_init(sunesp_init); | ||
634 | module_exit(sunesp_exit); | ||
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index c129a0e8e807..90621c3312bc 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c | |||
@@ -1310,7 +1310,8 @@ static unsigned int check_modem_status(struct uart_8250_port *up) | |||
1310 | { | 1310 | { |
1311 | unsigned int status = serial_in(up, UART_MSR); | 1311 | unsigned int status = serial_in(up, UART_MSR); |
1312 | 1312 | ||
1313 | if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI) { | 1313 | if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI && |
1314 | up->port.info != NULL) { | ||
1314 | if (status & UART_MSR_TERI) | 1315 | if (status & UART_MSR_TERI) |
1315 | up->port.icount.rng++; | 1316 | up->port.icount.rng++; |
1316 | if (status & UART_MSR_DDSR) | 1317 | if (status & UART_MSR_DDSR) |
@@ -1333,8 +1334,9 @@ static inline void | |||
1333 | serial8250_handle_port(struct uart_8250_port *up) | 1334 | serial8250_handle_port(struct uart_8250_port *up) |
1334 | { | 1335 | { |
1335 | unsigned int status; | 1336 | unsigned int status; |
1337 | unsigned long flags; | ||
1336 | 1338 | ||
1337 | spin_lock(&up->port.lock); | 1339 | spin_lock_irqsave(&up->port.lock, flags); |
1338 | 1340 | ||
1339 | status = serial_inp(up, UART_LSR); | 1341 | status = serial_inp(up, UART_LSR); |
1340 | 1342 | ||
@@ -1346,7 +1348,7 @@ serial8250_handle_port(struct uart_8250_port *up) | |||
1346 | if (status & UART_LSR_THRE) | 1348 | if (status & UART_LSR_THRE) |
1347 | transmit_chars(up); | 1349 | transmit_chars(up); |
1348 | 1350 | ||
1349 | spin_unlock(&up->port.lock); | 1351 | spin_unlock_irqrestore(&up->port.lock, flags); |
1350 | } | 1352 | } |
1351 | 1353 | ||
1352 | /* | 1354 | /* |
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c index 41431d0d5512..246c5572667b 100644 --- a/drivers/serial/icom.c +++ b/drivers/serial/icom.c | |||
@@ -164,7 +164,7 @@ static void free_port_memory(struct icom_port *icom_port) | |||
164 | } | 164 | } |
165 | } | 165 | } |
166 | 166 | ||
167 | static int __init get_port_memory(struct icom_port *icom_port) | 167 | static int __devinit get_port_memory(struct icom_port *icom_port) |
168 | { | 168 | { |
169 | int index; | 169 | int index; |
170 | unsigned long stgAddr; | 170 | unsigned long stgAddr; |
@@ -1380,7 +1380,7 @@ static void icom_port_active(struct icom_port *icom_port, struct icom_adapter *i | |||
1380 | 0x8024 + 2 - 2 * (icom_port->port - 2); | 1380 | 0x8024 + 2 - 2 * (icom_port->port - 2); |
1381 | } | 1381 | } |
1382 | } | 1382 | } |
1383 | static int __init icom_load_ports(struct icom_adapter *icom_adapter) | 1383 | static int __devinit icom_load_ports(struct icom_adapter *icom_adapter) |
1384 | { | 1384 | { |
1385 | struct icom_port *icom_port; | 1385 | struct icom_port *icom_port; |
1386 | int port_num; | 1386 | int port_num; |
@@ -1473,7 +1473,7 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter) | |||
1473 | } | 1473 | } |
1474 | } | 1474 | } |
1475 | 1475 | ||
1476 | free_irq(icom_adapter->irq_number, (void *) icom_adapter); | 1476 | free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter); |
1477 | iounmap(icom_adapter->base_addr); | 1477 | iounmap(icom_adapter->base_addr); |
1478 | icom_free_adapter(icom_adapter); | 1478 | icom_free_adapter(icom_adapter); |
1479 | pci_release_regions(icom_adapter->pci_dev); | 1479 | pci_release_regions(icom_adapter->pci_dev); |
@@ -1539,7 +1539,6 @@ static int __devinit icom_probe(struct pci_dev *dev, | |||
1539 | } | 1539 | } |
1540 | 1540 | ||
1541 | icom_adapter->base_addr_pci = pci_resource_start(dev, 0); | 1541 | icom_adapter->base_addr_pci = pci_resource_start(dev, 0); |
1542 | icom_adapter->irq_number = dev->irq; | ||
1543 | icom_adapter->pci_dev = dev; | 1542 | icom_adapter->pci_dev = dev; |
1544 | icom_adapter->version = ent->driver_data; | 1543 | icom_adapter->version = ent->driver_data; |
1545 | icom_adapter->subsystem_id = ent->subdevice; | 1544 | icom_adapter->subsystem_id = ent->subdevice; |
@@ -1570,7 +1569,7 @@ static int __devinit icom_probe(struct pci_dev *dev, | |||
1570 | icom_port = &icom_adapter->port_info[index]; | 1569 | icom_port = &icom_adapter->port_info[index]; |
1571 | 1570 | ||
1572 | if (icom_port->status == ICOM_PORT_ACTIVE) { | 1571 | if (icom_port->status == ICOM_PORT_ACTIVE) { |
1573 | icom_port->uart_port.irq = icom_port->adapter->irq_number; | 1572 | icom_port->uart_port.irq = icom_port->adapter->pci_dev->irq; |
1574 | icom_port->uart_port.type = PORT_ICOM; | 1573 | icom_port->uart_port.type = PORT_ICOM; |
1575 | icom_port->uart_port.iotype = UPIO_MEM; | 1574 | icom_port->uart_port.iotype = UPIO_MEM; |
1576 | icom_port->uart_port.membase = | 1575 | icom_port->uart_port.membase = |
diff --git a/drivers/serial/icom.h b/drivers/serial/icom.h index 798f1ef23712..e8578d8cd35e 100644 --- a/drivers/serial/icom.h +++ b/drivers/serial/icom.h | |||
@@ -258,7 +258,6 @@ struct icom_port { | |||
258 | struct icom_adapter { | 258 | struct icom_adapter { |
259 | void __iomem * base_addr; | 259 | void __iomem * base_addr; |
260 | unsigned long base_addr_pci; | 260 | unsigned long base_addr_pci; |
261 | unsigned char irq_number; | ||
262 | struct pci_dev *pci_dev; | 261 | struct pci_dev *pci_dev; |
263 | struct icom_port port_info[4]; | 262 | struct icom_port port_info[4]; |
264 | int index; | 263 | int index; |
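The icom hunks drop the cached irq_number byte from struct icom_adapter and read pci_dev->irq directly. A plausible reason, which is an assumption rather than something stated in the diff, is that an unsigned char silently truncates IRQ numbers of 256 and above; keeping no second, narrower copy avoids that class of bug. A hypothetical illustration:

#include <linux/pci.h>

/* Hypothetical types, only to show the truncation hazard. */
struct adapter {
	unsigned char irq_number;	/* old style: wraps for irq >= 256 */
	struct pci_dev *pci_dev;
};

static unsigned int adapter_irq(struct adapter *a)
{
	return a->pci_dev->irq;		/* full-width value, no cached copy */
}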
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 96a852aa1903..bfd44177a215 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c | |||
@@ -1387,8 +1387,8 @@ static enum su_type __devinit su_get_type(struct device_node *dp) | |||
1387 | struct device_node *ap = of_find_node_by_path("/aliases"); | 1387 | struct device_node *ap = of_find_node_by_path("/aliases"); |
1388 | 1388 | ||
1389 | if (ap) { | 1389 | if (ap) { |
1390 | char *keyb = of_get_property(ap, "keyboard", NULL); | 1390 | const char *keyb = of_get_property(ap, "keyboard", NULL); |
1391 | char *ms = of_get_property(ap, "mouse", NULL); | 1391 | const char *ms = of_get_property(ap, "mouse", NULL); |
1392 | 1392 | ||
1393 | if (keyb) { | 1393 | if (keyb) { |
1394 | if (dp == of_find_node_by_path(keyb)) | 1394 | if (dp == of_find_node_by_path(keyb)) |
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index ec63b0ee0743..d3e2c5f90a26 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c | |||
@@ -343,7 +343,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char | |||
343 | UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end); | 343 | UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end); |
344 | } | 344 | } |
345 | 345 | ||
346 | memcpy(sarb->tail, source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD); | 346 | memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD); |
347 | __skb_put(sarb, ATM_CELL_PAYLOAD); | 347 | __skb_put(sarb, ATM_CELL_PAYLOAD); |
348 | 348 | ||
349 | if (pti & 1) { | 349 | if (pti & 1) { |
@@ -370,7 +370,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char | |||
370 | goto out; | 370 | goto out; |
371 | } | 371 | } |
372 | 372 | ||
373 | if (crc32_be(~0, sarb->tail - pdu_length, pdu_length) != 0xc704dd7b) { | 373 | if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { |
374 | atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", | 374 | atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", |
375 | __func__, vcc); | 375 | __func__, vcc); |
376 | atomic_inc(&vcc->stats->rx_err); | 376 | atomic_inc(&vcc->stats->rx_err); |
@@ -396,7 +396,9 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char | |||
396 | goto out; /* atm_charge increments rx_drop */ | 396 | goto out; /* atm_charge increments rx_drop */ |
397 | } | 397 | } |
398 | 398 | ||
399 | memcpy(skb->data, sarb->tail - pdu_length, length); | 399 | skb_copy_to_linear_data(skb, |
400 | skb_tail_pointer(sarb) - pdu_length, | ||
401 | length); | ||
400 | __skb_put(skb, length); | 402 | __skb_put(skb, length); |
401 | 403 | ||
402 | vdbg("%s: sending skb 0x%p, skb->len %u, skb->truesize %u", | 404 | vdbg("%s: sending skb 0x%p, skb->len %u, skb->truesize %u", |
@@ -484,7 +486,7 @@ static unsigned int usbatm_write_cells(struct usbatm_data *instance, | |||
484 | ptr[4] = 0xec; | 486 | ptr[4] = 0xec; |
485 | ptr += ATM_CELL_HEADER; | 487 | ptr += ATM_CELL_HEADER; |
486 | 488 | ||
487 | memcpy(ptr, skb->data, data_len); | 489 | skb_copy_from_linear_data(skb, ptr, data_len); |
488 | ptr += data_len; | 490 | ptr += data_len; |
489 | __skb_pull(skb, data_len); | 491 | __skb_pull(skb, data_len); |
490 | 492 | ||
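Several hunks in this series (usbatm, asix, gl620a, net1080, rndis_host) replace direct pokes at skb->tail and skb->data with the sk_buff accessor helpers. A rough sketch of how the helpers correspond to the open-coded forms on a configuration where the tail field is a plain pointer; this is illustrative and not taken from any of these drivers:

#include <linux/skbuff.h>
#include <linux/string.h>

static void copy_examples(struct sk_buff *skb, void *buf, unsigned int len)
{
	/* was: memcpy(skb->tail, buf, len); */
	memcpy(skb_tail_pointer(skb), buf, len);

	/* was: memcpy(skb->data, buf, len); */
	skb_copy_to_linear_data(skb, buf, len);

	/* was: memcpy(buf, skb->data, len); */
	skb_copy_from_linear_data(skb, buf, len);

	/* was: skb->tail = skb->data + len; */
	skb_set_tail_pointer(skb, len);
}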
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index 04e6b8508fb6..8f9f217e0a68 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c | |||
@@ -1766,7 +1766,6 @@ static void rx_complete (struct usb_ep *ep, struct usb_request *req) | |||
1766 | break; | 1766 | break; |
1767 | } | 1767 | } |
1768 | 1768 | ||
1769 | skb->dev = dev->net; | ||
1770 | skb->protocol = eth_type_trans (skb, dev->net); | 1769 | skb->protocol = eth_type_trans (skb, dev->net); |
1771 | dev->stats.rx_packets++; | 1770 | dev->stats.rx_packets++; |
1772 | dev->stats.rx_bytes += skb->len; | 1771 | dev->stats.rx_bytes += skb->len; |
diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c index 5808ea082459..d5ef97bc4d01 100644 --- a/drivers/usb/net/asix.c +++ b/drivers/usb/net/asix.c | |||
@@ -298,7 +298,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
298 | if (ax_skb) { | 298 | if (ax_skb) { |
299 | ax_skb->len = size; | 299 | ax_skb->len = size; |
300 | ax_skb->data = packet; | 300 | ax_skb->data = packet; |
301 | ax_skb->tail = packet + size; | 301 | skb_set_tail_pointer(ax_skb, size); |
302 | usbnet_skb_return(dev, ax_skb); | 302 | usbnet_skb_return(dev, ax_skb); |
303 | } else { | 303 | } else { |
304 | return 0; | 304 | return 0; |
@@ -338,7 +338,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
338 | && ((headroom + tailroom) >= (4 + padlen))) { | 338 | && ((headroom + tailroom) >= (4 + padlen))) { |
339 | if ((headroom < 4) || (tailroom < padlen)) { | 339 | if ((headroom < 4) || (tailroom < padlen)) { |
340 | skb->data = memmove(skb->head + 4, skb->data, skb->len); | 340 | skb->data = memmove(skb->head + 4, skb->data, skb->len); |
341 | skb->tail = skb->data + skb->len; | 341 | skb_set_tail_pointer(skb, skb->len); |
342 | } | 342 | } |
343 | } else { | 343 | } else { |
344 | struct sk_buff *skb2; | 344 | struct sk_buff *skb2; |
@@ -352,11 +352,11 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
352 | skb_push(skb, 4); | 352 | skb_push(skb, 4); |
353 | packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4); | 353 | packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4); |
354 | cpu_to_le32s(&packet_len); | 354 | cpu_to_le32s(&packet_len); |
355 | memcpy(skb->data, &packet_len, sizeof(packet_len)); | 355 | skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); |
356 | 356 | ||
357 | if ((skb->len % 512) == 0) { | 357 | if ((skb->len % 512) == 0) { |
358 | cpu_to_le32s(&padbytes); | 358 | cpu_to_le32s(&padbytes); |
359 | memcpy( skb->tail, &padbytes, sizeof(padbytes)); | 359 | memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); |
360 | skb_put(skb, sizeof(padbytes)); | 360 | skb_put(skb, sizeof(padbytes)); |
361 | } | 361 | } |
362 | return skb; | 362 | return skb; |
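The asix_tx_fixup change keeps the existing "shuffle the payload to make 4 bytes of headroom" logic but now sets the tail through the helper. A condensed sketch of that pattern, assuming a 4-byte length header as in the hunk above; the skb helpers are the ones used in the hunk, the surrounding function is illustrative:

#include <linux/skbuff.h>
#include <linux/string.h>

static void make_header_room(struct sk_buff *skb)
{
	if (skb_headroom(skb) < 4) {
		/* slide the payload forward inside the existing buffer */
		skb->data = memmove(skb->head + 4, skb->data, skb->len);
		skb_set_tail_pointer(skb, skb->len);	/* tail = data + len */
	}
	skb_push(skb, 4);	/* prepend room for the 4-byte length header */
}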
diff --git a/drivers/usb/net/catc.c b/drivers/usb/net/catc.c index 4852012735f6..ffec2e01b896 100644 --- a/drivers/usb/net/catc.c +++ b/drivers/usb/net/catc.c | |||
@@ -255,7 +255,6 @@ static void catc_rx_done(struct urb *urb) | |||
255 | if (!(skb = dev_alloc_skb(pkt_len))) | 255 | if (!(skb = dev_alloc_skb(pkt_len))) |
256 | return; | 256 | return; |
257 | 257 | ||
258 | skb->dev = catc->netdev; | ||
259 | eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0); | 258 | eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0); |
260 | skb_put(skb, pkt_len); | 259 | skb_put(skb, pkt_len); |
261 | 260 | ||
@@ -419,7 +418,7 @@ static int catc_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
419 | catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; | 418 | catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; |
420 | tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; | 419 | tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; |
421 | *((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len); | 420 | *((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len); |
422 | memcpy(tx_buf + 2, skb->data, skb->len); | 421 | skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); |
423 | catc->tx_ptr += skb->len + 2; | 422 | catc->tx_ptr += skb->len + 2; |
424 | 423 | ||
425 | if (!test_and_set_bit(TX_RUNNING, &catc->flags)) | 424 | if (!test_and_set_bit(TX_RUNNING, &catc->flags)) |
diff --git a/drivers/usb/net/gl620a.c b/drivers/usb/net/gl620a.c index d257a8e026d6..031cf5ca4dbb 100644 --- a/drivers/usb/net/gl620a.c +++ b/drivers/usb/net/gl620a.c | |||
@@ -157,7 +157,7 @@ genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | |||
157 | if ((headroom < (4 + 4*1)) || (tailroom < padlen)) { | 157 | if ((headroom < (4 + 4*1)) || (tailroom < padlen)) { |
158 | skb->data = memmove(skb->head + (4 + 4*1), | 158 | skb->data = memmove(skb->head + (4 + 4*1), |
159 | skb->data, skb->len); | 159 | skb->data, skb->len); |
160 | skb->tail = skb->data + skb->len; | 160 | skb_set_tail_pointer(skb, skb->len); |
161 | } | 161 | } |
162 | } else { | 162 | } else { |
163 | struct sk_buff *skb2; | 163 | struct sk_buff *skb2; |
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c index de95268ae4b8..a0cc05d21a6a 100644 --- a/drivers/usb/net/kaweth.c +++ b/drivers/usb/net/kaweth.c | |||
@@ -636,8 +636,6 @@ static void kaweth_usb_receive(struct urb *urb) | |||
636 | 636 | ||
637 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 637 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
638 | 638 | ||
639 | skb->dev = net; | ||
640 | |||
641 | eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0); | 639 | eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0); |
642 | 640 | ||
643 | skb_put(skb, pkt_len); | 641 | skb_put(skb, pkt_len); |
diff --git a/drivers/usb/net/net1080.c b/drivers/usb/net/net1080.c index ccebfdef4751..19bf8dae70c9 100644 --- a/drivers/usb/net/net1080.c +++ b/drivers/usb/net/net1080.c | |||
@@ -520,7 +520,7 @@ net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | |||
520 | skb->data = memmove(skb->head | 520 | skb->data = memmove(skb->head |
521 | + sizeof (struct nc_header), | 521 | + sizeof (struct nc_header), |
522 | skb->data, skb->len); | 522 | skb->data, skb->len); |
523 | skb->tail = skb->data + len; | 523 | skb_set_tail_pointer(skb, len); |
524 | goto encapsulate; | 524 | goto encapsulate; |
525 | } | 525 | } |
526 | } | 526 | } |
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c index d48c024cff59..1ad4ee54b186 100644 --- a/drivers/usb/net/pegasus.c +++ b/drivers/usb/net/pegasus.c | |||
@@ -316,6 +316,7 @@ static int update_eth_regs_async(pegasus_t * pegasus) | |||
316 | return ret; | 316 | return ret; |
317 | } | 317 | } |
318 | 318 | ||
319 | /* Returns 0 on success, error on failure */ | ||
319 | static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd) | 320 | static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd) |
320 | { | 321 | { |
321 | int i; | 322 | int i; |
@@ -574,7 +575,6 @@ static void fill_skb_pool(pegasus_t * pegasus) | |||
574 | */ | 575 | */ |
575 | if (pegasus->rx_pool[i] == NULL) | 576 | if (pegasus->rx_pool[i] == NULL) |
576 | return; | 577 | return; |
577 | pegasus->rx_pool[i]->dev = pegasus->net; | ||
578 | skb_reserve(pegasus->rx_pool[i], 2); | 578 | skb_reserve(pegasus->rx_pool[i], 2); |
579 | } | 579 | } |
580 | } | 580 | } |
@@ -847,10 +847,16 @@ static void intr_callback(struct urb *urb) | |||
847 | * d[0].NO_CARRIER kicks in only with failed TX. | 847 | * d[0].NO_CARRIER kicks in only with failed TX. |
848 | * ... so monitoring with MII may be safest. | 848 | * ... so monitoring with MII may be safest. |
849 | */ | 849 | */ |
850 | if (d[0] & NO_CARRIER) | 850 | if (pegasus->features & TRUST_LINK_STATUS) { |
851 | netif_carrier_off(net); | 851 | if (d[5] & LINK_STATUS) |
852 | else | 852 | netif_carrier_on(net); |
853 | netif_carrier_on(net); | 853 | else |
854 | netif_carrier_off(net); | ||
855 | } else { | ||
856 | /* Never set carrier _on_ based on ! NO_CARRIER */ | ||
857 | if (d[0] & NO_CARRIER) | ||
858 | netif_carrier_off(net); | ||
859 | } | ||
854 | 860 | ||
855 | /* bytes 3-4 == rx_lostpkt, reg 2E/2F */ | 861 | /* bytes 3-4 == rx_lostpkt, reg 2E/2F */ |
856 | pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4]; | 862 | pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4]; |
@@ -883,7 +889,7 @@ static int pegasus_start_xmit(struct sk_buff *skb, struct net_device *net) | |||
883 | netif_stop_queue(net); | 889 | netif_stop_queue(net); |
884 | 890 | ||
885 | ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16); | 891 | ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16); |
886 | memcpy(pegasus->tx_buff + 2, skb->data, skb->len); | 892 | skb_copy_from_linear_data(skb, pegasus->tx_buff + 2, skb->len); |
887 | usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb, | 893 | usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb, |
888 | usb_sndbulkpipe(pegasus->usb, 2), | 894 | usb_sndbulkpipe(pegasus->usb, 2), |
889 | pegasus->tx_buff, count, | 895 | pegasus->tx_buff, count, |
@@ -950,7 +956,7 @@ static void set_carrier(struct net_device *net) | |||
950 | pegasus_t *pegasus = netdev_priv(net); | 956 | pegasus_t *pegasus = netdev_priv(net); |
951 | u16 tmp; | 957 | u16 tmp; |
952 | 958 | ||
953 | if (!read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp)) | 959 | if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp)) |
954 | return; | 960 | return; |
955 | 961 | ||
956 | if (tmp & BMSR_LSTATUS) | 962 | if (tmp & BMSR_LSTATUS) |
@@ -1408,8 +1414,10 @@ static void pegasus_disconnect(struct usb_interface *intf) | |||
1408 | unlink_all_urbs(pegasus); | 1414 | unlink_all_urbs(pegasus); |
1409 | free_all_urbs(pegasus); | 1415 | free_all_urbs(pegasus); |
1410 | free_skb_pool(pegasus); | 1416 | free_skb_pool(pegasus); |
1411 | if (pegasus->rx_skb) | 1417 | if (pegasus->rx_skb != NULL) { |
1412 | dev_kfree_skb(pegasus->rx_skb); | 1418 | dev_kfree_skb(pegasus->rx_skb); |
1419 | pegasus->rx_skb = NULL; | ||
1420 | } | ||
1413 | free_netdev(pegasus->net); | 1421 | free_netdev(pegasus->net); |
1414 | } | 1422 | } |
1415 | 1423 | ||
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h index c7467823cd1c..c7aadb413e8c 100644 --- a/drivers/usb/net/pegasus.h +++ b/drivers/usb/net/pegasus.h | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #define PEGASUS_II 0x80000000 | 12 | #define PEGASUS_II 0x80000000 |
13 | #define HAS_HOME_PNA 0x40000000 | 13 | #define HAS_HOME_PNA 0x40000000 |
14 | #define TRUST_LINK_STATUS 0x20000000 | ||
14 | 15 | ||
15 | #define PEGASUS_MTU 1536 | 16 | #define PEGASUS_MTU 1536 |
16 | #define RX_SKBS 4 | 17 | #define RX_SKBS 4 |
@@ -203,7 +204,7 @@ PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701, | |||
203 | PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100, | 204 | PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100, |
204 | DEFAULT_GPIO_RESET | PEGASUS_II ) | 205 | DEFAULT_GPIO_RESET | PEGASUS_II ) |
205 | PEGASUS_DEV( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, | 206 | PEGASUS_DEV( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, |
206 | DEFAULT_GPIO_RESET | PEGASUS_II ) | 207 | DEFAULT_GPIO_RESET | PEGASUS_II | TRUST_LINK_STATUS ) |
207 | PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986, | 208 | PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986, |
208 | DEFAULT_GPIO_RESET ) | 209 | DEFAULT_GPIO_RESET ) |
209 | PEGASUS_DEV( "Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987, | 210 | PEGASUS_DEV( "Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987, |
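Two related pegasus fixes appear here: read_mii_word() now documents a 0-on-success return and set_carrier() bails out on a non-zero result, and adapters flagged TRUST_LINK_STATUS take carrier state from the interrupt status byte d[5] instead of inferring "link up" from the absence of NO_CARRIER. A hedged sketch of the resulting carrier logic; the field and constant names follow the hunks and pegasus.h, the surrounding function is illustrative:

#include <linux/netdevice.h>
#include "pegasus.h"	/* driver-local pegasus_t, TRUST_LINK_STATUS, etc. */

/* Illustrative only; mirrors the logic added in intr_callback() above. */
static void update_carrier(pegasus_t *pegasus, struct net_device *net, u8 *d)
{
	if (pegasus->features & TRUST_LINK_STATUS) {
		if (d[5] & LINK_STATUS)
			netif_carrier_on(net);
		else
			netif_carrier_off(net);
	} else {
		/* !NO_CARRIER is not a reliable "link up" indication,
		 * so only ever take the carrier down on this path. */
		if (d[0] & NO_CARRIER)
			netif_carrier_off(net);
	}
}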
diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c index 39a21c74fdf4..1d36772ba6e1 100644 --- a/drivers/usb/net/rndis_host.c +++ b/drivers/usb/net/rndis_host.c | |||
@@ -588,7 +588,7 @@ rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | |||
588 | if (likely((sizeof *hdr) <= room)) { | 588 | if (likely((sizeof *hdr) <= room)) { |
589 | skb->data = memmove(skb->head + sizeof *hdr, | 589 | skb->data = memmove(skb->head + sizeof *hdr, |
590 | skb->data, len); | 590 | skb->data, len); |
591 | skb->tail = skb->data + len; | 591 | skb_set_tail_pointer(skb, len); |
592 | goto fill; | 592 | goto fill; |
593 | } | 593 | } |
594 | } | 594 | } |
diff --git a/drivers/usb/net/rtl8150.c b/drivers/usb/net/rtl8150.c index ea153dc9b0ac..fa598f0340cf 100644 --- a/drivers/usb/net/rtl8150.c +++ b/drivers/usb/net/rtl8150.c | |||
@@ -646,7 +646,6 @@ static void fill_skb_pool(rtl8150_t *dev) | |||
646 | if (!skb) { | 646 | if (!skb) { |
647 | return; | 647 | return; |
648 | } | 648 | } |
649 | skb->dev = dev->netdev; | ||
650 | skb_reserve(skb, 2); | 649 | skb_reserve(skb, 2); |
651 | dev->rx_skb_pool[i] = skb; | 650 | dev->rx_skb_pool[i] = skb; |
652 | } | 651 | } |
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c index de69b183bd2f..0c5465a7909b 100644 --- a/drivers/usb/net/usbnet.c +++ b/drivers/usb/net/usbnet.c | |||
@@ -203,7 +203,6 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) | |||
203 | { | 203 | { |
204 | int status; | 204 | int status; |
205 | 205 | ||
206 | skb->dev = dev->net; | ||
207 | skb->protocol = eth_type_trans (skb, dev->net); | 206 | skb->protocol = eth_type_trans (skb, dev->net); |
208 | dev->stats.rx_packets++; | 207 | dev->stats.rx_packets++; |
209 | dev->stats.rx_bytes += skb->len; | 208 | dev->stats.rx_bytes += skb->len; |
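The catc, kaweth, gadget/ether, pegasus, rtl8150 and usbnet hunks all delete an explicit "skb->dev = ..." store on the receive path. That assignment is redundant because eth_type_trans() records the receiving device in the skb before returning the protocol. A sketch of a typical RX completion after the change (illustrative, not any one driver's code):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void rx_complete(struct net_device *net, struct sk_buff *skb)
{
	/* no "skb->dev = net;" needed -- eth_type_trans() sets it */
	skb->protocol = eth_type_trans(skb, net);
	netif_rx(skb);
}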
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index e4f0dd00ae85..8372ace4a0d9 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
@@ -139,7 +139,7 @@ config FB_TILEBLITTING | |||
139 | This is particularly important to one driver, matroxfb. If | 139 | This is particularly important to one driver, matroxfb. If |
140 | unsure, say N. | 140 | unsure, say N. |
141 | 141 | ||
142 | comment "Frambuffer hardware drivers" | 142 | comment "Frame buffer hardware drivers" |
143 | depends on FB | 143 | depends on FB |
144 | 144 | ||
145 | config FB_CIRRUS | 145 | config FB_CIRRUS |
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index d7627fc4f11e..8514f2a6f060 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c | |||
@@ -2899,7 +2899,7 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev, | |||
2899 | struct fb_info *info, unsigned long addr) | 2899 | struct fb_info *info, unsigned long addr) |
2900 | { | 2900 | { |
2901 | struct atyfb_par *par = info->par; | 2901 | struct atyfb_par *par = info->par; |
2902 | struct pcidev_cookie *pcp; | 2902 | struct device_node *dp; |
2903 | char prop[128]; | 2903 | char prop[128]; |
2904 | int node, len, i, j, ret; | 2904 | int node, len, i, j, ret; |
2905 | u32 mem, chip_id; | 2905 | u32 mem, chip_id; |
@@ -3037,8 +3037,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev, | |||
3037 | node = 0; | 3037 | node = 0; |
3038 | } | 3038 | } |
3039 | 3039 | ||
3040 | pcp = pdev->sysdata; | 3040 | dp = pci_device_to_OF_node(pdev); |
3041 | if (node == pcp->prom_node->node) { | 3041 | if (node == dp->node) { |
3042 | struct fb_var_screeninfo *var = &default_var; | 3042 | struct fb_var_screeninfo *var = &default_var; |
3043 | unsigned int N, P, Q, M, T, R; | 3043 | unsigned int N, P, Q, M, T, R; |
3044 | u32 v_total, h_total; | 3044 | u32 v_total, h_total; |
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index 1bf6f42eb400..a4b3fd185de7 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c | |||
@@ -410,7 +410,7 @@ static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo) | |||
410 | } | 410 | } |
411 | #endif | 411 | #endif |
412 | 412 | ||
413 | #ifdef CONFIG_PPC_OF | 413 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
414 | /* | 414 | /* |
415 | * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device | 415 | * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device |
416 | * tree. Hopefully, ATI OF driver is kind enough to fill these | 416 | * tree. Hopefully, ATI OF driver is kind enough to fill these |
@@ -440,7 +440,7 @@ static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo) | |||
440 | 440 | ||
441 | return 0; | 441 | return 0; |
442 | } | 442 | } |
443 | #endif /* CONFIG_PPC_OF */ | 443 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
444 | 444 | ||
445 | /* | 445 | /* |
446 | * Read PLL infos from chip registers | 446 | * Read PLL infos from chip registers |
@@ -645,7 +645,7 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo) | |||
645 | rinfo->pll.ref_div = INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK; | 645 | rinfo->pll.ref_div = INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK; |
646 | 646 | ||
647 | 647 | ||
648 | #ifdef CONFIG_PPC_OF | 648 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
649 | /* | 649 | /* |
650 | * Retrieve PLL infos from Open Firmware first | 650 | * Retrieve PLL infos from Open Firmware first |
651 | */ | 651 | */ |
@@ -653,7 +653,7 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo) | |||
653 | printk(KERN_INFO "radeonfb: Retrieved PLL infos from Open Firmware\n"); | 653 | printk(KERN_INFO "radeonfb: Retrieved PLL infos from Open Firmware\n"); |
654 | goto found; | 654 | goto found; |
655 | } | 655 | } |
656 | #endif /* CONFIG_PPC_OF */ | 656 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
657 | 657 | ||
658 | /* | 658 | /* |
659 | * Check out if we have an X86 which gave us some PLL informations | 659 | * Check out if we have an X86 which gave us some PLL informations |
@@ -2231,7 +2231,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev, | |||
2231 | rinfo->family == CHIP_FAMILY_RS200) | 2231 | rinfo->family == CHIP_FAMILY_RS200) |
2232 | rinfo->errata |= CHIP_ERRATA_PLL_DELAY; | 2232 | rinfo->errata |= CHIP_ERRATA_PLL_DELAY; |
2233 | 2233 | ||
2234 | #ifdef CONFIG_PPC_OF | 2234 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
2235 | /* On PPC, we obtain the OF device-node pointer to the firmware | 2235 | /* On PPC, we obtain the OF device-node pointer to the firmware |
2236 | * data for this chip | 2236 | * data for this chip |
2237 | */ | 2237 | */ |
@@ -2240,6 +2240,8 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev, | |||
2240 | printk(KERN_WARNING "radeonfb (%s): Cannot match card to OF node !\n", | 2240 | printk(KERN_WARNING "radeonfb (%s): Cannot match card to OF node !\n", |
2241 | pci_name(rinfo->pdev)); | 2241 | pci_name(rinfo->pdev)); |
2242 | 2242 | ||
2243 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ | ||
2244 | #ifdef CONFIG_PPC_OF | ||
2243 | /* On PPC, the firmware sets up a memory mapping that tends | 2245 | /* On PPC, the firmware sets up a memory mapping that tends |
2244 | * to cause lockups when enabling the engine. We reconfigure | 2246 | * to cause lockups when enabling the engine. We reconfigure |
2245 | * the card internal memory mappings properly | 2247 | * the card internal memory mappings properly |
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c index 38c7dbf8c151..737b5c09dbdb 100644 --- a/drivers/video/aty/radeon_monitor.c +++ b/drivers/video/aty/radeon_monitor.c | |||
@@ -52,7 +52,7 @@ static char *radeon_get_mon_name(int type) | |||
52 | } | 52 | } |
53 | 53 | ||
54 | 54 | ||
55 | #ifdef CONFIG_PPC_OF | 55 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
56 | /* | 56 | /* |
57 | * Try to find monitor informations & EDID data out of the Open Firmware | 57 | * Try to find monitor informations & EDID data out of the Open Firmware |
58 | * device-tree. This also contains some "hacks" to work around a few machine | 58 | * device-tree. This also contains some "hacks" to work around a few machine |
@@ -156,7 +156,7 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_ | |||
156 | } | 156 | } |
157 | return MT_NONE; | 157 | return MT_NONE; |
158 | } | 158 | } |
159 | #endif /* CONFIG_PPC_OF */ | 159 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
160 | 160 | ||
161 | 161 | ||
162 | static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo) | 162 | static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo) |
@@ -495,11 +495,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo, | |||
495 | * Old single head cards | 495 | * Old single head cards |
496 | */ | 496 | */ |
497 | if (!rinfo->has_CRTC2) { | 497 | if (!rinfo->has_CRTC2) { |
498 | #ifdef CONFIG_PPC_OF | 498 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
499 | if (rinfo->mon1_type == MT_NONE) | 499 | if (rinfo->mon1_type == MT_NONE) |
500 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, | 500 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, |
501 | &rinfo->mon1_EDID); | 501 | &rinfo->mon1_EDID); |
502 | #endif /* CONFIG_PPC_OF */ | 502 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
503 | #ifdef CONFIG_FB_RADEON_I2C | 503 | #ifdef CONFIG_FB_RADEON_I2C |
504 | if (rinfo->mon1_type == MT_NONE) | 504 | if (rinfo->mon1_type == MT_NONE) |
505 | rinfo->mon1_type = | 505 | rinfo->mon1_type = |
@@ -544,11 +544,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo, | |||
544 | /* | 544 | /* |
545 | * Probe primary head (DVI or laptop internal panel) | 545 | * Probe primary head (DVI or laptop internal panel) |
546 | */ | 546 | */ |
547 | #ifdef CONFIG_PPC_OF | 547 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
548 | if (rinfo->mon1_type == MT_NONE) | 548 | if (rinfo->mon1_type == MT_NONE) |
549 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, | 549 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, |
550 | &rinfo->mon1_EDID); | 550 | &rinfo->mon1_EDID); |
551 | #endif /* CONFIG_PPC_OF */ | 551 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
552 | #ifdef CONFIG_FB_RADEON_I2C | 552 | #ifdef CONFIG_FB_RADEON_I2C |
553 | if (rinfo->mon1_type == MT_NONE) | 553 | if (rinfo->mon1_type == MT_NONE) |
554 | rinfo->mon1_type = radeon_probe_i2c_connector(rinfo, ddc_dvi, | 554 | rinfo->mon1_type = radeon_probe_i2c_connector(rinfo, ddc_dvi, |
@@ -572,11 +572,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo, | |||
572 | /* | 572 | /* |
573 | * Probe secondary head (mostly VGA, can be DVI) | 573 | * Probe secondary head (mostly VGA, can be DVI) |
574 | */ | 574 | */ |
575 | #ifdef CONFIG_PPC_OF | 575 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
576 | if (rinfo->mon2_type == MT_NONE) | 576 | if (rinfo->mon2_type == MT_NONE) |
577 | rinfo->mon2_type = radeon_probe_OF_head(rinfo, 1, | 577 | rinfo->mon2_type = radeon_probe_OF_head(rinfo, 1, |
578 | &rinfo->mon2_EDID); | 578 | &rinfo->mon2_EDID); |
579 | #endif /* CONFIG_PPC_OF */ | 579 | #endif /* CONFIG_PPC_OF || defined(CONFIG_SPARC) */ |
580 | #ifdef CONFIG_FB_RADEON_I2C | 580 | #ifdef CONFIG_FB_RADEON_I2C |
581 | if (rinfo->mon2_type == MT_NONE) | 581 | if (rinfo->mon2_type == MT_NONE) |
582 | rinfo->mon2_type = radeon_probe_i2c_connector(rinfo, ddc_vga, | 582 | rinfo->mon2_type = radeon_probe_i2c_connector(rinfo, ddc_vga, |
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h index d5ff224a6258..319000360285 100644 --- a/drivers/video/aty/radeonfb.h +++ b/drivers/video/aty/radeonfb.h | |||
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | #ifdef CONFIG_PPC_OF | 19 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
20 | #include <asm/prom.h> | 20 | #include <asm/prom.h> |
21 | #endif | 21 | #endif |
22 | 22 | ||
@@ -292,7 +292,7 @@ struct radeonfb_info { | |||
292 | unsigned long fb_local_base; | 292 | unsigned long fb_local_base; |
293 | 293 | ||
294 | struct pci_dev *pdev; | 294 | struct pci_dev *pdev; |
295 | #ifdef CONFIG_PPC_OF | 295 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
296 | struct device_node *of_node; | 296 | struct device_node *of_node; |
297 | #endif | 297 | #endif |
298 | 298 | ||
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c index 767c850f8eb7..f042428a84f4 100644 --- a/drivers/video/cg3.c +++ b/drivers/video/cg3.c | |||
@@ -266,7 +266,7 @@ static void __devinit cg3_init_fix(struct fb_info *info, int linebytes, | |||
266 | static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var, | 266 | static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var, |
267 | struct device_node *dp) | 267 | struct device_node *dp) |
268 | { | 268 | { |
269 | char *params; | 269 | const char *params; |
270 | char *p; | 270 | char *p; |
271 | int ww, hh; | 271 | int ww, hh; |
272 | 272 | ||
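The sunsu and cg3 hunks change locals that receive of_get_property() results from char * to const char *. The returned pointer refers into the device-tree property data, which must not be modified, so the const-qualified form is the correct declaration. A small sketch, assuming a sparc/OF configuration and a "model" property for illustration:

#include <linux/kernel.h>
#include <asm/prom.h>

static void show_model(struct device_node *dp)
{
	const char *model = of_get_property(dp, "model", NULL);

	if (model)
		printk(KERN_INFO "model: %s\n", model);
}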
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c index 90592fb59156..eb1a4812ad1d 100644 --- a/drivers/video/igafb.c +++ b/drivers/video/igafb.c | |||
@@ -44,8 +44,8 @@ | |||
44 | 44 | ||
45 | #include <asm/io.h> | 45 | #include <asm/io.h> |
46 | 46 | ||
47 | #ifdef __sparc__ | 47 | #ifdef CONFIG_SPARC |
48 | #include <asm/pbm.h> | 48 | #include <asm/prom.h> |
49 | #include <asm/pcic.h> | 49 | #include <asm/pcic.h> |
50 | #endif | 50 | #endif |
51 | 51 | ||
@@ -96,7 +96,7 @@ struct fb_var_screeninfo default_var = { | |||
96 | .vmode = FB_VMODE_NONINTERLACED | 96 | .vmode = FB_VMODE_NONINTERLACED |
97 | }; | 97 | }; |
98 | 98 | ||
99 | #ifdef __sparc__ | 99 | #ifdef CONFIG_SPARC |
100 | struct fb_var_screeninfo default_var_1024x768 __initdata = { | 100 | struct fb_var_screeninfo default_var_1024x768 __initdata = { |
101 | /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ | 101 | /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ |
102 | .xres = 1024, | 102 | .xres = 1024, |
@@ -188,7 +188,7 @@ static inline void iga_outb(struct iga_par *par, unsigned char val, | |||
188 | pci_outb(par, val, reg+1); | 188 | pci_outb(par, val, reg+1); |
189 | } | 189 | } |
190 | 190 | ||
191 | #endif /* __sparc__ */ | 191 | #endif /* CONFIG_SPARC */ |
192 | 192 | ||
193 | /* | 193 | /* |
194 | * Very important functionality for the JavaEngine1 computer: | 194 | * Very important functionality for the JavaEngine1 computer: |
@@ -217,7 +217,7 @@ static void iga_blank_border(struct iga_par *par) | |||
217 | iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i); | 217 | iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i); |
218 | } | 218 | } |
219 | 219 | ||
220 | #ifdef __sparc__ | 220 | #ifdef CONFIG_SPARC |
221 | static int igafb_mmap(struct fb_info *info, | 221 | static int igafb_mmap(struct fb_info *info, |
222 | struct vm_area_struct *vma) | 222 | struct vm_area_struct *vma) |
223 | { | 223 | { |
@@ -271,7 +271,7 @@ static int igafb_mmap(struct fb_info *info, | |||
271 | vma->vm_flags |= VM_IO; | 271 | vma->vm_flags |= VM_IO; |
272 | return 0; | 272 | return 0; |
273 | } | 273 | } |
274 | #endif /* __sparc__ */ | 274 | #endif /* CONFIG_SPARC */ |
275 | 275 | ||
276 | static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green, | 276 | static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green, |
277 | unsigned blue, unsigned transp, | 277 | unsigned blue, unsigned transp, |
@@ -323,7 +323,7 @@ static struct fb_ops igafb_ops = { | |||
323 | .fb_fillrect = cfb_fillrect, | 323 | .fb_fillrect = cfb_fillrect, |
324 | .fb_copyarea = cfb_copyarea, | 324 | .fb_copyarea = cfb_copyarea, |
325 | .fb_imageblit = cfb_imageblit, | 325 | .fb_imageblit = cfb_imageblit, |
326 | #ifdef __sparc__ | 326 | #ifdef CONFIG_SPARC |
327 | .fb_mmap = igafb_mmap, | 327 | .fb_mmap = igafb_mmap, |
328 | #endif | 328 | #endif |
329 | }; | 329 | }; |
@@ -424,7 +424,7 @@ int __init igafb_init(void) | |||
424 | 424 | ||
425 | par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK; | 425 | par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK; |
426 | 426 | ||
427 | #ifdef __sparc__ | 427 | #ifdef CONFIG_SPARC |
428 | /* | 428 | /* |
429 | * The following is sparc specific and this is why: | 429 | * The following is sparc specific and this is why: |
430 | * | 430 | * |
@@ -477,8 +477,8 @@ int __init igafb_init(void) | |||
477 | * Set default vmode and cmode from PROM properties. | 477 | * Set default vmode and cmode from PROM properties. |
478 | */ | 478 | */ |
479 | { | 479 | { |
480 | struct pcidev_cookie *cookie = pdev->sysdata; | 480 | struct device_node *dp = pci_device_to_OF_node(pdev); |
481 | int node = cookie->prom_node; | 481 | int node = dp->node; |
482 | int width = prom_getintdefault(node, "width", 1024); | 482 | int width = prom_getintdefault(node, "width", 1024); |
483 | int height = prom_getintdefault(node, "height", 768); | 483 | int height = prom_getintdefault(node, "height", 768); |
484 | int depth = prom_getintdefault(node, "depth", 8); | 484 | int depth = prom_getintdefault(node, "depth", 8); |
@@ -534,7 +534,7 @@ int __init igafb_init(void) | |||
534 | kfree(info); | 534 | kfree(info); |
535 | } | 535 | } |
536 | 536 | ||
537 | #ifdef __sparc__ | 537 | #ifdef CONFIG_SPARC |
538 | /* | 538 | /* |
539 | * Add /dev/fb mmap values. | 539 | * Add /dev/fb mmap values. |
540 | */ | 540 | */ |
@@ -552,7 +552,7 @@ int __init igafb_init(void) | |||
552 | par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */ | 552 | par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */ |
553 | par->mmap_map[1].prot_mask = SRMMU_CACHE; | 553 | par->mmap_map[1].prot_mask = SRMMU_CACHE; |
554 | par->mmap_map[1].prot_flag = SRMMU_WRITE; | 554 | par->mmap_map[1].prot_flag = SRMMU_WRITE; |
555 | #endif /* __sparc__ */ | 555 | #endif /* CONFIG_SPARC */ |
556 | 556 | ||
557 | return 0; | 557 | return 0; |
558 | } | 558 | } |
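The atyfb and igafb hunks replace digging through pdev->sysdata (the old sparc pcidev_cookie) with pci_device_to_OF_node(), which hands back the firmware device node for a PCI device. A minimal sketch of the replacement, assuming a sparc configuration; the "width" property, the default value and the dp->node handle mirror the igafb hunk, the headers and function are illustrative:

#include <linux/pci.h>
#include <asm/prom.h>
#include <asm/oplib.h>

/* Sketch: look up a PROM property for a PCI framebuffer (assumed names). */
static int fb_width_from_prom(struct pci_dev *pdev)
{
	struct device_node *dp = pci_device_to_OF_node(pdev);

	/* dp->node is the raw PROM node handle used by the prom_* helpers */
	return prom_getintdefault(dp->node, "width", 1024);
}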