author     David Woodhouse <dwmw2@infradead.org>  2007-04-27 14:16:19 -0400
committer  David Woodhouse <dwmw2@infradead.org>  2007-04-27 14:16:19 -0400
commit     d1da4e50e5d09f02c340927a4fcb7f54202fa033
tree       7f98317bdd45dbdb7644e9179891c5af6a3a8ef1 /drivers
parent     78ab67da1002d954ea4c3e2b441e2483c41f94e8
parent     a205752d1ad2d37d6597aaae5a56fc396a770868
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/mtd/Kconfig
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Diffstat (limited to 'drivers')
417 files changed, 20919 insertions(+), 8373 deletions(-)
diff --git a/drivers/Makefile b/drivers/Makefile
index 3a718f51350e..920c975bb6d4 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -72,7 +72,6 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
 obj-$(CONFIG_MMC) += mmc/
 obj-$(CONFIG_NEW_LEDS) += leds/
 obj-$(CONFIG_INFINIBAND) += infiniband/
-obj-$(CONFIG_IPATH_CORE) += infiniband/
 obj-$(CONFIG_SGI_SN) += sn/
 obj-y += firmware/
 obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 3c372e08f77d..59651abfa4f8 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -821,7 +821,7 @@ static inline void fill_rx_pool (amb_dev * dev, unsigned char pool,
 }
 // cast needed as there is no %? for pointer differences
 PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
-skb, skb->head, (long) (skb->end - skb->head));
+skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
 rx.handle = virt_to_bus (skb);
 rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
 if (rx_give (dev, &rx, pool))
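This merge pulls in the net tree's sk_buff cleanup, visible throughout the ATM drivers below: on configurations with CONFIG_NET_SKBUFF_DATA_USES_OFFSET (64-bit kernels), skb->tail and skb->end hold offsets from skb->head rather than raw pointers, so drivers must go through skb_end_pointer() instead of touching skb->end directly. A minimal userspace model of the offset flavour (a sketch, not the kernel's definitions):

#include <stdio.h>

typedef unsigned int sk_buff_data_t;    /* offset flavour (64-bit kernels) */

struct sk_buff {
	unsigned char *head;            /* start of the allocated buffer */
	unsigned char *data;            /* start of the payload */
	sk_buff_data_t end;             /* offset of buffer end from head */
};

/* With offsets, end must be rebased on head before pointer arithmetic. */
static unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

int main(void)
{
	unsigned char buf[256];
	struct sk_buff skb = { .head = buf, .data = buf, .end = sizeof(buf) };

	/* Old driver code computed skb->end - skb->head directly; after
	 * the conversion the same quantity goes through the accessor. */
	printf("buffer area: %ld bytes\n",
	       (long)(skb_end_pointer(&skb) - skb.head));
	return 0;
}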
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index fc518d85543d..02ad83d6b562 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -221,7 +221,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
 hdr->vpi = htons(vcc->vpi);
 hdr->vci = htons(vcc->vci);
 hdr->length = htonl(skb->len);
-memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
+skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
 if (vcc->pop) vcc->pop(vcc,skb);
 else dev_kfree_skb(skb);
 out_vcc->push(out_vcc,new_skb);
@@ -310,7 +310,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
 goto done;
 }
 __net_timestamp(new_skb);
-memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
+skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
 out_vcc->push(out_vcc,new_skb);
 atomic_inc(&vcc->stats->tx);
 atomic_inc(&out_vcc->stats->rx);
@@ -352,7 +352,7 @@ static struct atm_dev atmtcp_control_dev = {
 .ops = &atmtcp_c_dev_ops,
 .type = "atmtcp",
 .number = 999,
-.lock = SPIN_LOCK_UNLOCKED
+.lock = __SPIN_LOCK_UNLOCKED(atmtcp_control_dev.lock)
 };
 
 
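Two recurring conversions appear in atmtcp.c. The memcpy() calls out of skb->data become skb_copy_from_linear_data(), which names the source and direction and, in this era, is a thin wrapper over memcpy() on the linear area. The static initializer SPIN_LOCK_UNLOCKED, deprecated because every lock initialized with it shared one lockdep class, becomes __SPIN_LOCK_UNLOCKED(name) with a per-lock name. The copy helpers' shape, sketched over a miniature sk_buff:

#include <string.h>

struct sk_buff { unsigned char *head, *data; };

/* Mirrors the 2.6.22-era helpers: copy out of the linear data area. */
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to, const unsigned int len)
{
	memcpy(to, skb->data, len);
}

/* Offset variant, used by bfusb.c further down in this diff. */
static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}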
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 8fccf018f165..0d3a38b1cb0b 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -536,7 +536,7 @@ static int rx_aal0(struct atm_vcc *vcc)
 return 0;
 }
 skb_put(skb,length);
-skb_set_timestamp(skb, &eni_vcc->timestamp);
+skb->tstamp = eni_vcc->timestamp;
 DPRINTK("got len %ld\n",length);
 if (do_rx_dma(vcc,skb,1,length >> 2,length >> 2)) return 1;
 eni_vcc->rxing++;
@@ -701,7 +701,7 @@ static void get_service(struct atm_dev *dev)
 DPRINTK("Grr, servicing VCC %ld twice\n",vci);
 continue;
 }
-do_gettimeofday(&ENI_VCC(vcc)->timestamp);
+ENI_VCC(vcc)->timestamp = ktime_get_real();
 ENI_VCC(vcc)->next = NULL;
 if (vcc->qos.rxtp.traffic_class == ATM_CBR) {
 if (eni_dev->fast)
diff --git a/drivers/atm/eni.h b/drivers/atm/eni.h
index 385090c2a580..d04fefb0841f 100644
--- a/drivers/atm/eni.h
+++ b/drivers/atm/eni.h
@@ -59,7 +59,7 @@ struct eni_vcc {
 int rxing; /* number of pending PDUs */
 int servicing; /* number of waiting VCs (0 or 1) */
 int txing; /* number of pending TX bytes */
-struct timeval timestamp; /* for RX timing */
+ktime_t timestamp; /* for RX timing */
 struct atm_vcc *next; /* next pending RX */
 struct sk_buff *last; /* last PDU being DMAed (used to carry
 discard information) */
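eni.c and eni.h show the timestamp conversion that recurs in this merge: a struct timeval filled by do_gettimeofday() becomes a ktime_t filled by ktime_get_real(). Since ktime_t is a single 64-bit nanosecond count, stamping the VCC and later copying the stamp into skb->tstamp are plain assignments. A userspace model, assuming POSIX clock_gettime():

#include <stdio.h>
#include <time.h>

typedef long long s64;
typedef struct { s64 tv64; } ktime_t;   /* models the 2007-era 64-bit layout */

static ktime_t ktime_get_real(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_REALTIME, &ts);
	return (ktime_t){ .tv64 = (s64)ts.tv_sec * 1000000000LL + ts.tv_nsec };
}

int main(void)
{
	/* One assignment replaces the two-field timeval copy. */
	ktime_t stamp = ktime_get_real();
	printf("now = %lld ns since the epoch\n", stamp.tv64);
	return 0;
}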
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index a7c0ed3107e3..405ee5e09221 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1,6 +1,4 @@
 /*
-  $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
-
   A FORE Systems 200E-series driver for ATM on Linux.
   Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
 
@@ -1502,9 +1500,9 @@ fore200e_open(struct atm_vcc *vcc)
 /* pseudo-CBR bandwidth requested? */
 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
 
-down(&fore200e->rate_sf);
+mutex_lock(&fore200e->rate_mtx);
 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
-up(&fore200e->rate_sf);
+mutex_unlock(&fore200e->rate_mtx);
 
 kfree(fore200e_vcc);
 vc_map->vcc = NULL;
@@ -1513,7 +1511,7 @@ fore200e_open(struct atm_vcc *vcc)
 
 /* reserve bandwidth */
 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
-up(&fore200e->rate_sf);
+mutex_unlock(&fore200e->rate_mtx);
 }
 
 vcc->itf = vcc->dev->number;
@@ -1599,9 +1597,9 @@ fore200e_close(struct atm_vcc* vcc)
 /* release reserved bandwidth, if any */
 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
 
-down(&fore200e->rate_sf);
+mutex_lock(&fore200e->rate_mtx);
 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
-up(&fore200e->rate_sf);
+mutex_unlock(&fore200e->rate_mtx);
 
 clear_bit(ATM_VF_HASQOS, &vcc->flags);
 }
@@ -2064,16 +2062,16 @@ fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
 
 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
 
-down(&fore200e->rate_sf);
+mutex_lock(&fore200e->rate_mtx);
 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
-up(&fore200e->rate_sf);
+mutex_unlock(&fore200e->rate_mtx);
 return -EAGAIN;
 }
 
 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
 fore200e->available_cell_rate -= qos->txtp.max_pcr;
 
-up(&fore200e->rate_sf);
+mutex_unlock(&fore200e->rate_mtx);
 
 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
 
@@ -2459,7 +2457,7 @@ fore200e_initialize(struct fore200e* fore200e)
 
 DPRINTK(2, "device %s being initialized\n", fore200e->name);
 
-init_MUTEX(&fore200e->rate_sf);
+mutex_init(&fore200e->rate_mtx);
 spin_lock_init(&fore200e->q_lock);
 
 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
diff --git a/drivers/atm/fore200e.h b/drivers/atm/fore200e.h
index f9abfdac33e4..b85a54613dea 100644
--- a/drivers/atm/fore200e.h
+++ b/drivers/atm/fore200e.h
@@ -869,7 +869,7 @@ typedef struct fore200e {
 
 struct stats* stats; /* last snapshot of the stats */
 
-struct semaphore rate_sf; /* protects rate reservation ops */
+struct mutex rate_mtx; /* protects rate reservation ops */
 spinlock_t q_lock; /* protects queue ops */
 #ifdef FORE200E_USE_TASKLET
 struct tasklet_struct tx_tasklet; /* performs tx interrupt work */
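The fore200e rate_sf semaphore was only ever used as a sleeping binary lock around the cell-rate accounting, so it becomes a struct mutex: init_MUTEX(), down() and up() map to mutex_init(), mutex_lock() and mutex_unlock(), and lockdep gains ownership checking. The shape of the converted reservation path, as a kernel-style sketch (reserve_rate() is a hypothetical helper for illustration, not a function in the driver):

#include <linux/mutex.h>
#include <linux/errno.h>

struct rate_state {
	struct mutex rate_mtx;          /* was: struct semaphore rate_sf */
	unsigned int available_cell_rate;
};

static int reserve_rate(struct rate_state *s, unsigned int pcr)
{
	mutex_lock(&s->rate_mtx);               /* was: down(&s->rate_sf) */
	if (s->available_cell_rate < pcr) {
		mutex_unlock(&s->rate_mtx);     /* was: up(&s->rate_sf) */
		return -EAGAIN;
	}
	s->available_cell_rate -= pcr;          /* reserve the bandwidth */
	mutex_unlock(&s->rate_mtx);
	return 0;
}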
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 8510026b690a..d33aba6864c2 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1901,13 +1901,13 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 case ATM_AAL0:
 /* 2.10.1.5 raw cell receive */
 skb->len = ATM_AAL0_SDU;
-skb->tail = skb->data + skb->len;
+skb_set_tail_pointer(skb, skb->len);
 break;
 case ATM_AAL5:
 /* 2.10.1.2 aal5 receive */
 
 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
-skb->tail = skb->data + skb->len;
+skb_set_tail_pointer(skb, skb->len);
 #ifdef USE_CHECKSUM_HW
 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
 skb->ip_summed = CHECKSUM_COMPLETE;
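he.c's direct stores to skb->tail become skb_set_tail_pointer(), the write-side companion of skb_end_pointer(): when tail is an offset, it must be maintained relative to skb->head, not assigned a pointer. A sketch of the offset-flavour helpers, mirroring the kernel's, with the miniature struct from the earlier model extended by a tail field:

typedef unsigned int sk_buff_data_t;

struct sk_buff {
	unsigned char *head, *data;
	sk_buff_data_t tail;            /* offset of tail from head */
};

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

/* Replaces "skb->tail = skb->data + offset" in the offset flavour. */
static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}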
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index b4b80140c398..057efbc55d38 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1065,7 +1065,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 vcc = vc->rx_vcc;
 
 pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(skb),
-skb->end - skb->data, PCI_DMA_FROMDEVICE);
+skb_end_pointer(skb) - skb->data,
+PCI_DMA_FROMDEVICE);
 
 if ((vcc->qos.aal == ATM_AAL0) ||
 (vcc->qos.aal == ATM_AAL34)) {
@@ -1194,7 +1195,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 }
 
 pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-skb->end - skb->data, PCI_DMA_FROMDEVICE);
+skb_end_pointer(skb) - skb->data,
+PCI_DMA_FROMDEVICE);
 sb_pool_remove(card, skb);
 
 skb_trim(skb, len);
@@ -1267,7 +1269,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
 tail = readl(SAR_REG_RAWCT);
 
 pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
-queue->end - queue->head - 16,
+skb_end_pointer(queue) - queue->head - 16,
 PCI_DMA_FROMDEVICE);
 
 while (head != tail) {
@@ -1363,7 +1365,8 @@ drop:
 queue = card->raw_cell_head;
 pci_dma_sync_single_for_cpu(card->pcidev,
 IDT77252_PRV_PADDR(queue),
-queue->end - queue->data,
+(skb_end_pointer(queue) -
+ queue->data),
 PCI_DMA_FROMDEVICE);
 } else {
 card->raw_cell_head = NULL;
@@ -1816,7 +1819,8 @@ push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue)
 u32 handle;
 u32 addr;
 
-skb->data = skb->tail = skb->head;
+skb->data = skb->head;
+skb_reset_tail_pointer(skb);
 skb->len = 0;
 
 skb_reserve(skb, 16);
@@ -1835,7 +1839,6 @@ push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue)
 skb_put(skb, SAR_FB_SIZE_3);
 break;
 default:
-dev_kfree_skb(skb);
 return -1;
 }
 
@@ -1874,7 +1877,7 @@ add_rx_skb(struct idt77252_dev *card, int queue,
 }
 
 paddr = pci_map_single(card->pcidev, skb->data,
-skb->end - skb->data,
+skb_end_pointer(skb) - skb->data,
 PCI_DMA_FROMDEVICE);
 IDT77252_PRV_PADDR(skb) = paddr;
 
@@ -1888,7 +1891,7 @@ add_rx_skb(struct idt77252_dev *card, int queue,
 
 outunmap:
 pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-skb->end - skb->data, PCI_DMA_FROMDEVICE);
+skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE);
 
 handle = IDT77252_PRV_POOL(skb);
 card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
@@ -1905,12 +1908,14 @@ recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
 int err;
 
 pci_dma_sync_single_for_device(card->pcidev, IDT77252_PRV_PADDR(skb),
-skb->end - skb->data, PCI_DMA_FROMDEVICE);
+skb_end_pointer(skb) - skb->data,
+PCI_DMA_FROMDEVICE);
 
 err = push_rx_skb(card, skb, POOL_QUEUE(handle));
 if (err) {
 pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-skb->end - skb->data, PCI_DMA_FROMDEVICE);
+skb_end_pointer(skb) - skb->data,
+PCI_DMA_FROMDEVICE);
 sb_pool_remove(card, skb);
 dev_kfree_skb(skb);
 }
@@ -3122,7 +3127,8 @@ deinit_card(struct idt77252_dev *card)
 if (skb) {
 pci_unmap_single(card->pcidev,
 IDT77252_PRV_PADDR(skb),
-skb->end - skb->data,
+(skb_end_pointer(skb) -
+ skb->data),
 PCI_DMA_FROMDEVICE);
 card->sbpool[i].skb[j] = NULL;
 dev_kfree_skb(skb);
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index aab9b3733d52..14ced85b3f54 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -2208,7 +2208,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 if (i == 1 && ns_rsqe_eopdu(rsqe))
 *((u32 *) sb->data) |= 0x00000002;
 skb_put(sb, NS_AAL0_HEADER);
-memcpy(sb->tail, cell, ATM_CELL_PAYLOAD);
+memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
 skb_put(sb, ATM_CELL_PAYLOAD);
 ATM_SKB(sb)->vcc = vcc;
 __net_timestamp(sb);
@@ -2252,7 +2252,8 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 vc->rx_iov = iovb;
 NS_SKB(iovb)->iovcnt = 0;
 iovb->len = 0;
-iovb->tail = iovb->data = iovb->head;
+iovb->data = iovb->head;
+skb_reset_tail_pointer(iovb);
 NS_SKB(iovb)->vcc = vcc;
 /* IMPORTANT: a pointer to the sk_buff containing the small or large
 buffer is stored as iovec base, NOT a pointer to the
@@ -2265,7 +2266,8 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
 NS_SKB(iovb)->iovcnt = 0;
 iovb->len = 0;
-iovb->tail = iovb->data = iovb->head;
+iovb->data = iovb->head;
+skb_reset_tail_pointer(iovb);
 NS_SKB(iovb)->vcc = vcc;
 }
 iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++];
@@ -2393,7 +2395,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 skb->destructor = ns_lb_destructor;
 #endif /* NS_USE_DESTRUCTORS */
 skb_push(skb, NS_SMBUFSIZE);
-memcpy(skb->data, sb->data, NS_SMBUFSIZE);
+skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE);
 skb_put(skb, len - NS_SMBUFSIZE);
 ATM_SKB(skb)->vcc = vcc;
 __net_timestamp(skb);
@@ -2477,7 +2479,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 {
 /* Copy the small buffer to the huge buffer */
 sb = (struct sk_buff *) iov->iov_base;
-memcpy(hb->data, sb->data, iov->iov_len);
+skb_copy_from_linear_data(sb, hb->data, iov->iov_len);
 skb_put(hb, iov->iov_len);
 remaining = len - iov->iov_len;
 iov++;
@@ -2489,7 +2491,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 {
 lb = (struct sk_buff *) iov->iov_base;
 tocopy = min_t(int, remaining, iov->iov_len);
-memcpy(hb->tail, lb->data, tocopy);
+skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy);
 skb_put(hb, tocopy);
 iov++;
 remaining -= tocopy;
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 2308e83e5f33..1d8466817943 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -48,6 +48,15 @@ struct aoe_hdr {
 __be32 tag;
 };
 
+#ifdef __KERNEL__
+#include <linux/skbuff.h>
+
+static inline struct aoe_hdr *aoe_hdr(const struct sk_buff *skb)
+{
+	return (struct aoe_hdr *)skb_mac_header(skb);
+}
+#endif
+
 struct aoe_atahdr {
 unsigned char aflags;
 unsigned char errfeat;
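The new aoe_hdr() inline is the pattern this whole merge pushes: instead of drivers casting skb->mac.raw (now reached via skb_mac_header()) at every call site, the protocol header gets one typed accessor. A trimmed userspace model of the idea (the struct here carries only the fields the example touches):

#include <stdio.h>
#include <string.h>

struct sk_buff { unsigned char *head; unsigned int mac_header; };
struct aoe_hdr { unsigned char dst[6]; unsigned char src[6]; };

static unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;     /* offset flavour */
}

/* One typed helper replaces scattered (struct aoe_hdr *) casts. */
static struct aoe_hdr *aoe_hdr(const struct sk_buff *skb)
{
	return (struct aoe_hdr *)skb_mac_header(skb);
}

int main(void)
{
	unsigned char frame[64] = { 0 };
	struct sk_buff skb = { .head = frame, .mac_header = 0 };

	memset(aoe_hdr(&skb)->dst, 0xff, 6);    /* broadcast, as aoecmd.c does */
	printf("dst[0] = 0x%02x\n", aoe_hdr(&skb)->dst[0]);
	return 0;
}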
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8d17d8df3662..1a6aeac5a1c3 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -27,7 +27,8 @@ new_skb(ulong len)
 
 skb = alloc_skb(len, GFP_ATOMIC);
 if (skb) {
-skb->nh.raw = skb->mac.raw = skb->data;
+skb_reset_mac_header(skb);
+skb_reset_network_header(skb);
 skb->protocol = __constant_htons(ETH_P_AOE);
 skb->priority = 0;
 skb->next = skb->prev = NULL;
@@ -118,7 +119,7 @@ aoecmd_ata_rw(struct aoedev *d, struct frame *f)
 
 /* initialize the headers & frame */
 skb = f->skb;
-h = (struct aoe_hdr *) skb->mac.raw;
+h = aoe_hdr(skb);
 ah = (struct aoe_atahdr *) (h+1);
 skb_put(skb, sizeof *h + sizeof *ah);
 memset(h, 0, skb->len);
@@ -207,7 +208,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
 skb->dev = ifp;
 if (sl_tail == NULL)
 sl_tail = skb;
-h = (struct aoe_hdr *) skb->mac.raw;
+h = aoe_hdr(skb);
 memset(h, 0, sizeof *h + sizeof *ch);
 
 memset(h->dst, 0xff, sizeof h->dst);
@@ -300,7 +301,7 @@ rexmit(struct aoedev *d, struct frame *f)
 aoechr_error(buf);
 
 skb = f->skb;
-h = (struct aoe_hdr *) skb->mac.raw;
+h = aoe_hdr(skb);
 ah = (struct aoe_atahdr *) (h+1);
 f->tag = n;
 h->tag = cpu_to_be32(n);
@@ -529,7 +530,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 char ebuf[128];
 u16 aoemajor;
 
-hin = (struct aoe_hdr *) skb->mac.raw;
+hin = aoe_hdr(skb);
 aoemajor = be16_to_cpu(get_unaligned(&hin->major));
 d = aoedev_by_aoeaddr(aoemajor, hin->minor);
 if (d == NULL) {
@@ -561,7 +562,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 calc_rttavg(d, tsince(f->tag));
 
 ahin = (struct aoe_atahdr *) (hin+1);
-hout = (struct aoe_hdr *) f->skb->mac.raw;
+hout = aoe_hdr(f->skb);
 ahout = (struct aoe_atahdr *) (hout+1);
 buf = f->buf;
 
@@ -695,7 +696,7 @@ aoecmd_ata_id(struct aoedev *d)
 
 /* initialize the headers & frame */
 skb = f->skb;
-h = (struct aoe_hdr *) skb->mac.raw;
+h = aoe_hdr(skb);
 ah = (struct aoe_atahdr *) (h+1);
 skb_put(skb, sizeof *h + sizeof *ah);
 memset(h, 0, skb->len);
@@ -726,7 +727,7 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
 enum { MAXFRAMES = 16 };
 u16 n;
 
-h = (struct aoe_hdr *) skb->mac.raw;
+h = aoe_hdr(skb);
 ch = (struct aoe_cfghdr *) (h+1);
 
 /*
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index aab6d91a2c22..f9ddfda4d9cb 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -123,7 +123,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
 goto exit;
 skb_push(skb, ETH_HLEN); /* (1) */
 
-h = (struct aoe_hdr *) skb->mac.raw;
+h = aoe_hdr(skb);
 n = be32_to_cpu(get_unaligned(&h->tag));
 if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
 goto exit;
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 4c766f36d884..b990805806af 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -527,7 +527,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
 buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size;
 
 memcpy(skb_put(nskb, 3), buf, 3);
-memcpy(skb_put(nskb, size), skb->data + sent, size);
+skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size);
 
 sent += size;
 count -= size;
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index acfb6a430dcc..851de4d5b7de 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -461,20 +461,20 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
 switch (info->rx_state) {
 
 case RECV_WAIT_EVENT_HEADER:
-eh = (struct hci_event_hdr *)(info->rx_skb->data);
+eh = hci_event_hdr(info->rx_skb);
 info->rx_state = RECV_WAIT_DATA;
 info->rx_count = eh->plen;
 break;
 
 case RECV_WAIT_ACL_HEADER:
-ah = (struct hci_acl_hdr *)(info->rx_skb->data);
+ah = hci_acl_hdr(info->rx_skb);
 dlen = __le16_to_cpu(ah->dlen);
 info->rx_state = RECV_WAIT_DATA;
 info->rx_count = dlen;
 break;
 
 case RECV_WAIT_SCO_HEADER:
-sh = (struct hci_sco_hdr *)(info->rx_skb->data);
+sh = hci_sco_hdr(info->rx_skb);
 info->rx_state = RECV_WAIT_DATA;
 info->rx_count = sh->dlen;
 break;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 9fca6513562d..e8ebd5d3de86 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -231,7 +231,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data)
 cr = (struct usb_ctrlrequest *) urb->setup_packet;
 cr->wLength = __cpu_to_le16(skb->len);
 
-memcpy(urb->transfer_buffer, skb->data, skb->len);
+skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
 urb->transfer_buffer_length = skb->len;
 
 err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -250,7 +250,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data)
 skb = skb_dequeue(&data->tx_queue);
 
 if (skb) {
-memcpy(urb->transfer_buffer, skb->data, skb->len);
+skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
 urb->transfer_buffer_length = skb->len;
 
 err = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 18b0f3992c5b..39516074636b 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -303,20 +303,20 @@ static void bt3c_receive(bt3c_info_t *info)
 switch (info->rx_state) {
 
 case RECV_WAIT_EVENT_HEADER:
-eh = (struct hci_event_hdr *)(info->rx_skb->data);
+eh = hci_event_hdr(info->rx_skb);
 info->rx_state = RECV_WAIT_DATA;
 info->rx_count = eh->plen;
 break;
 
 case RECV_WAIT_ACL_HEADER:
-ah = (struct hci_acl_hdr *)(info->rx_skb->data);
+ah = hci_acl_hdr(info->rx_skb);
 dlen = __le16_to_cpu(ah->dlen);
 info->rx_state = RECV_WAIT_DATA;
 info->rx_count = dlen;
 break;
 
 case RECV_WAIT_SCO_HEADER:
-sh = (struct hci_sco_hdr *)(info->rx_skb->data);
+sh = hci_sco_hdr(info->rx_skb);
 info->rx_state = RECV_WAIT_DATA;
 info->rx_count = sh->dlen;
 break;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index c1bce75148fe..d7d2ea0d86a1 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -250,20 +250,20 @@ static void btuart_receive(btuart_info_t *info)
 switch (info->rx_state) {
 
 case RECV_WAIT_EVENT_HEADER:
-eh = (struct hci_event_hdr *)(info->rx_skb->data);
+eh = hci_event_hdr(info->rx_skb);
 info->rx_state = RECV_WAIT_DATA;
 info->rx_count = eh->plen;
 break;
 
 case RECV_WAIT_ACL_HEADER:
-ah = (struct hci_acl_hdr *)(info->rx_skb->data);
+ah = hci_acl_hdr(info->rx_skb);
 dlen = __le16_to_cpu(ah->dlen);
 info->rx_state = RECV_WAIT_DATA;
 info->rx_count = dlen;
 break;
 
 case RECV_WAIT_SCO_HEADER:
-sh = (struct hci_sco_hdr *)(info->rx_skb->data);
+sh = hci_sco_hdr(info->rx_skb);
 info->rx_state = RECV_WAIT_DATA;
 info->rx_count = sh->dlen;
 break;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 459aa97937ab..7f9c54b9964a 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -425,7 +425,7 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
 return -ENOMEM;
 
 skb_reserve(s, NSHL);
-memcpy(skb_put(s, skb->len), skb->data, skb->len);
+skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len);
 if (skb->len & 0x0001)
 *skb_put(s, 1) = 0; /* PAD */
 
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 34f0afc42407..bfbae14cf93d 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -188,7 +188,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
 continue;
 
 case H4_W4_EVENT_HDR:
-eh = (struct hci_event_hdr *) h4->rx_skb->data;
+eh = hci_event_hdr(h4->rx_skb);
 
 BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
 
@@ -196,7 +196,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
 continue;
 
 case H4_W4_ACL_HDR:
-ah = (struct hci_acl_hdr *) h4->rx_skb->data;
+ah = hci_acl_hdr(h4->rx_skb);
 dlen = __le16_to_cpu(ah->dlen);
 
 BT_DBG("ACL header: dlen %d", dlen);
@@ -205,7 +205,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
 continue;
 
 case H4_W4_SCO_HDR:
-sh = (struct hci_sco_hdr *) h4->rx_skb->data;
+sh = hci_sco_hdr(h4->rx_skb);
 
 BT_DBG("SCO header: dlen %d", sh->dlen);
 
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 8d025e9b5bce..157b1d09ab55 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4169,7 +4169,7 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
 netif_stop_queue(dev);
 
 /* copy data to device buffers */
-memcpy(info->tx_buf, skb->data, skb->len);
+skb_copy_from_linear_data(skb, info->tx_buf, skb->len);
 info->tx_get = 0;
 info->tx_put = info->tx_count = skb->len;
 
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b9dc7aa1dfb3..46c1b97748b6 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -881,15 +881,15 @@ EXPORT_SYMBOL(get_random_bytes);
 */
 static void init_std_data(struct entropy_store *r)
 {
-struct timeval tv;
+ktime_t now;
 unsigned long flags;
 
 spin_lock_irqsave(&r->lock, flags);
 r->entropy_count = 0;
 spin_unlock_irqrestore(&r->lock, flags);
 
-do_gettimeofday(&tv);
-add_entropy_words(r, (__u32 *)&tv, sizeof(tv)/4);
+now = ktime_get_real();
+add_entropy_words(r, (__u32 *)&now, sizeof(now)/4);
 add_entropy_words(r, (__u32 *)utsname(),
 sizeof(*(utsname()))/4);
 }
@@ -911,14 +911,12 @@ void rand_initialize_irq(int irq)
 return;
 
 /*
- * If kmalloc returns null, we just won't use that entropy
+ * If kzalloc returns null, we just won't use that entropy
 * source.
 */
-state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
-if (state) {
-memset(state, 0, sizeof(struct timer_rand_state));
+state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+if (state)
 irq_timer_state[irq] = state;
-}
 }
 
 #ifdef CONFIG_BLOCK
@@ -927,14 +925,12 @@ void rand_initialize_disk(struct gendisk *disk)
 struct timer_rand_state *state;
 
 /*
- * If kmalloc returns null, we just won't use that entropy
+ * If kzalloc returns null, we just won't use that entropy
 * source.
 */
-state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
-if (state) {
-memset(state, 0, sizeof(struct timer_rand_state));
+state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+if (state)
 disk->random = state;
-}
 }
 #endif
 
@@ -1469,7 +1465,6 @@ late_initcall(seqgen_init);
 __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
 __be16 sport, __be16 dport)
 {
-struct timeval tv;
 __u32 seq;
 __u32 hash[12];
 struct keydata *keyptr = get_keyptr();
@@ -1485,8 +1480,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
 seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
 seq += keyptr->count;
 
-do_gettimeofday(&tv);
-seq += tv.tv_usec + tv.tv_sec * 1000000;
+seq += ktime_get_real().tv64;
 
 return seq;
 }
@@ -1521,7 +1515,6 @@ __u32 secure_ip_id(__be32 daddr)
 __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 __be16 sport, __be16 dport)
 {
-struct timeval tv;
 __u32 seq;
 __u32 hash[4];
 struct keydata *keyptr = get_keyptr();
@@ -1543,12 +1536,11 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 * As close as possible to RFC 793, which
 * suggests using a 250 kHz clock.
 * Further reading shows this assumes 2 Mb/s networks.
- * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
+ * For 10 Gb/s Ethernet, a 1 GHz clock is appropriate.
 * That's funny, Linux has one built in! Use it!
 * (Networks are faster now - should this be increased?)
 */
-do_gettimeofday(&tv);
-seq += tv.tv_usec + tv.tv_sec * 1000000;
+seq += ktime_get_real().tv64;
 #if 0
 printk("init_seq(%lx, %lx, %d, %d) = %d\n",
 saddr, daddr, sport, dport, seq);
@@ -1556,8 +1548,6 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 return seq;
 }
 
-EXPORT_SYMBOL(secure_tcp_sequence_number);
-
 /* Generate secure starting point for ephemeral IPV4 transport port search */
 u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 {
@@ -1598,7 +1588,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16
 u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
 __be16 sport, __be16 dport)
 {
-struct timeval tv;
 u64 seq;
 __u32 hash[4];
 struct keydata *keyptr = get_keyptr();
@@ -1611,8 +1600,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
 seq = half_md4_transform(hash, keyptr->secret);
 seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
 
-do_gettimeofday(&tv);
-seq += tv.tv_usec + tv.tv_sec * 1000000;
+seq += ktime_get_real().tv64;
 seq &= (1ull << 48) - 1;
 #if 0
 printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
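Besides the ktime_get_real().tv64 seeding seen above, random.c picks up the kzalloc() conversion: allocate-then-memset collapses into one zeroing allocation, which also lets the null check shed its braces. Modeled in miniature below (the struct's field is illustrative, not the kernel's timer_rand_state layout):

#include <stdlib.h>

struct timer_rand_state_model { unsigned long last_time; };  /* illustrative */

/* Userspace stand-in for kzalloc(size, GFP_KERNEL): allocate zeroed memory. */
static void *kzalloc_model(size_t size)
{
	return calloc(1, size);
}

int main(void)
{
	/* Before: p = kmalloc(...); if (p) { memset(p, 0, ...); store(p); }
	 * After: one call, and on failure the entropy source is simply
	 * skipped, exactly as the comment in the hunk says. */
	struct timer_rand_state_model *state = kzalloc_model(sizeof(*state));
	if (state)
		free(state);    /* the kernel stores it, e.g. irq_timer_state[irq] */
	return 0;
}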
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index a905f7820331..a7b9e9bb3e8d 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -212,7 +212,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
 skb = skb_get(__skb);
 
 if (skb->len >= NLMSG_SPACE(0)) {
-nlh = (struct nlmsghdr *)skb->data;
+nlh = nlmsg_hdr(skb);
 
 if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
 skb->len < nlh->nlmsg_len ||
@@ -448,7 +448,7 @@ static int __devinit cn_init(void)
 
 dev->nls = netlink_kernel_create(NETLINK_CONNECTOR,
 CN_NETLINK_USERS + 0xf,
-dev->input, THIS_MODULE);
+dev->input, NULL, THIS_MODULE);
 if (!dev->nls)
 return -EIO;
 
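connector.c tracks two netlink interface changes merged here: the open-coded cast of skb->data becomes the typed accessor nlmsg_hdr(), and netlink_kernel_create() gained a mutex argument for serializing callbacks (NULL, as passed here, is read as asking the netlink core to supply its own default; treat that reading as an assumption). The accessor itself is a typed view, sketched over minimal struct definitions:

typedef unsigned int __u32;
typedef unsigned short __u16;

struct sk_buff { unsigned char *data; };

struct nlmsghdr {
	__u32 nlmsg_len;        /* length of message including header */
	__u16 nlmsg_type;
	__u16 nlmsg_flags;
	__u32 nlmsg_seq;
	__u32 nlmsg_pid;
};

/* Mirrors the kernel helper: the netlink header sits at skb->data. */
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
	return (struct nlmsghdr *)skb->data;
}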
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 03e44b337eb0..a364003ba47f 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -834,7 +834,7 @@ static inline u16 ether1394_type_trans(struct sk_buff *skb,
 struct eth1394hdr *eth;
 unsigned char *rawp;
 
-skb->mac.raw = skb->data;
+skb_reset_mac_header(skb);
 skb_pull (skb, ETH1394_HLEN);
 eth = eth1394_hdr(skb);
 
@@ -1668,7 +1668,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
 if (memcmp(eth->h_dest, dev->broadcast, ETH1394_ALEN) == 0 ||
 proto == htons(ETH_P_ARP) ||
 (proto == htons(ETH_P_IP) &&
-IN_MULTICAST(ntohl(skb->nh.iph->daddr)))) {
+IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
 tx_type = ETH1394_GASP;
 dest_node = LOCAL_BUS | ALL_NODES;
 max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD;
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
index c45cbff9138d..1e8356535149 100644
--- a/drivers/ieee1394/eth1394.h
+++ b/drivers/ieee1394/eth1394.h
@@ -90,7 +90,7 @@ struct eth1394hdr {
 
 static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
 {
-	return (struct eth1394hdr *)skb->mac.raw;
+	return (struct eth1394hdr *)skb_mac_header(skb);
 }
 #endif
 
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 13efd4170349..6edfecf1be72 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 *
@@ -31,7 +31,6 @@
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
- * $Id: mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
 */
 #include <linux/dma-mapping.h>
 #include <rdma/ib_cache.h>
@@ -668,7 +667,7 @@ static void build_smp_wc(struct ib_qp *qp,
 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 struct ib_mad_send_wr_private *mad_send_wr)
 {
-int ret;
+int ret = 0;
 struct ib_smp *smp = mad_send_wr->send_buf.mad;
 unsigned long flags;
 struct ib_mad_local_private *local;
@@ -688,14 +687,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 */
 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
 IB_LID_PERMISSIVE &&
-!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
+smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
+IB_SMI_DISCARD) {
 ret = -EINVAL;
 printk(KERN_ERR PFX "Invalid directed route\n");
 goto out;
 }
+
 /* Check to post send on QP or process locally */
-ret = smi_check_local_smp(smp, device);
-if (!ret)
+if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD)
 goto out;
 
 local = kmalloc(sizeof *local, GFP_ATOMIC);
@@ -1874,18 +1874,22 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 
 if (recv->mad.mad.mad_hdr.mgmt_class ==
 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-if (!smi_handle_dr_smp_recv(&recv->mad.smp,
-port_priv->device->node_type,
-port_priv->port_num,
-port_priv->device->phys_port_cnt))
+if (smi_handle_dr_smp_recv(&recv->mad.smp,
+port_priv->device->node_type,
+port_priv->port_num,
+port_priv->device->phys_port_cnt) ==
+IB_SMI_DISCARD)
 goto out;
-if (!smi_check_forward_dr_smp(&recv->mad.smp))
+
+if (smi_check_forward_dr_smp(&recv->mad.smp) == IB_SMI_LOCAL)
 goto local;
-if (!smi_handle_dr_smp_send(&recv->mad.smp,
-port_priv->device->node_type,
-port_priv->port_num))
+
+if (smi_handle_dr_smp_send(&recv->mad.smp,
+port_priv->device->node_type,
+port_priv->port_num) == IB_SMI_DISCARD)
 goto out;
-if (!smi_check_local_smp(&recv->mad.smp, port_priv->device))
+
+if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
 goto out;
 }
 
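The mad.c rework follows smi.c's new signatures (see the smi.c diff below): checks that returned bare 0/1 ints now return enums, so call sites compare against IB_SMI_DISCARD, or against IB_SMI_LOCAL from the companion forward-action enum, instead of negating a boolean. A minimal sketch of the pattern (the enumerator set here is abbreviated):

#include <stdio.h>

/* Sketch of the smi_action enum this series introduces; abbreviated. */
enum smi_action { IB_SMI_DISCARD, IB_SMI_HANDLE };

static enum smi_action check_initial_path(int path_port, int port_num)
{
	/* Old style: return (path_port == port_num); tested with "!" */
	return path_port == port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD;
}

int main(void)
{
	if (check_initial_path(1, 2) == IB_SMI_DISCARD)  /* intent is explicit */
		printf("discard\n");
	return 0;
}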
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 68db633711c5..9a7eaadb1688 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -57,6 +57,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 struct ib_sa_sm_ah {
 struct ib_ah *ah;
 struct kref ref;
+u8 src_path_mask;
 };
 
 struct ib_sa_port {
@@ -380,6 +381,7 @@ static void update_sm_ah(struct work_struct *work)
 }
 
 kref_init(&new_ah->ref);
+new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
 
 memset(&ah_attr, 0, sizeof ah_attr);
 ah_attr.dlid = port_attr.sm_lid;
@@ -460,6 +462,25 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
 }
 EXPORT_SYMBOL(ib_sa_cancel_query);
 
+static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
+{
+	struct ib_sa_device *sa_dev;
+	struct ib_sa_port *port;
+	unsigned long flags;
+	u8 src_path_mask;
+
+	sa_dev = ib_get_client_data(device, &sa_client);
+	if (!sa_dev)
+		return 0x7f;
+
+	port = &sa_dev->port[port_num - sa_dev->start_port];
+	spin_lock_irqsave(&port->ah_lock, flags);
+	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
+	spin_unlock_irqrestore(&port->ah_lock, flags);
+
+	return src_path_mask;
+}
+
 int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
 {
@@ -469,7 +490,8 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 memset(ah_attr, 0, sizeof *ah_attr);
 ah_attr->dlid = be16_to_cpu(rec->dlid);
 ah_attr->sl = rec->sl;
-ah_attr->src_path_bits = be16_to_cpu(rec->slid) & 0x7f;
+ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
+get_src_path_mask(device, port_num);
 ah_attr->port_num = port_num;
 ah_attr->static_rate = rec->rate;
 
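sa_query.c stops hard-coding a 7-bit source path mask. With LID Mask Control, a port is assigned 2^lmc consecutive LIDs and the low lmc bits of the source LID select among paths, so update_sm_ah() records (1 << lmc) - 1 and ib_init_ah_from_path() masks the SLID with it, falling back to 0x7f when no SM address handle is cached. A worked example of the masking:

#include <stdio.h>

int main(void)
{
	unsigned int lmc = 3;                  /* port owns 2^3 = 8 LIDs */
	unsigned char mask = (1 << lmc) - 1;   /* 0x07, as in update_sm_ah() */
	unsigned short slid = 0x4d;            /* example source LID */

	/* Low lmc bits of the SLID become src_path_bits. */
	printf("src_path_bits = 0x%02x\n", slid & mask);   /* prints 0x05 */
	return 0;
}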
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 54b81e17ad50..2bca753eb622 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -3,7 +3,7 @@
 * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
@@ -34,7 +34,6 @@
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
- * $Id: smi.c 1389 2004-12-27 22:56:47Z roland $
 */
 
 #include <rdma/ib_smi.h>
@@ -44,9 +43,8 @@
 * Fixup a directed route SMP for sending
 * Return 0 if the SMP should be discarded
 */
-int smi_handle_dr_smp_send(struct ib_smp *smp,
-u8 node_type,
-int port_num)
+enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
+u8 node_type, int port_num)
 {
 u8 hop_ptr, hop_cnt;
 
@@ -59,18 +57,18 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
 if (hop_cnt && hop_ptr == 0) {
 smp->hop_ptr++;
 return (smp->initial_path[smp->hop_ptr] ==
-port_num);
+port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
 }
 
 /* C14-9:2 */
 if (hop_ptr && hop_ptr < hop_cnt) {
 if (node_type != RDMA_NODE_IB_SWITCH)
-return 0;
+return IB_SMI_DISCARD;
 
 /* smp->return_path set when received */
 smp->hop_ptr++;
 return (smp->initial_path[smp->hop_ptr] ==
-port_num);
+port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
 }
 
 /* C14-9:3 -- We're at the end of the DR segment of path */
@@ -78,29 +76,30 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
 /* smp->return_path set when received */
 smp->hop_ptr++;
 return (node_type == RDMA_NODE_IB_SWITCH ||
-smp->dr_dlid == IB_LID_PERMISSIVE);
+smp->dr_dlid == IB_LID_PERMISSIVE ?
+IB_SMI_HANDLE : IB_SMI_DISCARD);
 }
 
 /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
 /* C14-9:5 -- Fail unreasonable hop pointer */
-return (hop_ptr == hop_cnt + 1);
+return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
 
 } else {
 /* C14-13:1 */
 if (hop_cnt && hop_ptr == hop_cnt + 1) {
 smp->hop_ptr--;
 return (smp->return_path[smp->hop_ptr] ==
-port_num);
+port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
 }
 
 /* C14-13:2 */
 if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
 if (node_type != RDMA_NODE_IB_SWITCH)
-return 0;
+return IB_SMI_DISCARD;
 
 smp->hop_ptr--;
 return (smp->return_path[smp->hop_ptr] ==
-port_num);
+port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
 }
 
 /* C14-13:3 -- at the end of the DR segment of path */
@@ -108,15 +107,16 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
108 | smp->hop_ptr--; | 107 | smp->hop_ptr--; |
109 | /* C14-13:3 -- SMPs destined for SM shouldn't be here */ | 108 | /* C14-13:3 -- SMPs destined for SM shouldn't be here */ |
110 | return (node_type == RDMA_NODE_IB_SWITCH || | 109 | return (node_type == RDMA_NODE_IB_SWITCH || |
111 | smp->dr_slid == IB_LID_PERMISSIVE); | 110 | smp->dr_slid == IB_LID_PERMISSIVE ? |
111 | IB_SMI_HANDLE : IB_SMI_DISCARD); | ||
112 | } | 112 | } |
113 | 113 | ||
114 | /* C14-13:4 -- hop_ptr = 0 -> should have gone to SM */ | 114 | /* C14-13:4 -- hop_ptr = 0 -> should have gone to SM */ |
115 | if (hop_ptr == 0) | 115 | if (hop_ptr == 0) |
116 | return 1; | 116 | return IB_SMI_HANDLE; |
117 | 117 | ||
118 | /* C14-13:5 -- Check for unreasonable hop pointer */ | 118 | /* C14-13:5 -- Check for unreasonable hop pointer */ |
119 | return 0; | 119 | return IB_SMI_DISCARD; |
120 | } | 120 | } |
121 | } | 121 | } |
122 | 122 | ||
@@ -124,10 +124,8 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, | |||
124 | * Adjust information for a received SMP | 124 | * Adjust information for a received SMP |
125 | * Return 0 if the SMP should be dropped | 125 | * Return 0 if the SMP should be dropped |
126 | */ | 126 | */ |
127 | int smi_handle_dr_smp_recv(struct ib_smp *smp, | 127 | enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, |
128 | u8 node_type, | 128 | int port_num, int phys_port_cnt) |
129 | int port_num, | ||
130 | int phys_port_cnt) | ||
131 | { | 129 | { |
132 | u8 hop_ptr, hop_cnt; | 130 | u8 hop_ptr, hop_cnt; |
133 | 131 | ||
@@ -138,16 +136,17 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, | |||
138 | if (!ib_get_smp_direction(smp)) { | 136 | if (!ib_get_smp_direction(smp)) { |
139 | /* C14-9:1 -- sender should have incremented hop_ptr */ | 137 | /* C14-9:1 -- sender should have incremented hop_ptr */ |
140 | if (hop_cnt && hop_ptr == 0) | 138 | if (hop_cnt && hop_ptr == 0) |
141 | return 0; | 139 | return IB_SMI_DISCARD; |
142 | 140 | ||
143 | /* C14-9:2 -- intermediate hop */ | 141 | /* C14-9:2 -- intermediate hop */ |
144 | if (hop_ptr && hop_ptr < hop_cnt) { | 142 | if (hop_ptr && hop_ptr < hop_cnt) { |
145 | if (node_type != RDMA_NODE_IB_SWITCH) | 143 | if (node_type != RDMA_NODE_IB_SWITCH) |
146 | return 0; | 144 | return IB_SMI_DISCARD; |
147 | 145 | ||
148 | smp->return_path[hop_ptr] = port_num; | 146 | smp->return_path[hop_ptr] = port_num; |
149 | /* smp->hop_ptr updated when sending */ | 147 | /* smp->hop_ptr updated when sending */ |
150 | return (smp->initial_path[hop_ptr+1] <= phys_port_cnt); | 148 | return (smp->initial_path[hop_ptr+1] <= phys_port_cnt ? |
149 | IB_SMI_HANDLE : IB_SMI_DISCARD); | ||
151 | } | 150 | } |
152 | 151 | ||
153 | /* C14-9:3 -- We're at the end of the DR segment of path */ | 152 | /* C14-9:3 -- We're at the end of the DR segment of path */ |
@@ -157,12 +156,13 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, | |||
157 | /* smp->hop_ptr updated when sending */ | 156 | /* smp->hop_ptr updated when sending */ |
158 | 157 | ||
159 | return (node_type == RDMA_NODE_IB_SWITCH || | 158 | return (node_type == RDMA_NODE_IB_SWITCH || |
160 | smp->dr_dlid == IB_LID_PERMISSIVE); | 159 | smp->dr_dlid == IB_LID_PERMISSIVE ? |
160 | IB_SMI_HANDLE : IB_SMI_DISCARD); | ||
161 | } | 161 | } |
162 | 162 | ||
163 | /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ | 163 | /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ |
164 | /* C14-9:5 -- fail unreasonable hop pointer */ | 164 | /* C14-9:5 -- fail unreasonable hop pointer */ |
165 | return (hop_ptr == hop_cnt + 1); | 165 | return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD); |
166 | 166 | ||
167 | } else { | 167 | } else { |
168 | 168 | ||
@@ -170,16 +170,17 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, | |||
170 | if (hop_cnt && hop_ptr == hop_cnt + 1) { | 170 | if (hop_cnt && hop_ptr == hop_cnt + 1) { |
171 | smp->hop_ptr--; | 171 | smp->hop_ptr--; |
172 | return (smp->return_path[smp->hop_ptr] == | 172 | return (smp->return_path[smp->hop_ptr] == |
173 | port_num); | 173 | port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); |
174 | } | 174 | } |
175 | 175 | ||
176 | /* C14-13:2 */ | 176 | /* C14-13:2 */ |
177 | if (2 <= hop_ptr && hop_ptr <= hop_cnt) { | 177 | if (2 <= hop_ptr && hop_ptr <= hop_cnt) { |
178 | if (node_type != RDMA_NODE_IB_SWITCH) | 178 | if (node_type != RDMA_NODE_IB_SWITCH) |
179 | return 0; | 179 | return IB_SMI_DISCARD; |
180 | 180 | ||
181 | /* smp->hop_ptr updated when sending */ | 181 | /* smp->hop_ptr updated when sending */ |
182 | return (smp->return_path[hop_ptr-1] <= phys_port_cnt); | 182 | return (smp->return_path[hop_ptr-1] <= phys_port_cnt ? |
183 | IB_SMI_HANDLE : IB_SMI_DISCARD); | ||
183 | } | 184 | } |
184 | 185 | ||
185 | /* C14-13:3 -- We're at the end of the DR segment of path */ | 186 | /* C14-13:3 -- We're at the end of the DR segment of path */ |
@@ -187,23 +188,20 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, | |||
187 | if (smp->dr_slid == IB_LID_PERMISSIVE) { | 188 | if (smp->dr_slid == IB_LID_PERMISSIVE) { |
188 | /* giving SMP to SM - update hop_ptr */ | 189 | /* giving SMP to SM - update hop_ptr */ |
189 | smp->hop_ptr--; | 190 | smp->hop_ptr--; |
190 | return 1; | 191 | return IB_SMI_HANDLE; |
191 | } | 192 | } |
192 | /* smp->hop_ptr updated when sending */ | 193 | /* smp->hop_ptr updated when sending */ |
193 | return (node_type == RDMA_NODE_IB_SWITCH); | 194 | return (node_type == RDMA_NODE_IB_SWITCH ? |
195 | IB_SMI_HANDLE: IB_SMI_DISCARD); | ||
194 | } | 196 | } |
195 | 197 | ||
196 | /* C14-13:4 -- hop_ptr = 0 -> give to SM */ | 198 | /* C14-13:4 -- hop_ptr = 0 -> give to SM */ |
197 | /* C14-13:5 -- Check for unreasonable hop pointer */ | 199 | /* C14-13:5 -- Check for unreasonable hop pointer */ |
198 | return (hop_ptr == 0); | 200 | return (hop_ptr == 0 ? IB_SMI_HANDLE : IB_SMI_DISCARD); |
199 | } | 201 | } |
200 | } | 202 | } |
201 | 203 | ||
202 | /* | 204 | enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp) |
203 | * Return 1 if the received DR SMP should be forwarded to the send queue | ||
204 | * Return 0 if the SMP should be completed up the stack | ||
205 | */ | ||
206 | int smi_check_forward_dr_smp(struct ib_smp *smp) | ||
207 | { | 205 | { |
208 | u8 hop_ptr, hop_cnt; | 206 | u8 hop_ptr, hop_cnt; |
209 | 207 | ||
@@ -213,23 +211,25 @@ int smi_check_forward_dr_smp(struct ib_smp *smp) | |||
213 | if (!ib_get_smp_direction(smp)) { | 211 | if (!ib_get_smp_direction(smp)) { |
214 | /* C14-9:2 -- intermediate hop */ | 212 | /* C14-9:2 -- intermediate hop */ |
215 | if (hop_ptr && hop_ptr < hop_cnt) | 213 | if (hop_ptr && hop_ptr < hop_cnt) |
216 | return 1; | 214 | return IB_SMI_SEND; |
217 | 215 | ||
218 | /* C14-9:3 -- at the end of the DR segment of path */ | 216 | /* C14-9:3 -- at the end of the DR segment of path */ |
219 | if (hop_ptr == hop_cnt) | 217 | if (hop_ptr == hop_cnt) |
220 | return (smp->dr_dlid == IB_LID_PERMISSIVE); | 218 | return (smp->dr_dlid == IB_LID_PERMISSIVE ? |
219 | IB_SMI_SEND : IB_SMI_LOCAL); | ||
221 | 220 | ||
222 | /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ | 221 | /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ |
223 | if (hop_ptr == hop_cnt + 1) | 222 | if (hop_ptr == hop_cnt + 1) |
224 | return 1; | 223 | return IB_SMI_SEND; |
225 | } else { | 224 | } else { |
226 | /* C14-13:2 */ | 225 | /* C14-13:2 -- intermediate hop */ |
227 | if (2 <= hop_ptr && hop_ptr <= hop_cnt) | 226 | if (2 <= hop_ptr && hop_ptr <= hop_cnt) |
228 | return 1; | 227 | return IB_SMI_SEND; |
229 | 228 | ||
230 | /* C14-13:3 -- at the end of the DR segment of path */ | 229 | /* C14-13:3 -- at the end of the DR segment of path */ |
231 | if (hop_ptr == 1) | 230 | if (hop_ptr == 1) |
232 | return (smp->dr_slid != IB_LID_PERMISSIVE); | 231 | return (smp->dr_slid != IB_LID_PERMISSIVE ? |
232 | IB_SMI_SEND : IB_SMI_LOCAL); | ||
233 | } | 233 | } |
234 | return 0; | 234 | return IB_SMI_LOCAL; |
235 | } | 235 | } |
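[Editor's note] The smi.c conversion above is mechanical but deliberate: every bare 0/1 return becomes a value of enum smi_action (or enum smi_forward_action for the forwarding decision), so call sites compare against named routing outcomes instead of magic booleans. A compilable sketch of the pattern; only the enum values are taken from the patch, the surrounding scaffolding is illustrative:

#include <stdio.h>

enum smi_action { IB_SMI_DISCARD, IB_SMI_HANDLE };

/* Illustrative check in the style of C14-9:4/5: hand the SMP to the
 * local SMA/SM only when hop_ptr == hop_cnt + 1, otherwise discard. */
static enum smi_action check_hop(unsigned hop_ptr, unsigned hop_cnt)
{
        return hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD;
}

int main(void)
{
        printf("%s\n", check_hop(3, 2) == IB_SMI_HANDLE ?
               "handle" : "discard");
        return 0;
}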
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h index 3011bfd86dc5..9a4b349efc30 100644 --- a/drivers/infiniband/core/smi.h +++ b/drivers/infiniband/core/smi.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * Copyright (c) 2004 Infinicon Corporation. All rights reserved. | 3 | * Copyright (c) 2004 Infinicon Corporation. All rights reserved. |
4 | * Copyright (c) 2004 Intel Corporation. All rights reserved. | 4 | * Copyright (c) 2004 Intel Corporation. All rights reserved. |
5 | * Copyright (c) 2004 Topspin Corporation. All rights reserved. | 5 | * Copyright (c) 2004 Topspin Corporation. All rights reserved. |
6 | * Copyright (c) 2004 Voltaire Corporation. All rights reserved. | 6 | * Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved. |
7 | * | 7 | * |
8 | * This software is available to you under a choice of one of two | 8 | * This software is available to you under a choice of one of two |
9 | * licenses. You may choose to be licensed under the terms of the GNU | 9 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -33,7 +33,6 @@ | |||
33 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 33 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
34 | * SOFTWARE. | 34 | * SOFTWARE. |
35 | * | 35 | * |
36 | * $Id: smi.h 1389 2004-12-27 22:56:47Z roland $ | ||
37 | */ | 36 | */ |
38 | 37 | ||
39 | #ifndef __SMI_H_ | 38 | #ifndef __SMI_H_ |
@@ -41,26 +40,33 @@ | |||
41 | 40 | ||
42 | #include <rdma/ib_smi.h> | 41 | #include <rdma/ib_smi.h> |
43 | 42 | ||
44 | int smi_handle_dr_smp_recv(struct ib_smp *smp, | 43 | enum smi_action { |
45 | u8 node_type, | 44 | IB_SMI_DISCARD, |
46 | int port_num, | 45 | IB_SMI_HANDLE |
47 | int phys_port_cnt); | 46 | }; |
48 | extern int smi_check_forward_dr_smp(struct ib_smp *smp); | 47 | |
49 | extern int smi_handle_dr_smp_send(struct ib_smp *smp, | 48 | enum smi_forward_action { |
50 | u8 node_type, | 49 | IB_SMI_LOCAL, /* SMP should be completed up the stack */ |
51 | int port_num); | 50 | IB_SMI_SEND, /* received DR SMP should be forwarded to the send queue */ |
51 | }; | ||
52 | |||
53 | enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, | ||
54 | int port_num, int phys_port_cnt); | ||
55 | extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp); | ||
56 | extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, | ||
57 | u8 node_type, int port_num); | ||
52 | 58 | ||
53 | /* | 59 | /* |
54 | * Return 1 if the SMP should be handled by the local SMA/SM via process_mad | 60 | * Return 1 if the SMP should be handled by the local SMA/SM via process_mad |
55 | */ | 61 | */ |
56 | static inline int smi_check_local_smp(struct ib_smp *smp, | 62 | static inline enum smi_action smi_check_local_smp(struct ib_smp *smp, |
57 | struct ib_device *device) | 63 | struct ib_device *device) |
58 | { | 64 | { |
59 | /* C14-9:3 -- We're at the end of the DR segment of path */ | 65 | /* C14-9:3 -- We're at the end of the DR segment of path */ |
60 | /* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */ | 66 | /* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */ |
61 | return ((device->process_mad && | 67 | return ((device->process_mad && |
62 | !ib_get_smp_direction(smp) && | 68 | !ib_get_smp_direction(smp) && |
63 | (smp->hop_ptr == smp->hop_cnt + 1))); | 69 | (smp->hop_ptr == smp->hop_cnt + 1)) ? |
70 | IB_SMI_HANDLE : IB_SMI_DISCARD); | ||
64 | } | 71 | } |
65 | |||
66 | #endif /* __SMI_H_ */ | 72 | #endif /* __SMI_H_ */ |
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 000c086bf2e9..08c299ebf4a8 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
@@ -683,6 +683,7 @@ int ib_device_register_sysfs(struct ib_device *device) | |||
683 | 683 | ||
684 | class_dev->class = &ib_class; | 684 | class_dev->class = &ib_class; |
685 | class_dev->class_data = device; | 685 | class_dev->class_data = device; |
686 | class_dev->dev = device->dma_device; | ||
686 | strlcpy(class_dev->class_id, device->name, BUS_ID_SIZE); | 687 | strlcpy(class_dev->class_id, device->name, BUS_ID_SIZE); |
687 | 688 | ||
688 | INIT_LIST_HEAD(&device->port_list); | 689 | INIT_LIST_HEAD(&device->port_list); |
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index ee51d79a7ad5..2586a3ee8eba 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c | |||
@@ -407,29 +407,18 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file, | |||
407 | 407 | ||
408 | mutex_lock(&file->file_mutex); | 408 | mutex_lock(&file->file_mutex); |
409 | while (list_empty(&file->events)) { | 409 | while (list_empty(&file->events)) { |
410 | mutex_unlock(&file->file_mutex); | ||
410 | 411 | ||
411 | if (file->filp->f_flags & O_NONBLOCK) { | 412 | if (file->filp->f_flags & O_NONBLOCK) |
412 | result = -EAGAIN; | 413 | return -EAGAIN; |
413 | break; | ||
414 | } | ||
415 | 414 | ||
416 | if (signal_pending(current)) { | 415 | if (wait_event_interruptible(file->poll_wait, |
417 | result = -ERESTARTSYS; | 416 | !list_empty(&file->events))) |
418 | break; | 417 | return -ERESTARTSYS; |
419 | } | ||
420 | 418 | ||
421 | prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE); | ||
422 | |||
423 | mutex_unlock(&file->file_mutex); | ||
424 | schedule(); | ||
425 | mutex_lock(&file->file_mutex); | 419 | mutex_lock(&file->file_mutex); |
426 | |||
427 | finish_wait(&file->poll_wait, &wait); | ||
428 | } | 420 | } |
429 | 421 | ||
430 | if (result) | ||
431 | goto done; | ||
432 | |||
433 | uevent = list_entry(file->events.next, struct ib_ucm_event, file_list); | 422 | uevent = list_entry(file->events.next, struct ib_ucm_event, file_list); |
434 | 423 | ||
435 | if (ib_ucm_new_cm_id(uevent->resp.event)) { | 424 | if (ib_ucm_new_cm_id(uevent->resp.event)) { |
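[Editor's note] This hunk, and the matching one in ucma.c just below, collapse an open-coded prepare_to_wait()/schedule()/finish_wait() loop into wait_event_interruptible(), dropping the file mutex before sleeping and retaking it after waking. A userspace analogue of the resulting control flow, with a pthread condition variable standing in for the kernel wait queue (names are illustrative):

#include <pthread.h>
#include <errno.h>

static pthread_mutex_t file_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t poll_wait = PTHREAD_COND_INITIALIZER;
static int nevents;

/* Analogue of the simplified loop: nonblocking callers bail out with
 * EAGAIN; blocking callers sleep until an event shows up. */
static int wait_for_event(int nonblock)
{
        pthread_mutex_lock(&file_mutex);
        while (nevents == 0) {
                if (nonblock) {
                        pthread_mutex_unlock(&file_mutex);
                        return -EAGAIN;
                }
                /* releases file_mutex while asleep and retakes it on
                 * wakeup, playing the role of the unlock/
                 * wait_event_interruptible()/lock sequence above */
                pthread_cond_wait(&poll_wait, &file_mutex);
        }
        nevents--;
        pthread_mutex_unlock(&file_mutex);
        return 0;
}

int main(void)
{
        nevents = 1;                    /* pretend an event is queued */
        return wait_for_event(0);
}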
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index c859134c1daa..53b4c94a7eb5 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -306,26 +306,18 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf, | |||
306 | 306 | ||
307 | mutex_lock(&file->mut); | 307 | mutex_lock(&file->mut); |
308 | while (list_empty(&file->event_list)) { | 308 | while (list_empty(&file->event_list)) { |
309 | if (file->filp->f_flags & O_NONBLOCK) { | 309 | mutex_unlock(&file->mut); |
310 | ret = -EAGAIN; | ||
311 | break; | ||
312 | } | ||
313 | 310 | ||
314 | if (signal_pending(current)) { | 311 | if (file->filp->f_flags & O_NONBLOCK) |
315 | ret = -ERESTARTSYS; | 312 | return -EAGAIN; |
316 | break; | 313 | |
317 | } | 314 | if (wait_event_interruptible(file->poll_wait, |
315 | !list_empty(&file->event_list))) | ||
316 | return -ERESTARTSYS; | ||
318 | 317 | ||
319 | prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE); | ||
320 | mutex_unlock(&file->mut); | ||
321 | schedule(); | ||
322 | mutex_lock(&file->mut); | 318 | mutex_lock(&file->mut); |
323 | finish_wait(&file->poll_wait, &wait); | ||
324 | } | 319 | } |
325 | 320 | ||
326 | if (ret) | ||
327 | goto done; | ||
328 | |||
329 | uevent = list_entry(file->event_list.next, struct ucma_event, list); | 321 | uevent = list_entry(file->event_list.next, struct ucma_event, list); |
330 | 322 | ||
331 | if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { | 323 | if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { |
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index c069ebeba8e3..8199b83052a9 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c | |||
@@ -135,7 +135,7 @@ static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE); | |||
135 | 135 | ||
136 | static DEFINE_SPINLOCK(port_lock); | 136 | static DEFINE_SPINLOCK(port_lock); |
137 | static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS]; | 137 | static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS]; |
138 | static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2); | 138 | static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS); |
139 | 139 | ||
140 | static void ib_umad_add_one(struct ib_device *device); | 140 | static void ib_umad_add_one(struct ib_device *device); |
141 | static void ib_umad_remove_one(struct ib_device *device); | 141 | static void ib_umad_remove_one(struct ib_device *device); |
@@ -231,12 +231,17 @@ static void recv_handler(struct ib_mad_agent *agent, | |||
231 | packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; | 231 | packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; |
232 | packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); | 232 | packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); |
233 | if (packet->mad.hdr.grh_present) { | 233 | if (packet->mad.hdr.grh_present) { |
234 | /* XXX parse GRH */ | 234 | struct ib_ah_attr ah_attr; |
235 | packet->mad.hdr.gid_index = 0; | 235 | |
236 | packet->mad.hdr.hop_limit = 0; | 236 | ib_init_ah_from_wc(agent->device, agent->port_num, |
237 | packet->mad.hdr.traffic_class = 0; | 237 | mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, |
238 | memset(packet->mad.hdr.gid, 0, 16); | 238 | &ah_attr); |
239 | packet->mad.hdr.flow_label = 0; | 239 | |
240 | packet->mad.hdr.gid_index = ah_attr.grh.sgid_index; | ||
241 | packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit; | ||
242 | packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class; | ||
243 | memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16); | ||
244 | packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label); | ||
240 | } | 245 | } |
241 | 246 | ||
242 | if (queue_packet(file, agent, packet)) | 247 | if (queue_packet(file, agent, packet)) |
@@ -473,6 +478,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
473 | if (packet->mad.hdr.grh_present) { | 478 | if (packet->mad.hdr.grh_present) { |
474 | ah_attr.ah_flags = IB_AH_GRH; | 479 | ah_attr.ah_flags = IB_AH_GRH; |
475 | memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); | 480 | memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); |
481 | ah_attr.grh.sgid_index = packet->mad.hdr.gid_index; | ||
476 | ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); | 482 | ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); |
477 | ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; | 483 | ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; |
478 | ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; | 484 | ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; |
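[Editor's note] Two halves of one fix in user_mad.c: on receive, the GRH is now actually parsed via ib_init_ah_from_wc() instead of being zero-filled, and on send, gid_index is fed back into ah_attr.grh.sgid_index, so userspace MAD agents can reply on the correct source GID. A simplified model of the receive-side marshalling; the types and names below are stand-ins, not the kernel ABI:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>  /* htonl stands in for cpu_to_be32 here */

struct grh_model {
        uint8_t sgid_index, hop_limit, tclass;
        uint8_t dgid[16];
        uint32_t flow_label;
};
struct umad_hdr_model {
        uint8_t gid_index, hop_limit, tclass;
        uint8_t gid[16];
        uint32_t flow_label_be;         /* kept big-endian in the header */
};

static void recv_fill(struct umad_hdr_model *h, const struct grh_model *g)
{
        h->gid_index = g->sgid_index;
        h->hop_limit = g->hop_limit;
        h->tclass = g->tclass;
        memcpy(h->gid, g->dgid, 16);
        h->flow_label_be = htonl(g->flow_label);
}

int main(void)
{
        struct grh_model g = { 1, 64, 0, {0}, 0x12345 };
        struct umad_hdr_model h;
        recv_fill(&h, &g);
        return h.gid_index == 1 ? 0 : 1;
}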
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c index 59243d9aedd6..58bc272bd407 100644 --- a/drivers/infiniband/hw/amso1100/c2.c +++ b/drivers/infiniband/hw/amso1100/c2.c | |||
@@ -439,7 +439,8 @@ static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem) | |||
439 | } | 439 | } |
440 | 440 | ||
441 | /* Setup the skb for reuse since we're dropping this pkt */ | 441 | /* Setup the skb for reuse since we're dropping this pkt */ |
442 | elem->skb->tail = elem->skb->data = elem->skb->head; | 442 | elem->skb->data = elem->skb->head; |
443 | skb_reset_tail_pointer(elem->skb); | ||
443 | 444 | ||
444 | /* Zero out the rxp hdr in the sk_buff */ | 445 | /* Zero out the rxp hdr in the sk_buff */ |
445 | memset(elem->skb->data, 0, sizeof(*rxp_hdr)); | 446 | memset(elem->skb->data, 0, sizeof(*rxp_hdr)); |
@@ -521,9 +522,8 @@ static void c2_rx_interrupt(struct net_device *netdev) | |||
521 | * "sizeof(struct c2_rxp_hdr)". | 522 | * "sizeof(struct c2_rxp_hdr)". |
522 | */ | 523 | */ |
523 | skb->data += sizeof(*rxp_hdr); | 524 | skb->data += sizeof(*rxp_hdr); |
524 | skb->tail = skb->data + buflen; | 525 | skb_set_tail_pointer(skb, buflen); |
525 | skb->len = buflen; | 526 | skb->len = buflen; |
526 | skb->dev = netdev; | ||
527 | skb->protocol = eth_type_trans(skb, netdev); | 527 | skb->protocol = eth_type_trans(skb, netdev); |
528 | 528 | ||
529 | netif_rx(skb); | 529 | netif_rx(skb); |
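[Editor's note] The c2 changes are part of the tree-wide sk_buff cleanup running through this merge: skb->tail and skb->end are no longer poked directly because, depending on configuration, they are stored as offsets from skb->head rather than raw pointers, so drivers must go through skb_reset_tail_pointer(), skb_set_tail_pointer() and skb_end_pointer(). A minimal model of the offset representation (not the real sk_buff layout):

/* Model only: mirrors the offset-based tail, not the real sk_buff. */
struct skb_model {
        unsigned char *head;
        unsigned char *data;
        unsigned long tail;     /* offset from head when offsets are in use */
};

static void model_reset_tail(struct skb_model *skb)
{
        skb->tail = skb->data - skb->head;      /* tail := data */
}

static void model_set_tail(struct skb_model *skb, int offset)
{
        model_reset_tail(skb);
        skb->tail += offset;                    /* tail := data + offset */
}

int main(void)
{
        unsigned char buf[256];
        struct skb_model skb = { buf, buf + 64, 0 };
        model_set_tail(&skb, 100);      /* tail now at data + 100 */
        return skb.tail == 164 ? 0 : 1;
}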
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index fef972752912..607c09bf764c 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c | |||
@@ -796,7 +796,6 @@ int c2_register_device(struct c2_dev *dev) | |||
796 | memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6); | 796 | memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6); |
797 | dev->ibdev.phys_port_cnt = 1; | 797 | dev->ibdev.phys_port_cnt = 1; |
798 | dev->ibdev.dma_device = &dev->pcidev->dev; | 798 | dev->ibdev.dma_device = &dev->pcidev->dev; |
799 | dev->ibdev.class_dev.dev = &dev->pcidev->dev; | ||
800 | dev->ibdev.query_device = c2_query_device; | 799 | dev->ibdev.query_device = c2_query_device; |
801 | dev->ibdev.query_port = c2_query_port; | 800 | dev->ibdev.query_port = c2_query_port; |
802 | dev->ibdev.modify_port = c2_modify_port; | 801 | dev->ibdev.modify_port = c2_modify_port; |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 2d2de9b8b729..3b4b0acd707f 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -477,7 +477,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) | |||
477 | BUG_ON(skb_cloned(skb)); | 477 | BUG_ON(skb_cloned(skb)); |
478 | 478 | ||
479 | mpalen = sizeof(*mpa) + ep->plen; | 479 | mpalen = sizeof(*mpa) + ep->plen; |
480 | if (skb->data + mpalen + sizeof(*req) > skb->end) { | 480 | if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) { |
481 | kfree_skb(skb); | 481 | kfree_skb(skb); |
482 | skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL); | 482 | skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL); |
483 | if (!skb) { | 483 | if (!skb) { |
@@ -507,7 +507,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) | |||
507 | */ | 507 | */ |
508 | skb_get(skb); | 508 | skb_get(skb); |
509 | set_arp_failure_handler(skb, arp_failure_discard); | 509 | set_arp_failure_handler(skb, arp_failure_discard); |
510 | skb->h.raw = skb->data; | 510 | skb_reset_transport_header(skb); |
511 | len = skb->len; | 511 | len = skb->len; |
512 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); | 512 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); |
513 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); | 513 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); |
@@ -559,7 +559,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen) | |||
559 | skb_get(skb); | 559 | skb_get(skb); |
560 | skb->priority = CPL_PRIORITY_DATA; | 560 | skb->priority = CPL_PRIORITY_DATA; |
561 | set_arp_failure_handler(skb, arp_failure_discard); | 561 | set_arp_failure_handler(skb, arp_failure_discard); |
562 | skb->h.raw = skb->data; | 562 | skb_reset_transport_header(skb); |
563 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); | 563 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); |
564 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); | 564 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); |
565 | req->wr_lo = htonl(V_WR_TID(ep->hwtid)); | 565 | req->wr_lo = htonl(V_WR_TID(ep->hwtid)); |
@@ -610,7 +610,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) | |||
610 | */ | 610 | */ |
611 | skb_get(skb); | 611 | skb_get(skb); |
612 | set_arp_failure_handler(skb, arp_failure_discard); | 612 | set_arp_failure_handler(skb, arp_failure_discard); |
613 | skb->h.raw = skb->data; | 613 | skb_reset_transport_header(skb); |
614 | len = skb->len; | 614 | len = skb->len; |
615 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); | 615 | req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); |
616 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); | 616 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); |
@@ -821,7 +821,8 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb) | |||
821 | /* | 821 | /* |
822 | * copy the new data into our accumulation buffer. | 822 | * copy the new data into our accumulation buffer. |
823 | */ | 823 | */ |
824 | memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len); | 824 | skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), |
825 | skb->len); | ||
825 | ep->mpa_pkt_len += skb->len; | 826 | ep->mpa_pkt_len += skb->len; |
826 | 827 | ||
827 | /* | 828 | /* |
@@ -940,7 +941,8 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb) | |||
940 | /* | 941 | /* |
941 | * Copy the new data into our accumulation buffer. | 942 | * Copy the new data into our accumulation buffer. |
942 | */ | 943 | */ |
943 | memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len); | 944 | skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), |
945 | skb->len); | ||
944 | ep->mpa_pkt_len += skb->len; | 946 | ep->mpa_pkt_len += skb->len; |
945 | 947 | ||
946 | /* | 948 | /* |
@@ -1619,7 +1621,8 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1619 | PDBG("%s ep %p\n", __FUNCTION__, ep); | 1621 | PDBG("%s ep %p\n", __FUNCTION__, ep); |
1620 | skb_pull(skb, sizeof(struct cpl_rdma_terminate)); | 1622 | skb_pull(skb, sizeof(struct cpl_rdma_terminate)); |
1621 | PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len); | 1623 | PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len); |
1622 | memcpy(ep->com.qp->attr.terminate_buffer, skb->data, skb->len); | 1624 | skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, |
1625 | skb->len); | ||
1623 | ep->com.qp->attr.terminate_msg_len = skb->len; | 1626 | ep->com.qp->attr.terminate_msg_len = skb->len; |
1624 | ep->com.qp->attr.is_terminate_local = 0; | 1627 | ep->com.qp->attr.is_terminate_local = 0; |
1625 | return CPL_RET_BUF_DONE; | 1628 | return CPL_RET_BUF_DONE; |
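[Editor's note] Likewise, the memcpy() calls over skb->data become skb_copy_from_linear_data(), which is equivalent for a linear skb but documents that only the linear (non-paged) part of the buffer is read and insulates the driver from further layout changes. Modelled, the helper amounts to:

#include <string.h>

struct skb_lin_model { unsigned char *data; unsigned int len; };

/* What the helper amounts to for a linear skb (a model, not the
 * kernel definition): */
static void model_copy_from_linear_data(const struct skb_lin_model *skb,
                                        void *to, unsigned int len)
{
        memcpy(to, skb->data, len);
}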
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 24e0df04f7db..af28a317016d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
@@ -1108,7 +1108,6 @@ int iwch_register_device(struct iwch_dev *dev) | |||
1108 | memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC)); | 1108 | memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC)); |
1109 | dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports; | 1109 | dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports; |
1110 | dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev); | 1110 | dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev); |
1111 | dev->ibdev.class_dev.dev = &(dev->rdev.rnic_info.pdev->dev); | ||
1112 | dev->ibdev.query_device = iwch_query_device; | 1111 | dev->ibdev.query_device = iwch_query_device; |
1113 | dev->ibdev.query_port = iwch_query_port; | 1112 | dev->ibdev.query_port = iwch_query_port; |
1114 | dev->ibdev.modify_port = iwch_modify_port; | 1113 | dev->ibdev.modify_port = iwch_modify_port; |
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h index 82ded44c6cee..10fb8fbafa0c 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/infiniband/hw/ehca/ehca_classes.h | |||
@@ -106,6 +106,7 @@ struct ehca_shca { | |||
106 | struct ehca_mr *maxmr; | 106 | struct ehca_mr *maxmr; |
107 | struct ehca_pd *pd; | 107 | struct ehca_pd *pd; |
108 | struct h_galpas galpas; | 108 | struct h_galpas galpas; |
109 | struct mutex modify_mutex; | ||
109 | }; | 110 | }; |
110 | 111 | ||
111 | struct ehca_pd { | 112 | struct ehca_pd { |
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c index 30eb45df9f0b..32b55a4f0e5b 100644 --- a/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/drivers/infiniband/hw/ehca/ehca_hca.c | |||
@@ -147,6 +147,7 @@ int ehca_query_port(struct ib_device *ibdev, | |||
147 | break; | 147 | break; |
148 | } | 148 | } |
149 | 149 | ||
150 | props->port_cap_flags = rblock->capability_mask; | ||
150 | props->gid_tbl_len = rblock->gid_tbl_len; | 151 | props->gid_tbl_len = rblock->gid_tbl_len; |
151 | props->max_msg_sz = rblock->max_msg_sz; | 152 | props->max_msg_sz = rblock->max_msg_sz; |
152 | props->bad_pkey_cntr = rblock->bad_pkey_cntr; | 153 | props->bad_pkey_cntr = rblock->bad_pkey_cntr; |
@@ -236,10 +237,60 @@ query_gid1: | |||
236 | return ret; | 237 | return ret; |
237 | } | 238 | } |
238 | 239 | ||
240 | const u32 allowed_port_caps = ( | ||
241 | IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP | | ||
242 | IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP | | ||
243 | IB_PORT_VENDOR_CLASS_SUP); | ||
244 | |||
239 | int ehca_modify_port(struct ib_device *ibdev, | 245 | int ehca_modify_port(struct ib_device *ibdev, |
240 | u8 port, int port_modify_mask, | 246 | u8 port, int port_modify_mask, |
241 | struct ib_port_modify *props) | 247 | struct ib_port_modify *props) |
242 | { | 248 | { |
243 | /* Not implemented yet */ | 249 | int ret = 0; |
244 | return -EFAULT; | 250 | struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device); |
251 | struct hipz_query_port *rblock; | ||
252 | u32 cap; | ||
253 | u64 hret; | ||
254 | |||
255 | if ((props->set_port_cap_mask | props->clr_port_cap_mask) | ||
256 | & ~allowed_port_caps) { | ||
257 | ehca_err(&shca->ib_device, "Non-changeable bits set in masks " | ||
258 | "set=%x clr=%x allowed=%x", props->set_port_cap_mask, | ||
259 | props->clr_port_cap_mask, allowed_port_caps); | ||
260 | return -EINVAL; | ||
261 | } | ||
262 | |||
263 | if (mutex_lock_interruptible(&shca->modify_mutex)) | ||
264 | return -ERESTARTSYS; | ||
265 | |||
266 | rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); | ||
267 | if (!rblock) { | ||
268 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | ||
269 | ret = -ENOMEM; | ||
270 | goto modify_port1; | ||
271 | } | ||
272 | |||
273 | if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { | ||
274 | ehca_err(&shca->ib_device, "Can't query port properties"); | ||
275 | ret = -EINVAL; | ||
276 | goto modify_port2; | ||
277 | } | ||
278 | |||
279 | cap = (rblock->capability_mask | props->set_port_cap_mask) | ||
280 | & ~props->clr_port_cap_mask; | ||
281 | |||
282 | hret = hipz_h_modify_port(shca->ipz_hca_handle, port, | ||
283 | cap, props->init_type, port_modify_mask); | ||
284 | if (hret != H_SUCCESS) { | ||
285 | ehca_err(&shca->ib_device, "Modify port failed hret=%lx", hret); | ||
286 | ret = -EINVAL; | ||
287 | } | ||
288 | |||
289 | modify_port2: | ||
290 | ehca_free_fw_ctrlblock(rblock); | ||
291 | |||
292 | modify_port1: | ||
293 | mutex_unlock(&shca->modify_mutex); | ||
294 | |||
295 | return ret; | ||
245 | } | 296 | } |
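[Editor's note] ehca_modify_port() graduates from a stub returning -EFAULT to a real implementation: it rejects requests touching bits outside the whitelist of changeable capabilities, serializes on the new modify_mutex, queries the current capability mask from firmware, and applies the usual update rule new = (current | set) & ~clear before calling hipz_h_modify_port(). The core computation, modelled standalone with an illustrative whitelist value (not ehca's real mask):

#include <stdint.h>

#define ALLOWED_CAPS 0x0000000au        /* illustrative whitelist */

static int model_modify_caps(uint32_t cur, uint32_t set, uint32_t clr,
                             uint32_t *out)
{
        if ((set | clr) & ~ALLOWED_CAPS)
                return -1;              /* driver returns -EINVAL */
        *out = (cur | set) & ~clr;      /* IB capability update rule */
        return 0;
}

int main(void)
{
        uint32_t cap;
        return model_modify_caps(0x2, 0x8, 0x2, &cap) || cap != 0x8;
}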
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 059da9628bb5..3b23d677cb86 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
@@ -587,6 +587,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev, | |||
587 | ehca_gen_err("Cannot allocate shca memory."); | 587 | ehca_gen_err("Cannot allocate shca memory."); |
588 | return -ENOMEM; | 588 | return -ENOMEM; |
589 | } | 589 | } |
590 | mutex_init(&shca->modify_mutex); | ||
590 | 591 | ||
591 | shca->ibmebus_dev = dev; | 592 | shca->ibmebus_dev = dev; |
592 | shca->ipz_hca_handle.handle = *handle; | 593 | shca->ipz_hca_handle.handle = *handle; |
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c index 3fb46e67df87..b564fcd3b282 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.c +++ b/drivers/infiniband/hw/ehca/hcp_if.c | |||
@@ -70,6 +70,10 @@ | |||
70 | #define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31) | 70 | #define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31) |
71 | #define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63) | 71 | #define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63) |
72 | 72 | ||
73 | #define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47) | ||
74 | #define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48) | ||
75 | #define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49) | ||
76 | |||
73 | /* direct access qp controls */ | 77 | /* direct access qp controls */ |
74 | #define DAQP_CTRL_ENABLE 0x01 | 78 | #define DAQP_CTRL_ENABLE 0x01 |
75 | #define DAQP_CTRL_SEND_COMP 0x20 | 79 | #define DAQP_CTRL_SEND_COMP 0x20 |
@@ -364,6 +368,26 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle, | |||
364 | return ret; | 368 | return ret; |
365 | } | 369 | } |
366 | 370 | ||
371 | u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle, | ||
372 | const u8 port_id, const u32 port_cap, | ||
373 | const u8 init_type, const int modify_mask) | ||
374 | { | ||
375 | u64 port_attributes = port_cap; | ||
376 | |||
377 | if (modify_mask & IB_PORT_SHUTDOWN) | ||
378 | port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1); | ||
379 | if (modify_mask & IB_PORT_INIT_TYPE) | ||
380 | port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type); | ||
381 | if (modify_mask & IB_PORT_RESET_QKEY_CNTR) | ||
382 | port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1); | ||
383 | |||
384 | return ehca_plpar_hcall_norets(H_MODIFY_PORT, | ||
385 | adapter_handle.handle, /* r4 */ | ||
386 | port_id, /* r5 */ | ||
387 | port_attributes, /* r6 */ | ||
388 | 0, 0, 0, 0); | ||
389 | } | ||
390 | |||
367 | u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, | 391 | u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, |
368 | struct hipz_query_hca *query_hca_rblock) | 392 | struct hipz_query_hca *query_hca_rblock) |
369 | { | 393 | { |
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h index 587ebd470959..2869f7dd6196 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.h +++ b/drivers/infiniband/hw/ehca/hcp_if.h | |||
@@ -85,6 +85,10 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle, | |||
85 | const u8 port_id, | 85 | const u8 port_id, |
86 | struct hipz_query_port *query_port_response_block); | 86 | struct hipz_query_port *query_port_response_block); |
87 | 87 | ||
88 | u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle, | ||
89 | const u8 port_id, const u32 port_cap, | ||
90 | const u8 init_type, const int modify_mask); | ||
91 | |||
88 | u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, | 92 | u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, |
89 | struct hipz_query_hca *query_hca_rblock); | 93 | struct hipz_query_hca *query_hca_rblock); |
90 | 94 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h index 54139d398181..10c008f22ba6 100644 --- a/drivers/infiniband/hw/ipath/ipath_common.h +++ b/drivers/infiniband/hw/ipath/ipath_common.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006 QLogic, Inc. All rights reserved. | 2 | * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. |
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
@@ -78,6 +78,8 @@ | |||
78 | #define IPATH_IB_LINKINIT 3 | 78 | #define IPATH_IB_LINKINIT 3 |
79 | #define IPATH_IB_LINKDOWN_SLEEP 4 | 79 | #define IPATH_IB_LINKDOWN_SLEEP 4 |
80 | #define IPATH_IB_LINKDOWN_DISABLE 5 | 80 | #define IPATH_IB_LINKDOWN_DISABLE 5 |
81 | #define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */ | ||
82 | #define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */ | ||
81 | 83 | ||
82 | /* | 84 | /* |
83 | * stats maintained by the driver. For now, at least, this is global | 85 | * stats maintained by the driver. For now, at least, this is global |
@@ -316,11 +318,17 @@ struct ipath_base_info { | |||
316 | /* address of readonly memory copy of the rcvhdrq tail register. */ | 318 | /* address of readonly memory copy of the rcvhdrq tail register. */ |
317 | __u64 spi_rcvhdr_tailaddr; | 319 | __u64 spi_rcvhdr_tailaddr; |
318 | 320 | ||
319 | /* shared memory pages for subports if IPATH_RUNTIME_MASTER is set */ | 321 | /* shared memory pages for subports if port is shared */ |
320 | __u64 spi_subport_uregbase; | 322 | __u64 spi_subport_uregbase; |
321 | __u64 spi_subport_rcvegrbuf; | 323 | __u64 spi_subport_rcvegrbuf; |
322 | __u64 spi_subport_rcvhdr_base; | 324 | __u64 spi_subport_rcvhdr_base; |
323 | 325 | ||
326 | /* shared memory page for hardware port if it is shared */ | ||
327 | __u64 spi_port_uregbase; | ||
328 | __u64 spi_port_rcvegrbuf; | ||
329 | __u64 spi_port_rcvhdr_base; | ||
330 | __u64 spi_port_rcvhdr_tailaddr; | ||
331 | |||
324 | } __attribute__ ((aligned(8))); | 332 | } __attribute__ ((aligned(8))); |
325 | 333 | ||
326 | 334 | ||
@@ -344,7 +352,7 @@ struct ipath_base_info { | |||
344 | * may not be implemented; the user code must deal with this if it | 352 | * may not be implemented; the user code must deal with this if it |
345 | * cares, or it must abort after initialization reports the difference. | 353 | * cares, or it must abort after initialization reports the difference. |
346 | */ | 354 | */ |
347 | #define IPATH_USER_SWMINOR 3 | 355 | #define IPATH_USER_SWMINOR 5 |
348 | 356 | ||
349 | #define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR) | 357 | #define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR) |
350 | 358 | ||
@@ -418,11 +426,14 @@ struct ipath_user_info { | |||
418 | #define IPATH_CMD_TID_UPDATE 19 /* update expected TID entries */ | 426 | #define IPATH_CMD_TID_UPDATE 19 /* update expected TID entries */ |
419 | #define IPATH_CMD_TID_FREE 20 /* free expected TID entries */ | 427 | #define IPATH_CMD_TID_FREE 20 /* free expected TID entries */ |
420 | #define IPATH_CMD_SET_PART_KEY 21 /* add partition key */ | 428 | #define IPATH_CMD_SET_PART_KEY 21 /* add partition key */ |
421 | #define IPATH_CMD_SLAVE_INFO 22 /* return info on slave processes */ | 429 | #define __IPATH_CMD_SLAVE_INFO 22 /* return info on slave processes (for old user code) */ |
422 | #define IPATH_CMD_ASSIGN_PORT 23 /* allocate HCA and port */ | 430 | #define IPATH_CMD_ASSIGN_PORT 23 /* allocate HCA and port */ |
423 | #define IPATH_CMD_USER_INIT 24 /* set up userspace */ | 431 | #define IPATH_CMD_USER_INIT 24 /* set up userspace */ |
432 | #define IPATH_CMD_UNUSED_1 25 | ||
433 | #define IPATH_CMD_UNUSED_2 26 | ||
434 | #define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */ | ||
424 | 435 | ||
425 | #define IPATH_CMD_MAX 24 | 436 | #define IPATH_CMD_MAX 27 |
426 | 437 | ||
427 | struct ipath_port_info { | 438 | struct ipath_port_info { |
428 | __u32 num_active; /* number of active units */ | 439 | __u32 num_active; /* number of active units */ |
@@ -430,7 +441,7 @@ struct ipath_port_info { | |||
430 | __u16 port; /* port on unit assigned to caller */ | 441 | __u16 port; /* port on unit assigned to caller */ |
431 | __u16 subport; /* subport on unit assigned to caller */ | 442 | __u16 subport; /* subport on unit assigned to caller */ |
432 | __u16 num_ports; /* number of ports available on unit */ | 443 | __u16 num_ports; /* number of ports available on unit */ |
433 | __u16 num_subports; /* number of subport slaves opened on port */ | 444 | __u16 num_subports; /* number of subports opened on port */ |
434 | }; | 445 | }; |
435 | 446 | ||
436 | struct ipath_tid_info { | 447 | struct ipath_tid_info { |
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c index 87462e0cb4d2..ea78e6dddc90 100644 --- a/drivers/infiniband/hw/ipath/ipath_cq.c +++ b/drivers/infiniband/hw/ipath/ipath_cq.c | |||
@@ -76,7 +76,20 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) | |||
76 | } | 76 | } |
77 | return; | 77 | return; |
78 | } | 78 | } |
79 | wc->queue[head] = *entry; | 79 | wc->queue[head].wr_id = entry->wr_id; |
80 | wc->queue[head].status = entry->status; | ||
81 | wc->queue[head].opcode = entry->opcode; | ||
82 | wc->queue[head].vendor_err = entry->vendor_err; | ||
83 | wc->queue[head].byte_len = entry->byte_len; | ||
84 | wc->queue[head].imm_data = (__u32 __force)entry->imm_data; | ||
85 | wc->queue[head].qp_num = entry->qp->qp_num; | ||
86 | wc->queue[head].src_qp = entry->src_qp; | ||
87 | wc->queue[head].wc_flags = entry->wc_flags; | ||
88 | wc->queue[head].pkey_index = entry->pkey_index; | ||
89 | wc->queue[head].slid = entry->slid; | ||
90 | wc->queue[head].sl = entry->sl; | ||
91 | wc->queue[head].dlid_path_bits = entry->dlid_path_bits; | ||
92 | wc->queue[head].port_num = entry->port_num; | ||
80 | wc->head = next; | 93 | wc->head = next; |
81 | 94 | ||
82 | if (cq->notify == IB_CQ_NEXT_COMP || | 95 | if (cq->notify == IB_CQ_NEXT_COMP || |
@@ -122,9 +135,30 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | |||
122 | if (tail > (u32) cq->ibcq.cqe) | 135 | if (tail > (u32) cq->ibcq.cqe) |
123 | tail = (u32) cq->ibcq.cqe; | 136 | tail = (u32) cq->ibcq.cqe; |
124 | for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { | 137 | for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { |
138 | struct ipath_qp *qp; | ||
139 | |||
125 | if (tail == wc->head) | 140 | if (tail == wc->head) |
126 | break; | 141 | break; |
127 | *entry = wc->queue[tail]; | 142 | |
143 | qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table, | ||
144 | wc->queue[tail].qp_num); | ||
145 | entry->qp = &qp->ibqp; | ||
146 | if (atomic_dec_and_test(&qp->refcount)) | ||
147 | wake_up(&qp->wait); | ||
148 | |||
149 | entry->wr_id = wc->queue[tail].wr_id; | ||
150 | entry->status = wc->queue[tail].status; | ||
151 | entry->opcode = wc->queue[tail].opcode; | ||
152 | entry->vendor_err = wc->queue[tail].vendor_err; | ||
153 | entry->byte_len = wc->queue[tail].byte_len; | ||
154 | entry->imm_data = wc->queue[tail].imm_data; | ||
155 | entry->src_qp = wc->queue[tail].src_qp; | ||
156 | entry->wc_flags = wc->queue[tail].wc_flags; | ||
157 | entry->pkey_index = wc->queue[tail].pkey_index; | ||
158 | entry->slid = wc->queue[tail].slid; | ||
159 | entry->sl = wc->queue[tail].sl; | ||
160 | entry->dlid_path_bits = wc->queue[tail].dlid_path_bits; | ||
161 | entry->port_num = wc->queue[tail].port_num; | ||
128 | if (tail >= cq->ibcq.cqe) | 162 | if (tail >= cq->ibcq.cqe) |
129 | tail = 0; | 163 | tail = 0; |
130 | else | 164 | else |
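[Editor's note] The field-by-field copying here looks clumsy but is forced: wc->queue is an mmap()ed ring shared with userspace, so it must keep a stable, pointer-free layout, while struct ib_wc now carries a struct ib_qp pointer in place of a raw qp_num (another tree-wide change in this merge). Enqueue therefore stores qp_num, and poll translates it back through ipath_lookup_qpn(), holding a QP reference across the lookup. The shape of that marshalling, modelled:

#include <stdint.h>

struct qp_model { uint32_t qp_num; };
struct wc_kernel { struct qp_model *qp; uint64_t wr_id; };  /* has a pointer */
struct wc_shared { uint32_t qp_num; uint64_t wr_id; };      /* mmap-safe    */

static void model_enqueue(struct wc_shared *slot, const struct wc_kernel *in)
{
        slot->wr_id = in->wr_id;
        slot->qp_num = in->qp->qp_num;  /* store the number, never the pointer */
}

static void model_poll(struct wc_kernel *out, const struct wc_shared *slot,
                       struct qp_model *(*lookup)(uint32_t qp_num))
{
        out->wr_id = slot->wr_id;
        out->qp = lookup(slot->qp_num); /* ipath_lookup_qpn() in the driver */
}

static struct qp_model qp0 = { 42 };
static struct qp_model *lookup0(uint32_t n) { return n == 42 ? &qp0 : 0; }

int main(void)
{
        struct wc_shared s;
        struct wc_kernel in = { &qp0, 7 }, out;
        model_enqueue(&s, &in);
        model_poll(&out, &s, lookup0);
        return out.qp == &qp0 && out.wr_id == 7 ? 0 : 1;
}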
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h index df69f0d80b8b..42bfbdb0d3e6 100644 --- a/drivers/infiniband/hw/ipath/ipath_debug.h +++ b/drivers/infiniband/hw/ipath/ipath_debug.h | |||
@@ -57,6 +57,7 @@ | |||
57 | #define __IPATH_PROCDBG 0x100 | 57 | #define __IPATH_PROCDBG 0x100 |
58 | /* print mmap/nopage stuff, not using VDBG any more */ | 58 | /* print mmap/nopage stuff, not using VDBG any more */ |
59 | #define __IPATH_MMDBG 0x200 | 59 | #define __IPATH_MMDBG 0x200 |
60 | #define __IPATH_ERRPKTDBG 0x400 | ||
60 | #define __IPATH_USER_SEND 0x1000 /* use user mode send */ | 61 | #define __IPATH_USER_SEND 0x1000 /* use user mode send */ |
61 | #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ | 62 | #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ |
62 | #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ | 63 | #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c index 0f13a2182cc7..63e8368b0e95 100644 --- a/drivers/infiniband/hw/ipath/ipath_diag.c +++ b/drivers/infiniband/hw/ipath/ipath_diag.c | |||
@@ -296,7 +296,7 @@ static int ipath_diag_open(struct inode *in, struct file *fp) | |||
296 | } | 296 | } |
297 | 297 | ||
298 | fp->private_data = dd; | 298 | fp->private_data = dd; |
299 | ipath_diag_inuse = 1; | 299 | ipath_diag_inuse = -2; |
300 | diag_set_link = 0; | 300 | diag_set_link = 0; |
301 | ret = 0; | 301 | ret = 0; |
302 | 302 | ||
@@ -461,6 +461,8 @@ static ssize_t ipath_diag_read(struct file *fp, char __user *data, | |||
461 | else if ((count % 4) || (*off % 4)) | 461 | else if ((count % 4) || (*off % 4)) |
462 | /* address or length is not 32-bit aligned, hence invalid */ | 462 | /* address or length is not 32-bit aligned, hence invalid */ |
463 | ret = -EINVAL; | 463 | ret = -EINVAL; |
464 | else if (ipath_diag_inuse < 1 && (*off || count != 8)) | ||
465 | ret = -EINVAL; /* prevent cat /dev/ipath_diag* */ | ||
464 | else if ((count % 8) || (*off % 8)) | 466 | else if ((count % 8) || (*off % 8)) |
465 | /* address or length not 64-bit aligned; do 32-bit reads */ | 467 | /* address or length not 64-bit aligned; do 32-bit reads */ |
466 | ret = ipath_read_umem32(dd, data, kreg_base + *off, count); | 468 | ret = ipath_read_umem32(dd, data, kreg_base + *off, count); |
@@ -470,6 +472,8 @@ static ssize_t ipath_diag_read(struct file *fp, char __user *data, | |||
470 | if (ret >= 0) { | 472 | if (ret >= 0) { |
471 | *off += count; | 473 | *off += count; |
472 | ret = count; | 474 | ret = count; |
475 | if (ipath_diag_inuse == -2) | ||
476 | ipath_diag_inuse++; | ||
473 | } | 477 | } |
474 | 478 | ||
475 | return ret; | 479 | return ret; |
@@ -489,6 +493,9 @@ static ssize_t ipath_diag_write(struct file *fp, const char __user *data, | |||
489 | else if ((count % 4) || (*off % 4)) | 493 | else if ((count % 4) || (*off % 4)) |
490 | /* address or length is not 32-bit aligned, hence invalid */ | 494 | /* address or length is not 32-bit aligned, hence invalid */ |
491 | ret = -EINVAL; | 495 | ret = -EINVAL; |
496 | else if ((ipath_diag_inuse == -1 && (*off || count != 8)) || | ||
497 | ipath_diag_inuse == -2) /* read qw off 0, write qw off 0 */ | ||
498 | ret = -EINVAL; /* before any other write allowed */ | ||
492 | else if ((count % 8) || (*off % 8)) | 499 | else if ((count % 8) || (*off % 8)) |
493 | /* address or length not 64-bit aligned; do 32-bit writes */ | 500 | /* address or length not 64-bit aligned; do 32-bit writes */ |
494 | ret = ipath_write_umem32(dd, kreg_base + *off, data, count); | 501 | ret = ipath_write_umem32(dd, kreg_base + *off, data, count); |
@@ -498,6 +505,8 @@ static ssize_t ipath_diag_write(struct file *fp, const char __user *data, | |||
498 | if (ret >= 0) { | 505 | if (ret >= 0) { |
499 | *off += count; | 506 | *off += count; |
500 | ret = count; | 507 | ret = count; |
508 | if (ipath_diag_inuse == -1) | ||
509 | ipath_diag_inuse = 1; /* all read/write OK now */ | ||
501 | } | 510 | } |
502 | 511 | ||
503 | return ret; | 512 | return ret; |
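[Editor's note] The diag device replaces its boolean in-use flag with a small handshake: open sets ipath_diag_inuse to -2, the first read must be a single quadword at offset 0 (advancing the state to -1), the first write must be the same (advancing it to 1), and only then is arbitrary access allowed, which is what stops `cat /dev/ipath_diag*` from dumping chip registers. The progression, modelled as a tiny state machine:

#include <stdio.h>

enum diag_state { OPENED = -2, FIRST_READ_DONE = -1, UNLOCKED = 1 };

/* Hypothetical model: ok_qw_at_0 is true only for an 8-byte access at
 * offset 0, mirroring the (*off || count != 8) checks in the driver. */
static int model_access(enum diag_state *st, int is_write, int ok_qw_at_0)
{
        if (*st == OPENED && is_write)
                return -1;              /* no writes before the first read */
        if (*st != UNLOCKED && !ok_qw_at_0)
                return -1;              /* first accesses: one qword at 0 */
        if (*st == OPENED && !is_write)
                *st = FIRST_READ_DONE;
        else if (*st == FIRST_READ_DONE && is_write)
                *st = UNLOCKED;
        return 0;
}

int main(void)
{
        enum diag_state st = OPENED;
        model_access(&st, 0, 1);        /* read qword at 0  -> -1 */
        model_access(&st, 1, 1);        /* write qword at 0 ->  1 */
        printf("%d\n", st);             /* prints 1: unlocked */
        return 0;
}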
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index ae7f21a0cdc0..e3a223209710 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
@@ -390,15 +390,23 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
390 | 390 | ||
391 | /* setup the chip-specific functions, as early as possible. */ | 391 | /* setup the chip-specific functions, as early as possible. */ |
392 | switch (ent->device) { | 392 | switch (ent->device) { |
393 | #ifdef CONFIG_HT_IRQ | ||
394 | case PCI_DEVICE_ID_INFINIPATH_HT: | 393 | case PCI_DEVICE_ID_INFINIPATH_HT: |
394 | #ifdef CONFIG_HT_IRQ | ||
395 | ipath_init_iba6110_funcs(dd); | 395 | ipath_init_iba6110_funcs(dd); |
396 | break; | 396 | break; |
397 | #else | ||
398 | ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if " | ||
399 | "CONFIG_HT_IRQ is not enabled\n", ent->device); | ||
400 | return -ENODEV; | ||
397 | #endif | 401 | #endif |
398 | #ifdef CONFIG_PCI_MSI | ||
399 | case PCI_DEVICE_ID_INFINIPATH_PE800: | 402 | case PCI_DEVICE_ID_INFINIPATH_PE800: |
403 | #ifdef CONFIG_PCI_MSI | ||
400 | ipath_init_iba6120_funcs(dd); | 404 | ipath_init_iba6120_funcs(dd); |
401 | break; | 405 | break; |
406 | #else | ||
407 | ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if " | ||
408 | "CONFIG_PCI_MSI is not enabled\n", ent->device); | ||
409 | return -ENODEV; | ||
402 | #endif | 410 | #endif |
403 | default: | 411 | default: |
404 | ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " | 412 | ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " |
@@ -486,7 +494,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
486 | 494 | ||
487 | ret = ipath_init_chip(dd, 0); /* do the chip-specific init */ | 495 | ret = ipath_init_chip(dd, 0); /* do the chip-specific init */ |
488 | if (ret) | 496 | if (ret) |
489 | goto bail_iounmap; | 497 | goto bail_irqsetup; |
490 | 498 | ||
491 | ret = ipath_enable_wc(dd); | 499 | ret = ipath_enable_wc(dd); |
492 | 500 | ||
@@ -505,6 +513,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
505 | 513 | ||
506 | goto bail; | 514 | goto bail; |
507 | 515 | ||
516 | bail_irqsetup: | ||
517 | if (pdev->irq) free_irq(pdev->irq, dd); | ||
518 | |||
508 | bail_iounmap: | 519 | bail_iounmap: |
509 | iounmap((volatile void __iomem *) dd->ipath_kregbase); | 520 | iounmap((volatile void __iomem *) dd->ipath_kregbase); |
510 | 521 | ||
@@ -525,8 +536,6 @@ static void __devexit cleanup_device(struct ipath_devdata *dd) | |||
525 | { | 536 | { |
526 | int port; | 537 | int port; |
527 | 538 | ||
528 | ipath_shutdown_device(dd); | ||
529 | |||
530 | if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) { | 539 | if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) { |
531 | /* can't do anything more with chip; needs re-init */ | 540 | /* can't do anything more with chip; needs re-init */ |
532 | *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT; | 541 | *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT; |
@@ -594,8 +603,9 @@ static void __devexit cleanup_device(struct ipath_devdata *dd) | |||
594 | 603 | ||
595 | ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n", | 604 | ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n", |
596 | dd->ipath_pageshadow); | 605 | dd->ipath_pageshadow); |
597 | vfree(dd->ipath_pageshadow); | 606 | tmpp = dd->ipath_pageshadow; |
598 | dd->ipath_pageshadow = NULL; | 607 | dd->ipath_pageshadow = NULL; |
608 | vfree(tmpp); | ||
599 | } | 609 | } |
600 | 610 | ||
601 | /* | 611 | /* |
@@ -622,6 +632,12 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev) | |||
622 | 632 | ||
623 | ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd); | 633 | ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd); |
624 | 634 | ||
635 | /* | ||
636 | * disable the IB link early, to be sure no new packets arrive, which | ||
637 | * complicates the shutdown process | ||
638 | */ | ||
639 | ipath_shutdown_device(dd); | ||
640 | |||
625 | if (dd->verbs_dev) | 641 | if (dd->verbs_dev) |
626 | ipath_unregister_ib_device(dd->verbs_dev); | 642 | ipath_unregister_ib_device(dd->verbs_dev); |
627 | 643 | ||
@@ -754,9 +770,42 @@ static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, | |||
754 | return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT; | 770 | return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT; |
755 | } | 771 | } |
756 | 772 | ||
757 | void ipath_decode_err(char *buf, size_t blen, ipath_err_t err) | 773 | /* |
774 | * Decode the error status into strings, deciding whether to always | ||
775 | * print it or not depending on "normal packet errors" vs everything | ||
776 | * else. Return 1 if "real" errors, otherwise 0 if only packet | ||
777 | * errors, so caller can decide what to print with the string. | ||
778 | */ | ||
779 | int ipath_decode_err(char *buf, size_t blen, ipath_err_t err) | ||
758 | { | 780 | { |
781 | int iserr = 1; | ||
759 | *buf = '\0'; | 782 | *buf = '\0'; |
783 | if (err & INFINIPATH_E_PKTERRS) { | ||
784 | if (!(err & ~INFINIPATH_E_PKTERRS)) | ||
785 | iserr = 0; // if only packet errors. | ||
786 | if (ipath_debug & __IPATH_ERRPKTDBG) { | ||
787 | if (err & INFINIPATH_E_REBP) | ||
788 | strlcat(buf, "EBP ", blen); | ||
789 | if (err & INFINIPATH_E_RVCRC) | ||
790 | strlcat(buf, "VCRC ", blen); | ||
791 | if (err & INFINIPATH_E_RICRC) { | ||
792 | strlcat(buf, "CRC ", blen); | ||
793 | // clear for check below, so only once | ||
794 | err &= ~INFINIPATH_E_RICRC; | ||
795 | } | ||
796 | if (err & INFINIPATH_E_RSHORTPKTLEN) | ||
797 | strlcat(buf, "rshortpktlen ", blen); | ||
798 | if (err & INFINIPATH_E_SDROPPEDDATAPKT) | ||
799 | strlcat(buf, "sdroppeddatapkt ", blen); | ||
800 | if (err & INFINIPATH_E_SPKTLEN) | ||
801 | strlcat(buf, "spktlen ", blen); | ||
802 | } | ||
803 | if ((err & INFINIPATH_E_RICRC) && | ||
804 | !(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP))) | ||
805 | strlcat(buf, "CRC ", blen); | ||
806 | if (!iserr) | ||
807 | goto done; | ||
808 | } | ||
760 | if (err & INFINIPATH_E_RHDRLEN) | 809 | if (err & INFINIPATH_E_RHDRLEN) |
761 | strlcat(buf, "rhdrlen ", blen); | 810 | strlcat(buf, "rhdrlen ", blen); |
762 | if (err & INFINIPATH_E_RBADTID) | 811 | if (err & INFINIPATH_E_RBADTID) |
@@ -767,12 +816,12 @@ void ipath_decode_err(char *buf, size_t blen, ipath_err_t err) | |||
767 | strlcat(buf, "rhdr ", blen); | 816 | strlcat(buf, "rhdr ", blen); |
768 | if (err & INFINIPATH_E_RLONGPKTLEN) | 817 | if (err & INFINIPATH_E_RLONGPKTLEN) |
769 | strlcat(buf, "rlongpktlen ", blen); | 818 | strlcat(buf, "rlongpktlen ", blen); |
770 | if (err & INFINIPATH_E_RSHORTPKTLEN) | ||
771 | strlcat(buf, "rshortpktlen ", blen); | ||
772 | if (err & INFINIPATH_E_RMAXPKTLEN) | 819 | if (err & INFINIPATH_E_RMAXPKTLEN) |
773 | strlcat(buf, "rmaxpktlen ", blen); | 820 | strlcat(buf, "rmaxpktlen ", blen); |
774 | if (err & INFINIPATH_E_RMINPKTLEN) | 821 | if (err & INFINIPATH_E_RMINPKTLEN) |
775 | strlcat(buf, "rminpktlen ", blen); | 822 | strlcat(buf, "rminpktlen ", blen); |
823 | if (err & INFINIPATH_E_SMINPKTLEN) | ||
824 | strlcat(buf, "sminpktlen ", blen); | ||
776 | if (err & INFINIPATH_E_RFORMATERR) | 825 | if (err & INFINIPATH_E_RFORMATERR) |
777 | strlcat(buf, "rformaterr ", blen); | 826 | strlcat(buf, "rformaterr ", blen); |
778 | if (err & INFINIPATH_E_RUNSUPVL) | 827 | if (err & INFINIPATH_E_RUNSUPVL) |
@@ -781,32 +830,20 @@ void ipath_decode_err(char *buf, size_t blen, ipath_err_t err) | |||
781 | strlcat(buf, "runexpchar ", blen); | 830 | strlcat(buf, "runexpchar ", blen); |
782 | if (err & INFINIPATH_E_RIBFLOW) | 831 | if (err & INFINIPATH_E_RIBFLOW) |
783 | strlcat(buf, "ribflow ", blen); | 832 | strlcat(buf, "ribflow ", blen); |
784 | if (err & INFINIPATH_E_REBP) | ||
785 | strlcat(buf, "EBP ", blen); | ||
786 | if (err & INFINIPATH_E_SUNDERRUN) | 833 | if (err & INFINIPATH_E_SUNDERRUN) |
787 | strlcat(buf, "sunderrun ", blen); | 834 | strlcat(buf, "sunderrun ", blen); |
788 | if (err & INFINIPATH_E_SPIOARMLAUNCH) | 835 | if (err & INFINIPATH_E_SPIOARMLAUNCH) |
789 | strlcat(buf, "spioarmlaunch ", blen); | 836 | strlcat(buf, "spioarmlaunch ", blen); |
790 | if (err & INFINIPATH_E_SUNEXPERRPKTNUM) | 837 | if (err & INFINIPATH_E_SUNEXPERRPKTNUM) |
791 | strlcat(buf, "sunexperrpktnum ", blen); | 838 | strlcat(buf, "sunexperrpktnum ", blen); |
792 | if (err & INFINIPATH_E_SDROPPEDDATAPKT) | ||
793 | strlcat(buf, "sdroppeddatapkt ", blen); | ||
794 | if (err & INFINIPATH_E_SDROPPEDSMPPKT) | 839 | if (err & INFINIPATH_E_SDROPPEDSMPPKT) |
795 | strlcat(buf, "sdroppedsmppkt ", blen); | 840 | strlcat(buf, "sdroppedsmppkt ", blen); |
796 | if (err & INFINIPATH_E_SMAXPKTLEN) | 841 | if (err & INFINIPATH_E_SMAXPKTLEN) |
797 | strlcat(buf, "smaxpktlen ", blen); | 842 | strlcat(buf, "smaxpktlen ", blen); |
798 | if (err & INFINIPATH_E_SMINPKTLEN) | ||
799 | strlcat(buf, "sminpktlen ", blen); | ||
800 | if (err & INFINIPATH_E_SUNSUPVL) | 843 | if (err & INFINIPATH_E_SUNSUPVL) |
801 | strlcat(buf, "sunsupVL ", blen); | 844 | strlcat(buf, "sunsupVL ", blen); |
802 | if (err & INFINIPATH_E_SPKTLEN) | ||
803 | strlcat(buf, "spktlen ", blen); | ||
804 | if (err & INFINIPATH_E_INVALIDADDR) | 845 | if (err & INFINIPATH_E_INVALIDADDR) |
805 | strlcat(buf, "invalidaddr ", blen); | 846 | strlcat(buf, "invalidaddr ", blen); |
806 | if (err & INFINIPATH_E_RICRC) | ||
807 | strlcat(buf, "CRC ", blen); | ||
808 | if (err & INFINIPATH_E_RVCRC) | ||
809 | strlcat(buf, "VCRC ", blen); | ||
810 | if (err & INFINIPATH_E_RRCVEGRFULL) | 847 | if (err & INFINIPATH_E_RRCVEGRFULL) |
811 | strlcat(buf, "rcvegrfull ", blen); | 848 | strlcat(buf, "rcvegrfull ", blen); |
812 | if (err & INFINIPATH_E_RRCVHDRFULL) | 849 | if (err & INFINIPATH_E_RRCVHDRFULL) |
@@ -819,6 +856,8 @@ void ipath_decode_err(char *buf, size_t blen, ipath_err_t err) | |||
819 | strlcat(buf, "hardware ", blen); | 856 | strlcat(buf, "hardware ", blen); |
820 | if (err & INFINIPATH_E_RESET) | 857 | if (err & INFINIPATH_E_RESET) |
821 | strlcat(buf, "reset ", blen); | 858 | strlcat(buf, "reset ", blen); |
859 | done: | ||
860 | return iserr; | ||
822 | } | 861 | } |
823 | 862 | ||
824 | /** | 863 | /** |
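ipath_decode_err() now tells its caller whether anything beyond benign packet errors was seen, so the caller can pick a log level from the return value. A compilable userspace sketch of that caller-side pattern follows; the error bits and decode body are illustrative stand-ins, not the driver's definitions:

    #include <stdio.h>
    #include <string.h>

    #define E_PKT_CRC  0x1ULL    /* hypothetical "packet" error bit */
    #define E_HARDWARE 0x2ULL    /* hypothetical "real" error bit */

    static int decode_err(char *buf, size_t blen, unsigned long long err)
    {
        int iserr = 1;

        buf[0] = '\0';
        if (err && !(err & ~E_PKT_CRC))
            iserr = 0;                               /* only packet errors */
        if (err & E_PKT_CRC)
            strncat(buf, "CRC ", blen - strlen(buf) - 1);
        if (err & E_HARDWARE)
            strncat(buf, "hardware ", blen - strlen(buf) - 1);
        return iserr;
    }

    int main(void)
    {
        char msg[64];

        /* the return value picks the log level, as the new comment says */
        if (decode_err(msg, sizeof(msg), E_PKT_CRC))
            fprintf(stderr, "error: %s\n", msg);     /* "real" errors */
        else
            printf("debug: %s\n", msg);              /* benign packet noise */
        return 0;
    }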
@@ -1662,6 +1701,22 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate) | |||
1662 | lstate = IPATH_LINKACTIVE; | 1701 | lstate = IPATH_LINKACTIVE; |
1663 | break; | 1702 | break; |
1664 | 1703 | ||
1704 | case IPATH_IB_LINK_LOOPBACK: | ||
1705 | dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n"); | ||
1706 | dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK; | ||
1707 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, | ||
1708 | dd->ipath_ibcctrl); | ||
1709 | ret = 0; | ||
1710 | goto bail; // no state change to wait for | ||
1711 | |||
1712 | case IPATH_IB_LINK_EXTERNAL: | ||
1713 | dev_info(&dd->pcidev->dev, "Disabling IB local loopback (normal)\n"); | ||
1714 | dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK; | ||
1715 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, | ||
1716 | dd->ipath_ibcctrl); | ||
1717 | ret = 0; | ||
1718 | goto bail; // no state change to wait for | ||
1719 | |||
1665 | default: | 1720 | default: |
1666 | ipath_dbg("Invalid linkstate 0x%x requested\n", newstate); | 1721 | ipath_dbg("Invalid linkstate 0x%x requested\n", newstate); |
1667 | ret = -EINVAL; | 1722 | ret = -EINVAL; |
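Both new loopback cases follow the driver's shadowed-register discipline: edit the cached ipath_ibcctrl, write the whole shadow back, and return without waiting for a link-state change. A standalone sketch of that pattern; the bit position and the printout are assumptions for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define IBCC_LOOPBACK (1ULL << 62)   /* assumed bit position */

    static uint64_t ibcctrl_shadow;      /* models dd->ipath_ibcctrl */

    static void write_kreg(uint64_t val) /* stands in for ipath_write_kreg() */
    {
        printf("IBCCtrl <- 0x%016llx\n", (unsigned long long)val);
    }

    static void set_loopback(int enable)
    {
        if (enable)
            ibcctrl_shadow |= IBCC_LOOPBACK;   /* IPATH_IB_LINK_LOOPBACK */
        else
            ibcctrl_shadow &= ~IBCC_LOOPBACK;  /* IPATH_IB_LINK_EXTERNAL */
        write_kreg(ibcctrl_shadow);            /* no state change to wait for */
    }

    int main(void)
    {
        set_loopback(1);
        set_loopback(0);
        return 0;
    }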
@@ -1765,29 +1820,6 @@ int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc) | |||
1765 | return 0; | 1820 | return 0; |
1766 | } | 1821 | } |
1767 | 1822 | ||
1768 | /** | ||
1769 | * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register | ||
1770 | * @dd: the infinipath device | ||
1771 | * @regno: the register number to read | ||
1772 | * @port: the port containing the register | ||
1773 | * | ||
1774 | * Registers that vary with the chip implementation constants (port) | ||
1775 | * use this routine. | ||
1776 | */ | ||
1777 | u64 ipath_read_kreg64_port(const struct ipath_devdata *dd, ipath_kreg regno, | ||
1778 | unsigned port) | ||
1779 | { | ||
1780 | u16 where; | ||
1781 | |||
1782 | if (port < dd->ipath_portcnt && | ||
1783 | (regno == dd->ipath_kregs->kr_rcvhdraddr || | ||
1784 | regno == dd->ipath_kregs->kr_rcvhdrtailaddr)) | ||
1785 | where = regno + port; | ||
1786 | else | ||
1787 | where = -1; | ||
1788 | |||
1789 | return ipath_read_kreg64(dd, where); | ||
1790 | } | ||
1791 | 1823 | ||
1792 | /** | 1824 | /** |
1793 | * ipath_write_kreg_port - write a device's per-port 64-bit kernel register | 1825 | * ipath_write_kreg_port - write a device's per-port 64-bit kernel register |
@@ -1973,7 +2005,8 @@ static int __init infinipath_init(void) | |||
1973 | { | 2005 | { |
1974 | int ret; | 2006 | int ret; |
1975 | 2007 | ||
1976 | ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version); | 2008 | if (ipath_debug & __IPATH_DBG) |
2009 | printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version); | ||
1977 | 2010 | ||
1978 | /* | 2011 | /* |
1979 | * These must be called before the driver is registered with | 2012 | * These must be called before the driver is registered with |
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c index a4019a6b7560..030185f90ee2 100644 --- a/drivers/infiniband/hw/ipath/ipath_eeprom.c +++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c | |||
@@ -626,6 +626,10 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd) | |||
626 | } else | 626 | } else |
627 | memcpy(dd->ipath_serial, ifp->if_serial, | 627 | memcpy(dd->ipath_serial, ifp->if_serial, |
628 | sizeof ifp->if_serial); | 628 | sizeof ifp->if_serial); |
629 | if (!strstr(ifp->if_comment, "Tested successfully")) | ||
630 | ipath_dev_err(dd, "Board SN %s did not pass functional " | ||
631 | "test: %s\n", dd->ipath_serial, | ||
632 | ifp->if_comment); | ||
629 | 633 | ||
630 | ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n", | 634 | ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n", |
631 | (unsigned long long) be64_to_cpu(dd->ipath_guid)); | 635 | (unsigned long long) be64_to_cpu(dd->ipath_guid)); |
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index 5d64ff875297..1272aaf2a785 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006 QLogic, Inc. All rights reserved. | 2 | * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. |
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
@@ -41,12 +41,6 @@ | |||
41 | #include "ipath_kernel.h" | 41 | #include "ipath_kernel.h" |
42 | #include "ipath_common.h" | 42 | #include "ipath_common.h" |
43 | 43 | ||
44 | /* | ||
45 | * mmap64 doesn't allow all 64 bits for 32-bit applications | ||
46 | * so only use the low 43 bits. | ||
47 | */ | ||
48 | #define MMAP64_MASK 0x7FFFFFFFFFFUL | ||
49 | |||
50 | static int ipath_open(struct inode *, struct file *); | 44 | static int ipath_open(struct inode *, struct file *); |
51 | static int ipath_close(struct inode *, struct file *); | 45 | static int ipath_close(struct inode *, struct file *); |
52 | static ssize_t ipath_write(struct file *, const char __user *, size_t, | 46 | static ssize_t ipath_write(struct file *, const char __user *, size_t, |
@@ -63,6 +57,24 @@ static const struct file_operations ipath_file_ops = { | |||
63 | .mmap = ipath_mmap | 57 | .mmap = ipath_mmap |
64 | }; | 58 | }; |
65 | 59 | ||
60 | /* | ||
61 | * Convert kernel virtual addresses to physical addresses so they don't | ||
62 | * potentially conflict with the chip addresses used as mmap offsets. | ||
63 | * It doesn't really matter what mmap offset we use as long as we can | ||
64 | * interpret it correctly. | ||
65 | */ | ||
66 | static u64 cvt_kvaddr(void *p) | ||
67 | { | ||
68 | struct page *page; | ||
69 | u64 paddr = 0; | ||
70 | |||
71 | page = vmalloc_to_page(p); | ||
72 | if (page) | ||
73 | paddr = page_to_pfn(page) << PAGE_SHIFT; | ||
74 | |||
75 | return paddr; | ||
76 | } | ||
77 | |||
66 | static int ipath_get_base_info(struct file *fp, | 78 | static int ipath_get_base_info(struct file *fp, |
67 | void __user *ubase, size_t ubase_size) | 79 | void __user *ubase, size_t ubase_size) |
68 | { | 80 | { |
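cvt_kvaddr() replaces the old MMAP64_MASK trick: instead of masking a 64-bit kernel virtual address down to 43 bits, the driver advertises the physical address of the vmalloc'ed page and later matches the mmap offset against the same conversion. A userspace model of that token-matching idea, with simple page masking standing in for vmalloc_to_page()/page_to_pfn():

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* reduce a pointer to the page-aligned token the driver advertises */
    static uint64_t cvt_token(const void *p)
    {
        return ((uint64_t)(uintptr_t)p >> PAGE_SHIFT) << PAGE_SHIFT;
    }

    static char uregbase[1 << PAGE_SHIFT];  /* models pd->subport_uregbase */
    static char rcvhdr[1 << PAGE_SHIFT];    /* models pd->subport_rcvhdr_base */

    int main(void)
    {
        /* userspace passes the token back as an mmap offset... */
        uint64_t requested = cvt_token(rcvhdr);

        /* ...and the handler matches it against each known region */
        if (requested == cvt_token(uregbase))
            puts("map uregbase");
        else if (requested == cvt_token(rcvhdr))
            puts("map rcvhdr");
        else
            puts("not a kernel-virtual region");
        return 0;
    }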
@@ -87,7 +99,7 @@ static int ipath_get_base_info(struct file *fp, | |||
87 | sz = sizeof(*kinfo); | 99 | sz = sizeof(*kinfo); |
88 | /* If port sharing is not requested, allow the old size structure */ | 100 | /* If port sharing is not requested, allow the old size structure */ |
89 | if (!shared) | 101 | if (!shared) |
90 | sz -= 3 * sizeof(u64); | 102 | sz -= 7 * sizeof(u64); |
91 | if (ubase_size < sz) { | 103 | if (ubase_size < sz) { |
92 | ipath_cdbg(PROC, | 104 | ipath_cdbg(PROC, |
93 | "Base size %zu, need %zu (version mismatch?)\n", | 105 | "Base size %zu, need %zu (version mismatch?)\n", |
@@ -165,24 +177,41 @@ static int ipath_get_base_info(struct file *fp, | |||
165 | kinfo->spi_piobufbase = (u64) pd->port_piobufs + | 177 | kinfo->spi_piobufbase = (u64) pd->port_piobufs + |
166 | dd->ipath_palign * | 178 | dd->ipath_palign * |
167 | (dd->ipath_pbufsport - kinfo->spi_piocnt); | 179 | (dd->ipath_pbufsport - kinfo->spi_piocnt); |
168 | kinfo->__spi_uregbase = (u64) dd->ipath_uregbase + | ||
169 | dd->ipath_palign * pd->port_port; | ||
170 | } else { | 180 | } else { |
171 | unsigned slave = subport_fp(fp) - 1; | 181 | unsigned slave = subport_fp(fp) - 1; |
172 | 182 | ||
173 | kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt; | 183 | kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt; |
174 | kinfo->spi_piobufbase = (u64) pd->port_piobufs + | 184 | kinfo->spi_piobufbase = (u64) pd->port_piobufs + |
175 | dd->ipath_palign * kinfo->spi_piocnt * slave; | 185 | dd->ipath_palign * kinfo->spi_piocnt * slave; |
176 | kinfo->__spi_uregbase = ((u64) pd->subport_uregbase + | 186 | } |
177 | PAGE_SIZE * slave) & MMAP64_MASK; | 187 | if (shared) { |
188 | kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase + | ||
189 | dd->ipath_palign * pd->port_port; | ||
190 | kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs; | ||
191 | kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base; | ||
192 | kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr; | ||
178 | 193 | ||
179 | kinfo->spi_rcvhdr_base = ((u64) pd->subport_rcvhdr_base + | 194 | kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase + |
180 | pd->port_rcvhdrq_size * slave) & MMAP64_MASK; | 195 | PAGE_SIZE * subport_fp(fp)); |
181 | kinfo->spi_rcvhdr_tailaddr = | 196 | |
182 | (u64) pd->port_rcvhdrqtailaddr_phys & MMAP64_MASK; | 197 | kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base + |
183 | kinfo->spi_rcv_egrbufs = ((u64) pd->subport_rcvegrbuf + | 198 | pd->port_rcvhdrq_size * subport_fp(fp)); |
184 | dd->ipath_rcvegrcnt * dd->ipath_rcvegrbufsize * slave) & | 199 | kinfo->spi_rcvhdr_tailaddr = 0; |
185 | MMAP64_MASK; | 200 | kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf + |
201 | pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size * | ||
202 | subport_fp(fp)); | ||
203 | |||
204 | kinfo->spi_subport_uregbase = | ||
205 | cvt_kvaddr(pd->subport_uregbase); | ||
206 | kinfo->spi_subport_rcvegrbuf = | ||
207 | cvt_kvaddr(pd->subport_rcvegrbuf); | ||
208 | kinfo->spi_subport_rcvhdr_base = | ||
209 | cvt_kvaddr(pd->subport_rcvhdr_base); | ||
210 | ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n", | ||
211 | kinfo->spi_port, kinfo->spi_runtime_flags, | ||
212 | (unsigned long long) kinfo->spi_subport_uregbase, | ||
213 | (unsigned long long) kinfo->spi_subport_rcvegrbuf, | ||
214 | (unsigned long long) kinfo->spi_subport_rcvhdr_base); | ||
186 | } | 215 | } |
187 | 216 | ||
188 | kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) / | 217 | kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) / |
@@ -199,20 +228,10 @@ static int ipath_get_base_info(struct file *fp, | |||
199 | 228 | ||
200 | if (master) { | 229 | if (master) { |
201 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER; | 230 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER; |
202 | kinfo->spi_subport_uregbase = | ||
203 | (u64) pd->subport_uregbase & MMAP64_MASK; | ||
204 | kinfo->spi_subport_rcvegrbuf = | ||
205 | (u64) pd->subport_rcvegrbuf & MMAP64_MASK; | ||
206 | kinfo->spi_subport_rcvhdr_base = | ||
207 | (u64) pd->subport_rcvhdr_base & MMAP64_MASK; | ||
208 | ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n", | ||
209 | kinfo->spi_port, kinfo->spi_runtime_flags, | ||
210 | (unsigned long long) kinfo->spi_subport_uregbase, | ||
211 | (unsigned long long) kinfo->spi_subport_rcvegrbuf, | ||
212 | (unsigned long long) kinfo->spi_subport_rcvhdr_base); | ||
213 | } | 231 | } |
214 | 232 | ||
215 | if (copy_to_user(ubase, kinfo, sizeof(*kinfo))) | 233 | sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo); |
234 | if (copy_to_user(ubase, kinfo, sz)) | ||
216 | ret = -EFAULT; | 235 | ret = -EFAULT; |
217 | 236 | ||
218 | bail: | 237 | bail: |
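The copy-out is now clamped to the smaller of the caller's buffer and the kernel structure, so an older binary that knows a shorter spi_* layout still receives a valid prefix. A minimal sketch of that ABI clamp, with invented struct names and memcpy() standing in for copy_to_user():

    #include <stdio.h>
    #include <string.h>

    /* grown kernel-side struct; older binaries know only the first fields */
    struct base_info_v2 { int a, b, c, d; };

    static int copy_out(void *ubuf, size_t ubase_size)
    {
        struct base_info_v2 kinfo = { 1, 2, 3, 4 };
        size_t sz = ubase_size < sizeof(kinfo) ? ubase_size : sizeof(kinfo);

        memcpy(ubuf, &kinfo, sz);   /* models copy_to_user(ubase, kinfo, sz) */
        return 0;
    }

    int main(void)
    {
        int old_abi[2];             /* an old binary's two-field view */

        copy_out(old_abi, sizeof(old_abi));
        printf("%d %d\n", old_abi[0], old_abi[1]);
        return 0;
    }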
@@ -1132,67 +1151,55 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, | |||
1132 | struct ipath_devdata *dd; | 1151 | struct ipath_devdata *dd; |
1133 | void *addr; | 1152 | void *addr; |
1134 | size_t size; | 1153 | size_t size; |
1135 | int ret; | 1154 | int ret = 0; |
1136 | 1155 | ||
1137 | /* If the port is not shared, all addresses should be physical */ | 1156 | /* If the port is not shared, all addresses should be physical */ |
1138 | if (!pd->port_subport_cnt) { | 1157 | if (!pd->port_subport_cnt) |
1139 | ret = -EINVAL; | ||
1140 | goto bail; | 1158 | goto bail; |
1141 | } | ||
1142 | 1159 | ||
1143 | dd = pd->port_dd; | 1160 | dd = pd->port_dd; |
1144 | size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; | 1161 | size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; |
1145 | 1162 | ||
1146 | /* | 1163 | /* |
1147 | * Master has all the slave uregbase, rcvhdrq, and | 1164 | * Each process has all the subport uregbase, rcvhdrq, and |
1148 | * rcvegrbufs mmapped. | 1165 | * rcvegrbufs mmapped - as an array for all the processes, |
1166 | * and also separately for this process. | ||
1149 | */ | 1167 | */ |
1150 | if (subport == 0) { | 1168 | if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) { |
1151 | unsigned num_slaves = pd->port_subport_cnt - 1; | 1169 | addr = pd->subport_uregbase; |
1152 | 1170 | size = PAGE_SIZE * pd->port_subport_cnt; | |
1153 | if (pgaddr == ((u64) pd->subport_uregbase & MMAP64_MASK)) { | 1171 | } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) { |
1154 | addr = pd->subport_uregbase; | 1172 | addr = pd->subport_rcvhdr_base; |
1155 | size = PAGE_SIZE * num_slaves; | 1173 | size = pd->port_rcvhdrq_size * pd->port_subport_cnt; |
1156 | } else if (pgaddr == ((u64) pd->subport_rcvhdr_base & | 1174 | } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) { |
1157 | MMAP64_MASK)) { | 1175 | addr = pd->subport_rcvegrbuf; |
1158 | addr = pd->subport_rcvhdr_base; | 1176 | size *= pd->port_subport_cnt; |
1159 | size = pd->port_rcvhdrq_size * num_slaves; | 1177 | } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase + |
1160 | } else if (pgaddr == ((u64) pd->subport_rcvegrbuf & | 1178 | PAGE_SIZE * subport)) { |
1161 | MMAP64_MASK)) { | 1179 | addr = pd->subport_uregbase + PAGE_SIZE * subport; |
1162 | addr = pd->subport_rcvegrbuf; | 1180 | size = PAGE_SIZE; |
1163 | size *= num_slaves; | 1181 | } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base + |
1164 | } else { | 1182 | pd->port_rcvhdrq_size * subport)) { |
1165 | ret = -EINVAL; | 1183 | addr = pd->subport_rcvhdr_base + |
1166 | goto bail; | 1184 | pd->port_rcvhdrq_size * subport; |
1167 | } | 1185 | size = pd->port_rcvhdrq_size; |
1168 | } else if (pgaddr == (((u64) pd->subport_uregbase + | 1186 | } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf + |
1169 | PAGE_SIZE * (subport - 1)) & MMAP64_MASK)) { | 1187 | size * subport)) { |
1170 | addr = pd->subport_uregbase + PAGE_SIZE * (subport - 1); | 1188 | addr = pd->subport_rcvegrbuf + size * subport; |
1171 | size = PAGE_SIZE; | 1189 | /* rcvegrbufs are read-only on the slave */ |
1172 | } else if (pgaddr == (((u64) pd->subport_rcvhdr_base + | 1190 | if (vma->vm_flags & VM_WRITE) { |
1173 | pd->port_rcvhdrq_size * (subport - 1)) & | 1191 | dev_info(&dd->pcidev->dev, |
1174 | MMAP64_MASK)) { | 1192 | "Can't map eager buffers as " |
1175 | addr = pd->subport_rcvhdr_base + | 1193 | "writable (flags=%lx)\n", vma->vm_flags); |
1176 | pd->port_rcvhdrq_size * (subport - 1); | 1194 | ret = -EPERM; |
1177 | size = pd->port_rcvhdrq_size; | 1195 | goto bail; |
1178 | } else if (pgaddr == (((u64) pd->subport_rcvegrbuf + | 1196 | } |
1179 | size * (subport - 1)) & MMAP64_MASK)) { | 1197 | /* |
1180 | addr = pd->subport_rcvegrbuf + size * (subport - 1); | 1198 | * Don't allow permission to later change to writeable |
1181 | /* rcvegrbufs are read-only on the slave */ | 1199 | * with mprotect. |
1182 | if (vma->vm_flags & VM_WRITE) { | 1200 | */ |
1183 | dev_info(&dd->pcidev->dev, | 1201 | vma->vm_flags &= ~VM_MAYWRITE; |
1184 | "Can't map eager buffers as " | ||
1185 | "writable (flags=%lx)\n", vma->vm_flags); | ||
1186 | ret = -EPERM; | ||
1187 | goto bail; | ||
1188 | } | ||
1189 | /* | ||
1190 | * Don't allow permission to later change to writeable | ||
1191 | * with mprotect. | ||
1192 | */ | ||
1193 | vma->vm_flags &= ~VM_MAYWRITE; | ||
1194 | } else { | 1202 | } else { |
1195 | ret = -EINVAL; | ||
1196 | goto bail; | 1203 | goto bail; |
1197 | } | 1204 | } |
1198 | len = vma->vm_end - vma->vm_start; | 1205 | len = vma->vm_end - vma->vm_start; |
@@ -1205,7 +1212,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, | |||
1205 | vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; | 1212 | vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; |
1206 | vma->vm_ops = &ipath_file_vm_ops; | 1213 | vma->vm_ops = &ipath_file_vm_ops; |
1207 | vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; | 1214 | vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; |
1208 | ret = 0; | 1215 | ret = 1; |
1209 | 1216 | ||
1210 | bail: | 1217 | bail: |
1211 | return ret; | 1218 | return ret; |
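mmap_kvaddr() switches from "error unless matched" to a tri-state result: 0 means the offset was not a kernel-virtual token and the caller should go on to check hardware addresses, positive means handled, negative is a hard error. A small runnable sketch of that dispatch convention; the offsets are made up:

    #include <stdio.h>

    static int try_kvaddr(unsigned long off)
    {
        if (off == 0x1000)
            return 1;      /* matched and mapped */
        if (off == 0xbad)
            return -1;     /* matched, but mapping not permitted */
        return 0;          /* not a kernel-virtual offset */
    }

    static int do_mmap(unsigned long off)
    {
        int ret = try_kvaddr(off);

        if (ret) {
            if (ret > 0)
                ret = 0;   /* success: normalize for the caller */
            return ret;    /* done either way; skip HW-address checks */
        }
        /* ... fall through to physical/HW address handling ... */
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", do_mmap(0x1000), do_mmap(0xbad), do_mmap(0x2000));
        return 0;
    }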
@@ -1265,19 +1272,20 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) | |||
1265 | * Check for kernel virtual addresses first, anything else must | 1272 | * Check for kernel virtual addresses first, anything else must |
1266 | * match a HW or memory address. | 1273 | * match a HW or memory address. |
1267 | */ | 1274 | */ |
1268 | if (pgaddr >= (1ULL<<40)) { | 1275 | ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp)); |
1269 | ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp)); | 1276 | if (ret) { |
1277 | if (ret > 0) | ||
1278 | ret = 0; | ||
1270 | goto bail; | 1279 | goto bail; |
1271 | } | 1280 | } |
1272 | 1281 | ||
1282 | ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port; | ||
1273 | if (!pd->port_subport_cnt) { | 1283 | if (!pd->port_subport_cnt) { |
1274 | /* port is not shared */ | 1284 | /* port is not shared */ |
1275 | ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port; | ||
1276 | piocnt = dd->ipath_pbufsport; | 1285 | piocnt = dd->ipath_pbufsport; |
1277 | piobufs = pd->port_piobufs; | 1286 | piobufs = pd->port_piobufs; |
1278 | } else if (!subport_fp(fp)) { | 1287 | } else if (!subport_fp(fp)) { |
1279 | /* caller is the master */ | 1288 | /* caller is the master */ |
1280 | ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port; | ||
1281 | piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) + | 1289 | piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) + |
1282 | (dd->ipath_pbufsport % pd->port_subport_cnt); | 1290 | (dd->ipath_pbufsport % pd->port_subport_cnt); |
1283 | piobufs = pd->port_piobufs + | 1291 | piobufs = pd->port_piobufs + |
@@ -1286,7 +1294,6 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) | |||
1286 | unsigned slave = subport_fp(fp) - 1; | 1294 | unsigned slave = subport_fp(fp) - 1; |
1287 | 1295 | ||
1288 | /* caller is a slave */ | 1296 | /* caller is a slave */ |
1289 | ureg = 0; | ||
1290 | piocnt = dd->ipath_pbufsport / pd->port_subport_cnt; | 1297 | piocnt = dd->ipath_pbufsport / pd->port_subport_cnt; |
1291 | piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; | 1298 | piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; |
1292 | } | 1299 | } |
@@ -1300,9 +1307,6 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) | |||
1300 | ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, | 1307 | ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, |
1301 | (void *) dd->ipath_pioavailregs_dma, | 1308 | (void *) dd->ipath_pioavailregs_dma, |
1302 | "pioavail registers"); | 1309 | "pioavail registers"); |
1303 | else if (subport_fp(fp)) | ||
1304 | /* Subports don't mmap the physical receive buffers */ | ||
1305 | ret = -EINVAL; | ||
1306 | else if (pgaddr == pd->port_rcvegr_phys) | 1310 | else if (pgaddr == pd->port_rcvegr_phys) |
1307 | ret = mmap_rcvegrbufs(vma, pd); | 1311 | ret = mmap_rcvegrbufs(vma, pd); |
1308 | else if (pgaddr == (u64) pd->port_rcvhdrq_phys) | 1312 | else if (pgaddr == (u64) pd->port_rcvhdrq_phys) |
@@ -1400,32 +1404,41 @@ static int init_subports(struct ipath_devdata *dd, | |||
1400 | const struct ipath_user_info *uinfo) | 1404 | const struct ipath_user_info *uinfo) |
1401 | { | 1405 | { |
1402 | int ret = 0; | 1406 | int ret = 0; |
1403 | unsigned num_slaves; | 1407 | unsigned num_subports; |
1404 | size_t size; | 1408 | size_t size; |
1405 | 1409 | ||
1406 | /* Old user binaries don't know about subports */ | ||
1407 | if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR) | ||
1408 | goto bail; | ||
1409 | /* | 1410 | /* |
1410 | * If the user is requesting zero or one port, | 1411 | * If the user is requesting zero or one port, |
1411 | * skip the subport allocation. | 1412 | * skip the subport allocation. |
1412 | */ | 1413 | */ |
1413 | if (uinfo->spu_subport_cnt <= 1) | 1414 | if (uinfo->spu_subport_cnt <= 1) |
1414 | goto bail; | 1415 | goto bail; |
1415 | if (uinfo->spu_subport_cnt > 4) { | 1416 | |
1417 | /* Old user binaries don't know about new subport implementation */ | ||
1418 | if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR) { | ||
1419 | dev_info(&dd->pcidev->dev, | ||
1420 | "Mismatched user minor version (%d) and driver " | ||
1421 | "minor version (%d) while port sharing. Ensure " | ||
1422 | "that driver and library are from the same " | ||
1423 | "release.\n", | ||
1424 | (int) (uinfo->spu_userversion & 0xffff), | ||
1425 | IPATH_USER_SWMINOR); | ||
1426 | goto bail; | ||
1427 | } | ||
1428 | if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) { | ||
1416 | ret = -EINVAL; | 1429 | ret = -EINVAL; |
1417 | goto bail; | 1430 | goto bail; |
1418 | } | 1431 | } |
1419 | 1432 | ||
1420 | num_slaves = uinfo->spu_subport_cnt - 1; | 1433 | num_subports = uinfo->spu_subport_cnt; |
1421 | pd->subport_uregbase = vmalloc(PAGE_SIZE * num_slaves); | 1434 | pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports); |
1422 | if (!pd->subport_uregbase) { | 1435 | if (!pd->subport_uregbase) { |
1423 | ret = -ENOMEM; | 1436 | ret = -ENOMEM; |
1424 | goto bail; | 1437 | goto bail; |
1425 | } | 1438 | } |
1426 | /* Note: pd->port_rcvhdrq_size isn't initialized yet. */ | 1439 | /* Note: pd->port_rcvhdrq_size isn't initialized yet. */ |
1427 | size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * | 1440 | size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * |
1428 | sizeof(u32), PAGE_SIZE) * num_slaves; | 1441 | sizeof(u32), PAGE_SIZE) * num_subports; |
1429 | pd->subport_rcvhdr_base = vmalloc(size); | 1442 | pd->subport_rcvhdr_base = vmalloc(size); |
1430 | if (!pd->subport_rcvhdr_base) { | 1443 | if (!pd->subport_rcvhdr_base) { |
1431 | ret = -ENOMEM; | 1444 | ret = -ENOMEM; |
@@ -1434,7 +1447,7 @@ static int init_subports(struct ipath_devdata *dd, | |||
1434 | 1447 | ||
1435 | pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks * | 1448 | pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks * |
1436 | pd->port_rcvegrbuf_size * | 1449 | pd->port_rcvegrbuf_size * |
1437 | num_slaves); | 1450 | num_subports); |
1438 | if (!pd->subport_rcvegrbuf) { | 1451 | if (!pd->subport_rcvegrbuf) { |
1439 | ret = -ENOMEM; | 1452 | ret = -ENOMEM; |
1440 | goto bail_rhdr; | 1453 | goto bail_rhdr; |
@@ -1443,6 +1456,12 @@ static int init_subports(struct ipath_devdata *dd, | |||
1443 | pd->port_subport_cnt = uinfo->spu_subport_cnt; | 1456 | pd->port_subport_cnt = uinfo->spu_subport_cnt; |
1444 | pd->port_subport_id = uinfo->spu_subport_id; | 1457 | pd->port_subport_id = uinfo->spu_subport_id; |
1445 | pd->active_slaves = 1; | 1458 | pd->active_slaves = 1; |
1459 | set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); | ||
1460 | memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports); | ||
1461 | memset(pd->subport_rcvhdr_base, 0, size); | ||
1462 | memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks * | ||
1463 | pd->port_rcvegrbuf_size * | ||
1464 | num_subports); | ||
1446 | goto bail; | 1465 | goto bail; |
1447 | 1466 | ||
1448 | bail_rhdr: | 1467 | bail_rhdr: |
@@ -1573,18 +1592,19 @@ static int find_best_unit(struct file *fp, | |||
1573 | */ | 1592 | */ |
1574 | if (!cpus_empty(current->cpus_allowed) && | 1593 | if (!cpus_empty(current->cpus_allowed) && |
1575 | !cpus_full(current->cpus_allowed)) { | 1594 | !cpus_full(current->cpus_allowed)) { |
1576 | int ncpus = num_online_cpus(), curcpu = -1; | 1595 | int ncpus = num_online_cpus(), curcpu = -1, nset = 0; |
1577 | for (i = 0; i < ncpus; i++) | 1596 | for (i = 0; i < ncpus; i++) |
1578 | if (cpu_isset(i, current->cpus_allowed)) { | 1597 | if (cpu_isset(i, current->cpus_allowed)) { |
1579 | ipath_cdbg(PROC, "%s[%u] affinity set for " | 1598 | ipath_cdbg(PROC, "%s[%u] affinity set for " |
1580 | "cpu %d\n", current->comm, | 1599 | "cpu %d/%d\n", current->comm, |
1581 | current->pid, i); | 1600 | current->pid, i, ncpus); |
1582 | curcpu = i; | 1601 | curcpu = i; |
1602 | nset++; | ||
1583 | } | 1603 | } |
1584 | if (curcpu != -1) { | 1604 | if (curcpu != -1 && nset != ncpus) { |
1585 | if (npresent) { | 1605 | if (npresent) { |
1586 | prefunit = curcpu / (ncpus / npresent); | 1606 | prefunit = curcpu / (ncpus / npresent); |
1587 | ipath_dbg("%s[%u] %d chips, %d cpus, " | 1607 | ipath_cdbg(PROC, "%s[%u] %d chips, %d cpus, " |
1588 | "%d cpus/chip, select unit %d\n", | 1608 | "%d cpus/chip, select unit %d\n", |
1589 | current->comm, current->pid, | 1609 | current->comm, current->pid, |
1590 | npresent, ncpus, ncpus / npresent, | 1610 | npresent, ncpus, ncpus / npresent, |
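The added nset counter makes the heuristic fire only when the process is pinned to a strict subset of CPUs; the preferred unit is then the band of CPUs the current CPU falls into. A standalone sketch of that curcpu / (ncpus / npresent) mapping:

    #include <stdio.h>

    /* -1 means "no preference", else the unit whose CPU band holds curcpu */
    static int pref_unit(int curcpu, int ncpus, int npresent, int nset)
    {
        if (curcpu < 0 || nset == ncpus)  /* unpinned, or pinned to all CPUs */
            return -1;
        return curcpu / (ncpus / npresent);
    }

    int main(void)
    {
        /* 8 CPUs, 2 chips: CPUs 0-3 prefer unit 0, CPUs 4-7 prefer unit 1 */
        for (int cpu = 0; cpu < 8; cpu++)
            printf("cpu %d -> unit %d\n", cpu, pref_unit(cpu, 8, 2, 1));
        return 0;
    }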
@@ -1764,11 +1784,17 @@ static int ipath_do_user_init(struct file *fp, | |||
1764 | const struct ipath_user_info *uinfo) | 1784 | const struct ipath_user_info *uinfo) |
1765 | { | 1785 | { |
1766 | int ret; | 1786 | int ret; |
1767 | struct ipath_portdata *pd; | 1787 | struct ipath_portdata *pd = port_fp(fp); |
1768 | struct ipath_devdata *dd; | 1788 | struct ipath_devdata *dd; |
1769 | u32 head32; | 1789 | u32 head32; |
1770 | 1790 | ||
1771 | pd = port_fp(fp); | 1791 | /* Subports don't need to initialize anything since master did it. */ |
1792 | if (subport_fp(fp)) { | ||
1793 | ret = wait_event_interruptible(pd->port_wait, | ||
1794 | !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag)); | ||
1795 | goto done; | ||
1796 | } | ||
1797 | |||
1772 | dd = pd->port_dd; | 1798 | dd = pd->port_dd; |
1773 | 1799 | ||
1774 | if (uinfo->spu_rcvhdrsize) { | 1800 | if (uinfo->spu_rcvhdrsize) { |
@@ -1826,6 +1852,11 @@ static int ipath_do_user_init(struct file *fp, | |||
1826 | dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD); | 1852 | dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD); |
1827 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | 1853 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, |
1828 | dd->ipath_rcvctrl); | 1854 | dd->ipath_rcvctrl); |
1855 | /* Notify any waiting slaves */ | ||
1856 | if (pd->port_subport_cnt) { | ||
1857 | clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); | ||
1858 | wake_up(&pd->port_wait); | ||
1859 | } | ||
1829 | done: | 1860 | done: |
1830 | return ret; | 1861 | return ret; |
1831 | } | 1862 | } |
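The subport path now parks slaves until the master finishes port initialization: slaves block on port_wait until IPATH_PORT_MASTER_UNINIT is cleared, and the master wakes them when its init completes. A userspace pthreads model of the same handshake (build with -lpthread); the condition variable is an analogy for the kernel waitqueue, not the driver's actual mechanism:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wait_q = PTHREAD_COND_INITIALIZER;
    static int master_uninit = 1;        /* models IPATH_PORT_MASTER_UNINIT */

    static void *slave(void *arg)
    {
        pthread_mutex_lock(&lock);
        while (master_uninit)            /* wait_event_interruptible(...) */
            pthread_cond_wait(&wait_q, &lock);
        pthread_mutex_unlock(&lock);
        puts("slave: port initialized, proceeding");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, slave, NULL);
        /* ... master performs the real port initialization here ... */
        pthread_mutex_lock(&lock);
        master_uninit = 0;               /* clear_bit(...) */
        pthread_cond_broadcast(&wait_q); /* wake_up(&pd->port_wait) */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }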
@@ -2017,6 +2048,17 @@ static int ipath_get_slave_info(struct ipath_portdata *pd, | |||
2017 | return ret; | 2048 | return ret; |
2018 | } | 2049 | } |
2019 | 2050 | ||
2051 | static int ipath_force_pio_avail_update(struct ipath_devdata *dd) | ||
2052 | { | ||
2053 | u64 reg = dd->ipath_sendctrl; | ||
2054 | |||
2055 | clear_bit(IPATH_S_PIOBUFAVAILUPD, ®); | ||
2056 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg); | ||
2057 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); | ||
2058 | |||
2059 | return 0; | ||
2060 | } | ||
2061 | |||
2020 | static ssize_t ipath_write(struct file *fp, const char __user *data, | 2062 | static ssize_t ipath_write(struct file *fp, const char __user *data, |
2021 | size_t count, loff_t *off) | 2063 | size_t count, loff_t *off) |
2022 | { | 2064 | { |
@@ -2071,27 +2113,35 @@ static ssize_t ipath_write(struct file *fp, const char __user *data, | |||
2071 | dest = &cmd.cmd.part_key; | 2113 | dest = &cmd.cmd.part_key; |
2072 | src = &ucmd->cmd.part_key; | 2114 | src = &ucmd->cmd.part_key; |
2073 | break; | 2115 | break; |
2074 | case IPATH_CMD_SLAVE_INFO: | 2116 | case __IPATH_CMD_SLAVE_INFO: |
2075 | copy = sizeof(cmd.cmd.slave_mask_addr); | 2117 | copy = sizeof(cmd.cmd.slave_mask_addr); |
2076 | dest = &cmd.cmd.slave_mask_addr; | 2118 | dest = &cmd.cmd.slave_mask_addr; |
2077 | src = &ucmd->cmd.slave_mask_addr; | 2119 | src = &ucmd->cmd.slave_mask_addr; |
2078 | break; | 2120 | break; |
2121 | case IPATH_CMD_PIOAVAILUPD: // force an update of PIOAvail reg | ||
2122 | copy = 0; | ||
2123 | src = NULL; | ||
2124 | dest = NULL; | ||
2125 | break; | ||
2079 | default: | 2126 | default: |
2080 | ret = -EINVAL; | 2127 | ret = -EINVAL; |
2081 | goto bail; | 2128 | goto bail; |
2082 | } | 2129 | } |
2083 | 2130 | ||
2084 | if ((count - consumed) < copy) { | 2131 | if (copy) { |
2085 | ret = -EINVAL; | 2132 | if ((count - consumed) < copy) { |
2086 | goto bail; | 2133 | ret = -EINVAL; |
2087 | } | 2134 | goto bail; |
2135 | } | ||
2088 | 2136 | ||
2089 | if (copy_from_user(dest, src, copy)) { | 2137 | if (copy_from_user(dest, src, copy)) { |
2090 | ret = -EFAULT; | 2138 | ret = -EFAULT; |
2091 | goto bail; | 2139 | goto bail; |
2140 | } | ||
2141 | |||
2142 | consumed += copy; | ||
2092 | } | 2143 | } |
2093 | 2144 | ||
2094 | consumed += copy; | ||
2095 | pd = port_fp(fp); | 2145 | pd = port_fp(fp); |
2096 | if (!pd && cmd.type != __IPATH_CMD_USER_INIT && | 2146 | if (!pd && cmd.type != __IPATH_CMD_USER_INIT && |
2097 | cmd.type != IPATH_CMD_ASSIGN_PORT) { | 2147 | cmd.type != IPATH_CMD_ASSIGN_PORT) { |
@@ -2137,11 +2187,14 @@ static ssize_t ipath_write(struct file *fp, const char __user *data, | |||
2137 | case IPATH_CMD_SET_PART_KEY: | 2187 | case IPATH_CMD_SET_PART_KEY: |
2138 | ret = ipath_set_part_key(pd, cmd.cmd.part_key); | 2188 | ret = ipath_set_part_key(pd, cmd.cmd.part_key); |
2139 | break; | 2189 | break; |
2140 | case IPATH_CMD_SLAVE_INFO: | 2190 | case __IPATH_CMD_SLAVE_INFO: |
2141 | ret = ipath_get_slave_info(pd, | 2191 | ret = ipath_get_slave_info(pd, |
2142 | (void __user *) (unsigned long) | 2192 | (void __user *) (unsigned long) |
2143 | cmd.cmd.slave_mask_addr); | 2193 | cmd.cmd.slave_mask_addr); |
2144 | break; | 2194 | break; |
2195 | case IPATH_CMD_PIOAVAILUPD: | ||
2196 | ret = ipath_force_pio_avail_update(pd->port_dd); | ||
2197 | break; | ||
2145 | } | 2198 | } |
2146 | 2199 | ||
2147 | if (ret >= 0) | 2200 | if (ret >= 0) |
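The new IPATH_CMD_PIOAVAILUPD command carries no payload (copy is 0), so userspace only needs to write the command type to the device file. A hypothetical userspace sketch follows; the command value, device path, and struct layout are assumptions for illustration, not the exact ipath ABI:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define IPATH_CMD_PIOAVAILUPD 30   /* assumed value, illustrative */

    struct ipath_cmd_hdr {
        uint32_t type;                 /* zero-payload command: type only */
    };

    int main(void)
    {
        struct ipath_cmd_hdr cmd = { .type = IPATH_CMD_PIOAVAILUPD };
        int fd = open("/dev/ipath", O_WRONLY);   /* assumed device node */

        if (fd < 0) { perror("open"); return 1; }
        /* the driver copies nothing past the type for this command */
        if (write(fd, &cmd, sizeof(cmd)) < 0)
            perror("write");
        close(fd);
        return 0;
    }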
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c index 993482545021..4171198fc202 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6110.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c | |||
@@ -43,6 +43,9 @@ | |||
43 | #include "ipath_kernel.h" | 43 | #include "ipath_kernel.h" |
44 | #include "ipath_registers.h" | 44 | #include "ipath_registers.h" |
45 | 45 | ||
46 | static void ipath_setup_ht_setextled(struct ipath_devdata *, u64, u64); | ||
47 | |||
48 | |||
46 | /* | 49 | /* |
47 | * This lists the InfiniPath registers, in the actual chip layout. | 50 | * This lists the InfiniPath registers, in the actual chip layout. |
48 | * This structure should never be directly accessed. | 51 | * This structure should never be directly accessed. |
@@ -208,8 +211,8 @@ static const struct ipath_kregs ipath_ht_kregs = { | |||
208 | .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus), | 211 | .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus), |
209 | .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig), | 212 | .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig), |
210 | /* | 213 | /* |
211 | * These should not be used directly via ipath_read_kreg64(), | 214 | * These should not be used directly via ipath_write_kreg(), |
212 | * use them with ipath_read_kreg64_port(), | 215 | * use them with ipath_write_kreg_port(), |
213 | */ | 216 | */ |
214 | .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), | 217 | .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), |
215 | .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0) | 218 | .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0) |
@@ -284,6 +287,14 @@ static const struct ipath_cregs ipath_ht_cregs = { | |||
284 | #define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000 | 287 | #define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000 |
285 | #define INFINIPATH_EXTS_MEMBIST_CORRECT 0x0000000000008000 | 288 | #define INFINIPATH_EXTS_MEMBIST_CORRECT 0x0000000000008000 |
286 | 289 | ||
290 | |||
291 | /* TID entries (memory), HT-only */ | ||
292 | #define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */ | ||
293 | #define INFINIPATH_RT_VALID 0x8000000000000000ULL | ||
294 | #define INFINIPATH_RT_ADDR_SHIFT 0 | ||
295 | #define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL | ||
296 | #define INFINIPATH_RT_BUFSIZE_SHIFT 48 | ||
297 | |||
287 | /* | 298 | /* |
288 | * masks and bits that are different in different chips, or present only | 299 | * masks and bits that are different in different chips, or present only |
289 | * in one | 300 | * in one |
@@ -402,6 +413,14 @@ static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = { | |||
402 | INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), | 413 | INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), |
403 | }; | 414 | }; |
404 | 415 | ||
416 | #define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \ | ||
417 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \ | ||
418 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) | ||
419 | #define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \ | ||
420 | << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) | ||
421 | |||
422 | static int ipath_ht_txe_recover(struct ipath_devdata *); | ||
423 | |||
405 | /** | 424 | /** |
406 | * ipath_ht_handle_hwerrors - display hardware errors. | 425 | * ipath_ht_handle_hwerrors - display hardware errors. |
407 | * @dd: the infinipath device | 426 | * @dd: the infinipath device |
@@ -450,13 +469,12 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
450 | 469 | ||
451 | /* | 470 | /* |
452 | * make sure we get this much out, unless told to be quiet, | 471 | * make sure we get this much out, unless told to be quiet, |
472 | * it's a parity error we may recover from, | ||
453 | * or it's occurred within the last 5 seconds | 473 | * or it's occurred within the last 5 seconds |
454 | */ | 474 | */ |
455 | if ((hwerrs & ~(dd->ipath_lasthwerror | | 475 | if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY | |
456 | ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | 476 | RXE_EAGER_PARITY)) || |
457 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | 477 | (ipath_debug & __IPATH_VERBDBG)) |
458 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) || | ||
459 | (ipath_debug & __IPATH_VERBDBG)) | ||
460 | dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " | 478 | dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " |
461 | "(cleared)\n", (unsigned long long) hwerrs); | 479 | "(cleared)\n", (unsigned long long) hwerrs); |
462 | dd->ipath_lasthwerror |= hwerrs; | 480 | dd->ipath_lasthwerror |= hwerrs; |
@@ -467,7 +485,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
467 | (hwerrs & ~dd->ipath_hwe_bitsextant)); | 485 | (hwerrs & ~dd->ipath_hwe_bitsextant)); |
468 | 486 | ||
469 | ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); | 487 | ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); |
470 | if (ctrl & INFINIPATH_C_FREEZEMODE) { | 488 | if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) { |
471 | /* | 489 | /* |
472 | * parity errors in send memory are recoverable, | 490 | * parity errors in send memory are recoverable, |
473 | * just cancel the send (if indicated in sendbuffererror), | 491 | * just cancel the send (if indicated in sendbuffererror), |
@@ -476,50 +494,14 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
476 | * occur if a processor speculative read is done to the PIO | 494 | * occur if a processor speculative read is done to the PIO |
477 | * buffer while we are sending a packet, for example. | 495 | * buffer while we are sending a packet, for example. |
478 | */ | 496 | */ |
479 | if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | 497 | if ((hwerrs & TXE_PIO_PARITY) && ipath_ht_txe_recover(dd)) |
480 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | 498 | hwerrs &= ~TXE_PIO_PARITY; |
481 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) { | 499 | if (hwerrs & RXE_EAGER_PARITY) |
482 | ipath_stats.sps_txeparity++; | 500 | ipath_dev_err(dd, "RXE parity, Eager TID error is not " |
483 | ipath_dbg("Recovering from TXE parity error (%llu), " | 501 | "recoverable\n"); |
484 | "hwerrstatus=%llx\n", | 502 | if (!hwerrs) { |
485 | (unsigned long long) ipath_stats.sps_txeparity, | 503 | ipath_dbg("Clearing freezemode on ignored or " |
486 | (unsigned long long) hwerrs); | 504 | "recovered hardware error\n"); |
487 | ipath_disarm_senderrbufs(dd); | ||
488 | hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | ||
489 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | ||
490 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT); | ||
491 | if (!hwerrs) { /* else leave in freeze mode */ | ||
492 | ipath_write_kreg(dd, | ||
493 | dd->ipath_kregs->kr_control, | ||
494 | dd->ipath_control); | ||
495 | return; | ||
496 | } | ||
497 | } | ||
498 | if (hwerrs) { | ||
499 | /* | ||
500 | * if any set that we aren't ignoring; only | ||
501 | * make the complaint once, in case it's stuck | ||
502 | * or recurring, and we get here multiple | ||
503 | * times. | ||
504 | */ | ||
505 | if (dd->ipath_flags & IPATH_INITTED) { | ||
506 | ipath_dev_err(dd, "Fatal Hardware Error (freeze " | ||
507 | "mode), no longer usable, SN %.16s\n", | ||
508 | dd->ipath_serial); | ||
509 | isfatal = 1; | ||
510 | } | ||
511 | *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; | ||
512 | /* mark as having had error */ | ||
513 | *dd->ipath_statusp |= IPATH_STATUS_HWERROR; | ||
514 | /* | ||
515 | * mark as not usable, at a minimum until driver | ||
516 | * is reloaded, probably until reboot, since no | ||
517 | * other reset is possible. | ||
518 | */ | ||
519 | dd->ipath_flags &= ~IPATH_INITTED; | ||
520 | } else { | ||
521 | ipath_dbg("Clearing freezemode on ignored hardware " | ||
522 | "error\n"); | ||
523 | ctrl &= ~INFINIPATH_C_FREEZEMODE; | 505 | ctrl &= ~INFINIPATH_C_FREEZEMODE; |
524 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, | 506 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, |
525 | ctrl); | 507 | ctrl); |
@@ -587,7 +569,39 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
587 | dd->ipath_hwerrmask); | 569 | dd->ipath_hwerrmask); |
588 | } | 570 | } |
589 | 571 | ||
590 | ipath_dev_err(dd, "%s hardware error\n", msg); | 572 | if (hwerrs) { |
573 | /* | ||
574 | * if any set that we aren't ignoring; only | ||
575 | * make the complaint once, in case it's stuck | ||
576 | * or recurring, and we get here multiple | ||
577 | * times. | ||
578 | * force link down, so switch knows, and | ||
579 | * LEDs are turned off | ||
580 | */ | ||
581 | if (dd->ipath_flags & IPATH_INITTED) { | ||
582 | ipath_set_linkstate(dd, IPATH_IB_LINKDOWN); | ||
583 | ipath_setup_ht_setextled(dd, | ||
584 | INFINIPATH_IBCS_L_STATE_DOWN, | ||
585 | INFINIPATH_IBCS_LT_STATE_DISABLED); | ||
586 | ipath_dev_err(dd, "Fatal Hardware Error (freeze " | ||
587 | "mode), no longer usable, SN %.16s\n", | ||
588 | dd->ipath_serial); | ||
589 | isfatal = 1; | ||
590 | } | ||
591 | *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; | ||
592 | /* mark as having had error */ | ||
593 | *dd->ipath_statusp |= IPATH_STATUS_HWERROR; | ||
594 | /* | ||
595 | * mark as not usable, at a minimum until driver | ||
596 | * is reloaded, probably until reboot, since no | ||
597 | * other reset is possible. | ||
598 | */ | ||
599 | dd->ipath_flags &= ~IPATH_INITTED; | ||
600 | } | ||
601 | else | ||
602 | *msg = 0; /* recovered from all of them */ | ||
603 | if (*msg) | ||
604 | ipath_dev_err(dd, "%s hardware error\n", msg); | ||
591 | if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) | 605 | if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) |
592 | /* | 606 | /* |
593 | * for status file; if no trailing brace is copied, | 607 | * for status file; if no trailing brace is copied, |
@@ -658,7 +672,8 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, | |||
658 | if (n) | 672 | if (n) |
659 | snprintf(name, namelen, "%s", n); | 673 | snprintf(name, namelen, "%s", n); |
660 | 674 | ||
661 | if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) { | 675 | if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || |
676 | dd->ipath_minrev > 3)) { | ||
662 | /* | 677 | /* |
663 | * This version of the driver only supports Rev 3.2 and 3.3 | 678 | * This version of the driver only supports Rev 3.2 and 3.3 |
664 | */ | 679 | */ |
@@ -1163,6 +1178,8 @@ static void ipath_ht_init_hwerrors(struct ipath_devdata *dd) | |||
1163 | 1178 | ||
1164 | if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST)) | 1179 | if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST)) |
1165 | ipath_dev_err(dd, "MemBIST did not complete!\n"); | 1180 | ipath_dev_err(dd, "MemBIST did not complete!\n"); |
1181 | if (extsval & INFINIPATH_EXTS_MEMBIST_CORRECT) | ||
1182 | ipath_dbg("MemBIST corrected\n"); | ||
1166 | 1183 | ||
1167 | ipath_check_htlink(dd); | 1184 | ipath_check_htlink(dd); |
1168 | 1185 | ||
@@ -1366,6 +1383,9 @@ static void ipath_ht_put_tid(struct ipath_devdata *dd, | |||
1366 | u64 __iomem *tidptr, u32 type, | 1383 | u64 __iomem *tidptr, u32 type, |
1367 | unsigned long pa) | 1384 | unsigned long pa) |
1368 | { | 1385 | { |
1386 | if (!dd->ipath_kregbase) | ||
1387 | return; | ||
1388 | |||
1369 | if (pa != dd->ipath_tidinvalid) { | 1389 | if (pa != dd->ipath_tidinvalid) { |
1370 | if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) { | 1390 | if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) { |
1371 | dev_info(&dd->pcidev->dev, | 1391 | dev_info(&dd->pcidev->dev, |
@@ -1382,10 +1402,10 @@ static void ipath_ht_put_tid(struct ipath_devdata *dd, | |||
1382 | pa |= lenvalid | INFINIPATH_RT_VALID; | 1402 | pa |= lenvalid | INFINIPATH_RT_VALID; |
1383 | } | 1403 | } |
1384 | } | 1404 | } |
1385 | if (dd->ipath_kregbase) | 1405 | writeq(pa, tidptr); |
1386 | writeq(pa, tidptr); | ||
1387 | } | 1406 | } |
1388 | 1407 | ||
1408 | |||
1389 | /** | 1409 | /** |
1390 | * ipath_ht_clear_tid - clear all TID entries for a port, expected and eager | 1410 | * ipath_ht_clear_tid - clear all TID entries for a port, expected and eager |
1391 | * @dd: the infinipath device | 1411 | * @dd: the infinipath device |
@@ -1515,7 +1535,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd) | |||
1515 | INFINIPATH_S_ABORT); | 1535 | INFINIPATH_S_ABORT); |
1516 | 1536 | ||
1517 | ipath_get_eeprom_info(dd); | 1537 | ipath_get_eeprom_info(dd); |
1518 | if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && | 1538 | if (dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && |
1519 | dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') { | 1539 | dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') { |
1520 | /* | 1540 | /* |
1521 | * Later production QHT7040 has same changes as QHT7140, so | 1541 | * Later production QHT7040 has same changes as QHT7140, so |
@@ -1528,6 +1548,24 @@ static int ipath_ht_early_init(struct ipath_devdata *dd) | |||
1528 | return 0; | 1548 | return 0; |
1529 | } | 1549 | } |
1530 | 1550 | ||
1551 | |||
1552 | static int ipath_ht_txe_recover(struct ipath_devdata *dd) | ||
1553 | { | ||
1554 | int cnt = ++ipath_stats.sps_txeparity; | ||
1555 | if (cnt >= IPATH_MAX_PARITY_ATTEMPTS) { | ||
1556 | if (cnt == IPATH_MAX_PARITY_ATTEMPTS) | ||
1557 | ipath_dev_err(dd, | ||
1558 | "Too many attempts to recover from " | ||
1559 | "TXE parity, giving up\n"); | ||
1560 | return 0; | ||
1561 | } | ||
1562 | dev_info(&dd->pcidev->dev, | ||
1563 | "Recovering from TXE PIO parity error\n"); | ||
1564 | ipath_disarm_senderrbufs(dd, 1); | ||
1565 | return 1; | ||
1566 | } | ||
1567 | |||
1568 | |||
1531 | /** | 1569 | /** |
1532 | * ipath_init_ht_get_base_info - set chip-specific flags for user code | 1570 | * ipath_init_ht_get_base_info - set chip-specific flags for user code |
1533 | * @dd: the infinipath device | 1571 | * @dd: the infinipath device |
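ipath_ht_txe_recover() above (and its 6120 sibling later in this series) bounds recovery attempts: recover quietly up to a cap, complain exactly once when the cap is reached, then report failure so the error is treated as fatal. A compilable sketch of that bounded-retry pattern, with an illustrative cap value:

    #include <stdio.h>

    #define MAX_PARITY_ATTEMPTS 10  /* models IPATH_MAX_PARITY_ATTEMPTS */

    static unsigned txeparity;      /* models ipath_stats.sps_txeparity */

    static int txe_recover(void)
    {
        unsigned cnt = ++txeparity;

        if (cnt >= MAX_PARITY_ATTEMPTS) {
            if (cnt == MAX_PARITY_ATTEMPTS)  /* warn exactly once */
                fprintf(stderr,
                        "Too many TXE parity recoveries, giving up\n");
            return 0;                        /* caller treats it as fatal */
        }
        printf("Recovering from TXE PIO parity error (%u)\n", cnt);
        return 1;                            /* recovered; clear the bit */
    }

    int main(void)
    {
        for (int i = 0; i < 12; i++)
            txe_recover();
        return 0;
    }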
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c index 05918e1e7c36..1b9c30857754 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6120.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c | |||
@@ -43,6 +43,8 @@ | |||
43 | #include "ipath_kernel.h" | 43 | #include "ipath_kernel.h" |
44 | #include "ipath_registers.h" | 44 | #include "ipath_registers.h" |
45 | 45 | ||
46 | static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64); | ||
47 | |||
46 | /* | 48 | /* |
47 | * This file contains all the chip-specific register information and | 49 | * This file contains all the chip-specific register information and |
48 | * access functions for the QLogic InfiniPath PCI-Express chip. | 50 | * access functions for the QLogic InfiniPath PCI-Express chip. |
@@ -207,8 +209,8 @@ static const struct ipath_kregs ipath_pe_kregs = { | |||
207 | .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg), | 209 | .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg), |
208 | 210 | ||
209 | /* | 211 | /* |
210 | * These should not be used directly via ipath_read_kreg64(), | 212 | * These should not be used directly via ipath_write_kreg(), |
211 | * use them with ipath_read_kreg64_port() | 213 | * use them with ipath_write_kreg_port(), |
212 | */ | 214 | */ |
213 | .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), | 215 | .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), |
214 | .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0), | 216 | .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0), |
@@ -321,6 +323,12 @@ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = { | |||
321 | INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), | 323 | INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), |
322 | }; | 324 | }; |
323 | 325 | ||
326 | #define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \ | ||
327 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \ | ||
328 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) | ||
329 | |||
330 | static int ipath_pe_txe_recover(struct ipath_devdata *); | ||
331 | |||
324 | /** | 332 | /** |
325 | * ipath_pe_handle_hwerrors - display hardware errors. | 333 | * ipath_pe_handle_hwerrors - display hardware errors. |
326 | * @dd: the infinipath device | 334 | * @dd: the infinipath device |
@@ -394,32 +402,21 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
394 | * occur if a processor speculative read is done to the PIO | 402 | * occur if a processor speculative read is done to the PIO |
395 | * buffer while we are sending a packet, for example. | 403 | * buffer while we are sending a packet, for example. |
396 | */ | 404 | */ |
397 | if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | 405 | if ((hwerrs & TXE_PIO_PARITY) && ipath_pe_txe_recover(dd)) |
398 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | 406 | hwerrs &= ~TXE_PIO_PARITY; |
399 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) { | ||
400 | ipath_stats.sps_txeparity++; | ||
401 | ipath_dbg("Recovering from TXE parity error (%llu), " | ||
402 | "hwerrstatus=%llx\n", | ||
403 | (unsigned long long) ipath_stats.sps_txeparity, | ||
404 | (unsigned long long) hwerrs); | ||
405 | ipath_disarm_senderrbufs(dd); | ||
406 | hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | ||
407 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | ||
408 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT); | ||
409 | if (!hwerrs) { /* else leave in freeze mode */ | ||
410 | ipath_write_kreg(dd, | ||
411 | dd->ipath_kregs->kr_control, | ||
412 | dd->ipath_control); | ||
413 | return; | ||
414 | } | ||
415 | } | ||
416 | if (hwerrs) { | 407 | if (hwerrs) { |
417 | /* | 408 | /* |
418 | * if any set that we aren't ignoring only make the | 409 | * if any set that we aren't ignoring only make the |
419 | * complaint once, in case it's stuck or recurring, | 410 | * complaint once, in case it's stuck or recurring, |
420 | * and we get here multiple times | 411 | * and we get here multiple times |
412 | * Force link down, so switch knows, and | ||
413 | * LEDs are turned off | ||
421 | */ | 414 | */ |
422 | if (dd->ipath_flags & IPATH_INITTED) { | 415 | if (dd->ipath_flags & IPATH_INITTED) { |
416 | ipath_set_linkstate(dd, IPATH_IB_LINKDOWN); | ||
417 | ipath_setup_pe_setextled(dd, | ||
418 | INFINIPATH_IBCS_L_STATE_DOWN, | ||
419 | INFINIPATH_IBCS_LT_STATE_DISABLED); | ||
423 | ipath_dev_err(dd, "Fatal Hardware Error (freeze " | 420 | ipath_dev_err(dd, "Fatal Hardware Error (freeze " |
424 | "mode), no longer usable, SN %.16s\n", | 421 | "mode), no longer usable, SN %.16s\n", |
425 | dd->ipath_serial); | 422 | dd->ipath_serial); |
@@ -493,7 +490,8 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
493 | dd->ipath_hwerrmask); | 490 | dd->ipath_hwerrmask); |
494 | } | 491 | } |
495 | 492 | ||
496 | ipath_dev_err(dd, "%s hardware error\n", msg); | 493 | if (*msg) |
494 | ipath_dev_err(dd, "%s hardware error\n", msg); | ||
497 | if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) { | 495 | if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) { |
498 | /* | 496 | /* |
499 | * for /sys status file ; if no trailing } is copied, we'll | 497 | * for /sys status file ; if no trailing } is copied, we'll |
@@ -581,6 +579,8 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd) | |||
581 | 579 | ||
582 | if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST)) | 580 | if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST)) |
583 | ipath_dev_err(dd, "MemBIST did not complete!\n"); | 581 | ipath_dev_err(dd, "MemBIST did not complete!\n"); |
582 | if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND) | ||
583 | ipath_dbg("MemBIST corrected\n"); | ||
584 | 584 | ||
585 | val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */ | 585 | val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */ |
586 | 586 | ||
@@ -1330,6 +1330,35 @@ static void ipath_pe_free_irq(struct ipath_devdata *dd) | |||
1330 | dd->ipath_irq = 0; | 1330 | dd->ipath_irq = 0; |
1331 | } | 1331 | } |
1332 | 1332 | ||
1333 | /* | ||
1334 | * On platforms using this chip, and not having ordered WC stores, we | ||
1335 | * can get TXE parity errors due to speculative reads to the PIO buffers, | ||
1336 | * and this, due to a chip bug, can result in (many) false parity error | ||
1337 | * reports. So it's a debug print on those, and an info print on systems | ||
1338 | * where the speculative reads don't occur. | ||
1339 | * Because we can get lots of false errors, we have no upper limit | ||
1340 | * on recovery attempts on those platforms. | ||
1341 | */ | ||
1342 | static int ipath_pe_txe_recover(struct ipath_devdata *dd) | ||
1343 | { | ||
1344 | if (ipath_unordered_wc()) | ||
1345 | ipath_dbg("Recovering from TXE PIO parity error\n"); | ||
1346 | else { | ||
1347 | int cnt = ++ipath_stats.sps_txeparity; | ||
1348 | if (cnt >= IPATH_MAX_PARITY_ATTEMPTS) { | ||
1349 | if (cnt == IPATH_MAX_PARITY_ATTEMPTS) | ||
1350 | ipath_dev_err(dd, | ||
1351 | "Too many attempts to recover from " | ||
1352 | "TXE parity, giving up\n"); | ||
1353 | return 0; | ||
1354 | } | ||
1355 | dev_info(&dd->pcidev->dev, | ||
1356 | "Recovering from TXE PIO parity error\n"); | ||
1357 | } | ||
1358 | ipath_disarm_senderrbufs(dd, 1); | ||
1359 | return 1; | ||
1360 | } | ||
1361 | |||
1333 | /** | 1362 | /** |
1334 | * ipath_init_iba6120_funcs - set up the chip-specific function pointers | 1363 | * ipath_init_iba6120_funcs - set up the chip-specific function pointers |
1335 | * @dd: the infinipath device | 1364 | * @dd: the infinipath device |
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c index d4f6b5239ef8..7045ba689494 100644 --- a/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c | |||
@@ -216,6 +216,20 @@ static int bringup_link(struct ipath_devdata *dd) | |||
216 | return ret; | 216 | return ret; |
217 | } | 217 | } |
218 | 218 | ||
219 | static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd) | ||
220 | { | ||
221 | struct ipath_portdata *pd = NULL; | ||
222 | |||
223 | pd = kzalloc(sizeof(*pd), GFP_KERNEL); | ||
224 | if (pd) { | ||
225 | pd->port_dd = dd; | ||
226 | pd->port_cnt = 1; | ||
227 | /* The port 0 pkey table is used by the layer interface. */ | ||
228 | pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY; | ||
229 | } | ||
230 | return pd; | ||
231 | } | ||
232 | |||
219 | static int init_chip_first(struct ipath_devdata *dd, | 233 | static int init_chip_first(struct ipath_devdata *dd, |
220 | struct ipath_portdata **pdp) | 234 | struct ipath_portdata **pdp) |
221 | { | 235 | { |
@@ -271,20 +285,16 @@ static int init_chip_first(struct ipath_devdata *dd, | |||
271 | goto done; | 285 | goto done; |
272 | } | 286 | } |
273 | 287 | ||
274 | dd->ipath_pd[0] = kzalloc(sizeof(*pd), GFP_KERNEL); | 288 | pd = create_portdata0(dd); |
275 | 289 | ||
276 | if (!dd->ipath_pd[0]) { | 290 | if (!pd) { |
277 | ipath_dev_err(dd, "Unable to allocate portdata for port " | 291 | ipath_dev_err(dd, "Unable to allocate portdata for port " |
278 | "0, failing\n"); | 292 | "0, failing\n"); |
279 | ret = -ENOMEM; | 293 | ret = -ENOMEM; |
280 | goto done; | 294 | goto done; |
281 | } | 295 | } |
282 | pd = dd->ipath_pd[0]; | 296 | dd->ipath_pd[0] = pd; |
283 | pd->port_dd = dd; | 297 | |
284 | pd->port_port = 0; | ||
285 | pd->port_cnt = 1; | ||
286 | /* The port 0 pkey table is used by the layer interface. */ | ||
287 | pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY; | ||
288 | dd->ipath_rcvtidcnt = | 298 | dd->ipath_rcvtidcnt = |
289 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); | 299 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); |
290 | dd->ipath_rcvtidbase = | 300 | dd->ipath_rcvtidbase = |
@@ -590,6 +600,10 @@ static int init_housekeeping(struct ipath_devdata *dd, | |||
590 | goto done; | 600 | goto done; |
591 | } | 601 | } |
592 | 602 | ||
603 | |||
604 | /* clear diagctrl register, in case diags were running and crashed */ | ||
605 | ipath_write_kreg (dd, dd->ipath_kregs->kr_hwdiagctrl, 0); | ||
606 | |||
593 | /* clear the initial reset flag, in case first driver load */ | 607 | /* clear the initial reset flag, in case first driver load */ |
594 | ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, | 608 | ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, |
595 | INFINIPATH_E_RESET); | 609 | INFINIPATH_E_RESET); |
@@ -668,6 +682,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) | |||
668 | { | 682 | { |
669 | int ret = 0, i; | 683 | int ret = 0, i; |
670 | u32 val32, kpiobufs; | 684 | u32 val32, kpiobufs; |
685 | u32 piobufs, uports; | ||
671 | u64 val; | 686 | u64 val; |
672 | struct ipath_portdata *pd = NULL; /* keep gcc4 happy */ | 687 | struct ipath_portdata *pd = NULL; /* keep gcc4 happy */ |
673 | gfp_t gfp_flags = GFP_USER | __GFP_COMP; | 688 | gfp_t gfp_flags = GFP_USER | __GFP_COMP; |
@@ -702,16 +717,17 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) | |||
702 | * the in memory DMA'ed copies of the registers. This has to | 717 | * the in memory DMA'ed copies of the registers. This has to |
703 | * be done early, before we calculate lastport, etc. | 718 | * be done early, before we calculate lastport, etc. |
704 | */ | 719 | */ |
705 | val = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k; | 720 | piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k; |
706 | /* | 721 | /* |
707 | * calc number of pioavail registers, and save it; we have 2 | 722 | * calc number of pioavail registers, and save it; we have 2 |
708 | * bits per buffer. | 723 | * bits per buffer. |
709 | */ | 724 | */ |
710 | dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) | 725 | dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) |
711 | / (sizeof(u64) * BITS_PER_BYTE / 2); | 726 | / (sizeof(u64) * BITS_PER_BYTE / 2); |
727 | uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0; | ||
712 | if (ipath_kpiobufs == 0) { | 728 | if (ipath_kpiobufs == 0) { |
713 | /* not set by user (this is default) */ | 729 | /* not set by user (this is default) */ |
714 | if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128) | 730 | if (piobufs >= (uports * IPATH_MIN_USER_PORT_BUFCNT) + 32) |
715 | kpiobufs = 32; | 731 | kpiobufs = 32; |
716 | else | 732 | else |
717 | kpiobufs = 16; | 733 | kpiobufs = 16; |
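
The pioavail sizing above packs two status bits per PIO buffer into 64-bit registers, so each register covers 32 buffers and the register count is the buffer count rounded up to a multiple of 32, then divided by 32. A small worked example (ALIGN_UP mirrors the kernel's ALIGN macro; the buffer count is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
        uint32_t piobufs = 72;    /* hypothetical piobcnt2k + piobcnt4k */
        /* 2 status bits per buffer in a 64-bit register => 32 buffers/reg */
        uint32_t per_reg = (uint32_t)(sizeof(uint64_t) * 8 / 2);
        uint32_t pioavregs = ALIGN_UP(piobufs, per_reg) / per_reg;

        /* 72 buffers -> ALIGN_UP(72, 32) = 96 -> 96 / 32 = 3 registers */
        printf("%u buffers need %u pioavail registers\n", piobufs, pioavregs);
        return 0;
    }
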
@@ -719,31 +735,25 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) | |||
719 | else | 735 | else |
720 | kpiobufs = ipath_kpiobufs; | 736 | kpiobufs = ipath_kpiobufs; |
721 | 737 | ||
722 | if (kpiobufs > | 738 | if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) { |
723 | (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - | 739 | i = (int) piobufs - |
724 | (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT))) { | 740 | (int) (uports * IPATH_MIN_USER_PORT_BUFCNT); |
725 | i = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - | ||
726 | (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT); | ||
727 | if (i < 0) | 741 | if (i < 0) |
728 | i = 0; | 742 | i = 0; |
729 | dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs for " | 743 | dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of " |
730 | "kernel leaves too few for %d user ports " | 744 | "%d for kernel leaves too few for %d user ports " |
731 | "(%d each); using %u\n", kpiobufs, | 745 | "(%d each); using %u\n", kpiobufs, |
732 | dd->ipath_cfgports - 1, | 746 | piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i); |
733 | IPATH_MIN_USER_PORT_BUFCNT, i); | ||
734 | /* | 747 | /* |
735 | * shouldn't change ipath_kpiobufs, because could be | 748 | * shouldn't change ipath_kpiobufs, because could be |
736 | * different for different devices... | 749 | * different for different devices... |
737 | */ | 750 | */ |
738 | kpiobufs = i; | 751 | kpiobufs = i; |
739 | } | 752 | } |
740 | dd->ipath_lastport_piobuf = | 753 | dd->ipath_lastport_piobuf = piobufs - kpiobufs; |
741 | dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - kpiobufs; | 754 | dd->ipath_pbufsport = |
742 | dd->ipath_pbufsport = dd->ipath_cfgports > 1 | 755 | uports ? dd->ipath_lastport_piobuf / uports : 0; |
743 | ? dd->ipath_lastport_piobuf / (dd->ipath_cfgports - 1) | 756 | val32 = dd->ipath_lastport_piobuf - (dd->ipath_pbufsport * uports); |
744 | : 0; | ||
745 | val32 = dd->ipath_lastport_piobuf - | ||
746 | (dd->ipath_pbufsport * (dd->ipath_cfgports - 1)); | ||
747 | if (val32 > 0) { | 757 | if (val32 > 0) { |
748 | ipath_dbg("allocating %u pbufs/port leaves %u unused, " | 758 | ipath_dbg("allocating %u pbufs/port leaves %u unused, " |
749 | "add to kernel\n", dd->ipath_pbufsport, val32); | 759 | "add to kernel\n", dd->ipath_pbufsport, val32); |
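
The clamp above reserves kpiobufs for the kernel and splits the remainder evenly across the user ports, with any division remainder returned to the kernel pool (the val32 message). A worked example with hypothetical counts:

    #include <stdio.h>

    int main(void)
    {
        unsigned piobufs = 128, kpiobufs = 32, uports = 5;  /* hypothetical */
        unsigned lastport_piobuf = piobufs - kpiobufs;           /* 96 */
        unsigned pbufsport = uports ? lastport_piobuf / uports : 0;  /* 19 */
        unsigned unused = lastport_piobuf - pbufsport * uports;  /* 1 */

        printf("%u kernel, %u per port, %u unused (back to kernel)\n",
               kpiobufs, pbufsport, unused);
        return 0;
    }
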
@@ -754,8 +764,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) | |||
754 | dd->ipath_lastpioindex = dd->ipath_lastport_piobuf; | 764 | dd->ipath_lastpioindex = dd->ipath_lastport_piobuf; |
755 | ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u " | 765 | ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u " |
756 | "each for %u user ports\n", kpiobufs, | 766 | "each for %u user ports\n", kpiobufs, |
757 | dd->ipath_piobcnt2k + dd->ipath_piobcnt4k, | 767 | piobufs, dd->ipath_pbufsport, uports); |
758 | dd->ipath_pbufsport, dd->ipath_cfgports - 1); | ||
759 | 768 | ||
760 | dd->ipath_f_early_init(dd); | 769 | dd->ipath_f_early_init(dd); |
761 | 770 | ||
@@ -839,11 +848,24 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) | |||
839 | * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing | 848 | * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing |
840 | * re-init, the simplest way to handle this is to free | 849 | * re-init, the simplest way to handle this is to free |
841 | * existing, and re-allocate. | 850 | * existing, and re-allocate. |
851 | * Need to re-create rest of port 0 portdata as well. | ||
842 | */ | 852 | */ |
843 | if (reinit) { | 853 | if (reinit) { |
844 | struct ipath_portdata *pd = dd->ipath_pd[0]; | 854 | /* Alloc and init new ipath_portdata for port 0,
845 | dd->ipath_pd[0] = NULL; | 855 | * then free old pd. Could lead to fragmentation, but also
846 | ipath_free_pddata(dd, pd); | 856 | * makes later support for hot-swap easier. |
857 | */ | ||
858 | struct ipath_portdata *npd; | ||
859 | npd = create_portdata0(dd); | ||
860 | if (npd) { | ||
861 | ipath_free_pddata(dd, pd); | ||
862 | dd->ipath_pd[0] = pd = npd; | ||
863 | } else { | ||
864 | ipath_dev_err(dd, "Unable to allocate portdata for" | ||
865 | " port 0, failing\n"); | ||
866 | ret = -ENOMEM; | ||
867 | goto done; | ||
868 | } | ||
847 | } | 869 | } |
848 | dd->ipath_f_tidtemplate(dd); | 870 | dd->ipath_f_tidtemplate(dd); |
849 | ret = ipath_create_rcvhdrq(dd, pd); | 871 | ret = ipath_create_rcvhdrq(dd, pd); |
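
The reinit path above allocates the replacement portdata before freeing the old one, so an allocation failure leaves dd->ipath_pd[0] intact. A generic sketch of that allocate-then-swap shape (alloc_fn and free_fn stand in for create_portdata0 and ipath_free_pddata):

    /* Build the replacement first; only tear down the old object once
     * the new one exists, so failure leaves *slot valid. */
    static int swap_in_new(void **slot, void *(*alloc_fn)(void),
                           void (*free_fn)(void *))
    {
        void *nobj = alloc_fn();

        if (!nobj)
            return -1;      /* -ENOMEM path: *slot is untouched */
        free_fn(*slot);
        *slot = nobj;
        return 0;
    }
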
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c index 72b9e279d19d..45d033169c6e 100644 --- a/drivers/infiniband/hw/ipath/ipath_intr.c +++ b/drivers/infiniband/hw/ipath/ipath_intr.c | |||
@@ -38,10 +38,39 @@ | |||
38 | #include "ipath_common.h" | 38 | #include "ipath_common.h" |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * clear (write) a pio buffer, to clear a parity error. This routine | ||
42 | * should only be called when in freeze mode, and the buffer should be | ||
43 | * canceled afterwards. | ||
44 | */ | ||
45 | static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum) | ||
46 | { | ||
47 | u32 __iomem *pbuf; | ||
48 | u32 dwcnt; /* dword count to write */ | ||
49 | if (pnum < dd->ipath_piobcnt2k) { | ||
50 | pbuf = (u32 __iomem *) (dd->ipath_pio2kbase + pnum * | ||
51 | dd->ipath_palign); | ||
52 | dwcnt = dd->ipath_piosize2k >> 2; | ||
53 | } | ||
54 | else { | ||
55 | pbuf = (u32 __iomem *) (dd->ipath_pio4kbase + | ||
56 | (pnum - dd->ipath_piobcnt2k) * dd->ipath_4kalign); | ||
57 | dwcnt = dd->ipath_piosize4k >> 2; | ||
58 | } | ||
59 | dev_info(&dd->pcidev->dev, | ||
60 | "Rewrite PIO buffer %u, to recover from parity error\n", | ||
61 | pnum); | ||
62 | *pbuf = dwcnt + 1; /* no flush required, since already in freeze */ | ||
63 | while (--dwcnt) | ||
64 | *pbuf++ = 0; | ||
65 | } | ||
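
ipath_clrpiobuf() above picks the buffer's MMIO address from the 2k or 4k region (2k buffers occupy the first piobcnt2k indices, each region with its own spacing), writes the length word first, then streams zeroes; no flush is needed because the chip is already frozen. A simplified sketch, assuming dword-granular spacing where the driver uses byte alignments:

    #include <stddef.h>
    #include <stdint.h>

    static void clr_pio_sketch(uint32_t *base2k, uint32_t *base4k,
                               uint32_t pnum, uint32_t piobcnt2k,
                               uint32_t dw2k, uint32_t dw4k)
    {
        uint32_t *pbuf, dwcnt;

        if (pnum < piobcnt2k) {                  /* 2k buffers come first */
            pbuf = base2k + (size_t)pnum * dw2k;
            dwcnt = dw2k;
        } else {
            pbuf = base4k + (size_t)(pnum - piobcnt2k) * dw4k;
            dwcnt = dw4k;
        }
        *pbuf = dwcnt + 1;      /* length word; no flush in freeze mode */
        while (--dwcnt)
            *pbuf++ = 0;        /* stream zero dwords, as the driver's loop does */
    }
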
66 | |||
67 | /* | ||
41 | * Called when we might have an error that is specific to a particular | 68 | * Called when we might have an error that is specific to a particular |
42 | * PIO buffer, and may need to cancel that buffer, so it can be re-used. | 69 | * PIO buffer, and may need to cancel that buffer, so it can be re-used. |
70 | * If rewrite is true, and bits are set in the sendbufferror registers, | ||
71 | * we'll write to the buffer, for error recovery on parity errors. | ||
43 | */ | 72 | */ |
44 | void ipath_disarm_senderrbufs(struct ipath_devdata *dd) | 73 | void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite) |
45 | { | 74 | { |
46 | u32 piobcnt; | 75 | u32 piobcnt; |
47 | unsigned long sbuf[4]; | 76 | unsigned long sbuf[4]; |
@@ -74,8 +103,11 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd) | |||
74 | } | 103 | } |
75 | 104 | ||
76 | for (i = 0; i < piobcnt; i++) | 105 | for (i = 0; i < piobcnt; i++) |
77 | if (test_bit(i, sbuf)) | 106 | if (test_bit(i, sbuf)) { |
107 | if (rewrite) | ||
108 | ipath_clrpiobuf(dd, i); | ||
78 | ipath_disarm_piobufs(dd, i, 1); | 109 | ipath_disarm_piobufs(dd, i, 1); |
110 | } | ||
79 | dd->ipath_lastcancel = jiffies+3; /* no armlaunch for a bit */ | 111 | dd->ipath_lastcancel = jiffies+3; /* no armlaunch for a bit */ |
80 | } | 112 | } |
81 | } | 113 | } |
@@ -114,7 +146,7 @@ static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs) | |||
114 | { | 146 | { |
115 | u64 ignore_this_time = 0; | 147 | u64 ignore_this_time = 0; |
116 | 148 | ||
117 | ipath_disarm_senderrbufs(dd); | 149 | ipath_disarm_senderrbufs(dd, 0); |
118 | if ((errs & E_SUM_LINK_PKTERRS) && | 150 | if ((errs & E_SUM_LINK_PKTERRS) && |
119 | !(dd->ipath_flags & IPATH_LINKACTIVE)) { | 151 | !(dd->ipath_flags & IPATH_LINKACTIVE)) { |
120 | /* | 152 | /* |
@@ -403,10 +435,13 @@ static void handle_supp_msgs(struct ipath_devdata *dd, | |||
403 | * happens so often we never want to count it. | 435 | * happens so often we never want to count it. |
404 | */ | 436 | */ |
405 | if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) { | 437 | if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) { |
406 | ipath_decode_err(msg, sizeof msg, dd->ipath_lasterror & | 438 | int iserr; |
407 | ~INFINIPATH_E_IBSTATUSCHANGED); | 439 | iserr = ipath_decode_err(msg, sizeof msg, |
440 | dd->ipath_lasterror & | ||
441 | ~INFINIPATH_E_IBSTATUSCHANGED); | ||
408 | if (dd->ipath_lasterror & | 442 | if (dd->ipath_lasterror & |
409 | ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL)) | 443 | ~(INFINIPATH_E_RRCVEGRFULL | |
444 | INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS)) | ||
410 | ipath_dev_err(dd, "Suppressed %u messages for " | 445 | ipath_dev_err(dd, "Suppressed %u messages for " |
411 | "fast-repeating errors (%s) (%llx)\n", | 446 | "fast-repeating errors (%s) (%llx)\n", |
412 | supp_msgs, msg, | 447 | supp_msgs, msg, |
@@ -420,8 +455,13 @@ static void handle_supp_msgs(struct ipath_devdata *dd, | |||
420 | * them. So only complain about these at debug | 455 | * them. So only complain about these at debug |
421 | * level. | 456 | * level. |
422 | */ | 457 | */ |
423 | ipath_dbg("Suppressed %u messages for %s\n", | 458 | if (iserr) |
424 | supp_msgs, msg); | 459 | ipath_dbg("Suppressed %u messages for %s\n", |
460 | supp_msgs, msg); | ||
461 | else | ||
462 | ipath_cdbg(ERRPKT, | ||
463 | "Suppressed %u messages for %s\n", | ||
464 | supp_msgs, msg); | ||
425 | } | 465 | } |
426 | } | 466 | } |
427 | } | 467 | } |
@@ -462,7 +502,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) | |||
462 | { | 502 | { |
463 | char msg[512]; | 503 | char msg[512]; |
464 | u64 ignore_this_time = 0; | 504 | u64 ignore_this_time = 0; |
465 | int i; | 505 | int i, iserr = 0; |
466 | int chkerrpkts = 0, noprint = 0; | 506 | int chkerrpkts = 0, noprint = 0; |
467 | unsigned supp_msgs; | 507 | unsigned supp_msgs; |
468 | 508 | ||
@@ -502,6 +542,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) | |||
502 | } | 542 | } |
503 | 543 | ||
504 | if (supp_msgs == 250000) { | 544 | if (supp_msgs == 250000) { |
545 | int s_iserr; | ||
505 | /* | 546 | /* |
506 | * It's not entirely reasonable to assume that the errors set | 547 |
507 | * in the last clear period are all responsible for the | 548 | * in the last clear period are all responsible for the |
@@ -511,17 +552,17 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) | |||
511 | dd->ipath_maskederrs |= dd->ipath_lasterror | errs; | 552 | dd->ipath_maskederrs |= dd->ipath_lasterror | errs; |
512 | ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, | 553 | ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, |
513 | ~dd->ipath_maskederrs); | 554 | ~dd->ipath_maskederrs); |
514 | ipath_decode_err(msg, sizeof msg, | 555 | s_iserr = ipath_decode_err(msg, sizeof msg, |
515 | (dd->ipath_maskederrs & ~dd-> | 556 | (dd->ipath_maskederrs & ~dd-> |
516 | ipath_ignorederrs)); | 557 | ipath_ignorederrs)); |
517 | 558 | ||
518 | if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) & | 559 | if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) & |
519 | ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL)) | 560 | ~(INFINIPATH_E_RRCVEGRFULL | |
520 | ipath_dev_err(dd, "Disabling error(s) %llx because " | 561 | INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS)) |
521 | "occurring too frequently (%s)\n", | 562 | ipath_dev_err(dd, "Temporarily disabling " |
522 | (unsigned long long) | 563 | "error(s) %llx reporting; too frequent (%s)\n", |
523 | (dd->ipath_maskederrs & | 564 | (unsigned long long) (dd->ipath_maskederrs & |
524 | ~dd->ipath_ignorederrs), msg); | 565 | ~dd->ipath_ignorederrs), msg); |
525 | else { | 566 | else { |
526 | /* | 567 | /* |
527 | * rcvegrfull and rcvhdrqfull are "normal", | 568 | * rcvegrfull and rcvhdrqfull are "normal", |
@@ -530,8 +571,15 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) | |||
530 | * processing them. So only complain about | 571 | * processing them. So only complain about |
531 | * these at debug level. | 572 | * these at debug level. |
532 | */ | 573 | */ |
533 | ipath_dbg("Disabling frequent queue full errors " | 574 | if (s_iserr) |
534 | "(%s)\n", msg); | 575 | ipath_dbg("Temporarily disabling reporting " |
576 | "too frequent queue full errors (%s)\n", | ||
577 | msg); | ||
578 | else | ||
579 | ipath_cdbg(ERRPKT, | ||
580 | "Temporarily disabling reporting too" | ||
581 | " frequent packet errors (%s)\n", | ||
582 | msg); | ||
535 | } | 583 | } |
536 | 584 | ||
537 | /* | 585 | /* |
@@ -589,6 +637,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) | |||
589 | ipath_stats.sps_crcerrs++; | 637 | ipath_stats.sps_crcerrs++; |
590 | chkerrpkts = 1; | 638 | chkerrpkts = 1; |
591 | } | 639 | } |
640 | iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS); | ||
641 | |||
592 | 642 | ||
593 | /* | 643 | /* |
594 | * We don't want to print these two as they happen, or we can make | 644 | * We don't want to print these two as they happen, or we can make |
@@ -677,8 +727,13 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) | |||
677 | *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF; | 727 | *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF; |
678 | } | 728 | } |
679 | 729 | ||
680 | if (!noprint && *msg) | 730 | if (!noprint && *msg) { |
681 | ipath_dev_err(dd, "%s error\n", msg); | 731 | if (iserr) |
732 | ipath_dev_err(dd, "%s error\n", msg); | ||
733 | else | ||
734 | dev_info(&dd->pcidev->dev, "%s packet problems\n", | ||
735 | msg); | ||
736 | } | ||
682 | if (dd->ipath_state_wanted & dd->ipath_flags) { | 737 | if (dd->ipath_state_wanted & dd->ipath_flags) { |
683 | ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, " | 738 | ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, " |
684 | "waking\n", dd->ipath_state_wanted, | 739 | "waking\n", dd->ipath_state_wanted, |
@@ -819,11 +874,10 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat) | |||
819 | struct ipath_portdata *pd = dd->ipath_pd[i]; | 874 | struct ipath_portdata *pd = dd->ipath_pd[i]; |
820 | if (portr & (1 << i) && pd && pd->port_cnt && | 875 | if (portr & (1 << i) && pd && pd->port_cnt && |
821 | test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) { | 876 | test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) { |
822 | int rcbit; | ||
823 | clear_bit(IPATH_PORT_WAITING_RCV, | 877 | clear_bit(IPATH_PORT_WAITING_RCV, |
824 | &pd->port_flag); | 878 | &pd->port_flag); |
825 | rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT; | 879 | clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT, |
826 | clear_bit(1UL << rcbit, &dd->ipath_rcvctrl); | 880 | &dd->ipath_rcvctrl); |
827 | wake_up_interruptible(&pd->port_wait); | 881 | wake_up_interruptible(&pd->port_wait); |
828 | rcvdint = 1; | 882 | rcvdint = 1; |
829 | } | 883 | } |
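
The handle_urcv() change above fixes an index-versus-mask bug: clear_bit() takes a bit number, but the old code passed the mask 1UL << rcbit as that number, so the wrong bit of ipath_rcvctrl was cleared. A sketch (clear_bit_nr is an illustrative stand-in for the kernel helper):

    /* The argument is a bit index, not a mask. */
    static void clear_bit_nr(unsigned int nr, unsigned long *word)
    {
        *word &= ~(1UL << nr);
    }

    /* buggy shape:  clear_bit_nr(1UL << rcbit, &dd->ipath_rcvctrl);
     * fixed shape:  clear_bit_nr(i + INFINIPATH_R_INTRAVAIL_SHIFT,
     *                            &dd->ipath_rcvctrl);                */
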
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index 6d8d05fb5999..e900c2593f44 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h | |||
@@ -590,7 +590,6 @@ int ipath_enable_wc(struct ipath_devdata *dd); | |||
590 | void ipath_disable_wc(struct ipath_devdata *dd); | 590 | void ipath_disable_wc(struct ipath_devdata *dd); |
591 | int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp); | 591 | int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp); |
592 | void ipath_shutdown_device(struct ipath_devdata *); | 592 | void ipath_shutdown_device(struct ipath_devdata *); |
593 | void ipath_disarm_senderrbufs(struct ipath_devdata *); | ||
594 | 593 | ||
595 | struct file_operations; | 594 | struct file_operations; |
596 | int ipath_cdev_init(int minor, char *name, const struct file_operations *fops, | 595 | int ipath_cdev_init(int minor, char *name, const struct file_operations *fops, |
@@ -611,7 +610,7 @@ struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t); | |||
611 | extern int ipath_diag_inuse; | 610 | extern int ipath_diag_inuse; |
612 | 611 | ||
613 | irqreturn_t ipath_intr(int irq, void *devid); | 612 | irqreturn_t ipath_intr(int irq, void *devid); |
614 | void ipath_decode_err(char *buf, size_t blen, ipath_err_t err); | 613 | int ipath_decode_err(char *buf, size_t blen, ipath_err_t err); |
615 | #if __IPATH_INFO || __IPATH_DBG | 614 | #if __IPATH_INFO || __IPATH_DBG |
616 | extern const char *ipath_ibcstatus_str[]; | 615 | extern const char *ipath_ibcstatus_str[]; |
617 | #endif | 616 | #endif |
@@ -701,6 +700,8 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv); | |||
701 | #define IPATH_PORT_WAITING_RCV 2 | 700 | #define IPATH_PORT_WAITING_RCV 2 |
702 | /* waiting for a PIO buffer to be available */ | 701 | /* waiting for a PIO buffer to be available */ |
703 | #define IPATH_PORT_WAITING_PIO 3 | 702 | #define IPATH_PORT_WAITING_PIO 3 |
703 | /* master has not finished initializing */ | ||
704 | #define IPATH_PORT_MASTER_UNINIT 4 | ||
704 | 705 | ||
705 | /* free up any allocated data at closes */ | 706 | /* free up any allocated data at closes */ |
706 | void ipath_free_data(struct ipath_portdata *dd); | 707 | void ipath_free_data(struct ipath_portdata *dd); |
@@ -711,6 +712,7 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *); | |||
711 | void ipath_init_iba6110_funcs(struct ipath_devdata *); | 712 | void ipath_init_iba6110_funcs(struct ipath_devdata *); |
712 | void ipath_get_eeprom_info(struct ipath_devdata *); | 713 | void ipath_get_eeprom_info(struct ipath_devdata *); |
713 | u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); | 714 | u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); |
715 | void ipath_disarm_senderrbufs(struct ipath_devdata *, int); | ||
714 | 716 | ||
715 | /* | 717 | /* |
716 | * number of words used for protocol header if not set by ipath_userinit(); | 718 | * number of words used for protocol header if not set by ipath_userinit(); |
@@ -754,8 +756,6 @@ int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int); | |||
754 | /* these are used for the registers that vary with port */ | 756 | /* these are used for the registers that vary with port */ |
755 | void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg, | 757 | void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg, |
756 | unsigned, u64); | 758 | unsigned, u64); |
757 | u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg, | ||
758 | unsigned); | ||
759 | 759 | ||
760 | /* | 760 | /* |
761 | * We could have a single register get/put routine, that takes a group type, | 761 | * We could have a single register get/put routine, that takes a group type, |
@@ -897,6 +897,8 @@ dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int); | |||
897 | 897 | ||
898 | extern unsigned ipath_debug; /* debugging bit mask */ | 898 | extern unsigned ipath_debug; /* debugging bit mask */ |
899 | 899 | ||
900 | #define IPATH_MAX_PARITY_ATTEMPTS 10000 /* max times to try recovery */ | ||
901 | |||
900 | const char *ipath_get_unit_name(int unit); | 902 | const char *ipath_get_unit_name(int unit); |
901 | 903 | ||
902 | extern struct mutex ipath_mutex; | 904 | extern struct mutex ipath_mutex; |
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c index 851763d7d2db..dd487c100f5b 100644 --- a/drivers/infiniband/hw/ipath/ipath_keys.c +++ b/drivers/infiniband/hw/ipath/ipath_keys.c | |||
@@ -61,7 +61,7 @@ int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr) | |||
61 | r = (r + 1) & (rkt->max - 1); | 61 | r = (r + 1) & (rkt->max - 1); |
62 | if (r == n) { | 62 | if (r == n) { |
63 | spin_unlock_irqrestore(&rkt->lock, flags); | 63 | spin_unlock_irqrestore(&rkt->lock, flags); |
64 | ipath_dbg(KERN_INFO "LKEY table full\n"); | 64 | ipath_dbg("LKEY table full\n"); |
65 | ret = 0; | 65 | ret = 0; |
66 | goto bail; | 66 | goto bail; |
67 | } | 67 | } |
@@ -133,6 +133,12 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, | |||
133 | * being reversible by calling bus_to_virt(). | 133 | * being reversible by calling bus_to_virt(). |
134 | */ | 134 | */ |
135 | if (sge->lkey == 0) { | 135 | if (sge->lkey == 0) { |
136 | struct ipath_pd *pd = to_ipd(qp->ibqp.pd); | ||
137 | |||
138 | if (pd->user) { | ||
139 | ret = 0; | ||
140 | goto bail; | ||
141 | } | ||
136 | isge->mr = NULL; | 142 | isge->mr = NULL; |
137 | isge->vaddr = (void *) sge->addr; | 143 | isge->vaddr = (void *) sge->addr; |
138 | isge->length = sge->length; | 144 | isge->length = sge->length; |
@@ -206,6 +212,12 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, | |||
206 | * (see ipath_get_dma_mr and ipath_dma.c). | 212 | * (see ipath_get_dma_mr and ipath_dma.c). |
207 | */ | 213 | */ |
208 | if (rkey == 0) { | 214 | if (rkey == 0) { |
215 | struct ipath_pd *pd = to_ipd(qp->ibqp.pd); | ||
216 | |||
217 | if (pd->user) { | ||
218 | ret = 0; | ||
219 | goto bail; | ||
220 | } | ||
209 | sge->mr = NULL; | 221 | sge->mr = NULL; |
210 | sge->vaddr = (void *) vaddr; | 222 | sge->vaddr = (void *) vaddr; |
211 | sge->length = len; | 223 | sge->length = len; |
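
Both hunks above add the same guard: an lkey or rkey of 0 selects raw bus addressing, which only kernel-owned protection domains may use, so validation now fails for user PDs instead of trusting a user-supplied address. A condensed sketch (pd_is_user mirrors to_ipd(qp->ibqp.pd)->user):

    static int key_allows_raw_addressing(unsigned int key, int pd_is_user)
    {
        if (key == 0 && pd_is_user)
            return 0;   /* reject: user QPs may not bypass the MR table */
        return 1;       /* kernel PD, or a nonzero key: proceed */
    }
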
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c index 8cc8598d6c69..31e70732e369 100644 --- a/drivers/infiniband/hw/ipath/ipath_mr.c +++ b/drivers/infiniband/hw/ipath/ipath_mr.c | |||
@@ -210,9 +210,15 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, | |||
210 | m = 0; | 210 | m = 0; |
211 | n = 0; | 211 | n = 0; |
212 | list_for_each_entry(chunk, ®ion->chunk_list, list) { | 212 | list_for_each_entry(chunk, ®ion->chunk_list, list) { |
213 | for (i = 0; i < chunk->nmap; i++) { | 213 | for (i = 0; i < chunk->nents; i++) { |
214 | mr->mr.map[m]->segs[n].vaddr = | 214 | void *vaddr; |
215 | page_address(chunk->page_list[i].page); | 215 | |
216 | vaddr = page_address(chunk->page_list[i].page); | ||
217 | if (!vaddr) { | ||
218 | ret = ERR_PTR(-EINVAL); | ||
219 | goto bail; | ||
220 | } | ||
221 | mr->mr.map[m]->segs[n].vaddr = vaddr; | ||
216 | mr->mr.map[m]->segs[n].length = region->page_size; | 222 | mr->mr.map[m]->segs[n].length = region->page_size; |
217 | n++; | 223 | n++; |
218 | if (n == IPATH_SEGSZ) { | 224 | if (n == IPATH_SEGSZ) { |
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c index 64f07b19349f..16db9ac0b402 100644 --- a/drivers/infiniband/hw/ipath/ipath_qp.c +++ b/drivers/infiniband/hw/ipath/ipath_qp.c | |||
@@ -81,11 +81,51 @@ static u32 credit_table[31] = { | |||
81 | 32768 /* 1E */ | 81 | 32768 /* 1E */ |
82 | }; | 82 | }; |
83 | 83 | ||
84 | static u32 alloc_qpn(struct ipath_qp_table *qpt) | 84 | |
85 | static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map) | ||
86 | { | ||
87 | unsigned long page = get_zeroed_page(GFP_KERNEL); | ||
88 | unsigned long flags; | ||
89 | |||
90 | /* | ||
91 | * Free the page if someone raced with us installing it. | ||
92 | */ | ||
93 | |||
94 | spin_lock_irqsave(&qpt->lock, flags); | ||
95 | if (map->page) | ||
96 | free_page(page); | ||
97 | else | ||
98 | map->page = (void *)page; | ||
99 | spin_unlock_irqrestore(&qpt->lock, flags); | ||
100 | } | ||
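
get_map_page() above uses the classic allocate-outside-the-lock idiom: grab a zeroed page unconditionally, then install it under qpt->lock, freeing it if another CPU won the race. The same shape in userspace terms, as an illustration only (calloc stands in for get_zeroed_page):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *page;

    /* Allocate outside the lock; install under it; free the copy that
     * lost the race. */
    static void *get_page_once(size_t size)
    {
        void *p = calloc(1, size);

        pthread_mutex_lock(&lock);
        if (page)
            free(p);        /* someone beat us: discard our copy */
        else
            page = p;       /* we won: install ours */
        pthread_mutex_unlock(&lock);
        return page;
    }
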
101 | |||
102 | |||
103 | static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type) | ||
85 | { | 104 | { |
86 | u32 i, offset, max_scan, qpn; | 105 | u32 i, offset, max_scan, qpn; |
87 | struct qpn_map *map; | 106 | struct qpn_map *map; |
88 | u32 ret; | 107 | u32 ret = -1; |
108 | |||
109 | if (type == IB_QPT_SMI) | ||
110 | ret = 0; | ||
111 | else if (type == IB_QPT_GSI) | ||
112 | ret = 1; | ||
113 | |||
114 | if (ret != -1) { | ||
115 | map = &qpt->map[0]; | ||
116 | if (unlikely(!map->page)) { | ||
117 | get_map_page(qpt, map); | ||
118 | if (unlikely(!map->page)) { | ||
119 | ret = -ENOMEM; | ||
120 | goto bail; | ||
121 | } | ||
122 | } | ||
123 | if (!test_and_set_bit(ret, map->page)) | ||
124 | atomic_dec(&map->n_free); | ||
125 | else | ||
126 | ret = -EBUSY; | ||
127 | goto bail; | ||
128 | } | ||
89 | 129 | ||
90 | qpn = qpt->last + 1; | 130 | qpn = qpt->last + 1; |
91 | if (qpn >= QPN_MAX) | 131 | if (qpn >= QPN_MAX) |
@@ -95,19 +135,7 @@ static u32 alloc_qpn(struct ipath_qp_table *qpt) | |||
95 | max_scan = qpt->nmaps - !offset; | 135 | max_scan = qpt->nmaps - !offset; |
96 | for (i = 0;;) { | 136 | for (i = 0;;) { |
97 | if (unlikely(!map->page)) { | 137 | if (unlikely(!map->page)) { |
98 | unsigned long page = get_zeroed_page(GFP_KERNEL); | 138 | get_map_page(qpt, map); |
99 | unsigned long flags; | ||
100 | |||
101 | /* | ||
102 | * Free the page if someone raced with us | ||
103 | * installing it: | ||
104 | */ | ||
105 | spin_lock_irqsave(&qpt->lock, flags); | ||
106 | if (map->page) | ||
107 | free_page(page); | ||
108 | else | ||
109 | map->page = (void *)page; | ||
110 | spin_unlock_irqrestore(&qpt->lock, flags); | ||
111 | if (unlikely(!map->page)) | 139 | if (unlikely(!map->page)) |
112 | break; | 140 | break; |
113 | } | 141 | } |
@@ -151,7 +179,7 @@ static u32 alloc_qpn(struct ipath_qp_table *qpt) | |||
151 | qpn = mk_qpn(qpt, map, offset); | 179 | qpn = mk_qpn(qpt, map, offset); |
152 | } | 180 | } |
153 | 181 | ||
154 | ret = 0; | 182 | ret = -ENOMEM; |
155 | 183 | ||
156 | bail: | 184 | bail: |
157 | return ret; | 185 | return ret; |
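
alloc_qpn() now folds in the reserved numbers: SMI and GSI claim the fixed QPNs 0 and 1 via test-and-set (failing with -EBUSY if already taken), other QPs scan the bitmap for the next free number, and failure is signalled by a negative return instead of QPN 0. A compact sketch of the scheme, assuming a hypothetical fixed-size bitmap:

    #include <limits.h>

    #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
    static unsigned long qpn_map[4];    /* hypothetical bitmap, 1 bit/QPN */

    static int alloc_qpn_sketch(int is_smi, int is_gsi)
    {
        unsigned int n, max = 4 * BITS_PER_LONG;

        if (is_smi || is_gsi) {
            n = is_gsi ? 1 : 0;              /* fixed QPNs 0 and 1 */
            if (qpn_map[0] & (1UL << n))
                return -1;                   /* -EBUSY in the driver */
            qpn_map[0] |= 1UL << n;
            return (int)n;
        }
        for (n = 2; n < max; n++) {          /* ordinary QPs scan upward */
            unsigned long *w = &qpn_map[n / BITS_PER_LONG];
            unsigned long b = 1UL << (n % BITS_PER_LONG);
            if (!(*w & b)) {
                *w |= b;
                return (int)n;
            }
        }
        return -2;                           /* -ENOMEM in the driver */
    }
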
@@ -180,29 +208,19 @@ static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp, | |||
180 | enum ib_qp_type type) | 208 | enum ib_qp_type type) |
181 | { | 209 | { |
182 | unsigned long flags; | 210 | unsigned long flags; |
183 | u32 qpn; | ||
184 | int ret; | 211 | int ret; |
185 | 212 | ||
186 | if (type == IB_QPT_SMI) | 213 | ret = alloc_qpn(qpt, type); |
187 | qpn = 0; | 214 | if (ret < 0) |
188 | else if (type == IB_QPT_GSI) | 215 | goto bail; |
189 | qpn = 1; | 216 | qp->ibqp.qp_num = ret; |
190 | else { | ||
191 | /* Allocate the next available QPN */ | ||
192 | qpn = alloc_qpn(qpt); | ||
193 | if (qpn == 0) { | ||
194 | ret = -ENOMEM; | ||
195 | goto bail; | ||
196 | } | ||
197 | } | ||
198 | qp->ibqp.qp_num = qpn; | ||
199 | 217 | ||
200 | /* Add the QP to the hash table. */ | 218 | /* Add the QP to the hash table. */ |
201 | spin_lock_irqsave(&qpt->lock, flags); | 219 | spin_lock_irqsave(&qpt->lock, flags); |
202 | 220 | ||
203 | qpn %= qpt->max; | 221 | ret %= qpt->max; |
204 | qp->next = qpt->table[qpn]; | 222 | qp->next = qpt->table[ret]; |
205 | qpt->table[qpn] = qp; | 223 | qpt->table[ret] = qp; |
206 | atomic_inc(&qp->refcount); | 224 | atomic_inc(&qp->refcount); |
207 | 225 | ||
208 | spin_unlock_irqrestore(&qpt->lock, flags); | 226 | spin_unlock_irqrestore(&qpt->lock, flags); |
@@ -245,9 +263,7 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp) | |||
245 | if (!fnd) | 263 | if (!fnd) |
246 | return; | 264 | return; |
247 | 265 | ||
248 | /* If QPN is not reserved, mark QPN free in the bitmap. */ | 266 | free_qpn(qpt, qp->ibqp.qp_num); |
249 | if (qp->ibqp.qp_num > 1) | ||
250 | free_qpn(qpt, qp->ibqp.qp_num); | ||
251 | 267 | ||
252 | wait_event(qp->wait, !atomic_read(&qp->refcount)); | 268 | wait_event(qp->wait, !atomic_read(&qp->refcount)); |
253 | } | 269 | } |
@@ -270,11 +286,10 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt) | |||
270 | 286 | ||
271 | while (qp) { | 287 | while (qp) { |
272 | nqp = qp->next; | 288 | nqp = qp->next; |
273 | if (qp->ibqp.qp_num > 1) | 289 | free_qpn(qpt, qp->ibqp.qp_num); |
274 | free_qpn(qpt, qp->ibqp.qp_num); | ||
275 | if (!atomic_dec_and_test(&qp->refcount) || | 290 | if (!atomic_dec_and_test(&qp->refcount) || |
276 | !ipath_destroy_qp(&qp->ibqp)) | 291 | !ipath_destroy_qp(&qp->ibqp)) |
277 | ipath_dbg(KERN_INFO "QP memory leak!\n"); | 292 | ipath_dbg("QP memory leak!\n"); |
278 | qp = nqp; | 293 | qp = nqp; |
279 | } | 294 | } |
280 | } | 295 | } |
@@ -320,7 +335,8 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
320 | qp->remote_qpn = 0; | 335 | qp->remote_qpn = 0; |
321 | qp->qkey = 0; | 336 | qp->qkey = 0; |
322 | qp->qp_access_flags = 0; | 337 | qp->qp_access_flags = 0; |
323 | clear_bit(IPATH_S_BUSY, &qp->s_flags); | 338 | qp->s_busy = 0; |
339 | qp->s_flags &= ~IPATH_S_SIGNAL_REQ_WR; | ||
324 | qp->s_hdrwords = 0; | 340 | qp->s_hdrwords = 0; |
325 | qp->s_psn = 0; | 341 | qp->s_psn = 0; |
326 | qp->r_psn = 0; | 342 | qp->r_psn = 0; |
@@ -333,7 +349,6 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
333 | qp->r_state = IB_OPCODE_UC_SEND_LAST; | 349 | qp->r_state = IB_OPCODE_UC_SEND_LAST; |
334 | } | 350 | } |
335 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; | 351 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; |
336 | qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; | ||
337 | qp->r_nak_state = 0; | 352 | qp->r_nak_state = 0; |
338 | qp->r_wrid_valid = 0; | 353 | qp->r_wrid_valid = 0; |
339 | qp->s_rnr_timeout = 0; | 354 | qp->s_rnr_timeout = 0; |
@@ -344,6 +359,10 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
344 | qp->s_ssn = 1; | 359 | qp->s_ssn = 1; |
345 | qp->s_lsn = 0; | 360 | qp->s_lsn = 0; |
346 | qp->s_wait_credit = 0; | 361 | qp->s_wait_credit = 0; |
362 | memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); | ||
363 | qp->r_head_ack_queue = 0; | ||
364 | qp->s_tail_ack_queue = 0; | ||
365 | qp->s_num_rd_atomic = 0; | ||
347 | if (qp->r_rq.wq) { | 366 | if (qp->r_rq.wq) { |
348 | qp->r_rq.wq->head = 0; | 367 | qp->r_rq.wq->head = 0; |
349 | qp->r_rq.wq->tail = 0; | 368 | qp->r_rq.wq->tail = 0; |
@@ -357,7 +376,7 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
357 | * @err: the receive completion error to signal if a RWQE is active | 376 | * @err: the receive completion error to signal if a RWQE is active |
358 | * | 377 | * |
359 | * Flushes both send and receive work queues. | 378 | * Flushes both send and receive work queues. |
360 | * QP s_lock should be held and interrupts disabled. | 379 | * The QP s_lock should be held and interrupts disabled. |
361 | */ | 380 | */ |
362 | 381 | ||
363 | void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) | 382 | void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) |
@@ -365,7 +384,7 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) | |||
365 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | 384 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); |
366 | struct ib_wc wc; | 385 | struct ib_wc wc; |
367 | 386 | ||
368 | ipath_dbg(KERN_INFO "QP%d/%d in error state\n", | 387 | ipath_dbg("QP%d/%d in error state\n", |
369 | qp->ibqp.qp_num, qp->remote_qpn); | 388 | qp->ibqp.qp_num, qp->remote_qpn); |
370 | 389 | ||
371 | spin_lock(&dev->pending_lock); | 390 | spin_lock(&dev->pending_lock); |
@@ -389,6 +408,8 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) | |||
389 | wc.port_num = 0; | 408 | wc.port_num = 0; |
390 | if (qp->r_wrid_valid) { | 409 | if (qp->r_wrid_valid) { |
391 | qp->r_wrid_valid = 0; | 410 | qp->r_wrid_valid = 0; |
411 | wc.wr_id = qp->r_wr_id; | ||
412 | wc.opcode = IB_WC_RECV; | ||
392 | wc.status = err; | 413 | wc.status = err; |
393 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); | 414 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); |
394 | } | 415 | } |
@@ -503,13 +524,17 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
503 | attr->path_mig_state != IB_MIG_REARM) | 524 | attr->path_mig_state != IB_MIG_REARM) |
504 | goto inval; | 525 | goto inval; |
505 | 526 | ||
527 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | ||
528 | if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC) | ||
529 | goto inval; | ||
530 | |||
506 | switch (new_state) { | 531 | switch (new_state) { |
507 | case IB_QPS_RESET: | 532 | case IB_QPS_RESET: |
508 | ipath_reset_qp(qp); | 533 | ipath_reset_qp(qp); |
509 | break; | 534 | break; |
510 | 535 | ||
511 | case IB_QPS_ERR: | 536 | case IB_QPS_ERR: |
512 | ipath_error_qp(qp, IB_WC_GENERAL_ERR); | 537 | ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); |
513 | break; | 538 | break; |
514 | 539 | ||
515 | default: | 540 | default: |
@@ -559,6 +584,12 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
559 | if (attr_mask & IB_QP_QKEY) | 584 | if (attr_mask & IB_QP_QKEY) |
560 | qp->qkey = attr->qkey; | 585 | qp->qkey = attr->qkey; |
561 | 586 | ||
587 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | ||
588 | qp->r_max_rd_atomic = attr->max_dest_rd_atomic; | ||
589 | |||
590 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) | ||
591 | qp->s_max_rd_atomic = attr->max_rd_atomic; | ||
592 | |||
562 | qp->state = new_state; | 593 | qp->state = new_state; |
563 | spin_unlock_irqrestore(&qp->s_lock, flags); | 594 | spin_unlock_irqrestore(&qp->s_lock, flags); |
564 | 595 | ||
@@ -598,8 +629,8 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
598 | attr->alt_pkey_index = 0; | 629 | attr->alt_pkey_index = 0; |
599 | attr->en_sqd_async_notify = 0; | 630 | attr->en_sqd_async_notify = 0; |
600 | attr->sq_draining = 0; | 631 | attr->sq_draining = 0; |
601 | attr->max_rd_atomic = 1; | 632 | attr->max_rd_atomic = qp->s_max_rd_atomic; |
602 | attr->max_dest_rd_atomic = 1; | 633 | attr->max_dest_rd_atomic = qp->r_max_rd_atomic; |
603 | attr->min_rnr_timer = qp->r_min_rnr_timer; | 634 | attr->min_rnr_timer = qp->r_min_rnr_timer; |
604 | attr->port_num = 1; | 635 | attr->port_num = 1; |
605 | attr->timeout = qp->timeout; | 636 | attr->timeout = qp->timeout; |
@@ -614,7 +645,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
614 | init_attr->recv_cq = qp->ibqp.recv_cq; | 645 | init_attr->recv_cq = qp->ibqp.recv_cq; |
615 | init_attr->srq = qp->ibqp.srq; | 646 | init_attr->srq = qp->ibqp.srq; |
616 | init_attr->cap = attr->cap; | 647 | init_attr->cap = attr->cap; |
617 | if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR)) | 648 | if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR) |
618 | init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; | 649 | init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; |
619 | else | 650 | else |
620 | init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; | 651 | init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; |
@@ -786,7 +817,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
786 | qp->s_size = init_attr->cap.max_send_wr + 1; | 817 | qp->s_size = init_attr->cap.max_send_wr + 1; |
787 | qp->s_max_sge = init_attr->cap.max_send_sge; | 818 | qp->s_max_sge = init_attr->cap.max_send_sge; |
788 | if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) | 819 | if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) |
789 | qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR; | 820 | qp->s_flags = IPATH_S_SIGNAL_REQ_WR; |
790 | else | 821 | else |
791 | qp->s_flags = 0; | 822 | qp->s_flags = 0; |
792 | dev = to_idev(ibpd->device); | 823 | dev = to_idev(ibpd->device); |
@@ -958,7 +989,7 @@ bail: | |||
958 | * @wc: the WC responsible for putting the QP in this state | 989 | * @wc: the WC responsible for putting the QP in this state |
959 | * | 990 | * |
960 | * Flushes the send work queue. | 991 | * Flushes the send work queue. |
961 | * The QP s_lock should be held. | 992 | * The QP s_lock should be held and interrupts disabled. |
962 | */ | 993 | */ |
963 | 994 | ||
964 | void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) | 995 | void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) |
@@ -966,7 +997,7 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) | |||
966 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | 997 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); |
967 | struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); | 998 | struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); |
968 | 999 | ||
969 | ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n", | 1000 | ipath_dbg("Send queue error on QP%d/%d: err: %d\n", |
970 | qp->ibqp.qp_num, qp->remote_qpn, wc->status); | 1001 | qp->ibqp.qp_num, qp->remote_qpn, wc->status); |
971 | 1002 | ||
972 | spin_lock(&dev->pending_lock); | 1003 | spin_lock(&dev->pending_lock); |
@@ -984,12 +1015,12 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) | |||
984 | wc->status = IB_WC_WR_FLUSH_ERR; | 1015 | wc->status = IB_WC_WR_FLUSH_ERR; |
985 | 1016 | ||
986 | while (qp->s_last != qp->s_head) { | 1017 | while (qp->s_last != qp->s_head) { |
1018 | wqe = get_swqe_ptr(qp, qp->s_last); | ||
987 | wc->wr_id = wqe->wr.wr_id; | 1019 | wc->wr_id = wqe->wr.wr_id; |
988 | wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; | 1020 | wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; |
989 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1); | 1021 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1); |
990 | if (++qp->s_last >= qp->s_size) | 1022 | if (++qp->s_last >= qp->s_size) |
991 | qp->s_last = 0; | 1023 | qp->s_last = 0; |
992 | wqe = get_swqe_ptr(qp, qp->s_last); | ||
993 | } | 1024 | } |
994 | qp->s_cur = qp->s_tail = qp->s_head; | 1025 | qp->s_cur = qp->s_tail = qp->s_head; |
995 | qp->state = IB_QPS_SQE; | 1026 | qp->state = IB_QPS_SQE; |
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index 5ff20cb04494..b4b88d0b53f5 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c | |||
@@ -37,6 +37,19 @@ | |||
37 | /* cut down ridiculously long IB macro names */ | 37 | /* cut down ridiculously long IB macro names */ |
38 | #define OP(x) IB_OPCODE_RC_##x | 38 | #define OP(x) IB_OPCODE_RC_##x |
39 | 39 | ||
40 | static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe, | ||
41 | u32 psn, u32 pmtu) | ||
42 | { | ||
43 | u32 len; | ||
44 | |||
45 | len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu; | ||
46 | ss->sge = wqe->sg_list[0]; | ||
47 | ss->sg_list = wqe->sg_list + 1; | ||
48 | ss->num_sge = wqe->wr.num_sge; | ||
49 | ipath_skip_sge(ss, len); | ||
50 | return wqe->length - len; | ||
51 | } | ||
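
restart_sge() converts a PSN delta into a byte offset: each PSN covers one path-MTU of payload, so the restart point is (psn - wqe->psn) packets, masked to the 24-bit PSN space, times pmtu; that many bytes are skipped in the SG list and the rest remains to send. A worked example with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    #define PSN_MASK 0xFFFFFF        /* PSNs are 24 bits wide */

    int main(void)
    {
        uint32_t first_psn = 100, resend_psn = 103;   /* hypothetical */
        uint32_t pmtu = 2048, wqe_len = 10000;

        /* 3 packets already covered => skip 3 * 2048 bytes of the SG list */
        uint32_t skip = ((resend_psn - first_psn) & PSN_MASK) * pmtu;

        printf("skip %u bytes, %u remain\n", skip, wqe_len - skip);
        return 0;   /* prints: skip 6144 bytes, 3856 remain */
    }
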
52 | |||
40 | /** | 53 | /** |
41 | * ipath_init_restart - initialize the qp->s_sge after a restart | 54 |
42 | * @qp: the QP whose SGE we're restarting | 55 |
@@ -47,15 +60,9 @@ | |||
47 | static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) | 60 | static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) |
48 | { | 61 | { |
49 | struct ipath_ibdev *dev; | 62 | struct ipath_ibdev *dev; |
50 | u32 len; | ||
51 | 63 | ||
52 | len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * | 64 | qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, |
53 | ib_mtu_enum_to_int(qp->path_mtu); | 65 | ib_mtu_enum_to_int(qp->path_mtu)); |
54 | qp->s_sge.sge = wqe->sg_list[0]; | ||
55 | qp->s_sge.sg_list = wqe->sg_list + 1; | ||
56 | qp->s_sge.num_sge = wqe->wr.num_sge; | ||
57 | ipath_skip_sge(&qp->s_sge, len); | ||
58 | qp->s_len = wqe->length - len; | ||
59 | dev = to_idev(qp->ibqp.device); | 66 | dev = to_idev(qp->ibqp.device); |
60 | spin_lock(&dev->pending_lock); | 67 | spin_lock(&dev->pending_lock); |
61 | if (list_empty(&qp->timerwait)) | 68 | if (list_empty(&qp->timerwait)) |
@@ -70,107 +77,123 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) | |||
70 | * @ohdr: a pointer to the IB header being constructed | 77 | * @ohdr: a pointer to the IB header being constructed |
71 | * @pmtu: the path MTU | 78 | * @pmtu: the path MTU |
72 | * | 79 | * |
73 | * Return bth0 if constructed; otherwise, return 0. | 80 | * Return 1 if constructed; otherwise, return 0. |
81 | * Note that we are on the responder's side of the QP context. | ||
74 | * Note the QP s_lock must be held. | 82 | * Note the QP s_lock must be held. |
75 | */ | 83 | */ |
76 | u32 ipath_make_rc_ack(struct ipath_qp *qp, | 84 | static int ipath_make_rc_ack(struct ipath_qp *qp, |
77 | struct ipath_other_headers *ohdr, | 85 | struct ipath_other_headers *ohdr, |
78 | u32 pmtu) | 86 | u32 pmtu, u32 *bth0p, u32 *bth2p) |
79 | { | 87 | { |
88 | struct ipath_ack_entry *e; | ||
80 | u32 hwords; | 89 | u32 hwords; |
81 | u32 len; | 90 | u32 len; |
82 | u32 bth0; | 91 | u32 bth0; |
92 | u32 bth2; | ||
83 | 93 | ||
84 | /* header size in 32-bit words LRH+BTH = (8+12)/4. */ | 94 | /* header size in 32-bit words LRH+BTH = (8+12)/4. */ |
85 | hwords = 5; | 95 | hwords = 5; |
86 | 96 | ||
87 | /* | ||
88 | * Send a response. Note that we are in the responder's | ||
89 | * side of the QP context. | ||
90 | */ | ||
91 | switch (qp->s_ack_state) { | 97 | switch (qp->s_ack_state) { |
92 | case OP(RDMA_READ_REQUEST): | 98 | case OP(RDMA_READ_RESPONSE_LAST): |
93 | qp->s_cur_sge = &qp->s_rdma_sge; | 99 | case OP(RDMA_READ_RESPONSE_ONLY): |
94 | len = qp->s_rdma_len; | 100 | case OP(ATOMIC_ACKNOWLEDGE): |
95 | if (len > pmtu) { | 101 | qp->s_ack_state = OP(ACKNOWLEDGE); |
96 | len = pmtu; | 102 | /* FALLTHROUGH */ |
97 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); | 103 | case OP(ACKNOWLEDGE): |
98 | } else | 104 | /* Check for no next entry in the queue. */ |
99 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); | 105 | if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { |
100 | qp->s_rdma_len -= len; | 106 | if (qp->s_flags & IPATH_S_ACK_PENDING) |
107 | goto normal; | ||
108 | goto bail; | ||
109 | } | ||
110 | |||
111 | e = &qp->s_ack_queue[qp->s_tail_ack_queue]; | ||
112 | if (e->opcode == OP(RDMA_READ_REQUEST)) { | ||
113 | /* Copy SGE state in case we need to resend */ | ||
114 | qp->s_ack_rdma_sge = e->rdma_sge; | ||
115 | qp->s_cur_sge = &qp->s_ack_rdma_sge; | ||
116 | len = e->rdma_sge.sge.sge_length; | ||
117 | if (len > pmtu) { | ||
118 | len = pmtu; | ||
119 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); | ||
120 | } else { | ||
121 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); | ||
122 | if (++qp->s_tail_ack_queue > | ||
123 | IPATH_MAX_RDMA_ATOMIC) | ||
124 | qp->s_tail_ack_queue = 0; | ||
125 | } | ||
126 | ohdr->u.aeth = ipath_compute_aeth(qp); | ||
127 | hwords++; | ||
128 | qp->s_ack_rdma_psn = e->psn; | ||
129 | bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK; | ||
130 | } else { | ||
131 | /* COMPARE_SWAP or FETCH_ADD */ | ||
132 | qp->s_cur_sge = NULL; | ||
133 | len = 0; | ||
134 | qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); | ||
135 | ohdr->u.at.aeth = ipath_compute_aeth(qp); | ||
136 | ohdr->u.at.atomic_ack_eth[0] = | ||
137 | cpu_to_be32(e->atomic_data >> 32); | ||
138 | ohdr->u.at.atomic_ack_eth[1] = | ||
139 | cpu_to_be32(e->atomic_data); | ||
140 | hwords += sizeof(ohdr->u.at) / sizeof(u32); | ||
141 | bth2 = e->psn; | ||
142 | if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC) | ||
143 | qp->s_tail_ack_queue = 0; | ||
144 | } | ||
101 | bth0 = qp->s_ack_state << 24; | 145 | bth0 = qp->s_ack_state << 24; |
102 | ohdr->u.aeth = ipath_compute_aeth(qp); | ||
103 | hwords++; | ||
104 | break; | 146 | break; |
105 | 147 | ||
106 | case OP(RDMA_READ_RESPONSE_FIRST): | 148 | case OP(RDMA_READ_RESPONSE_FIRST): |
107 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); | 149 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); |
108 | /* FALLTHROUGH */ | 150 | /* FALLTHROUGH */ |
109 | case OP(RDMA_READ_RESPONSE_MIDDLE): | 151 | case OP(RDMA_READ_RESPONSE_MIDDLE): |
110 | qp->s_cur_sge = &qp->s_rdma_sge; | 152 | len = qp->s_ack_rdma_sge.sge.sge_length; |
111 | len = qp->s_rdma_len; | ||
112 | if (len > pmtu) | 153 | if (len > pmtu) |
113 | len = pmtu; | 154 | len = pmtu; |
114 | else { | 155 | else { |
115 | ohdr->u.aeth = ipath_compute_aeth(qp); | 156 | ohdr->u.aeth = ipath_compute_aeth(qp); |
116 | hwords++; | 157 | hwords++; |
117 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); | 158 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); |
159 | if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC) | ||
160 | qp->s_tail_ack_queue = 0; | ||
118 | } | 161 | } |
119 | qp->s_rdma_len -= len; | ||
120 | bth0 = qp->s_ack_state << 24; | 162 | bth0 = qp->s_ack_state << 24; |
121 | break; | 163 | bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK; |
122 | |||
123 | case OP(RDMA_READ_RESPONSE_LAST): | ||
124 | case OP(RDMA_READ_RESPONSE_ONLY): | ||
125 | /* | ||
126 | * We have to prevent new requests from changing | ||
127 | * the r_sge state while a ipath_verbs_send() | ||
128 | * is in progress. | ||
129 | */ | ||
130 | qp->s_ack_state = OP(ACKNOWLEDGE); | ||
131 | bth0 = 0; | ||
132 | goto bail; | ||
133 | |||
134 | case OP(COMPARE_SWAP): | ||
135 | case OP(FETCH_ADD): | ||
136 | qp->s_cur_sge = NULL; | ||
137 | len = 0; | ||
138 | /* | ||
139 | * Set the s_ack_state so the receive interrupt handler | ||
140 | * won't try to send an ACK (out of order) until this one | ||
141 | * is actually sent. | ||
142 | */ | ||
143 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); | ||
144 | bth0 = OP(ATOMIC_ACKNOWLEDGE) << 24; | ||
145 | ohdr->u.at.aeth = ipath_compute_aeth(qp); | ||
146 | ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data); | ||
147 | hwords += sizeof(ohdr->u.at) / 4; | ||
148 | break; | 164 | break; |
149 | 165 | ||
150 | default: | 166 | default: |
151 | /* Send a regular ACK. */ | 167 | normal: |
152 | qp->s_cur_sge = NULL; | ||
153 | len = 0; | ||
154 | /* | 168 | /* |
155 | * Set the s_ack_state so the receive interrupt handler | 169 | * Send a regular ACK. |
156 | * won't try to send an ACK (out of order) until this one | 170 | * Set the s_ack_state so we wait until after sending |
157 | * is actually sent. | 171 | * the ACK before setting s_ack_state to ACKNOWLEDGE |
172 | * (see above). | ||
158 | */ | 173 | */ |
159 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); | 174 | qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); |
160 | bth0 = OP(ACKNOWLEDGE) << 24; | 175 | qp->s_flags &= ~IPATH_S_ACK_PENDING; |
176 | qp->s_cur_sge = NULL; | ||
161 | if (qp->s_nak_state) | 177 | if (qp->s_nak_state) |
162 | ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | | 178 | ohdr->u.aeth = |
163 | (qp->s_nak_state << | 179 | cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | |
164 | IPATH_AETH_CREDIT_SHIFT)); | 180 | (qp->s_nak_state << |
181 | IPATH_AETH_CREDIT_SHIFT)); | ||
165 | else | 182 | else |
166 | ohdr->u.aeth = ipath_compute_aeth(qp); | 183 | ohdr->u.aeth = ipath_compute_aeth(qp); |
167 | hwords++; | 184 | hwords++; |
185 | len = 0; | ||
186 | bth0 = OP(ACKNOWLEDGE) << 24; | ||
187 | bth2 = qp->s_ack_psn & IPATH_PSN_MASK; | ||
168 | } | 188 | } |
169 | qp->s_hdrwords = hwords; | 189 | qp->s_hdrwords = hwords; |
170 | qp->s_cur_size = len; | 190 | qp->s_cur_size = len; |
191 | *bth0p = bth0; | ||
192 | *bth2p = bth2; | ||
193 | return 1; | ||
171 | 194 | ||
172 | bail: | 195 | bail: |
173 | return bth0; | 196 | return 0; |
174 | } | 197 | } |
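
The rewritten ack logic replaces the single in-flight response with a small ring, s_ack_queue, of IPATH_MAX_RDMA_ATOMIC + 1 entries: r_head_ack_queue produces, s_tail_ack_queue consumes, and both wrap to 0 after the last slot, which is what the repeated increment-and-reset steps above implement. The wrap step in isolation (the constant is a placeholder for IPATH_MAX_RDMA_ATOMIC):

    #define MAX_RDMA_ATOMIC 4   /* placeholder value */

    /* s_ack_queue index arithmetic: MAX_RDMA_ATOMIC + 1 slots, wrapping */
    static unsigned int ack_queue_next(unsigned int idx)
    {
        return idx + 1 > MAX_RDMA_ATOMIC ? 0 : idx + 1;
    }
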
175 | 198 | ||
176 | /** | 199 | /** |
@@ -197,9 +220,16 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
197 | u32 bth2; | 220 | u32 bth2; |
198 | char newreq; | 221 | char newreq; |
199 | 222 | ||
223 | /* Sending responses has higher priority than sending requests. */ | ||
224 | if ((qp->r_head_ack_queue != qp->s_tail_ack_queue || | ||
225 | (qp->s_flags & IPATH_S_ACK_PENDING) || | ||
226 | qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) && | ||
227 | ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p)) | ||
228 | goto done; | ||
229 | |||
200 | if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) || | 230 | if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) || |
201 | qp->s_rnr_timeout) | 231 | qp->s_rnr_timeout) |
202 | goto done; | 232 | goto bail; |
203 | 233 | ||
204 | /* Limit the number of packets sent without an ACK. */ | 234 | /* Limit the number of packets sent without an ACK. */ |
205 | if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) { | 235 | if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) { |
@@ -210,7 +240,7 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
210 | list_add_tail(&qp->timerwait, | 240 | list_add_tail(&qp->timerwait, |
211 | &dev->pending[dev->pending_index]); | 241 | &dev->pending[dev->pending_index]); |
212 | spin_unlock(&dev->pending_lock); | 242 | spin_unlock(&dev->pending_lock); |
213 | goto done; | 243 | goto bail; |
214 | } | 244 | } |
215 | 245 | ||
216 | /* header size in 32-bit words LRH+BTH = (8+12)/4. */ | 246 | /* header size in 32-bit words LRH+BTH = (8+12)/4. */ |
@@ -232,7 +262,16 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
232 | if (qp->s_cur == qp->s_tail) { | 262 | if (qp->s_cur == qp->s_tail) { |
233 | /* Check if send work queue is empty. */ | 263 | /* Check if send work queue is empty. */ |
234 | if (qp->s_tail == qp->s_head) | 264 | if (qp->s_tail == qp->s_head) |
235 | goto done; | 265 | goto bail; |
266 | /* | ||
267 | * If a fence is requested, wait for previous | ||
268 | * RDMA read and atomic operations to finish. | ||
269 | */ | ||
270 | if ((wqe->wr.send_flags & IB_SEND_FENCE) && | ||
271 | qp->s_num_rd_atomic) { | ||
272 | qp->s_flags |= IPATH_S_FENCE_PENDING; | ||
273 | goto bail; | ||
274 | } | ||
236 | wqe->psn = qp->s_next_psn; | 275 | wqe->psn = qp->s_next_psn; |
237 | newreq = 1; | 276 | newreq = 1; |
238 | } | 277 | } |
@@ -250,7 +289,7 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
250 | /* If no credit, return. */ | 289 | /* If no credit, return. */ |
251 | if (qp->s_lsn != (u32) -1 && | 290 | if (qp->s_lsn != (u32) -1 && |
252 | ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) | 291 | ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) |
253 | goto done; | 292 | goto bail; |
254 | wqe->lpsn = wqe->psn; | 293 | wqe->lpsn = wqe->psn; |
255 | if (len > pmtu) { | 294 | if (len > pmtu) { |
256 | wqe->lpsn += (len - 1) / pmtu; | 295 | wqe->lpsn += (len - 1) / pmtu; |
@@ -281,13 +320,13 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
281 | /* If no credit, return. */ | 320 | /* If no credit, return. */ |
282 | if (qp->s_lsn != (u32) -1 && | 321 | if (qp->s_lsn != (u32) -1 && |
283 | ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) | 322 | ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) |
284 | goto done; | 323 | goto bail; |
285 | ohdr->u.rc.reth.vaddr = | 324 | ohdr->u.rc.reth.vaddr = |
286 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | 325 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); |
287 | ohdr->u.rc.reth.rkey = | 326 | ohdr->u.rc.reth.rkey = |
288 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 327 | cpu_to_be32(wqe->wr.wr.rdma.rkey); |
289 | ohdr->u.rc.reth.length = cpu_to_be32(len); | 328 | ohdr->u.rc.reth.length = cpu_to_be32(len); |
290 | hwords += sizeof(struct ib_reth) / 4; | 329 | hwords += sizeof(struct ib_reth) / sizeof(u32); |
291 | wqe->lpsn = wqe->psn; | 330 | wqe->lpsn = wqe->psn; |
292 | if (len > pmtu) { | 331 | if (len > pmtu) { |
293 | wqe->lpsn += (len - 1) / pmtu; | 332 | wqe->lpsn += (len - 1) / pmtu; |
@@ -312,14 +351,17 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
312 | break; | 351 | break; |
313 | 352 | ||
314 | case IB_WR_RDMA_READ: | 353 | case IB_WR_RDMA_READ: |
315 | ohdr->u.rc.reth.vaddr = | 354 | /* |
316 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | 355 | * Don't allow more operations to be started |
317 | ohdr->u.rc.reth.rkey = | 356 | * than the QP limits allow. |
318 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 357 | */ |
319 | ohdr->u.rc.reth.length = cpu_to_be32(len); | ||
320 | qp->s_state = OP(RDMA_READ_REQUEST); | ||
321 | hwords += sizeof(ohdr->u.rc.reth) / 4; | ||
322 | if (newreq) { | 358 | if (newreq) { |
359 | if (qp->s_num_rd_atomic >= | ||
360 | qp->s_max_rd_atomic) { | ||
361 | qp->s_flags |= IPATH_S_RDMAR_PENDING; | ||
362 | goto bail; | ||
363 | } | ||
364 | qp->s_num_rd_atomic++; | ||
323 | if (qp->s_lsn != (u32) -1) | 365 | if (qp->s_lsn != (u32) -1) |
324 | qp->s_lsn++; | 366 | qp->s_lsn++; |
325 | /* | 367 | /* |
@@ -330,6 +372,13 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
330 | qp->s_next_psn += (len - 1) / pmtu; | 372 | qp->s_next_psn += (len - 1) / pmtu; |
331 | wqe->lpsn = qp->s_next_psn++; | 373 | wqe->lpsn = qp->s_next_psn++; |
332 | } | 374 | } |
375 | ohdr->u.rc.reth.vaddr = | ||
376 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | ||
377 | ohdr->u.rc.reth.rkey = | ||
378 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | ||
379 | ohdr->u.rc.reth.length = cpu_to_be32(len); | ||
380 | qp->s_state = OP(RDMA_READ_REQUEST); | ||
381 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); | ||
333 | ss = NULL; | 382 | ss = NULL; |
334 | len = 0; | 383 | len = 0; |
335 | if (++qp->s_cur == qp->s_size) | 384 | if (++qp->s_cur == qp->s_size) |
@@ -338,32 +387,48 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
338 | 387 | ||
339 | case IB_WR_ATOMIC_CMP_AND_SWP: | 388 | case IB_WR_ATOMIC_CMP_AND_SWP: |
340 | case IB_WR_ATOMIC_FETCH_AND_ADD: | 389 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
341 | if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) | 390 | /* |
342 | qp->s_state = OP(COMPARE_SWAP); | 391 | * Don't allow more operations to be started |
343 | else | 392 | * than the QP limits allow. |
344 | qp->s_state = OP(FETCH_ADD); | 393 | */ |
345 | ohdr->u.atomic_eth.vaddr = cpu_to_be64( | ||
346 | wqe->wr.wr.atomic.remote_addr); | ||
347 | ohdr->u.atomic_eth.rkey = cpu_to_be32( | ||
348 | wqe->wr.wr.atomic.rkey); | ||
349 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( | ||
350 | wqe->wr.wr.atomic.swap); | ||
351 | ohdr->u.atomic_eth.compare_data = cpu_to_be64( | ||
352 | wqe->wr.wr.atomic.compare_add); | ||
353 | hwords += sizeof(struct ib_atomic_eth) / 4; | ||
354 | if (newreq) { | 394 | if (newreq) { |
395 | if (qp->s_num_rd_atomic >= | ||
396 | qp->s_max_rd_atomic) { | ||
397 | qp->s_flags |= IPATH_S_RDMAR_PENDING; | ||
398 | goto bail; | ||
399 | } | ||
400 | qp->s_num_rd_atomic++; | ||
355 | if (qp->s_lsn != (u32) -1) | 401 | if (qp->s_lsn != (u32) -1) |
356 | qp->s_lsn++; | 402 | qp->s_lsn++; |
357 | wqe->lpsn = wqe->psn; | 403 | wqe->lpsn = wqe->psn; |
358 | } | 404 | } |
359 | if (++qp->s_cur == qp->s_size) | 405 | if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { |
360 | qp->s_cur = 0; | 406 | qp->s_state = OP(COMPARE_SWAP); |
407 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( | ||
408 | wqe->wr.wr.atomic.swap); | ||
409 | ohdr->u.atomic_eth.compare_data = cpu_to_be64( | ||
410 | wqe->wr.wr.atomic.compare_add); | ||
411 | } else { | ||
412 | qp->s_state = OP(FETCH_ADD); | ||
413 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( | ||
414 | wqe->wr.wr.atomic.compare_add); | ||
415 | ohdr->u.atomic_eth.compare_data = 0; | ||
416 | } | ||
417 | ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( | ||
418 | wqe->wr.wr.atomic.remote_addr >> 32); | ||
419 | ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( | ||
420 | wqe->wr.wr.atomic.remote_addr); | ||
421 | ohdr->u.atomic_eth.rkey = cpu_to_be32( | ||
422 | wqe->wr.wr.atomic.rkey); | ||
423 | hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); | ||
361 | ss = NULL; | 424 | ss = NULL; |
362 | len = 0; | 425 | len = 0; |
426 | if (++qp->s_cur == qp->s_size) | ||
427 | qp->s_cur = 0; | ||
363 | break; | 428 | break; |
364 | 429 | ||
365 | default: | 430 | default: |
366 | goto done; | 431 | goto bail; |
367 | } | 432 | } |
368 | qp->s_sge.sge = wqe->sg_list[0]; | 433 | qp->s_sge.sge = wqe->sg_list[0]; |
369 | qp->s_sge.sg_list = wqe->sg_list + 1; | 434 | qp->s_sge.sg_list = wqe->sg_list + 1; |
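[Editor's note] The hunk above replaces the single cpu_to_be64() store of the atomic vaddr with two cpu_to_be32() stores, since the 64-bit field need not be 8-byte aligned inside the packet header. A minimal userspace sketch of the same encoding, with htonl() standing in for cpu_to_be32() (pack_vaddr/unpack_vaddr are illustrative names, not driver functions):

    #include <arpa/inet.h>
    #include <stdint.h>

    /* Pack a 64-bit address as two big-endian 32-bit words, high first. */
    static void pack_vaddr(uint32_t be_words[2], uint64_t vaddr)
    {
            be_words[0] = htonl((uint32_t)(vaddr >> 32));
            be_words[1] = htonl((uint32_t)vaddr);
    }

    /* Inverse used on the receive side (cf. the ipath_rc_rcv() hunk below). */
    static uint64_t unpack_vaddr(const uint32_t be_words[2])
    {
            return ((uint64_t)ntohl(be_words[0]) << 32) | ntohl(be_words[1]);
    }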
@@ -379,7 +444,7 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
379 | qp->s_psn = wqe->lpsn + 1; | 444 | qp->s_psn = wqe->lpsn + 1; |
380 | else { | 445 | else { |
381 | qp->s_psn++; | 446 | qp->s_psn++; |
382 | if ((int)(qp->s_psn - qp->s_next_psn) > 0) | 447 | if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) |
383 | qp->s_next_psn = qp->s_psn; | 448 | qp->s_next_psn = qp->s_psn; |
384 | } | 449 | } |
385 | /* | 450 | /* |
@@ -406,7 +471,7 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
406 | /* FALLTHROUGH */ | 471 | /* FALLTHROUGH */ |
407 | case OP(SEND_MIDDLE): | 472 | case OP(SEND_MIDDLE): |
408 | bth2 = qp->s_psn++ & IPATH_PSN_MASK; | 473 | bth2 = qp->s_psn++ & IPATH_PSN_MASK; |
409 | if ((int)(qp->s_psn - qp->s_next_psn) > 0) | 474 | if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) |
410 | qp->s_next_psn = qp->s_psn; | 475 | qp->s_next_psn = qp->s_psn; |
411 | ss = &qp->s_sge; | 476 | ss = &qp->s_sge; |
412 | len = qp->s_len; | 477 | len = qp->s_len; |
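[Editor's note] ipath_cmp24() replaces the plain signed subtraction here and in the neighboring hunks because PSNs are 24-bit counters that wrap, so the difference must be sign-extended from bit 23. A hedged userspace model (the driver's helper may differ in detail; this relies on arithmetic right shift, as kernel code does):

    #include <stdint.h>

    #define PSN_MASK 0xFFFFFFu              /* 24-bit PSN space */

    /* Returns <0, 0 or >0 like memcmp(), treating a and b as 24-bit
     * sequence numbers: shift the wrapped difference into the top
     * byte and let the signed cast recover its sign. */
    static int cmp24(uint32_t a, uint32_t b)
    {
            return (int32_t)((a - b) << 8) >> 8;
    }

For example, cmp24(0x000002, 0xFFFFFE) is 4: PSN 2 is four packets past 0xFFFFFE across the wrap, where the old (int)(a - b) cast would have produced a large negative value.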
@@ -442,7 +507,7 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
442 | /* FALLTHROUGH */ | 507 | /* FALLTHROUGH */ |
443 | case OP(RDMA_WRITE_MIDDLE): | 508 | case OP(RDMA_WRITE_MIDDLE): |
444 | bth2 = qp->s_psn++ & IPATH_PSN_MASK; | 509 | bth2 = qp->s_psn++ & IPATH_PSN_MASK; |
445 | if ((int)(qp->s_psn - qp->s_next_psn) > 0) | 510 | if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) |
446 | qp->s_next_psn = qp->s_psn; | 511 | qp->s_next_psn = qp->s_psn; |
447 | ss = &qp->s_sge; | 512 | ss = &qp->s_sge; |
448 | len = qp->s_len; | 513 | len = qp->s_len; |
@@ -479,9 +544,9 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
479 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 544 | cpu_to_be32(wqe->wr.wr.rdma.rkey); |
480 | ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); | 545 | ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); |
481 | qp->s_state = OP(RDMA_READ_REQUEST); | 546 | qp->s_state = OP(RDMA_READ_REQUEST); |
482 | hwords += sizeof(ohdr->u.rc.reth) / 4; | 547 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); |
483 | bth2 = qp->s_psn++ & IPATH_PSN_MASK; | 548 | bth2 = qp->s_psn++ & IPATH_PSN_MASK; |
484 | if ((int)(qp->s_psn - qp->s_next_psn) > 0) | 549 | if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) |
485 | qp->s_next_psn = qp->s_psn; | 550 | qp->s_next_psn = qp->s_psn; |
486 | ss = NULL; | 551 | ss = NULL; |
487 | len = 0; | 552 | len = 0; |
@@ -489,20 +554,6 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
489 | if (qp->s_cur == qp->s_size) | 554 | if (qp->s_cur == qp->s_size) |
490 | qp->s_cur = 0; | 555 | qp->s_cur = 0; |
491 | break; | 556 | break; |
492 | |||
493 | case OP(RDMA_READ_REQUEST): | ||
494 | case OP(COMPARE_SWAP): | ||
495 | case OP(FETCH_ADD): | ||
496 | /* | ||
497 | * We shouldn't start anything new until this request is | ||
498 | * finished. The ACK will handle rescheduling us. XXX The | ||
499 | * number of outstanding ones is negotiated at connection | ||
500 | * setup time (see pg. 258,289)? XXX Also, if we support | ||
501 | * multiple outstanding requests, we need to check the WQE | ||
502 | * IB_SEND_FENCE flag and not send a new request if a RDMA | ||
503 | * read or atomic is pending. | ||
504 | */ | ||
505 | goto done; | ||
506 | } | 557 | } |
507 | if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0) | 558 | if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0) |
508 | bth2 |= 1 << 31; /* Request ACK. */ | 559 | bth2 |= 1 << 31; /* Request ACK. */ |
@@ -512,9 +563,10 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
512 | qp->s_cur_size = len; | 563 | qp->s_cur_size = len; |
513 | *bth0p = bth0 | (qp->s_state << 24); | 564 | *bth0p = bth0 | (qp->s_state << 24); |
514 | *bth2p = bth2; | 565 | *bth2p = bth2; |
566 | done: | ||
515 | return 1; | 567 | return 1; |
516 | 568 | ||
517 | done: | 569 | bail: |
518 | return 0; | 570 | return 0; |
519 | } | 571 | } |
520 | 572 | ||
@@ -524,7 +576,8 @@ done: | |||
524 | * | 576 | * |
525 | * This is called from ipath_rc_rcv() and only uses the receive | 577 | * This is called from ipath_rc_rcv() and only uses the receive |
526 | * side QP state. | 578 | * side QP state. |
527 | * Note that RDMA reads are handled in the send side QP state and tasklet. | 579 | * Note that RDMA reads and atomics are handled in the |
580 | * send side QP state and tasklet. | ||
528 | */ | 581 | */ |
529 | static void send_rc_ack(struct ipath_qp *qp) | 582 | static void send_rc_ack(struct ipath_qp *qp) |
530 | { | 583 | { |
@@ -535,6 +588,10 @@ static void send_rc_ack(struct ipath_qp *qp) | |||
535 | struct ipath_ib_header hdr; | 588 | struct ipath_ib_header hdr; |
536 | struct ipath_other_headers *ohdr; | 589 | struct ipath_other_headers *ohdr; |
537 | 590 | ||
591 | /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ | ||
592 | if (qp->r_head_ack_queue != qp->s_tail_ack_queue) | ||
593 | goto queue_ack; | ||
594 | |||
538 | /* Construct the header. */ | 595 | /* Construct the header. */ |
539 | ohdr = &hdr.u.oth; | 596 | ohdr = &hdr.u.oth; |
540 | lrh0 = IPATH_LRH_BTH; | 597 | lrh0 = IPATH_LRH_BTH; |
@@ -548,19 +605,14 @@ static void send_rc_ack(struct ipath_qp *qp) | |||
548 | lrh0 = IPATH_LRH_GRH; | 605 | lrh0 = IPATH_LRH_GRH; |
549 | } | 606 | } |
550 | /* read pkey_index w/o lock (it's atomic) */ | 607 | /* read pkey_index w/o lock (it's atomic) */ |

551 | bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index); | 608 | bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) | |
609 | OP(ACKNOWLEDGE) << 24; | ||
552 | if (qp->r_nak_state) | 610 | if (qp->r_nak_state) |
553 | ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | | 611 | ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | |
554 | (qp->r_nak_state << | 612 | (qp->r_nak_state << |
555 | IPATH_AETH_CREDIT_SHIFT)); | 613 | IPATH_AETH_CREDIT_SHIFT)); |
556 | else | 614 | else |
557 | ohdr->u.aeth = ipath_compute_aeth(qp); | 615 | ohdr->u.aeth = ipath_compute_aeth(qp); |
558 | if (qp->r_ack_state >= OP(COMPARE_SWAP)) { | ||
559 | bth0 |= OP(ATOMIC_ACKNOWLEDGE) << 24; | ||
560 | ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data); | ||
561 | hwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4; | ||
562 | } else | ||
563 | bth0 |= OP(ACKNOWLEDGE) << 24; | ||
564 | lrh0 |= qp->remote_ah_attr.sl << 4; | 616 | lrh0 |= qp->remote_ah_attr.sl << 4; |
565 | hdr.lrh[0] = cpu_to_be16(lrh0); | 617 | hdr.lrh[0] = cpu_to_be16(lrh0); |
566 | hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); | 618 | hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); |
@@ -574,31 +626,31 @@ static void send_rc_ack(struct ipath_qp *qp) | |||
574 | * If we can send the ACK, clear the ACK state. | 626 | * If we can send the ACK, clear the ACK state. |
575 | */ | 627 | */ |
576 | if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) { | 628 | if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) { |
577 | qp->r_ack_state = OP(ACKNOWLEDGE); | ||
578 | dev->n_unicast_xmit++; | 629 | dev->n_unicast_xmit++; |
579 | } else { | 630 | goto done; |
580 | /* | ||
581 | * We are out of PIO buffers at the moment. | ||
582 | * Pass responsibility for sending the ACK to the | ||
583 | * send tasklet so that when a PIO buffer becomes | ||
584 | * available, the ACK is sent ahead of other outgoing | ||
585 | * packets. | ||
586 | */ | ||
587 | dev->n_rc_qacks++; | ||
588 | spin_lock_irq(&qp->s_lock); | ||
589 | /* Don't coalesce if a RDMA read or atomic is pending. */ | ||
590 | if (qp->s_ack_state == OP(ACKNOWLEDGE) || | ||
591 | qp->s_ack_state < OP(RDMA_READ_REQUEST)) { | ||
592 | qp->s_ack_state = qp->r_ack_state; | ||
593 | qp->s_nak_state = qp->r_nak_state; | ||
594 | qp->s_ack_psn = qp->r_ack_psn; | ||
595 | qp->r_ack_state = OP(ACKNOWLEDGE); | ||
596 | } | ||
597 | spin_unlock_irq(&qp->s_lock); | ||
598 | |||
599 | /* Call ipath_do_rc_send() in another thread. */ | ||
600 | tasklet_hi_schedule(&qp->s_task); | ||
601 | } | 631 | } |
632 | |||
633 | /* | ||
634 | * We are out of PIO buffers at the moment. | ||
635 | * Pass responsibility for sending the ACK to the | ||
636 | * send tasklet so that when a PIO buffer becomes | ||
637 | * available, the ACK is sent ahead of other outgoing | ||
638 | * packets. | ||
639 | */ | ||
640 | dev->n_rc_qacks++; | ||
641 | |||
642 | queue_ack: | ||
643 | spin_lock_irq(&qp->s_lock); | ||
644 | qp->s_flags |= IPATH_S_ACK_PENDING; | ||
645 | qp->s_nak_state = qp->r_nak_state; | ||
646 | qp->s_ack_psn = qp->r_ack_psn; | ||
647 | spin_unlock_irq(&qp->s_lock); | ||
648 | |||
649 | /* Call ipath_do_rc_send() in another thread. */ | ||
650 | tasklet_hi_schedule(&qp->s_task); | ||
651 | |||
652 | done: | ||
653 | return; | ||
602 | } | 654 | } |
603 | 655 | ||
604 | /** | 656 | /** |
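[Editor's note] The new r_head_ack_queue != s_tail_ack_queue test above is a ring-buffer emptiness check: the responder now queues up to IPATH_MAX_RDMA_ATOMIC read/atomic responses instead of tracking a single one. A sketch of the indexing, with the depth assumed to be 4 (the actual constant lives in the driver headers):

    #define MAX_RDMA_ATOMIC 4       /* assumed depth; ring has depth + 1 slots */

    struct ack_queue_idx {
            unsigned head;          /* producer: receive/interrupt path */
            unsigned tail;          /* consumer: send tasklet */
    };

    /* head == tail <=> no RDMA read or atomic response is pending. */
    static int ackq_empty(const struct ack_queue_idx *q)
    {
            return q->head == q->tail;
    }

    /* Mirrors the receive path below: advancing head onto tail means
     * the requester exceeded its negotiated limit (NAK invalid). */
    static int ackq_full(const struct ack_queue_idx *q)
    {
            unsigned next = q->head + 1;

            if (next > MAX_RDMA_ATOMIC)
                    next = 0;
            return next == q->tail;
    }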
@@ -727,7 +779,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc) | |||
727 | if (wqe->wr.opcode == IB_WR_RDMA_READ) | 779 | if (wqe->wr.opcode == IB_WR_RDMA_READ) |
728 | dev->n_rc_resends++; | 780 | dev->n_rc_resends++; |
729 | else | 781 | else |
730 | dev->n_rc_resends += (int)qp->s_psn - (int)psn; | 782 | dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK; |
731 | 783 | ||
732 | reset_psn(qp, psn); | 784 | reset_psn(qp, psn); |
733 | tasklet_hi_schedule(&qp->s_task); | 785 | tasklet_hi_schedule(&qp->s_task); |
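[Editor's note] The resend counter switches from a signed cast to a masked difference for the same wrap reason: it measures the distance between two 24-bit PSNs. A small illustration (resend_count is an illustrative name):

    #include <stdint.h>

    #define PSN_MASK 0xFFFFFFu

    /* Wrap-safe count of packets between the restart PSN and the
     * current send PSN, e.g. resend_count(0x000002, 0xFFFFFE) == 4. */
    static uint32_t resend_count(uint32_t s_psn, uint32_t restart_psn)
    {
            return (s_psn - restart_psn) & PSN_MASK;
    }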
@@ -775,10 +827,6 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
775 | list_del_init(&qp->timerwait); | 827 | list_del_init(&qp->timerwait); |
776 | spin_unlock(&dev->pending_lock); | 828 | spin_unlock(&dev->pending_lock); |
777 | 829 | ||
778 | /* Nothing is pending to ACK/NAK. */ | ||
779 | if (unlikely(qp->s_last == qp->s_tail)) | ||
780 | goto bail; | ||
781 | |||
782 | /* | 830 | /* |
783 | * Note that NAKs implicitly ACK outstanding SEND and RDMA write | 831 | * Note that NAKs implicitly ACK outstanding SEND and RDMA write |
784 | * requests and implicitly NAK RDMA read and atomic requests issued | 832 | * requests and implicitly NAK RDMA read and atomic requests issued |
@@ -806,7 +854,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
806 | */ | 854 | */ |
807 | if ((wqe->wr.opcode == IB_WR_RDMA_READ && | 855 | if ((wqe->wr.opcode == IB_WR_RDMA_READ && |
808 | (opcode != OP(RDMA_READ_RESPONSE_LAST) || | 856 | (opcode != OP(RDMA_READ_RESPONSE_LAST) || |
809 | ipath_cmp24(ack_psn, wqe->lpsn) != 0)) || | 857 | ipath_cmp24(ack_psn, wqe->lpsn) != 0)) || |
810 | ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || | 858 | ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || |
811 | wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && | 859 | wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && |
812 | (opcode != OP(ATOMIC_ACKNOWLEDGE) || | 860 | (opcode != OP(ATOMIC_ACKNOWLEDGE) || |
@@ -824,20 +872,33 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
824 | */ | 872 | */ |
825 | goto bail; | 873 | goto bail; |
826 | } | 874 | } |
827 | if (wqe->wr.opcode == IB_WR_RDMA_READ || | 875 | if (qp->s_num_rd_atomic && |
828 | wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || | 876 | (wqe->wr.opcode == IB_WR_RDMA_READ || |
829 | wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) | 877 | wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || |
830 | tasklet_hi_schedule(&qp->s_task); | 878 | wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { |
879 | qp->s_num_rd_atomic--; | ||
880 | /* Restart sending task if fence is complete */ | ||
881 | if ((qp->s_flags & IPATH_S_FENCE_PENDING) && | ||
882 | !qp->s_num_rd_atomic) { | ||
883 | qp->s_flags &= ~IPATH_S_FENCE_PENDING; | ||
884 | tasklet_hi_schedule(&qp->s_task); | ||
885 | } else if (qp->s_flags & IPATH_S_RDMAR_PENDING) { | ||
886 | qp->s_flags &= ~IPATH_S_RDMAR_PENDING; | ||
887 | tasklet_hi_schedule(&qp->s_task); | ||
888 | } | ||
889 | } | ||
831 | /* Post a send completion queue entry if requested. */ | 890 | /* Post a send completion queue entry if requested. */ |
832 | if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) || | 891 | if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) || |
833 | (wqe->wr.send_flags & IB_SEND_SIGNALED)) { | 892 | (wqe->wr.send_flags & IB_SEND_SIGNALED)) { |
834 | wc.wr_id = wqe->wr.wr_id; | 893 | wc.wr_id = wqe->wr.wr_id; |
835 | wc.status = IB_WC_SUCCESS; | 894 | wc.status = IB_WC_SUCCESS; |
836 | wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; | 895 | wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; |
837 | wc.vendor_err = 0; | 896 | wc.vendor_err = 0; |
838 | wc.byte_len = wqe->length; | 897 | wc.byte_len = wqe->length; |
898 | wc.imm_data = 0; | ||
839 | wc.qp = &qp->ibqp; | 899 | wc.qp = &qp->ibqp; |
840 | wc.src_qp = qp->remote_qpn; | 900 | wc.src_qp = qp->remote_qpn; |
901 | wc.wc_flags = 0; | ||
841 | wc.pkey_index = 0; | 902 | wc.pkey_index = 0; |
842 | wc.slid = qp->remote_ah_attr.dlid; | 903 | wc.slid = qp->remote_ah_attr.dlid; |
843 | wc.sl = qp->remote_ah_attr.sl; | 904 | wc.sl = qp->remote_ah_attr.sl; |
@@ -854,15 +915,19 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
854 | if (qp->s_last == qp->s_cur) { | 915 | if (qp->s_last == qp->s_cur) { |
855 | if (++qp->s_cur >= qp->s_size) | 916 | if (++qp->s_cur >= qp->s_size) |
856 | qp->s_cur = 0; | 917 | qp->s_cur = 0; |
918 | qp->s_last = qp->s_cur; | ||
919 | if (qp->s_last == qp->s_tail) | ||
920 | break; | ||
857 | wqe = get_swqe_ptr(qp, qp->s_cur); | 921 | wqe = get_swqe_ptr(qp, qp->s_cur); |
858 | qp->s_state = OP(SEND_LAST); | 922 | qp->s_state = OP(SEND_LAST); |
859 | qp->s_psn = wqe->psn; | 923 | qp->s_psn = wqe->psn; |
924 | } else { | ||
925 | if (++qp->s_last >= qp->s_size) | ||
926 | qp->s_last = 0; | ||
927 | if (qp->s_last == qp->s_tail) | ||
928 | break; | ||
929 | wqe = get_swqe_ptr(qp, qp->s_last); | ||
860 | } | 930 | } |
861 | if (++qp->s_last >= qp->s_size) | ||
862 | qp->s_last = 0; | ||
863 | wqe = get_swqe_ptr(qp, qp->s_last); | ||
864 | if (qp->s_last == qp->s_tail) | ||
865 | break; | ||
866 | } | 931 | } |
867 | 932 | ||
868 | switch (aeth >> 29) { | 933 | switch (aeth >> 29) { |
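[Editor's note] A hedged userspace condensation of the completion-side bookkeeping added above: each outstanding RDMA read or atomic bumps s_num_rd_atomic when posted, and completions decrement it, waking the sender when a fence or a deferred read/atomic was waiting. Flag bit values here are assumptions, not the driver's:

    #define S_FENCE_PENDING  (1 << 0)   /* assumed bit positions */
    #define S_RDMAR_PENDING  (1 << 1)

    struct req_state {
            unsigned num_rd_atomic;
            unsigned flags;
    };

    /* Returns 1 if the send tasklet should be rescheduled. */
    static int complete_rd_atomic(struct req_state *s)
    {
            s->num_rd_atomic--;
            if ((s->flags & S_FENCE_PENDING) && !s->num_rd_atomic) {
                    s->flags &= ~S_FENCE_PENDING;
                    return 1;
            }
            if (s->flags & S_RDMAR_PENDING) {
                    s->flags &= ~S_RDMAR_PENDING;
                    return 1;
            }
            return 0;
    }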
@@ -874,6 +939,18 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
874 | list_add_tail(&qp->timerwait, | 939 | list_add_tail(&qp->timerwait, |
875 | &dev->pending[dev->pending_index]); | 940 | &dev->pending[dev->pending_index]); |
876 | spin_unlock(&dev->pending_lock); | 941 | spin_unlock(&dev->pending_lock); |
942 | /* | ||
943 | * If we get a partial ACK for a resent operation, | ||
944 | * we can stop resending the earlier packets and | ||
945 | * continue with the next packet the receiver wants. | ||
946 | */ | ||
947 | if (ipath_cmp24(qp->s_psn, psn) <= 0) { | ||
948 | reset_psn(qp, psn + 1); | ||
949 | tasklet_hi_schedule(&qp->s_task); | ||
950 | } | ||
951 | } else if (ipath_cmp24(qp->s_psn, psn) <= 0) { | ||
952 | qp->s_state = OP(SEND_LAST); | ||
953 | qp->s_psn = psn + 1; | ||
877 | } | 954 | } |
878 | ipath_get_credit(qp, aeth); | 955 | ipath_get_credit(qp, aeth); |
879 | qp->s_rnr_retry = qp->s_rnr_retry_cnt; | 956 | qp->s_rnr_retry = qp->s_rnr_retry_cnt; |
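[Editor's note] The new branch above implements its comment: on a partial ACK for a resent request, resume from the packet after the acknowledged PSN rather than replaying the rest. A sketch, reusing cmp24() and PSN_MASK from the earlier sketches:

    /* If the ACK has caught up with (or passed) the resend position,
     * jump the send PSN forward to the first unacknowledged packet. */
    static void apply_partial_ack(uint32_t *s_psn, uint32_t acked_psn)
    {
            if (cmp24(*s_psn, acked_psn) <= 0)
                    *s_psn = (acked_psn + 1) & PSN_MASK;
    }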
@@ -884,22 +961,23 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
884 | 961 | ||
885 | case 1: /* RNR NAK */ | 962 | case 1: /* RNR NAK */ |
886 | dev->n_rnr_naks++; | 963 | dev->n_rnr_naks++; |
964 | if (qp->s_last == qp->s_tail) | ||
965 | goto bail; | ||
887 | if (qp->s_rnr_retry == 0) { | 966 | if (qp->s_rnr_retry == 0) { |
888 | if (qp->s_last == qp->s_tail) | ||
889 | goto bail; | ||
890 | |||
891 | wc.status = IB_WC_RNR_RETRY_EXC_ERR; | 967 | wc.status = IB_WC_RNR_RETRY_EXC_ERR; |
892 | goto class_b; | 968 | goto class_b; |
893 | } | 969 | } |
894 | if (qp->s_rnr_retry_cnt < 7) | 970 | if (qp->s_rnr_retry_cnt < 7) |
895 | qp->s_rnr_retry--; | 971 | qp->s_rnr_retry--; |
896 | if (qp->s_last == qp->s_tail) | ||
897 | goto bail; | ||
898 | 972 | ||
899 | /* The last valid PSN is the previous PSN. */ | 973 | /* The last valid PSN is the previous PSN. */ |
900 | update_last_psn(qp, psn - 1); | 974 | update_last_psn(qp, psn - 1); |
901 | 975 | ||
902 | dev->n_rc_resends += (int)qp->s_psn - (int)psn; | 976 | if (wqe->wr.opcode == IB_WR_RDMA_READ) |
977 | dev->n_rc_resends++; | ||
978 | else | ||
979 | dev->n_rc_resends += | ||
980 | (qp->s_psn - psn) & IPATH_PSN_MASK; | ||
903 | 981 | ||
904 | reset_psn(qp, psn); | 982 | reset_psn(qp, psn); |
905 | 983 | ||
@@ -910,26 +988,20 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
910 | goto bail; | 988 | goto bail; |
911 | 989 | ||
912 | case 3: /* NAK */ | 990 | case 3: /* NAK */ |
913 | /* The last valid PSN seen is the previous request's. */ | 991 | if (qp->s_last == qp->s_tail) |
914 | if (qp->s_last != qp->s_tail) | 992 | goto bail; |
915 | update_last_psn(qp, wqe->psn - 1); | 993 | /* The last valid PSN is the previous PSN. */ |
994 | update_last_psn(qp, psn - 1); | ||
916 | switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) & | 995 | switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) & |
917 | IPATH_AETH_CREDIT_MASK) { | 996 | IPATH_AETH_CREDIT_MASK) { |
918 | case 0: /* PSN sequence error */ | 997 | case 0: /* PSN sequence error */ |
919 | dev->n_seq_naks++; | 998 | dev->n_seq_naks++; |
920 | /* | 999 | /* |
921 | * Back up to the responder's expected PSN. XXX | 1000 | * Back up to the responder's expected PSN. |
922 | * Note that we might get a NAK in the middle of an | 1001 | * Note that we might get a NAK in the middle of an |
923 | * RDMA READ response which terminates the RDMA | 1002 | * RDMA READ response which terminates the RDMA |
924 | * READ. | 1003 | * READ. |
925 | */ | 1004 | */ |
926 | if (qp->s_last == qp->s_tail) | ||
927 | break; | ||
928 | |||
929 | if (ipath_cmp24(psn, wqe->psn) < 0) | ||
930 | break; | ||
931 | |||
932 | /* Retry the request. */ | ||
933 | ipath_restart_rc(qp, psn, &wc); | 1005 | ipath_restart_rc(qp, psn, &wc); |
934 | break; | 1006 | break; |
935 | 1007 | ||
@@ -1003,6 +1075,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev, | |||
1003 | u32 psn, u32 hdrsize, u32 pmtu, | 1075 | u32 psn, u32 hdrsize, u32 pmtu, |
1004 | int header_in_data) | 1076 | int header_in_data) |
1005 | { | 1077 | { |
1078 | struct ipath_swqe *wqe; | ||
1006 | unsigned long flags; | 1079 | unsigned long flags; |
1007 | struct ib_wc wc; | 1080 | struct ib_wc wc; |
1008 | int diff; | 1081 | int diff; |
@@ -1032,6 +1105,10 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev, | |||
1032 | goto ack_done; | 1105 | goto ack_done; |
1033 | } | 1106 | } |
1034 | 1107 | ||
1108 | if (unlikely(qp->s_last == qp->s_tail)) | ||
1109 | goto ack_done; | ||
1110 | wqe = get_swqe_ptr(qp, qp->s_last); | ||
1111 | |||
1035 | switch (opcode) { | 1112 | switch (opcode) { |
1036 | case OP(ACKNOWLEDGE): | 1113 | case OP(ACKNOWLEDGE): |
1037 | case OP(ATOMIC_ACKNOWLEDGE): | 1114 | case OP(ATOMIC_ACKNOWLEDGE): |
@@ -1042,38 +1119,49 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev, | |||
1042 | aeth = be32_to_cpu(((__be32 *) data)[0]); | 1119 | aeth = be32_to_cpu(((__be32 *) data)[0]); |
1043 | data += sizeof(__be32); | 1120 | data += sizeof(__be32); |
1044 | } | 1121 | } |
1045 | if (opcode == OP(ATOMIC_ACKNOWLEDGE)) | 1122 | if (opcode == OP(ATOMIC_ACKNOWLEDGE)) { |
1046 | *(u64 *) qp->s_sge.sge.vaddr = *(u64 *) data; | 1123 | u64 val; |
1124 | |||
1125 | if (!header_in_data) { | ||
1126 | __be32 *p = ohdr->u.at.atomic_ack_eth; | ||
1127 | |||
1128 | val = ((u64) be32_to_cpu(p[0]) << 32) | | ||
1129 | be32_to_cpu(p[1]); | ||
1130 | } else | ||
1131 | val = be64_to_cpu(((__be64 *) data)[0]); | ||
1132 | *(u64 *) wqe->sg_list[0].vaddr = val; | ||
1133 | } | ||
1047 | if (!do_rc_ack(qp, aeth, psn, opcode) || | 1134 | if (!do_rc_ack(qp, aeth, psn, opcode) || |
1048 | opcode != OP(RDMA_READ_RESPONSE_FIRST)) | 1135 | opcode != OP(RDMA_READ_RESPONSE_FIRST)) |
1049 | goto ack_done; | 1136 | goto ack_done; |
1050 | hdrsize += 4; | 1137 | hdrsize += 4; |
1138 | if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) | ||
1139 | goto ack_op_err; | ||
1051 | /* | 1140 | /* |
1052 | * do_rc_ack() has already checked the PSN so skip | 1141 | * If this is a response to a resent RDMA read, we |
1053 | * the sequence check. | 1142 | * have to be careful to copy the data to the right |
1143 | * location. | ||
1054 | */ | 1144 | */ |
1055 | goto rdma_read; | 1145 | qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, |
1146 | wqe, psn, pmtu); | ||
1147 | goto read_middle; | ||
1056 | 1148 | ||
1057 | case OP(RDMA_READ_RESPONSE_MIDDLE): | 1149 | case OP(RDMA_READ_RESPONSE_MIDDLE): |
1058 | /* no AETH, no ACK */ | 1150 | /* no AETH, no ACK */ |
1059 | if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { | 1151 | if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { |
1060 | dev->n_rdma_seq++; | 1152 | dev->n_rdma_seq++; |
1061 | if (qp->s_last != qp->s_tail) | 1153 | ipath_restart_rc(qp, qp->s_last_psn + 1, &wc); |
1062 | ipath_restart_rc(qp, qp->s_last_psn + 1, &wc); | ||
1063 | goto ack_done; | 1154 | goto ack_done; |
1064 | } | 1155 | } |
1065 | rdma_read: | 1156 | if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) |
1066 | if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST))) | 1157 | goto ack_op_err; |
1067 | goto ack_done; | 1158 | read_middle: |
1068 | if (unlikely(tlen != (hdrsize + pmtu + 4))) | 1159 | if (unlikely(tlen != (hdrsize + pmtu + 4))) |
1069 | goto ack_done; | 1160 | goto ack_len_err; |
1070 | if (unlikely(pmtu >= qp->s_len)) | 1161 | if (unlikely(pmtu >= qp->s_rdma_read_len)) |
1071 | goto ack_done; | 1162 | goto ack_len_err; |
1163 | |||
1072 | /* We got a response so update the timeout. */ | 1164 | /* We got a response so update the timeout. */ |
1073 | if (unlikely(qp->s_last == qp->s_tail || | ||
1074 | get_swqe_ptr(qp, qp->s_last)->wr.opcode != | ||
1075 | IB_WR_RDMA_READ)) | ||
1076 | goto ack_done; | ||
1077 | spin_lock(&dev->pending_lock); | 1165 | spin_lock(&dev->pending_lock); |
1078 | if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait)) | 1166 | if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait)) |
1079 | list_move_tail(&qp->timerwait, | 1167 | list_move_tail(&qp->timerwait, |
@@ -1082,67 +1170,97 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev, | |||
1082 | /* | 1170 | /* |
1083 | * Update the RDMA receive state but do the copy w/o | 1171 | * Update the RDMA receive state but do the copy w/o |
1084 | * holding the locks and blocking interrupts. | 1172 | * holding the locks and blocking interrupts. |
1085 | * XXX Yet another place that affects relaxed RDMA order | ||
1086 | * since we don't want s_sge modified. | ||
1087 | */ | 1173 | */ |
1088 | qp->s_len -= pmtu; | 1174 | qp->s_rdma_read_len -= pmtu; |
1089 | update_last_psn(qp, psn); | 1175 | update_last_psn(qp, psn); |
1090 | spin_unlock_irqrestore(&qp->s_lock, flags); | 1176 | spin_unlock_irqrestore(&qp->s_lock, flags); |
1091 | ipath_copy_sge(&qp->s_sge, data, pmtu); | 1177 | ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu); |
1092 | goto bail; | 1178 | goto bail; |
1093 | 1179 | ||
1094 | case OP(RDMA_READ_RESPONSE_LAST): | 1180 | case OP(RDMA_READ_RESPONSE_ONLY): |
1095 | /* ACKs READ req. */ | ||
1096 | if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { | 1181 | if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { |
1097 | dev->n_rdma_seq++; | 1182 | dev->n_rdma_seq++; |
1098 | if (qp->s_last != qp->s_tail) | 1183 | ipath_restart_rc(qp, qp->s_last_psn + 1, &wc); |
1099 | ipath_restart_rc(qp, qp->s_last_psn + 1, &wc); | ||
1100 | goto ack_done; | 1184 | goto ack_done; |
1101 | } | 1185 | } |
1102 | /* FALLTHROUGH */ | 1186 | if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) |
1103 | case OP(RDMA_READ_RESPONSE_ONLY): | 1187 | goto ack_op_err; |
1104 | if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST))) | 1188 | /* Get the number of bytes the message was padded by. */ |
1105 | goto ack_done; | 1189 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; |
1190 | /* | ||
1191 | * Check that the data size is >= 0 && <= pmtu. | ||
1192 | * Remember to account for the AETH header (4) and | ||
1193 | * ICRC (4). | ||
1194 | */ | ||
1195 | if (unlikely(tlen < (hdrsize + pad + 8))) | ||
1196 | goto ack_len_err; | ||
1106 | /* | 1197 | /* |
1107 | * Get the number of bytes the message was padded by. | 1198 | * If this is a response to a resent RDMA read, we |
1199 | * have to be careful to copy the data to the right | ||
1200 | * location. | ||
1108 | */ | 1201 | */ |
1202 | qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, | ||
1203 | wqe, psn, pmtu); | ||
1204 | goto read_last; | ||
1205 | |||
1206 | case OP(RDMA_READ_RESPONSE_LAST): | ||
1207 | /* ACKs READ req. */ | ||
1208 | if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { | ||
1209 | dev->n_rdma_seq++; | ||
1210 | ipath_restart_rc(qp, qp->s_last_psn + 1, &wc); | ||
1211 | goto ack_done; | ||
1212 | } | ||
1213 | if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) | ||
1214 | goto ack_op_err; | ||
1215 | /* Get the number of bytes the message was padded by. */ | ||
1109 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | 1216 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; |
1110 | /* | 1217 | /* |
1111 | * Check that the data size is >= 1 && <= pmtu. | 1218 | * Check that the data size is >= 1 && <= pmtu. |
1112 | * Remember to account for the AETH header (4) and | 1219 | * Remember to account for the AETH header (4) and |
1113 | * ICRC (4). | 1220 | * ICRC (4). |
1114 | */ | 1221 | */ |
1115 | if (unlikely(tlen <= (hdrsize + pad + 8))) { | 1222 | if (unlikely(tlen <= (hdrsize + pad + 8))) |
1116 | /* XXX Need to generate an error CQ entry. */ | 1223 | goto ack_len_err; |
1117 | goto ack_done; | 1224 | read_last: |
1118 | } | ||
1119 | tlen -= hdrsize + pad + 8; | 1225 | tlen -= hdrsize + pad + 8; |
1120 | if (unlikely(tlen != qp->s_len)) { | 1226 | if (unlikely(tlen != qp->s_rdma_read_len)) |
1121 | /* XXX Need to generate an error CQ entry. */ | 1227 | goto ack_len_err; |
1122 | goto ack_done; | ||
1123 | } | ||
1124 | if (!header_in_data) | 1228 | if (!header_in_data) |
1125 | aeth = be32_to_cpu(ohdr->u.aeth); | 1229 | aeth = be32_to_cpu(ohdr->u.aeth); |
1126 | else { | 1230 | else { |
1127 | aeth = be32_to_cpu(((__be32 *) data)[0]); | 1231 | aeth = be32_to_cpu(((__be32 *) data)[0]); |
1128 | data += sizeof(__be32); | 1232 | data += sizeof(__be32); |
1129 | } | 1233 | } |
1130 | ipath_copy_sge(&qp->s_sge, data, tlen); | 1234 | ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen); |
1131 | if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) { | 1235 | (void) do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST)); |
1132 | /* | ||
1133 | * Change the state so we contimue | ||
1134 | * processing new requests and wake up the | ||
1135 | * tasklet if there are posted sends. | ||
1136 | */ | ||
1137 | qp->s_state = OP(SEND_LAST); | ||
1138 | if (qp->s_tail != qp->s_head) | ||
1139 | tasklet_hi_schedule(&qp->s_task); | ||
1140 | } | ||
1141 | goto ack_done; | 1236 | goto ack_done; |
1142 | } | 1237 | } |
1143 | 1238 | ||
1144 | ack_done: | 1239 | ack_done: |
1145 | spin_unlock_irqrestore(&qp->s_lock, flags); | 1240 | spin_unlock_irqrestore(&qp->s_lock, flags); |
1241 | goto bail; | ||
1242 | |||
1243 | ack_op_err: | ||
1244 | wc.status = IB_WC_LOC_QP_OP_ERR; | ||
1245 | goto ack_err; | ||
1246 | |||
1247 | ack_len_err: | ||
1248 | wc.status = IB_WC_LOC_LEN_ERR; | ||
1249 | ack_err: | ||
1250 | wc.wr_id = wqe->wr.wr_id; | ||
1251 | wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; | ||
1252 | wc.vendor_err = 0; | ||
1253 | wc.byte_len = 0; | ||
1254 | wc.imm_data = 0; | ||
1255 | wc.qp = &qp->ibqp; | ||
1256 | wc.src_qp = qp->remote_qpn; | ||
1257 | wc.wc_flags = 0; | ||
1258 | wc.pkey_index = 0; | ||
1259 | wc.slid = qp->remote_ah_attr.dlid; | ||
1260 | wc.sl = qp->remote_ah_attr.sl; | ||
1261 | wc.dlid_path_bits = 0; | ||
1262 | wc.port_num = 0; | ||
1263 | ipath_sqerror_qp(qp, &wc); | ||
1146 | bail: | 1264 | bail: |
1147 | return; | 1265 | return; |
1148 | } | 1266 | } |
@@ -1162,7 +1280,7 @@ bail: | |||
1162 | * incoming RC packet for the given QP. | 1280 | * incoming RC packet for the given QP. |
1163 | * Called at interrupt level. | 1281 | * Called at interrupt level. |
1164 | * Return 1 if no more processing is needed; otherwise return 0 to | 1282 | * Return 1 if no more processing is needed; otherwise return 0 to |
1165 | * schedule a response to be sent and the s_lock unlocked. | 1283 | * schedule a response to be sent. |
1166 | */ | 1284 | */ |
1167 | static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, | 1285 | static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, |
1168 | struct ipath_other_headers *ohdr, | 1286 | struct ipath_other_headers *ohdr, |
@@ -1173,25 +1291,23 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, | |||
1173 | int diff, | 1291 | int diff, |
1174 | int header_in_data) | 1292 | int header_in_data) |
1175 | { | 1293 | { |
1176 | struct ib_reth *reth; | 1294 | struct ipath_ack_entry *e; |
1295 | u8 i, prev; | ||
1296 | int old_req; | ||
1177 | 1297 | ||
1178 | if (diff > 0) { | 1298 | if (diff > 0) { |
1179 | /* | 1299 | /* |
1180 | * Packet sequence error. | 1300 | * Packet sequence error. |
1181 | * A NAK will ACK earlier sends and RDMA writes. | 1301 | * A NAK will ACK earlier sends and RDMA writes. |
1182 | * Don't queue the NAK if a RDMA read, atomic, or | 1302 | * Don't queue the NAK if we already sent one. |
1183 | * NAK is pending though. | ||
1184 | */ | 1303 | */ |
1185 | if (qp->s_ack_state != OP(ACKNOWLEDGE) || | 1304 | if (!qp->r_nak_state) { |
1186 | qp->r_nak_state != 0) | ||
1187 | goto done; | ||
1188 | if (qp->r_ack_state < OP(COMPARE_SWAP)) { | ||
1189 | qp->r_ack_state = OP(SEND_ONLY); | ||
1190 | qp->r_nak_state = IB_NAK_PSN_ERROR; | 1305 | qp->r_nak_state = IB_NAK_PSN_ERROR; |
1191 | /* Use the expected PSN. */ | 1306 | /* Use the expected PSN. */ |
1192 | qp->r_ack_psn = qp->r_psn; | 1307 | qp->r_ack_psn = qp->r_psn; |
1308 | goto send_ack; | ||
1193 | } | 1309 | } |
1194 | goto send_ack; | 1310 | goto done; |
1195 | } | 1311 | } |
1196 | 1312 | ||
1197 | /* | 1313 | /* |
@@ -1204,8 +1320,46 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, | |||
1204 | * can coalesce an outstanding duplicate ACK. We have to | 1320 | * can coalesce an outstanding duplicate ACK. We have to |
1205 | * send the earliest so that RDMA reads can be restarted at | 1321 | * send the earliest so that RDMA reads can be restarted at |
1206 | * the requester's expected PSN. | 1322 | * the requester's expected PSN. |
1323 | * | ||
1324 | * First, find where this duplicate PSN falls within the | ||
1325 | * ACKs previously sent. | ||
1207 | */ | 1326 | */ |
1208 | if (opcode == OP(RDMA_READ_REQUEST)) { | 1327 | psn &= IPATH_PSN_MASK; |
1328 | e = NULL; | ||
1329 | old_req = 1; | ||
1330 | spin_lock_irq(&qp->s_lock); | ||
1331 | for (i = qp->r_head_ack_queue; ; i = prev) { | ||
1332 | if (i == qp->s_tail_ack_queue) | ||
1333 | old_req = 0; | ||
1334 | if (i) | ||
1335 | prev = i - 1; | ||
1336 | else | ||
1337 | prev = IPATH_MAX_RDMA_ATOMIC; | ||
1338 | if (prev == qp->r_head_ack_queue) { | ||
1339 | e = NULL; | ||
1340 | break; | ||
1341 | } | ||
1342 | e = &qp->s_ack_queue[prev]; | ||
1343 | if (!e->opcode) { | ||
1344 | e = NULL; | ||
1345 | break; | ||
1346 | } | ||
1347 | if (ipath_cmp24(psn, e->psn) >= 0) | ||
1348 | break; | ||
1349 | } | ||
1350 | switch (opcode) { | ||
1351 | case OP(RDMA_READ_REQUEST): { | ||
1352 | struct ib_reth *reth; | ||
1353 | u32 offset; | ||
1354 | u32 len; | ||
1355 | |||
1356 | /* | ||
1357 | * If we didn't find the RDMA read request in the ack queue, | ||
1358 | * or the send tasklet is already backed up to send an | ||
1359 | * earlier entry, we can ignore this request. | ||
1360 | */ | ||
1361 | if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req) | ||
1362 | goto unlock_done; | ||
1209 | /* RETH comes after BTH */ | 1363 | /* RETH comes after BTH */ |
1210 | if (!header_in_data) | 1364 | if (!header_in_data) |
1211 | reth = &ohdr->u.rc.reth; | 1365 | reth = &ohdr->u.rc.reth; |
@@ -1214,88 +1368,87 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, | |||
1214 | data += sizeof(*reth); | 1368 | data += sizeof(*reth); |
1215 | } | 1369 | } |
1216 | /* | 1370 | /* |
1217 | * If we receive a duplicate RDMA request, it means the | 1371 | * Address range must be a subset of the original |
1218 | * requester saw a sequence error and needs to restart | 1372 | * request and start on pmtu boundaries. |
1219 | * from an earlier point. We can abort the current | 1373 | * We reuse the old ack_queue slot since the requester |
1220 | * RDMA read send in that case. | 1374 | * should not back up and request an earlier PSN for the |
1375 | * same request. | ||
1221 | */ | 1376 | */ |
1222 | spin_lock_irq(&qp->s_lock); | 1377 | offset = ((psn - e->psn) & IPATH_PSN_MASK) * |
1223 | if (qp->s_ack_state != OP(ACKNOWLEDGE) && | 1378 | ib_mtu_enum_to_int(qp->path_mtu); |
1224 | (qp->s_hdrwords || ipath_cmp24(psn, qp->s_ack_psn) >= 0)) { | 1379 | len = be32_to_cpu(reth->length); |
1225 | /* | 1380 | if (unlikely(offset + len > e->rdma_sge.sge.sge_length)) |
1226 | * We are already sending earlier requested data. | 1381 | goto unlock_done; |
1227 | * Don't abort it to send later out of sequence data. | 1382 | if (len != 0) { |
1228 | */ | ||
1229 | spin_unlock_irq(&qp->s_lock); | ||
1230 | goto done; | ||
1231 | } | ||
1232 | qp->s_rdma_len = be32_to_cpu(reth->length); | ||
1233 | if (qp->s_rdma_len != 0) { | ||
1234 | u32 rkey = be32_to_cpu(reth->rkey); | 1383 | u32 rkey = be32_to_cpu(reth->rkey); |
1235 | u64 vaddr = be64_to_cpu(reth->vaddr); | 1384 | u64 vaddr = be64_to_cpu(reth->vaddr); |
1236 | int ok; | 1385 | int ok; |
1237 | 1386 | ||
1238 | /* | 1387 | ok = ipath_rkey_ok(qp, &e->rdma_sge, |
1239 | * Address range must be a subset of the original | 1388 | len, vaddr, rkey, |
1240 | * request and start on pmtu boundaries. | ||
1241 | */ | ||
1242 | ok = ipath_rkey_ok(qp, &qp->s_rdma_sge, | ||
1243 | qp->s_rdma_len, vaddr, rkey, | ||
1244 | IB_ACCESS_REMOTE_READ); | 1389 | IB_ACCESS_REMOTE_READ); |
1245 | if (unlikely(!ok)) { | 1390 | if (unlikely(!ok)) |
1246 | spin_unlock_irq(&qp->s_lock); | 1391 | goto unlock_done; |
1247 | goto done; | ||
1248 | } | ||
1249 | } else { | 1392 | } else { |
1250 | qp->s_rdma_sge.sg_list = NULL; | 1393 | e->rdma_sge.sg_list = NULL; |
1251 | qp->s_rdma_sge.num_sge = 0; | 1394 | e->rdma_sge.num_sge = 0; |
1252 | qp->s_rdma_sge.sge.mr = NULL; | 1395 | e->rdma_sge.sge.mr = NULL; |
1253 | qp->s_rdma_sge.sge.vaddr = NULL; | 1396 | e->rdma_sge.sge.vaddr = NULL; |
1254 | qp->s_rdma_sge.sge.length = 0; | 1397 | e->rdma_sge.sge.length = 0; |
1255 | qp->s_rdma_sge.sge.sge_length = 0; | 1398 | e->rdma_sge.sge.sge_length = 0; |
1256 | } | 1399 | } |
1257 | qp->s_ack_state = opcode; | 1400 | e->psn = psn; |
1258 | qp->s_ack_psn = psn; | 1401 | qp->s_ack_state = OP(ACKNOWLEDGE); |
1259 | spin_unlock_irq(&qp->s_lock); | 1402 | qp->s_tail_ack_queue = prev; |
1260 | tasklet_hi_schedule(&qp->s_task); | 1403 | break; |
1261 | goto send_ack; | ||
1262 | } | 1404 | } |
1263 | 1405 | ||
1264 | /* | ||
1265 | * A pending RDMA read will ACK anything before it so | ||
1266 | * ignore earlier duplicate requests. | ||
1267 | */ | ||
1268 | if (qp->s_ack_state != OP(ACKNOWLEDGE)) | ||
1269 | goto done; | ||
1270 | |||
1271 | /* | ||
1272 | * If an ACK is pending, don't replace the pending ACK | ||
1273 | * with an earlier one since the later one will ACK the earlier. | ||
1274 | * Also, if we already have a pending atomic, send it. | ||
1275 | */ | ||
1276 | if (qp->r_ack_state != OP(ACKNOWLEDGE) && | ||
1277 | (ipath_cmp24(psn, qp->r_ack_psn) <= 0 || | ||
1278 | qp->r_ack_state >= OP(COMPARE_SWAP))) | ||
1279 | goto send_ack; | ||
1280 | switch (opcode) { | ||
1281 | case OP(COMPARE_SWAP): | 1406 | case OP(COMPARE_SWAP): |
1282 | case OP(FETCH_ADD): | 1407 | case OP(FETCH_ADD): { |
1283 | /* | 1408 | /* |
1284 | * Check for the PSN of the last atomic operation | 1409 | * If we didn't find the atomic request in the ack queue |
1285 | * performed and resend the result if found. | 1410 | * or the send tasklet is already backed up to send an |
1411 | * earlier entry, we can ignore this request. | ||
1286 | */ | 1412 | */ |
1287 | if ((psn & IPATH_PSN_MASK) != qp->r_atomic_psn) | 1413 | if (!e || e->opcode != (u8) opcode || old_req) |
1288 | goto done; | 1414 | goto unlock_done; |
1415 | qp->s_ack_state = OP(ACKNOWLEDGE); | ||
1416 | qp->s_tail_ack_queue = prev; | ||
1417 | break; | ||
1418 | } | ||
1419 | |||
1420 | default: | ||
1421 | if (old_req) | ||
1422 | goto unlock_done; | ||
1423 | /* | ||
1424 | * Resend the most recent ACK if this request is | ||
1425 | * after all the previous RDMA reads and atomics. | ||
1426 | */ | ||
1427 | if (i == qp->r_head_ack_queue) { | ||
1428 | spin_unlock_irq(&qp->s_lock); | ||
1429 | qp->r_nak_state = 0; | ||
1430 | qp->r_ack_psn = qp->r_psn - 1; | ||
1431 | goto send_ack; | ||
1432 | } | ||
1433 | /* | ||
1434 | * Resend the RDMA read or atomic op which | ||
1435 | * ACKs this duplicate request. | ||
1436 | */ | ||
1437 | qp->s_ack_state = OP(ACKNOWLEDGE); | ||
1438 | qp->s_tail_ack_queue = i; | ||
1289 | break; | 1439 | break; |
1290 | } | 1440 | } |
1291 | qp->r_ack_state = opcode; | ||
1292 | qp->r_nak_state = 0; | 1441 | qp->r_nak_state = 0; |
1293 | qp->r_ack_psn = psn; | 1442 | spin_unlock_irq(&qp->s_lock); |
1294 | send_ack: | 1443 | tasklet_hi_schedule(&qp->s_task); |
1295 | return 0; | ||
1296 | 1444 | ||
1445 | unlock_done: | ||
1446 | spin_unlock_irq(&qp->s_lock); | ||
1297 | done: | 1447 | done: |
1298 | return 1; | 1448 | return 1; |
1449 | |||
1450 | send_ack: | ||
1451 | return 0; | ||
1299 | } | 1452 | } |
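[Editor's note] The loop added above walks the ack queue backwards from the newest entry (head) toward the one currently being sent (tail), looking for the queued response that covers the duplicate PSN; old_req records whether the tasklet has already moved past that point. A model under the same assumed ring layout as before (cmp24() and MAX_RDMA_ATOMIC as in the earlier sketches):

    #include <stdint.h>

    struct ack_entry {
            uint8_t  opcode;        /* 0 = unused slot */
            uint32_t psn;           /* starting PSN of the response */
    };

    static struct ack_entry *find_dup(struct ack_entry *q, unsigned head,
                                      unsigned tail, uint32_t psn,
                                      int *old_req)
    {
            unsigned i, prev;

            *old_req = 1;
            for (i = head; ; i = prev) {
                    if (i == tail)
                            *old_req = 0;
                    prev = i ? i - 1 : MAX_RDMA_ATOMIC;
                    if (prev == head || !q[prev].opcode)
                            return NULL;    /* not found in the queue */
                    if (cmp24(psn, q[prev].psn) >= 0)
                            return &q[prev];
            }
    }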
1300 | 1453 | ||
1301 | static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) | 1454 | static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) |
@@ -1391,15 +1544,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1391 | opcode == OP(SEND_LAST_WITH_IMMEDIATE)) | 1544 | opcode == OP(SEND_LAST_WITH_IMMEDIATE)) |
1392 | break; | 1545 | break; |
1393 | nack_inv: | 1546 | nack_inv: |
1394 | /* | ||
1395 | * A NAK will ACK earlier sends and RDMA writes. | ||
1396 | * Don't queue the NAK if a RDMA read, atomic, or NAK | ||
1397 | * is pending though. | ||
1398 | */ | ||
1399 | if (qp->r_ack_state >= OP(COMPARE_SWAP)) | ||
1400 | goto send_ack; | ||
1401 | ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR); | 1547 | ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR); |
1402 | qp->r_ack_state = OP(SEND_ONLY); | ||
1403 | qp->r_nak_state = IB_NAK_INVALID_REQUEST; | 1548 | qp->r_nak_state = IB_NAK_INVALID_REQUEST; |
1404 | qp->r_ack_psn = qp->r_psn; | 1549 | qp->r_ack_psn = qp->r_psn; |
1405 | goto send_ack; | 1550 | goto send_ack; |
@@ -1441,9 +1586,8 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1441 | * Don't queue the NAK if a RDMA read or atomic | 1586 | * Don't queue the NAK if a RDMA read or atomic |
1442 | * is pending though. | 1587 | * is pending though. |
1443 | */ | 1588 | */ |
1444 | if (qp->r_ack_state >= OP(COMPARE_SWAP)) | 1589 | if (qp->r_nak_state) |
1445 | goto send_ack; | 1590 | goto done; |
1446 | qp->r_ack_state = OP(SEND_ONLY); | ||
1447 | qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; | 1591 | qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; |
1448 | qp->r_ack_psn = qp->r_psn; | 1592 | qp->r_ack_psn = qp->r_psn; |
1449 | goto send_ack; | 1593 | goto send_ack; |
@@ -1567,7 +1711,19 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1567 | goto rnr_nak; | 1711 | goto rnr_nak; |
1568 | goto send_last_imm; | 1712 | goto send_last_imm; |
1569 | 1713 | ||
1570 | case OP(RDMA_READ_REQUEST): | 1714 | case OP(RDMA_READ_REQUEST): { |
1715 | struct ipath_ack_entry *e; | ||
1716 | u32 len; | ||
1717 | u8 next; | ||
1718 | |||
1719 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) | ||
1720 | goto nack_acc; | ||
1721 | next = qp->r_head_ack_queue + 1; | ||
1722 | if (next > IPATH_MAX_RDMA_ATOMIC) | ||
1723 | next = 0; | ||
1724 | if (unlikely(next == qp->s_tail_ack_queue)) | ||
1725 | goto nack_inv; | ||
1726 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; | ||
1571 | /* RETH comes after BTH */ | 1727 | /* RETH comes after BTH */ |
1572 | if (!header_in_data) | 1728 | if (!header_in_data) |
1573 | reth = &ohdr->u.rc.reth; | 1729 | reth = &ohdr->u.rc.reth; |
@@ -1575,72 +1731,75 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1575 | reth = (struct ib_reth *)data; | 1731 | reth = (struct ib_reth *)data; |
1576 | data += sizeof(*reth); | 1732 | data += sizeof(*reth); |
1577 | } | 1733 | } |
1578 | if (unlikely(!(qp->qp_access_flags & | 1734 | len = be32_to_cpu(reth->length); |
1579 | IB_ACCESS_REMOTE_READ))) | 1735 | if (len) { |
1580 | goto nack_acc; | ||
1581 | spin_lock_irq(&qp->s_lock); | ||
1582 | qp->s_rdma_len = be32_to_cpu(reth->length); | ||
1583 | if (qp->s_rdma_len != 0) { | ||
1584 | u32 rkey = be32_to_cpu(reth->rkey); | 1736 | u32 rkey = be32_to_cpu(reth->rkey); |
1585 | u64 vaddr = be64_to_cpu(reth->vaddr); | 1737 | u64 vaddr = be64_to_cpu(reth->vaddr); |
1586 | int ok; | 1738 | int ok; |
1587 | 1739 | ||
1588 | /* Check rkey & NAK */ | 1740 | /* Check rkey & NAK */ |
1589 | ok = ipath_rkey_ok(qp, &qp->s_rdma_sge, | 1741 | ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr, |
1590 | qp->s_rdma_len, vaddr, rkey, | 1742 | rkey, IB_ACCESS_REMOTE_READ); |
1591 | IB_ACCESS_REMOTE_READ); | 1743 | if (unlikely(!ok)) |
1592 | if (unlikely(!ok)) { | ||
1593 | spin_unlock_irq(&qp->s_lock); | ||
1594 | goto nack_acc; | 1744 | goto nack_acc; |
1595 | } | ||
1596 | /* | 1745 | /* |
1597 | * Update the next expected PSN. We add 1 later | 1746 | * Update the next expected PSN. We add 1 later |
1598 | * below, so only add the remainder here. | 1747 | * below, so only add the remainder here. |
1599 | */ | 1748 | */ |
1600 | if (qp->s_rdma_len > pmtu) | 1749 | if (len > pmtu) |
1601 | qp->r_psn += (qp->s_rdma_len - 1) / pmtu; | 1750 | qp->r_psn += (len - 1) / pmtu; |
1602 | } else { | 1751 | } else { |
1603 | qp->s_rdma_sge.sg_list = NULL; | 1752 | e->rdma_sge.sg_list = NULL; |
1604 | qp->s_rdma_sge.num_sge = 0; | 1753 | e->rdma_sge.num_sge = 0; |
1605 | qp->s_rdma_sge.sge.mr = NULL; | 1754 | e->rdma_sge.sge.mr = NULL; |
1606 | qp->s_rdma_sge.sge.vaddr = NULL; | 1755 | e->rdma_sge.sge.vaddr = NULL; |
1607 | qp->s_rdma_sge.sge.length = 0; | 1756 | e->rdma_sge.sge.length = 0; |
1608 | qp->s_rdma_sge.sge.sge_length = 0; | 1757 | e->rdma_sge.sge.sge_length = 0; |
1609 | } | 1758 | } |
1759 | e->opcode = opcode; | ||
1760 | e->psn = psn; | ||
1610 | /* | 1761 | /* |
1611 | * We need to increment the MSN here instead of when we | 1762 | * We need to increment the MSN here instead of when we |
1612 | * finish sending the result since a duplicate request would | 1763 | * finish sending the result since a duplicate request would |
1613 | * increment it more than once. | 1764 | * increment it more than once. |
1614 | */ | 1765 | */ |
1615 | qp->r_msn++; | 1766 | qp->r_msn++; |
1616 | |||
1617 | qp->s_ack_state = opcode; | ||
1618 | qp->s_ack_psn = psn; | ||
1619 | spin_unlock_irq(&qp->s_lock); | ||
1620 | |||
1621 | qp->r_psn++; | 1767 | qp->r_psn++; |
1622 | qp->r_state = opcode; | 1768 | qp->r_state = opcode; |
1623 | qp->r_nak_state = 0; | 1769 | qp->r_nak_state = 0; |
1770 | barrier(); | ||
1771 | qp->r_head_ack_queue = next; | ||
1624 | 1772 | ||
1625 | /* Call ipath_do_rc_send() in another thread. */ | 1773 | /* Call ipath_do_rc_send() in another thread. */ |
1626 | tasklet_hi_schedule(&qp->s_task); | 1774 | tasklet_hi_schedule(&qp->s_task); |
1627 | 1775 | ||
1628 | goto done; | 1776 | goto done; |
1777 | } | ||
1629 | 1778 | ||
1630 | case OP(COMPARE_SWAP): | 1779 | case OP(COMPARE_SWAP): |
1631 | case OP(FETCH_ADD): { | 1780 | case OP(FETCH_ADD): { |
1632 | struct ib_atomic_eth *ateth; | 1781 | struct ib_atomic_eth *ateth; |
1782 | struct ipath_ack_entry *e; | ||
1633 | u64 vaddr; | 1783 | u64 vaddr; |
1784 | atomic64_t *maddr; | ||
1634 | u64 sdata; | 1785 | u64 sdata; |
1635 | u32 rkey; | 1786 | u32 rkey; |
1787 | u8 next; | ||
1636 | 1788 | ||
1789 | if (unlikely(!(qp->qp_access_flags & | ||
1790 | IB_ACCESS_REMOTE_ATOMIC))) | ||
1791 | goto nack_acc; | ||
1792 | next = qp->r_head_ack_queue + 1; | ||
1793 | if (next > IPATH_MAX_RDMA_ATOMIC) | ||
1794 | next = 0; | ||
1795 | if (unlikely(next == qp->s_tail_ack_queue)) | ||
1796 | goto nack_inv; | ||
1637 | if (!header_in_data) | 1797 | if (!header_in_data) |
1638 | ateth = &ohdr->u.atomic_eth; | 1798 | ateth = &ohdr->u.atomic_eth; |
1639 | else { | 1799 | else |
1640 | ateth = (struct ib_atomic_eth *)data; | 1800 | ateth = (struct ib_atomic_eth *)data; |
1641 | data += sizeof(*ateth); | 1801 | vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) | |
1642 | } | 1802 | be32_to_cpu(ateth->vaddr[1]); |
1643 | vaddr = be64_to_cpu(ateth->vaddr); | ||
1644 | if (unlikely(vaddr & (sizeof(u64) - 1))) | 1803 | if (unlikely(vaddr & (sizeof(u64) - 1))) |
1645 | goto nack_inv; | 1804 | goto nack_inv; |
1646 | rkey = be32_to_cpu(ateth->rkey); | 1805 | rkey = be32_to_cpu(ateth->rkey); |
@@ -1649,63 +1808,50 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1649 | sizeof(u64), vaddr, rkey, | 1808 | sizeof(u64), vaddr, rkey, |
1650 | IB_ACCESS_REMOTE_ATOMIC))) | 1809 | IB_ACCESS_REMOTE_ATOMIC))) |
1651 | goto nack_acc; | 1810 | goto nack_acc; |
1652 | if (unlikely(!(qp->qp_access_flags & | ||
1653 | IB_ACCESS_REMOTE_ATOMIC))) | ||
1654 | goto nack_acc; | ||
1655 | /* Perform atomic OP and save result. */ | 1811 | /* Perform atomic OP and save result. */ |
1812 | maddr = (atomic64_t *) qp->r_sge.sge.vaddr; | ||
1656 | sdata = be64_to_cpu(ateth->swap_data); | 1813 | sdata = be64_to_cpu(ateth->swap_data); |
1657 | spin_lock_irq(&dev->pending_lock); | 1814 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; |
1658 | qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr; | 1815 | e->atomic_data = (opcode == OP(FETCH_ADD)) ? |
1659 | if (opcode == OP(FETCH_ADD)) | 1816 | (u64) atomic64_add_return(sdata, maddr) - sdata : |
1660 | *(u64 *) qp->r_sge.sge.vaddr = | 1817 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, |
1661 | qp->r_atomic_data + sdata; | 1818 | be64_to_cpu(ateth->compare_data), |
1662 | else if (qp->r_atomic_data == | 1819 | sdata); |
1663 | be64_to_cpu(ateth->compare_data)) | 1820 | e->opcode = opcode; |
1664 | *(u64 *) qp->r_sge.sge.vaddr = sdata; | 1821 | e->psn = psn & IPATH_PSN_MASK; |
1665 | spin_unlock_irq(&dev->pending_lock); | ||
1666 | qp->r_msn++; | 1822 | qp->r_msn++; |
1667 | qp->r_atomic_psn = psn & IPATH_PSN_MASK; | 1823 | qp->r_psn++; |
1668 | psn |= 1 << 31; | 1824 | qp->r_state = opcode; |
1669 | break; | 1825 | qp->r_nak_state = 0; |
1826 | barrier(); | ||
1827 | qp->r_head_ack_queue = next; | ||
1828 | |||
1829 | /* Call ipath_do_rc_send() in another thread. */ | ||
1830 | tasklet_hi_schedule(&qp->s_task); | ||
1831 | |||
1832 | goto done; | ||
1670 | } | 1833 | } |
1671 | 1834 | ||
1672 | default: | 1835 | default: |
1673 | /* Drop packet for unknown opcodes. */ | 1836 | /* NAK unknown opcodes. */ |
1674 | goto done; | 1837 | goto nack_inv; |
1675 | } | 1838 | } |
1676 | qp->r_psn++; | 1839 | qp->r_psn++; |
1677 | qp->r_state = opcode; | 1840 | qp->r_state = opcode; |
1841 | qp->r_ack_psn = psn; | ||
1678 | qp->r_nak_state = 0; | 1842 | qp->r_nak_state = 0; |
1679 | /* Send an ACK if requested or required. */ | 1843 | /* Send an ACK if requested or required. */ |
1680 | if (psn & (1 << 31)) { | 1844 | if (psn & (1 << 31)) |
1681 | /* | ||
1682 | * Coalesce ACKs unless there is a RDMA READ or | ||
1683 | * ATOMIC pending. | ||
1684 | */ | ||
1685 | if (qp->r_ack_state < OP(COMPARE_SWAP)) { | ||
1686 | qp->r_ack_state = opcode; | ||
1687 | qp->r_ack_psn = psn; | ||
1688 | } | ||
1689 | goto send_ack; | 1845 | goto send_ack; |
1690 | } | ||
1691 | goto done; | 1846 | goto done; |
1692 | 1847 | ||
1693 | nack_acc: | 1848 | nack_acc: |
1694 | /* | 1849 | ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR); |
1695 | * A NAK will ACK earlier sends and RDMA writes. | 1850 | qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; |
1696 | * Don't queue the NAK if a RDMA read, atomic, or NAK | 1851 | qp->r_ack_psn = qp->r_psn; |
1697 | * is pending though. | 1852 | |
1698 | */ | ||
1699 | if (qp->r_ack_state < OP(COMPARE_SWAP)) { | ||
1700 | ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR); | ||
1701 | qp->r_ack_state = OP(RDMA_WRITE_ONLY); | ||
1702 | qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; | ||
1703 | qp->r_ack_psn = qp->r_psn; | ||
1704 | } | ||
1705 | send_ack: | 1853 | send_ack: |
1706 | /* Send ACK right away unless the send tasklet has a pending ACK. */ | 1854 | send_rc_ack(qp); |
1707 | if (qp->s_ack_state == OP(ACKNOWLEDGE)) | ||
1708 | send_rc_ack(qp); | ||
1709 | 1855 | ||
1710 | done: | 1856 | done: |
1711 | return; | 1857 | return; |
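[Editor's note] Both receive paths above fill an s_ack_queue entry, issue barrier(), and only then advance r_head_ack_queue, so the send tasklet never observes the new head before the entry's contents. A C11 analogue of that publish step (struct layout and names assumed; a release store is the portable, somewhat stronger counterpart of the compiler barrier the driver uses under its lock ordering):

    #include <stdatomic.h>
    #include <stdint.h>

    struct ack_entry {
            uint8_t  opcode;
            uint32_t psn;
            uint64_t atomic_data;
    };

    static void publish_entry(struct ack_entry *slot, uint8_t opcode,
                              uint32_t psn, uint64_t data,
                              _Atomic unsigned *head, unsigned next)
    {
            slot->opcode = opcode;          /* fill the slot first */
            slot->psn = psn;
            slot->atomic_data = data;
            /* release store: contents visible before the new head index */
            atomic_store_explicit(head, next, memory_order_release);
    }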
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h index dffc76016d3c..c182bcd62098 100644 --- a/drivers/infiniband/hw/ipath/ipath_registers.h +++ b/drivers/infiniband/hw/ipath/ipath_registers.h | |||
@@ -126,9 +126,18 @@ | |||
126 | #define INFINIPATH_E_RESET 0x0004000000000000ULL | 126 | #define INFINIPATH_E_RESET 0x0004000000000000ULL |
127 | #define INFINIPATH_E_HARDWARE 0x0008000000000000ULL | 127 | #define INFINIPATH_E_HARDWARE 0x0008000000000000ULL |
128 | 128 | ||
129 | /* | ||
130 | * this is used to print "common" packet errors only when the | ||
131 | * __IPATH_ERRPKTDBG bit is set in ipath_debug. | ||
132 | */ | ||
133 | #define INFINIPATH_E_PKTERRS ( INFINIPATH_E_SPKTLEN \ | ||
134 | | INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_RVCRC \ | ||
135 | | INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \ | ||
136 | | INFINIPATH_E_REBP ) | ||
137 | |||
129 | /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ | 138 | /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ |
130 | /* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo | 139 | /* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo |
131 | * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: eagerTID, 3: expTID | 140 | * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: expTID, 3: eagerTID |
132 | * bit 4: flag buffer, 5: datainfo, 6: header info */ | 141 | * bit 4: flag buffer, 5: datainfo, 6: header info */ |
133 | #define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL | 142 | #define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL |
134 | #define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40 | 143 | #define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40 |
@@ -143,8 +152,8 @@ | |||
143 | /* rxe mem parity errors (shift by INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) */ | 152 | /* rxe mem parity errors (shift by INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) */ |
144 | #define INFINIPATH_HWE_RXEMEMPARITYERR_RCVBUF 0x01ULL | 153 | #define INFINIPATH_HWE_RXEMEMPARITYERR_RCVBUF 0x01ULL |
145 | #define INFINIPATH_HWE_RXEMEMPARITYERR_LOOKUPQ 0x02ULL | 154 | #define INFINIPATH_HWE_RXEMEMPARITYERR_LOOKUPQ 0x02ULL |
146 | #define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x04ULL | 155 | #define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID 0x04ULL |
147 | #define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID 0x08ULL | 156 | #define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x08ULL |
148 | #define INFINIPATH_HWE_RXEMEMPARITYERR_FLAGBUF 0x10ULL | 157 | #define INFINIPATH_HWE_RXEMEMPARITYERR_FLAGBUF 0x10ULL |
149 | #define INFINIPATH_HWE_RXEMEMPARITYERR_DATAINFO 0x20ULL | 158 | #define INFINIPATH_HWE_RXEMEMPARITYERR_DATAINFO 0x20ULL |
150 | #define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL | 159 | #define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL |
@@ -299,13 +308,6 @@ | |||
299 | #define INFINIPATH_XGXS_RX_POL_SHIFT 19 | 308 | #define INFINIPATH_XGXS_RX_POL_SHIFT 19 |
300 | #define INFINIPATH_XGXS_RX_POL_MASK 0xfULL | 309 | #define INFINIPATH_XGXS_RX_POL_MASK 0xfULL |
301 | 310 | ||
302 | #define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */ | ||
303 | |||
304 | /* TID entries (memory), HT-only */ | ||
305 | #define INFINIPATH_RT_VALID 0x8000000000000000ULL | ||
306 | #define INFINIPATH_RT_ADDR_SHIFT 0 | ||
307 | #define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF | ||
308 | #define INFINIPATH_RT_BUFSIZE_SHIFT 48 | ||
309 | 311 | ||
310 | /* | 312 | /* |
311 | * IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our | 313 | * IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our |
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c index e86cb171872e..d9c2a9b15d86 100644 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c | |||
@@ -202,6 +202,7 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only) | |||
202 | wq->tail = tail; | 202 | wq->tail = tail; |
203 | 203 | ||
204 | ret = 1; | 204 | ret = 1; |
205 | qp->r_wrid_valid = 1; | ||
205 | if (handler) { | 206 | if (handler) { |
206 | u32 n; | 207 | u32 n; |
207 | 208 | ||
@@ -229,7 +230,6 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only) | |||
229 | } | 230 | } |
230 | } | 231 | } |
231 | spin_unlock_irqrestore(&rq->lock, flags); | 232 | spin_unlock_irqrestore(&rq->lock, flags); |
232 | qp->r_wrid_valid = 1; | ||
233 | 233 | ||
234 | bail: | 234 | bail: |
235 | return ret; | 235 | return ret; |
@@ -255,6 +255,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp) | |||
255 | unsigned long flags; | 255 | unsigned long flags; |
256 | struct ib_wc wc; | 256 | struct ib_wc wc; |
257 | u64 sdata; | 257 | u64 sdata; |
258 | atomic64_t *maddr; | ||
258 | 259 | ||
259 | qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn); | 260 | qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn); |
260 | if (!qp) { | 261 | if (!qp) { |
@@ -265,7 +266,8 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp) | |||
265 | again: | 266 | again: |
266 | spin_lock_irqsave(&sqp->s_lock, flags); | 267 | spin_lock_irqsave(&sqp->s_lock, flags); |
267 | 268 | ||
268 | if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) { | 269 | if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) || |
270 | qp->s_rnr_timeout) { | ||
269 | spin_unlock_irqrestore(&sqp->s_lock, flags); | 271 | spin_unlock_irqrestore(&sqp->s_lock, flags); |
270 | goto done; | 272 | goto done; |
271 | } | 273 | } |
@@ -310,7 +312,7 @@ again: | |||
310 | sqp->s_rnr_retry--; | 312 | sqp->s_rnr_retry--; |
311 | dev->n_rnr_naks++; | 313 | dev->n_rnr_naks++; |
312 | sqp->s_rnr_timeout = | 314 | sqp->s_rnr_timeout = |
313 | ib_ipath_rnr_table[sqp->r_min_rnr_timer]; | 315 | ib_ipath_rnr_table[qp->r_min_rnr_timer]; |
314 | ipath_insert_rnr_queue(sqp); | 316 | ipath_insert_rnr_queue(sqp); |
315 | goto done; | 317 | goto done; |
316 | } | 318 | } |
@@ -343,20 +345,22 @@ again: | |||
343 | wc.sl = sqp->remote_ah_attr.sl; | 345 | wc.sl = sqp->remote_ah_attr.sl; |
344 | wc.dlid_path_bits = 0; | 346 | wc.dlid_path_bits = 0; |
345 | wc.port_num = 0; | 347 | wc.port_num = 0; |
348 | spin_lock_irqsave(&sqp->s_lock, flags); | ||
346 | ipath_sqerror_qp(sqp, &wc); | 349 | ipath_sqerror_qp(sqp, &wc); |
350 | spin_unlock_irqrestore(&sqp->s_lock, flags); | ||
347 | goto done; | 351 | goto done; |
348 | } | 352 | } |
349 | break; | 353 | break; |
350 | 354 | ||
351 | case IB_WR_RDMA_READ: | 355 | case IB_WR_RDMA_READ: |
356 | if (unlikely(!(qp->qp_access_flags & | ||
357 | IB_ACCESS_REMOTE_READ))) | ||
358 | goto acc_err; | ||
352 | if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length, | 359 | if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length, |
353 | wqe->wr.wr.rdma.remote_addr, | 360 | wqe->wr.wr.rdma.remote_addr, |
354 | wqe->wr.wr.rdma.rkey, | 361 | wqe->wr.wr.rdma.rkey, |
355 | IB_ACCESS_REMOTE_READ))) | 362 | IB_ACCESS_REMOTE_READ))) |
356 | goto acc_err; | 363 | goto acc_err; |
357 | if (unlikely(!(qp->qp_access_flags & | ||
358 | IB_ACCESS_REMOTE_READ))) | ||
359 | goto acc_err; | ||
360 | qp->r_sge.sge = wqe->sg_list[0]; | 364 | qp->r_sge.sge = wqe->sg_list[0]; |
361 | qp->r_sge.sg_list = wqe->sg_list + 1; | 365 | qp->r_sge.sg_list = wqe->sg_list + 1; |
362 | qp->r_sge.num_sge = wqe->wr.num_sge; | 366 | qp->r_sge.num_sge = wqe->wr.num_sge; |
@@ -364,22 +368,22 @@ again: | |||
364 | 368 | ||
365 | case IB_WR_ATOMIC_CMP_AND_SWP: | 369 | case IB_WR_ATOMIC_CMP_AND_SWP: |
366 | case IB_WR_ATOMIC_FETCH_AND_ADD: | 370 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
371 | if (unlikely(!(qp->qp_access_flags & | ||
372 | IB_ACCESS_REMOTE_ATOMIC))) | ||
373 | goto acc_err; | ||
367 | if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64), | 374 | if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64), |
368 | wqe->wr.wr.rdma.remote_addr, | 375 | wqe->wr.wr.atomic.remote_addr, |
369 | wqe->wr.wr.rdma.rkey, | 376 | wqe->wr.wr.atomic.rkey, |
370 | IB_ACCESS_REMOTE_ATOMIC))) | 377 | IB_ACCESS_REMOTE_ATOMIC))) |
371 | goto acc_err; | 378 | goto acc_err; |
372 | /* Perform atomic OP and save result. */ | 379 | /* Perform atomic OP and save result. */ |
373 | sdata = wqe->wr.wr.atomic.swap; | 380 | maddr = (atomic64_t *) qp->r_sge.sge.vaddr; |
374 | spin_lock_irqsave(&dev->pending_lock, flags); | 381 | sdata = wqe->wr.wr.atomic.compare_add; |
375 | qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr; | 382 | *(u64 *) sqp->s_sge.sge.vaddr = |
376 | if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) | 383 | (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? |
377 | *(u64 *) qp->r_sge.sge.vaddr = | 384 | (u64) atomic64_add_return(sdata, maddr) - sdata : |
378 | qp->r_atomic_data + sdata; | 385 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, |
379 | else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add) | 386 | sdata, wqe->wr.wr.atomic.swap); |
380 | *(u64 *) qp->r_sge.sge.vaddr = sdata; | ||
381 | spin_unlock_irqrestore(&dev->pending_lock, flags); | ||
382 | *(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data; | ||
383 | goto send_comp; | 387 | goto send_comp; |
384 | 388 | ||
385 | default: | 389 | default: |
@@ -440,7 +444,7 @@ again: | |||
440 | send_comp: | 444 | send_comp: |
441 | sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; | 445 | sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; |
442 | 446 | ||
443 | if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) || | 447 | if (!(sqp->s_flags & IPATH_S_SIGNAL_REQ_WR) || |
444 | (wqe->wr.send_flags & IB_SEND_SIGNALED)) { | 448 | (wqe->wr.send_flags & IB_SEND_SIGNALED)) { |
445 | wc.wr_id = wqe->wr.wr_id; | 449 | wc.wr_id = wqe->wr.wr_id; |
446 | wc.status = IB_WC_SUCCESS; | 450 | wc.status = IB_WC_SUCCESS; |
@@ -502,7 +506,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) | |||
502 | * We clear the tasklet flag now since we are committing to return | 506 | * We clear the tasklet flag now since we are committing to return |
503 | * from the tasklet function. | 507 | * from the tasklet function. |
504 | */ | 508 | */ |
505 | clear_bit(IPATH_S_BUSY, &qp->s_flags); | 509 | clear_bit(IPATH_S_BUSY, &qp->s_busy); |
506 | tasklet_unlock(&qp->s_task); | 510 | tasklet_unlock(&qp->s_task); |
507 | want_buffer(dev->dd); | 511 | want_buffer(dev->dd); |
508 | dev->n_piowait++; | 512 | dev->n_piowait++; |
@@ -541,6 +545,9 @@ int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
541 | wr->sg_list[0].addr & (sizeof(u64) - 1))) { | 545 | wr->sg_list[0].addr & (sizeof(u64) - 1))) { |
542 | ret = -EINVAL; | 546 | ret = -EINVAL; |
543 | goto bail; | 547 | goto bail; |
548 | } else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) { | ||
549 | ret = -EINVAL; | ||
550 | goto bail; | ||
544 | } | 551 | } |
545 | /* IB spec says that num_sge == 0 is OK. */ | 552 | /* IB spec says that num_sge == 0 is OK. */ |
546 | if (wr->num_sge > qp->s_max_sge) { | 553 | if (wr->num_sge > qp->s_max_sge) { |
@@ -647,7 +654,7 @@ void ipath_do_ruc_send(unsigned long data) | |||
647 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | 654 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); |
648 | struct ipath_other_headers *ohdr; | 655 | struct ipath_other_headers *ohdr; |
649 | 656 | ||
650 | if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags)) | 657 | if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy)) |
651 | goto bail; | 658 | goto bail; |
652 | 659 | ||
653 | if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) { | 660 | if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) { |
@@ -683,19 +690,15 @@ again: | |||
683 | */ | 690 | */ |
684 | spin_lock_irqsave(&qp->s_lock, flags); | 691 | spin_lock_irqsave(&qp->s_lock, flags); |
685 | 692 | ||
686 | /* Sending responses has higher priority over sending requests. */ | 693 | if (!((qp->ibqp.qp_type == IB_QPT_RC) ? |
687 | if (qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE && | 694 | ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) : |
688 | (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0) | 695 | ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) { |
689 | bth2 = qp->s_ack_psn++ & IPATH_PSN_MASK; | ||
690 | else if (!((qp->ibqp.qp_type == IB_QPT_RC) ? | ||
691 | ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) : | ||
692 | ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) { | ||
693 | /* | 696 | /* |
694 | * Clear the busy bit before unlocking to avoid races with | 697 | * Clear the busy bit before unlocking to avoid races with |
695 | * adding new work queue items and then failing to process | 698 | * adding new work queue items and then failing to process |
696 | * them. | 699 | * them. |
697 | */ | 700 | */ |
698 | clear_bit(IPATH_S_BUSY, &qp->s_flags); | 701 | clear_bit(IPATH_S_BUSY, &qp->s_busy); |
699 | spin_unlock_irqrestore(&qp->s_lock, flags); | 702 | spin_unlock_irqrestore(&qp->s_lock, flags); |
700 | goto bail; | 703 | goto bail; |
701 | } | 704 | } |
@@ -728,7 +731,7 @@ again: | |||
728 | goto again; | 731 | goto again; |
729 | 732 | ||
730 | clear: | 733 | clear: |
731 | clear_bit(IPATH_S_BUSY, &qp->s_flags); | 734 | clear_bit(IPATH_S_BUSY, &qp->s_busy); |
732 | bail: | 735 | bail: |
733 | return; | 736 | return; |
734 | } | 737 | } |
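
The ipath_ruc.c hunks above replace the lock-protected read-modify-write for IB atomics with host atomic primitives: atomic64_add_return() implements FETCH_AND_ADD (its return value minus the addend is the prior memory contents) and cmpxchg() implements CMP_AND_SWP, each yielding the old value that the responder hands back to the requester. A minimal user-space sketch of the same dispatch, using GCC builtins as stand-ins for the kernel primitives (an assumption; the helper name do_atomic is invented for illustration):

        #include <stdint.h>

        /* Invented stand-ins for the two IB atomic work-request opcodes. */
        enum atomic_op { FETCH_AND_ADD, CMP_AND_SWP };

        /*
         * Perform the atomic on *vaddr and return the value it held before
         * the operation -- that prior value is what goes back in the ACK.
         */
        static uint64_t do_atomic(enum atomic_op op, uint64_t *vaddr,
                                  uint64_t compare_add, uint64_t swap)
        {
                if (op == FETCH_AND_ADD)
                        /* Returns the pre-add contents, like add_return - sdata. */
                        return __atomic_fetch_add(vaddr, compare_add,
                                                  __ATOMIC_SEQ_CST);

                /*
                 * Compare-and-swap: store 'swap' only if *vaddr == compare_add.
                 * On failure the builtin writes the actual old value back into
                 * compare_add, so either way compare_add ends up holding it.
                 */
                __atomic_compare_exchange_n(vaddr, &compare_add, swap, 0,
                                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
                return compare_add;
        }
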
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c index 30a825928fcf..9307f7187ca5 100644 --- a/drivers/infiniband/hw/ipath/ipath_stats.c +++ b/drivers/infiniband/hw/ipath/ipath_stats.c | |||
@@ -207,7 +207,7 @@ void ipath_get_faststats(unsigned long opaque) | |||
207 | * don't access the chip while running diags, or memory diags can | 207 | * don't access the chip while running diags, or memory diags can |
208 | * fail | 208 | * fail |
209 | */ | 209 | */ |
210 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT) || | 210 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) || |
211 | ipath_diag_inuse) | 211 | ipath_diag_inuse) |
212 | /* but re-arm the timer, for diags case; won't hurt other */ | 212 | /* but re-arm the timer, for diags case; won't hurt other */ |
213 | goto done; | 213 | goto done; |
@@ -237,11 +237,13 @@ void ipath_get_faststats(unsigned long opaque) | |||
237 | if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) | 237 | if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) |
238 | && time_after(jiffies, dd->ipath_unmasktime)) { | 238 | && time_after(jiffies, dd->ipath_unmasktime)) { |
239 | char ebuf[256]; | 239 | char ebuf[256]; |
240 | ipath_decode_err(ebuf, sizeof ebuf, | 240 | int iserr; |
241 | iserr = ipath_decode_err(ebuf, sizeof ebuf, | ||
241 | (dd->ipath_maskederrs & ~dd-> | 242 | (dd->ipath_maskederrs & ~dd-> |
242 | ipath_ignorederrs)); | 243 | ipath_ignorederrs)); |
243 | if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) & | 244 | if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) & |
244 | ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL)) | 245 | ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL | |
246 | INFINIPATH_E_PKTERRS )) | ||
245 | ipath_dev_err(dd, "Re-enabling masked errors " | 247 | ipath_dev_err(dd, "Re-enabling masked errors " |
246 | "(%s)\n", ebuf); | 248 | "(%s)\n", ebuf); |
247 | else { | 249 | else { |
@@ -252,8 +254,12 @@ void ipath_get_faststats(unsigned long opaque) | |||
252 | * them. So only complain about these at debug | 254 | * them. So only complain about these at debug |
253 | * level. | 255 | * level. |
254 | */ | 256 | */ |
255 | ipath_dbg("Disabling frequent queue full errors " | 257 | if (iserr) |
256 | "(%s)\n", ebuf); | 258 | ipath_dbg("Re-enabling queue full errors (%s)\n", |
259 | ebuf); | ||
260 | else | ||
261 | ipath_cdbg(ERRPKT, "Re-enabling packet" | ||
262 | " problem interrupt (%s)\n", ebuf); | ||
257 | } | 263 | } |
258 | dd->ipath_maskederrs = dd->ipath_ignorederrs; | 264 | dd->ipath_maskederrs = dd->ipath_ignorederrs; |
259 | ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, | 265 | ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, |
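
The ipath_stats.c hunk keeps the usual jiffies hold-off pattern: errors are masked when they get noisy, a deadline is recorded, and the periodic timer re-enables them only once time_after() says the deadline has passed, which stays correct across counter wraparound. A self-contained sketch of the pattern, with field names abbreviated from the ones above:

        #include <stdio.h>

        /* Wrap-safe "a is after b" on a free-running counter, like time_after(). */
        #define time_after(a, b) ((long)((b) - (a)) < 0)

        struct dev_errs {
                unsigned long masked;       /* bits temporarily masked off   */
                unsigned long ignored;      /* bits masked permanently       */
                unsigned long unmask_time;  /* counter value to re-enable at */
        };

        /* Periodic tick: re-enable masked errors once the hold-off expires. */
        static void maybe_unmask(struct dev_errs *dd, unsigned long now)
        {
                if ((dd->masked & ~dd->ignored) &&
                    time_after(now, dd->unmask_time)) {
                        dd->masked = dd->ignored;
                        printf("re-enabling error bits 0x%lx\n", ~dd->masked);
                }
        }
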
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c index 325d6634ff53..1c2b03c2ef5e 100644 --- a/drivers/infiniband/hw/ipath/ipath_uc.c +++ b/drivers/infiniband/hw/ipath/ipath_uc.c | |||
@@ -42,7 +42,7 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe, | |||
42 | { | 42 | { |
43 | if (++qp->s_last == qp->s_size) | 43 | if (++qp->s_last == qp->s_size) |
44 | qp->s_last = 0; | 44 | qp->s_last = 0; |
45 | if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) || | 45 | if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) || |
46 | (wqe->wr.send_flags & IB_SEND_SIGNALED)) { | 46 | (wqe->wr.send_flags & IB_SEND_SIGNALED)) { |
47 | wc->wr_id = wqe->wr.wr_id; | 47 | wc->wr_id = wqe->wr.wr_id; |
48 | wc->status = IB_WC_SUCCESS; | 48 | wc->status = IB_WC_SUCCESS; |
@@ -344,13 +344,13 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
344 | send_first: | 344 | send_first: |
345 | if (qp->r_reuse_sge) { | 345 | if (qp->r_reuse_sge) { |
346 | qp->r_reuse_sge = 0; | 346 | qp->r_reuse_sge = 0; |
347 | qp->r_sge = qp->s_rdma_sge; | 347 | qp->r_sge = qp->s_rdma_read_sge; |
348 | } else if (!ipath_get_rwqe(qp, 0)) { | 348 | } else if (!ipath_get_rwqe(qp, 0)) { |
349 | dev->n_pkt_drops++; | 349 | dev->n_pkt_drops++; |
350 | goto done; | 350 | goto done; |
351 | } | 351 | } |
352 | /* Save the WQE so we can reuse it in case of an error. */ | 352 | /* Save the WQE so we can reuse it in case of an error. */ |
353 | qp->s_rdma_sge = qp->r_sge; | 353 | qp->s_rdma_read_sge = qp->r_sge; |
354 | qp->r_rcv_len = 0; | 354 | qp->r_rcv_len = 0; |
355 | if (opcode == OP(SEND_ONLY)) | 355 | if (opcode == OP(SEND_ONLY)) |
356 | goto send_last; | 356 | goto send_last; |
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c index 9a3e54664ee4..a518f7c8fa83 100644 --- a/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/drivers/infiniband/hw/ipath/ipath_ud.c | |||
@@ -308,6 +308,11 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
308 | goto bail; | 308 | goto bail; |
309 | } | 309 | } |
310 | 310 | ||
311 | if (wr->wr.ud.ah->pd != qp->ibqp.pd) { | ||
312 | ret = -EPERM; | ||
313 | goto bail; | ||
314 | } | ||
315 | |||
311 | /* IB spec says that num_sge == 0 is OK. */ | 316 | /* IB spec says that num_sge == 0 is OK. */ |
312 | if (wr->num_sge > qp->s_max_sge) { | 317 | if (wr->num_sge > qp->s_max_sge) { |
313 | ret = -EINVAL; | 318 | ret = -EINVAL; |
@@ -467,7 +472,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
467 | 472 | ||
468 | done: | 473 | done: |
469 | /* Queue the completion status entry. */ | 474 | /* Queue the completion status entry. */ |
470 | if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) || | 475 | if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) || |
471 | (wr->send_flags & IB_SEND_SIGNALED)) { | 476 | (wr->send_flags & IB_SEND_SIGNALED)) { |
472 | wc.wr_id = wr->wr_id; | 477 | wc.wr_id = wr->wr_id; |
473 | wc.status = IB_WC_SUCCESS; | 478 | wc.status = IB_WC_SUCCESS; |
@@ -647,6 +652,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
647 | ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh)); | 652 | ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh)); |
648 | ipath_copy_sge(&qp->r_sge, data, | 653 | ipath_copy_sge(&qp->r_sge, data, |
649 | wc.byte_len - sizeof(struct ib_grh)); | 654 | wc.byte_len - sizeof(struct ib_grh)); |
655 | qp->r_wrid_valid = 0; | ||
650 | wc.wr_id = qp->r_wr_id; | 656 | wc.wr_id = qp->r_wr_id; |
651 | wc.status = IB_WC_SUCCESS; | 657 | wc.status = IB_WC_SUCCESS; |
652 | wc.opcode = IB_WC_RECV; | 658 | wc.opcode = IB_WC_RECV; |
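
The new ipath_post_ud_send() check enforces that a UD work request's address handle belongs to the same protection domain as the QP, failing with -EPERM rather than -EINVAL since it is a permission violation rather than a malformed argument. Reduced to the fields involved, the validation looks roughly like this (types are invented placeholders, not the driver's):

        #include <errno.h>

        struct pd;                              /* protection domain, opaque here */
        struct ud_ah { const struct pd *pd; };  /* an AH remembers its PD */
        struct ud_qp { const struct pd *pd; };

        /* A UD send may only use an address handle from the QP's own PD. */
        static int check_ud_ah(const struct ud_qp *qp, const struct ud_ah *ah)
        {
                if (ah->pd != qp->pd)
                        return -EPERM;  /* permission error, not a bad argument */
                return 0;
        }
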
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 2aaacdb7e52a..18c6df2052c2 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -438,6 +438,10 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data, | |||
438 | struct ipath_mcast *mcast; | 438 | struct ipath_mcast *mcast; |
439 | struct ipath_mcast_qp *p; | 439 | struct ipath_mcast_qp *p; |
440 | 440 | ||
441 | if (lnh != IPATH_LRH_GRH) { | ||
442 | dev->n_pkt_drops++; | ||
443 | goto bail; | ||
444 | } | ||
441 | mcast = ipath_mcast_find(&hdr->u.l.grh.dgid); | 445 | mcast = ipath_mcast_find(&hdr->u.l.grh.dgid); |
442 | if (mcast == NULL) { | 446 | if (mcast == NULL) { |
443 | dev->n_pkt_drops++; | 447 | dev->n_pkt_drops++; |
@@ -445,8 +449,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data, | |||
445 | } | 449 | } |
446 | dev->n_multicast_rcv++; | 450 | dev->n_multicast_rcv++; |
447 | list_for_each_entry_rcu(p, &mcast->qp_list, list) | 451 | list_for_each_entry_rcu(p, &mcast->qp_list, list) |
448 | ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data, | 452 | ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp); |
449 | tlen, p->qp); | ||
450 | /* | 453 | /* |
451 | * Notify ipath_multicast_detach() if it is waiting for us | 454 | * Notify ipath_multicast_detach() if it is waiting for us |
452 | * to finish. | 455 | * to finish. |
@@ -773,7 +776,6 @@ int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, | |||
773 | /* +1 is for the qword padding of pbc */ | 776 | /* +1 is for the qword padding of pbc */ |
774 | plen = hdrwords + ((len + 3) >> 2) + 1; | 777 | plen = hdrwords + ((len + 3) >> 2) + 1; |
775 | if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) { | 778 | if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) { |
776 | ipath_dbg("packet len 0x%x too long, failing\n", plen); | ||
777 | ret = -EINVAL; | 779 | ret = -EINVAL; |
778 | goto bail; | 780 | goto bail; |
779 | } | 781 | } |
@@ -980,14 +982,14 @@ static int ipath_query_device(struct ib_device *ibdev, | |||
980 | props->max_cqe = ib_ipath_max_cqes; | 982 | props->max_cqe = ib_ipath_max_cqes; |
981 | props->max_mr = dev->lk_table.max; | 983 | props->max_mr = dev->lk_table.max; |
982 | props->max_pd = ib_ipath_max_pds; | 984 | props->max_pd = ib_ipath_max_pds; |
983 | props->max_qp_rd_atom = 1; | 985 | props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC; |
984 | props->max_qp_init_rd_atom = 1; | 986 | props->max_qp_init_rd_atom = 255; |
985 | /* props->max_res_rd_atom */ | 987 | /* props->max_res_rd_atom */ |
986 | props->max_srq = ib_ipath_max_srqs; | 988 | props->max_srq = ib_ipath_max_srqs; |
987 | props->max_srq_wr = ib_ipath_max_srq_wrs; | 989 | props->max_srq_wr = ib_ipath_max_srq_wrs; |
988 | props->max_srq_sge = ib_ipath_max_srq_sges; | 990 | props->max_srq_sge = ib_ipath_max_srq_sges; |
989 | /* props->local_ca_ack_delay */ | 991 | /* props->local_ca_ack_delay */ |
990 | props->atomic_cap = IB_ATOMIC_HCA; | 992 | props->atomic_cap = IB_ATOMIC_GLOB; |
991 | props->max_pkeys = ipath_get_npkeys(dev->dd); | 993 | props->max_pkeys = ipath_get_npkeys(dev->dd); |
992 | props->max_mcast_grp = ib_ipath_max_mcast_grps; | 994 | props->max_mcast_grp = ib_ipath_max_mcast_grps; |
993 | props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached; | 995 | props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached; |
@@ -1557,7 +1559,6 @@ int ipath_register_ib_device(struct ipath_devdata *dd) | |||
1557 | dev->node_type = RDMA_NODE_IB_CA; | 1559 | dev->node_type = RDMA_NODE_IB_CA; |
1558 | dev->phys_port_cnt = 1; | 1560 | dev->phys_port_cnt = 1; |
1559 | dev->dma_device = &dd->pcidev->dev; | 1561 | dev->dma_device = &dd->pcidev->dev; |
1560 | dev->class_dev.dev = dev->dma_device; | ||
1561 | dev->query_device = ipath_query_device; | 1562 | dev->query_device = ipath_query_device; |
1562 | dev->modify_device = ipath_modify_device; | 1563 | dev->modify_device = ipath_modify_device; |
1563 | dev->query_port = ipath_query_port; | 1564 | dev->query_port = ipath_query_port; |
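
In ipath_ib_rcv(), multicast packets that arrive without a GRH are now dropped before hdr->u.l.grh is dereferenced, which also lets the per-QP receive call pass a constant 1 for the has-GRH flag. The guard-before-dereference shape, sketched with invented placeholder types:

        enum lnh { LRH_BTH, LRH_GRH };          /* link next-header, simplified */

        struct grh { unsigned char dgid[16]; };
        struct mc_pkt { enum lnh lnh; struct grh *grh; };

        struct mcast;                           /* multicast group, opaque stub */
        struct mcast *lookup_mcast(const unsigned char *dgid);

        /* Returns the group, or NULL after counting the packet as dropped. */
        static struct mcast *rcv_multicast(struct mc_pkt *p, unsigned long *n_drops)
        {
                if (p->lnh != LRH_GRH) {        /* no GRH: nothing to look up */
                        (*n_drops)++;
                        return NULL;
                }
                return lookup_mcast(p->grh->dgid);      /* GRH safe to read now */
        }
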
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index c0c8d5b24a7d..7c4929f1cb5b 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h | |||
@@ -40,9 +40,12 @@ | |||
40 | #include <linux/interrupt.h> | 40 | #include <linux/interrupt.h> |
41 | #include <linux/kref.h> | 41 | #include <linux/kref.h> |
42 | #include <rdma/ib_pack.h> | 42 | #include <rdma/ib_pack.h> |
43 | #include <rdma/ib_user_verbs.h> | ||
43 | 44 | ||
44 | #include "ipath_layer.h" | 45 | #include "ipath_layer.h" |
45 | 46 | ||
47 | #define IPATH_MAX_RDMA_ATOMIC 4 | ||
48 | |||
46 | #define QPN_MAX (1 << 24) | 49 | #define QPN_MAX (1 << 24) |
47 | #define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) | 50 | #define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) |
48 | 51 | ||
@@ -89,7 +92,7 @@ struct ib_reth { | |||
89 | } __attribute__ ((packed)); | 92 | } __attribute__ ((packed)); |
90 | 93 | ||
91 | struct ib_atomic_eth { | 94 | struct ib_atomic_eth { |
92 | __be64 vaddr; | 95 | __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */ |
93 | __be32 rkey; | 96 | __be32 rkey; |
94 | __be64 swap_data; | 97 | __be64 swap_data; |
95 | __be64 compare_data; | 98 | __be64 compare_data; |
@@ -108,7 +111,7 @@ struct ipath_other_headers { | |||
108 | } rc; | 111 | } rc; |
109 | struct { | 112 | struct { |
110 | __be32 aeth; | 113 | __be32 aeth; |
111 | __be64 atomic_ack_eth; | 114 | __be32 atomic_ack_eth[2]; |
112 | } at; | 115 | } at; |
113 | __be32 imm_data; | 116 | __be32 imm_data; |
114 | __be32 aeth; | 117 | __be32 aeth; |
@@ -186,7 +189,7 @@ struct ipath_mmap_info { | |||
186 | struct ipath_cq_wc { | 189 | struct ipath_cq_wc { |
187 | u32 head; /* index of next entry to fill */ | 190 | u32 head; /* index of next entry to fill */ |
188 | u32 tail; /* index of next ib_poll_cq() entry */ | 191 | u32 tail; /* index of next ib_poll_cq() entry */ |
189 | struct ib_wc queue[1]; /* this is actually size ibcq.cqe + 1 */ | 192 | struct ib_uverbs_wc queue[1]; /* this is actually size ibcq.cqe + 1 */ |
190 | }; | 193 | }; |
191 | 194 | ||
192 | /* | 195 | /* |
@@ -312,6 +315,19 @@ struct ipath_sge_state { | |||
312 | }; | 315 | }; |
313 | 316 | ||
314 | /* | 317 | /* |
318 | * This structure holds the information that the send tasklet needs | ||
319 | * to send an RDMA read response or atomic operation. | ||
320 | */ | ||
321 | struct ipath_ack_entry { | ||
322 | u8 opcode; | ||
323 | u32 psn; | ||
324 | union { | ||
325 | struct ipath_sge_state rdma_sge; | ||
326 | u64 atomic_data; | ||
327 | }; | ||
328 | }; | ||
329 | |||
330 | /* | ||
315 | * Variables prefixed with s_ are for the requester (sender). | 331 | * Variables prefixed with s_ are for the requester (sender). |
316 | * Variables prefixed with r_ are for the responder (receiver). | 332 | * Variables prefixed with r_ are for the responder (receiver). |
317 | * Variables prefixed with ack_ are for responder replies. | 333 | * Variables prefixed with ack_ are for responder replies. |
@@ -333,24 +349,24 @@ struct ipath_qp { | |||
333 | struct ipath_mmap_info *ip; | 349 | struct ipath_mmap_info *ip; |
334 | struct ipath_sge_state *s_cur_sge; | 350 | struct ipath_sge_state *s_cur_sge; |
335 | struct ipath_sge_state s_sge; /* current send request data */ | 351 | struct ipath_sge_state s_sge; /* current send request data */ |
336 | /* current RDMA read send data */ | 352 | struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1]; |
337 | struct ipath_sge_state s_rdma_sge; | 353 | struct ipath_sge_state s_ack_rdma_sge; |
354 | struct ipath_sge_state s_rdma_read_sge; | ||
338 | struct ipath_sge_state r_sge; /* current receive data */ | 355 | struct ipath_sge_state r_sge; /* current receive data */ |
339 | spinlock_t s_lock; | 356 | spinlock_t s_lock; |
340 | unsigned long s_flags; | 357 | unsigned long s_busy; |
341 | u32 s_hdrwords; /* size of s_hdr in 32 bit words */ | 358 | u32 s_hdrwords; /* size of s_hdr in 32 bit words */ |
342 | u32 s_cur_size; /* size of send packet in bytes */ | 359 | u32 s_cur_size; /* size of send packet in bytes */ |
343 | u32 s_len; /* total length of s_sge */ | 360 | u32 s_len; /* total length of s_sge */ |
344 | u32 s_rdma_len; /* total length of s_rdma_sge */ | 361 | u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ |
345 | u32 s_next_psn; /* PSN for next request */ | 362 | u32 s_next_psn; /* PSN for next request */ |
346 | u32 s_last_psn; /* last response PSN processed */ | 363 | u32 s_last_psn; /* last response PSN processed */ |
347 | u32 s_psn; /* current packet sequence number */ | 364 | u32 s_psn; /* current packet sequence number */ |
348 | u32 s_ack_psn; /* PSN for RDMA_READ */ | 365 | u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */ |
366 | u32 s_ack_psn; /* PSN for acking sends and RDMA writes */ | ||
349 | u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ | 367 | u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ |
350 | u32 r_ack_psn; /* PSN for next ACK or atomic ACK */ | 368 | u32 r_ack_psn; /* PSN for next ACK or atomic ACK */ |
351 | u64 r_wr_id; /* ID for current receive WQE */ | 369 | u64 r_wr_id; /* ID for current receive WQE */ |
352 | u64 r_atomic_data; /* data for last atomic op */ | ||
353 | u32 r_atomic_psn; /* PSN of last atomic op */ | ||
354 | u32 r_len; /* total length of r_sge */ | 370 | u32 r_len; /* total length of r_sge */ |
355 | u32 r_rcv_len; /* receive data len processed */ | 371 | u32 r_rcv_len; /* receive data len processed */ |
356 | u32 r_psn; /* expected rcv packet sequence number */ | 372 | u32 r_psn; /* expected rcv packet sequence number */ |
@@ -360,12 +376,13 @@ struct ipath_qp { | |||
360 | u8 s_ack_state; /* opcode of packet to ACK */ | 376 | u8 s_ack_state; /* opcode of packet to ACK */ |
361 | u8 s_nak_state; /* non-zero if NAK is pending */ | 377 | u8 s_nak_state; /* non-zero if NAK is pending */ |
362 | u8 r_state; /* opcode of last packet received */ | 378 | u8 r_state; /* opcode of last packet received */ |
363 | u8 r_ack_state; /* opcode of packet to ACK */ | ||
364 | u8 r_nak_state; /* non-zero if NAK is pending */ | 379 | u8 r_nak_state; /* non-zero if NAK is pending */ |
365 | u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ | 380 | u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ |
366 | u8 r_reuse_sge; /* for UC receive errors */ | 381 | u8 r_reuse_sge; /* for UC receive errors */ |
367 | u8 r_sge_inx; /* current index into sg_list */ | 382 | u8 r_sge_inx; /* current index into sg_list */ |
368 | u8 r_wrid_valid; /* r_wrid set but CQ entry not yet made */ | 383 | u8 r_wrid_valid; /* r_wrid set but CQ entry not yet made */ |
384 | u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */ | ||
385 | u8 r_head_ack_queue; /* index into s_ack_queue[] */ | ||
369 | u8 qp_access_flags; | 386 | u8 qp_access_flags; |
370 | u8 s_max_sge; /* size of s_wq->sg_list */ | 387 | u8 s_max_sge; /* size of s_wq->sg_list */ |
371 | u8 s_retry_cnt; /* number of times to retry */ | 388 | u8 s_retry_cnt; /* number of times to retry */ |
@@ -374,6 +391,10 @@ struct ipath_qp { | |||
374 | u8 s_rnr_retry; /* requester RNR retry counter */ | 391 | u8 s_rnr_retry; /* requester RNR retry counter */ |
375 | u8 s_wait_credit; /* limit number of unacked packets sent */ | 392 | u8 s_wait_credit; /* limit number of unacked packets sent */ |
376 | u8 s_pkey_index; /* PKEY index to use */ | 393 | u8 s_pkey_index; /* PKEY index to use */ |
394 | u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */ | ||
395 | u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */ | ||
396 | u8 s_tail_ack_queue; /* index into s_ack_queue[] */ | ||
397 | u8 s_flags; | ||
377 | u8 timeout; /* Timeout for this QP */ | 398 | u8 timeout; /* Timeout for this QP */ |
378 | enum ib_mtu path_mtu; | 399 | enum ib_mtu path_mtu; |
379 | u32 remote_qpn; | 400 | u32 remote_qpn; |
@@ -390,11 +411,16 @@ struct ipath_qp { | |||
390 | struct ipath_sge r_sg_list[0]; /* verified SGEs */ | 411 | struct ipath_sge r_sg_list[0]; /* verified SGEs */ |
391 | }; | 412 | }; |
392 | 413 | ||
414 | /* Bit definition for s_busy. */ | ||
415 | #define IPATH_S_BUSY 0 | ||
416 | |||
393 | /* | 417 | /* |
394 | * Bit definitions for s_flags. | 418 | * Bit definitions for s_flags. |
395 | */ | 419 | */ |
396 | #define IPATH_S_BUSY 0 | 420 | #define IPATH_S_SIGNAL_REQ_WR 0x01 |
397 | #define IPATH_S_SIGNAL_REQ_WR 1 | 421 | #define IPATH_S_FENCE_PENDING 0x02 |
422 | #define IPATH_S_RDMAR_PENDING 0x04 | ||
423 | #define IPATH_S_ACK_PENDING 0x08 | ||
398 | 424 | ||
399 | #define IPATH_PSN_CREDIT 2048 | 425 | #define IPATH_PSN_CREDIT 2048 |
400 | 426 | ||
@@ -706,8 +732,6 @@ int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); | |||
706 | 732 | ||
707 | int ipath_destroy_srq(struct ib_srq *ibsrq); | 733 | int ipath_destroy_srq(struct ib_srq *ibsrq); |
708 | 734 | ||
709 | void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig); | ||
710 | |||
711 | int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); | 735 | int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); |
712 | 736 | ||
713 | struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, | 737 | struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, |
@@ -757,9 +781,6 @@ u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr, | |||
757 | 781 | ||
758 | void ipath_do_ruc_send(unsigned long data); | 782 | void ipath_do_ruc_send(unsigned long data); |
759 | 783 | ||
760 | u32 ipath_make_rc_ack(struct ipath_qp *qp, struct ipath_other_headers *ohdr, | ||
761 | u32 pmtu); | ||
762 | |||
763 | int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr, | 784 | int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr, |
764 | u32 pmtu, u32 *bth0p, u32 *bth2p); | 785 | u32 pmtu, u32 *bth0p, u32 *bth2p); |
765 | 786 | ||
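
The header changes give each RC QP an s_ack_queue[] of IPATH_MAX_RDMA_ATOMIC + 1 entries indexed by r_head_ack_queue and s_tail_ack_queue, so several RDMA read/atomic responses can be outstanding at once; the extra slot is the classic trick that keeps a full ring distinguishable from an empty one. A minimal sketch of that ring discipline (logic assumed, not lifted from the driver):

        #define MAX_RD_ATOMIC 4
        #define RING_SLOTS (MAX_RD_ATOMIC + 1)  /* one spare slot */

        struct ack_entry { unsigned char opcode; unsigned int psn; };

        struct ack_ring {
                struct ack_entry q[RING_SLOTS];
                unsigned int head;      /* next slot the responder fills */
                unsigned int tail;      /* next slot the sender drains   */
        };

        /* head == tail means empty; head one behind tail means full. */
        static int ring_full(const struct ack_ring *r)
        {
                return (r->head + 1) % RING_SLOTS == r->tail;
        }

        static int ring_push(struct ack_ring *r, struct ack_entry e)
        {
                if (ring_full(r))
                        return -1;      /* caller must stall or NAK the request */
                r->q[r->head] = e;
                r->head = (r->head + 1) % RING_SLOTS;
                return 0;
        }
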
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 0d9b7d06bbc2..773145e29947 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c | |||
@@ -1013,14 +1013,14 @@ static struct { | |||
1013 | u64 latest_fw; | 1013 | u64 latest_fw; |
1014 | u32 flags; | 1014 | u32 flags; |
1015 | } mthca_hca_table[] = { | 1015 | } mthca_hca_table[] = { |
1016 | [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0), | 1016 | [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 5, 0), |
1017 | .flags = 0 }, | 1017 | .flags = 0 }, |
1018 | [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600), | 1018 | [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200), |
1019 | .flags = MTHCA_FLAG_PCIE }, | 1019 | .flags = MTHCA_FLAG_PCIE }, |
1020 | [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400), | 1020 | [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 2, 0), |
1021 | .flags = MTHCA_FLAG_MEMFREE | | 1021 | .flags = MTHCA_FLAG_MEMFREE | |
1022 | MTHCA_FLAG_PCIE }, | 1022 | MTHCA_FLAG_PCIE }, |
1023 | [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 1, 0), | 1023 | [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 2, 0), |
1024 | .flags = MTHCA_FLAG_MEMFREE | | 1024 | .flags = MTHCA_FLAG_MEMFREE | |
1025 | MTHCA_FLAG_PCIE | | 1025 | MTHCA_FLAG_PCIE | |
1026 | MTHCA_FLAG_SINAI_OPT } | 1026 | MTHCA_FLAG_SINAI_OPT } |
@@ -1135,7 +1135,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type) | |||
1135 | goto err_cmd; | 1135 | goto err_cmd; |
1136 | 1136 | ||
1137 | if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) { | 1137 | if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) { |
1138 | mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n", | 1138 | mthca_warn(mdev, "HCA FW version %d.%d.%3d is old (%d.%d.%3d is current).\n", |
1139 | (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, | 1139 | (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, |
1140 | (int) (mdev->fw_ver & 0xffff), | 1140 | (int) (mdev->fw_ver & 0xffff), |
1141 | (int) (mthca_hca_table[hca_type].latest_fw >> 32), | 1141 | (int) (mthca_hca_table[hca_type].latest_fw >> 32), |
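
The mthca firmware-table bump works because MTHCA_FW_VER() packs major/minor/subminor into one u64 -- major in the top 32 bits, then two 16-bit fields, going by how the warning printk unpacks mdev->fw_ver -- so "is this firmware old?" becomes a single integer comparison. A runnable sketch under that assumed layout:

        #include <stdint.h>
        #include <stdio.h>

        /* Assumed layout, inferred from the shifts in the warning printk. */
        #define FW_VER(maj, min, sub) \
                (((uint64_t)(maj) << 32) | ((uint64_t)(min) << 16) | (uint64_t)(sub))

        int main(void)
        {
                uint64_t cur = FW_VER(4, 7, 600), latest = FW_VER(4, 8, 200);

                if (cur < latest)       /* field-by-field compare for free */
                        printf("FW %d.%d.%d is old (%d.%d.%d is current)\n",
                               (int)(cur >> 32), (int)(cur >> 16) & 0xffff,
                               (int)(cur & 0xffff),
                               (int)(latest >> 32), (int)(latest >> 16) & 0xffff,
                               (int)(latest & 0xffff));
                return 0;
        }
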
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index ee561c569d5f..aa6c70a6a36f 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c | |||
@@ -297,7 +297,8 @@ out: | |||
297 | 297 | ||
298 | int mthca_write_mtt_size(struct mthca_dev *dev) | 298 | int mthca_write_mtt_size(struct mthca_dev *dev) |
299 | { | 299 | { |
300 | if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy) | 300 | if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy || |
301 | !(dev->mthca_flags & MTHCA_FLAG_FMR)) | ||
301 | /* | 302 | /* |
302 | * Be friendly to WRITE_MTT command | 303 | * Be friendly to WRITE_MTT command |
303 | * and leave two empty slots for the | 304 | * and leave two empty slots for the |
@@ -355,7 +356,8 @@ int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, | |||
355 | int size = mthca_write_mtt_size(dev); | 356 | int size = mthca_write_mtt_size(dev); |
356 | int chunk; | 357 | int chunk; |
357 | 358 | ||
358 | if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy) | 359 | if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy || |
360 | !(dev->mthca_flags & MTHCA_FLAG_FMR)) | ||
359 | return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len); | 361 | return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len); |
360 | 362 | ||
361 | while (list_len > 0) { | 363 | while (list_len > 0) { |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 0725ad7ad9bf..47e6fd46d9c2 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -1293,7 +1293,6 @@ int mthca_register_device(struct mthca_dev *dev) | |||
1293 | dev->ib_dev.node_type = RDMA_NODE_IB_CA; | 1293 | dev->ib_dev.node_type = RDMA_NODE_IB_CA; |
1294 | dev->ib_dev.phys_port_cnt = dev->limits.num_ports; | 1294 | dev->ib_dev.phys_port_cnt = dev->limits.num_ports; |
1295 | dev->ib_dev.dma_device = &dev->pdev->dev; | 1295 | dev->ib_dev.dma_device = &dev->pdev->dev; |
1296 | dev->ib_dev.class_dev.dev = &dev->pdev->dev; | ||
1297 | dev->ib_dev.query_device = mthca_query_device; | 1296 | dev->ib_dev.query_device = mthca_query_device; |
1298 | dev->ib_dev.query_port = mthca_query_port; | 1297 | dev->ib_dev.query_port = mthca_query_port; |
1299 | dev->ib_dev.modify_device = mthca_modify_device; | 1298 | dev->ib_dev.modify_device = mthca_modify_device; |
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 1c6b63aca268..8fe6fee7a97a 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -1419,11 +1419,10 @@ void mthca_free_qp(struct mthca_dev *dev, | |||
1419 | * unref the mem-free tables and free the QPN in our table. | 1419 | * unref the mem-free tables and free the QPN in our table. |
1420 | */ | 1420 | */ |
1421 | if (!qp->ibqp.uobject) { | 1421 | if (!qp->ibqp.uobject) { |
1422 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, | 1422 | mthca_cq_clean(dev, recv_cq, qp->qpn, |
1423 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 1423 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
1424 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) | 1424 | if (send_cq != recv_cq) |
1425 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, | 1425 | mthca_cq_clean(dev, send_cq, qp->qpn, NULL); |
1426 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | ||
1427 | 1426 | ||
1428 | mthca_free_memfree(dev, qp); | 1427 | mthca_free_memfree(dev, qp); |
1429 | mthca_free_wqe_buf(dev, qp); | 1428 | mthca_free_wqe_buf(dev, qp); |
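
The mthca_free_qp() fix cleans each completion queue exactly once: the receive CQ is flushed with the SRQ (so receive entries are accounted back to it), and the send CQ is flushed separately only when it is a distinct object -- previously a QP with send_cq == recv_cq was scanned twice. In isolation the guard is just:

        struct cq;      /* opaque stubs; only the identity comparison matters */
        struct srq;
        void cq_clean(struct cq *cq, unsigned int qpn, struct srq *srq);

        /* Remove all of qpn's entries without ever scanning one CQ twice. */
        static void clean_qp_cqs(struct cq *send_cq, struct cq *recv_cq,
                                 unsigned int qpn, struct srq *srq)
        {
                cq_clean(recv_cq, qpn, srq);            /* receives may use the SRQ */
                if (send_cq != recv_cq)
                        cq_clean(send_cq, qpn, NULL);   /* sends never do */
        }
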
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 2b242a4823f8..0c4e59b906cd 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -228,7 +228,6 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even | |||
228 | struct net_device *dev = cm_id->context; | 228 | struct net_device *dev = cm_id->context; |
229 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 229 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
230 | struct ipoib_cm_rx *p; | 230 | struct ipoib_cm_rx *p; |
231 | unsigned long flags; | ||
232 | unsigned psn; | 231 | unsigned psn; |
233 | int ret; | 232 | int ret; |
234 | 233 | ||
@@ -257,9 +256,9 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even | |||
257 | 256 | ||
258 | cm_id->context = p; | 257 | cm_id->context = p; |
259 | p->jiffies = jiffies; | 258 | p->jiffies = jiffies; |
260 | spin_lock_irqsave(&priv->lock, flags); | 259 | spin_lock_irq(&priv->lock); |
261 | list_add(&p->list, &priv->cm.passive_ids); | 260 | list_add(&p->list, &priv->cm.passive_ids); |
262 | spin_unlock_irqrestore(&priv->lock, flags); | 261 | spin_unlock_irq(&priv->lock); |
263 | queue_delayed_work(ipoib_workqueue, | 262 | queue_delayed_work(ipoib_workqueue, |
264 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); | 263 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); |
265 | return 0; | 264 | return 0; |
@@ -277,7 +276,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id, | |||
277 | { | 276 | { |
278 | struct ipoib_cm_rx *p; | 277 | struct ipoib_cm_rx *p; |
279 | struct ipoib_dev_priv *priv; | 278 | struct ipoib_dev_priv *priv; |
280 | unsigned long flags; | ||
281 | int ret; | 279 | int ret; |
282 | 280 | ||
283 | switch (event->event) { | 281 | switch (event->event) { |
@@ -290,14 +288,14 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id, | |||
290 | case IB_CM_REJ_RECEIVED: | 288 | case IB_CM_REJ_RECEIVED: |
291 | p = cm_id->context; | 289 | p = cm_id->context; |
292 | priv = netdev_priv(p->dev); | 290 | priv = netdev_priv(p->dev); |
293 | spin_lock_irqsave(&priv->lock, flags); | 291 | spin_lock_irq(&priv->lock); |
294 | if (list_empty(&p->list)) | 292 | if (list_empty(&p->list)) |
295 | ret = 0; /* Connection is going away already. */ | 293 | ret = 0; /* Connection is going away already. */ |
296 | else { | 294 | else { |
297 | list_del_init(&p->list); | 295 | list_del_init(&p->list); |
298 | ret = -ECONNRESET; | 296 | ret = -ECONNRESET; |
299 | } | 297 | } |
300 | spin_unlock_irqrestore(&priv->lock, flags); | 298 | spin_unlock_irq(&priv->lock); |
301 | if (ret) { | 299 | if (ret) { |
302 | ib_destroy_qp(p->qp); | 300 | ib_destroy_qp(p->qp); |
303 | kfree(p); | 301 | kfree(p); |
@@ -351,8 +349,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
351 | u64 mapping[IPOIB_CM_RX_SG]; | 349 | u64 mapping[IPOIB_CM_RX_SG]; |
352 | int frags; | 350 | int frags; |
353 | 351 | ||
354 | ipoib_dbg_data(priv, "cm recv completion: id %d, op %d, status: %d\n", | 352 | ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n", |
355 | wr_id, wc->opcode, wc->status); | 353 | wr_id, wc->status); |
356 | 354 | ||
357 | if (unlikely(wr_id >= ipoib_recvq_size)) { | 355 | if (unlikely(wr_id >= ipoib_recvq_size)) { |
358 | ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", | 356 | ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", |
@@ -408,7 +406,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
408 | skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb); | 406 | skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb); |
409 | 407 | ||
410 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; | 408 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; |
411 | skb->mac.raw = skb->data; | 409 | skb_reset_mac_header(skb); |
412 | skb_pull(skb, IPOIB_ENCAP_LEN); | 410 | skb_pull(skb, IPOIB_ENCAP_LEN); |
413 | 411 | ||
414 | dev->last_rx = jiffies; | 412 | dev->last_rx = jiffies; |
@@ -504,8 +502,8 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx | |||
504 | struct ipoib_tx_buf *tx_req; | 502 | struct ipoib_tx_buf *tx_req; |
505 | unsigned long flags; | 503 | unsigned long flags; |
506 | 504 | ||
507 | ipoib_dbg_data(priv, "cm send completion: id %d, op %d, status: %d\n", | 505 | ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", |
508 | wr_id, wc->opcode, wc->status); | 506 | wr_id, wc->status); |
509 | 507 | ||
510 | if (unlikely(wr_id >= ipoib_sendq_size)) { | 508 | if (unlikely(wr_id >= ipoib_sendq_size)) { |
511 | ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n", | 509 | ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n", |
@@ -612,23 +610,22 @@ void ipoib_cm_dev_stop(struct net_device *dev) | |||
612 | { | 610 | { |
613 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 611 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
614 | struct ipoib_cm_rx *p; | 612 | struct ipoib_cm_rx *p; |
615 | unsigned long flags; | ||
616 | 613 | ||
617 | if (!IPOIB_CM_SUPPORTED(dev->dev_addr)) | 614 | if (!IPOIB_CM_SUPPORTED(dev->dev_addr)) |
618 | return; | 615 | return; |
619 | 616 | ||
620 | ib_destroy_cm_id(priv->cm.id); | 617 | ib_destroy_cm_id(priv->cm.id); |
621 | spin_lock_irqsave(&priv->lock, flags); | 618 | spin_lock_irq(&priv->lock); |
622 | while (!list_empty(&priv->cm.passive_ids)) { | 619 | while (!list_empty(&priv->cm.passive_ids)) { |
623 | p = list_entry(priv->cm.passive_ids.next, typeof(*p), list); | 620 | p = list_entry(priv->cm.passive_ids.next, typeof(*p), list); |
624 | list_del_init(&p->list); | 621 | list_del_init(&p->list); |
625 | spin_unlock_irqrestore(&priv->lock, flags); | 622 | spin_unlock_irq(&priv->lock); |
626 | ib_destroy_cm_id(p->id); | 623 | ib_destroy_cm_id(p->id); |
627 | ib_destroy_qp(p->qp); | 624 | ib_destroy_qp(p->qp); |
628 | kfree(p); | 625 | kfree(p); |
629 | spin_lock_irqsave(&priv->lock, flags); | 626 | spin_lock_irq(&priv->lock); |
630 | } | 627 | } |
631 | spin_unlock_irqrestore(&priv->lock, flags); | 628 | spin_unlock_irq(&priv->lock); |
632 | 629 | ||
633 | cancel_delayed_work(&priv->cm.stale_task); | 630 | cancel_delayed_work(&priv->cm.stale_task); |
634 | } | 631 | } |
@@ -642,7 +639,6 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even | |||
642 | struct ib_qp_attr qp_attr; | 639 | struct ib_qp_attr qp_attr; |
643 | int qp_attr_mask, ret; | 640 | int qp_attr_mask, ret; |
644 | struct sk_buff *skb; | 641 | struct sk_buff *skb; |
645 | unsigned long flags; | ||
646 | 642 | ||
647 | p->mtu = be32_to_cpu(data->mtu); | 643 | p->mtu = be32_to_cpu(data->mtu); |
648 | 644 | ||
@@ -680,12 +676,12 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even | |||
680 | 676 | ||
681 | skb_queue_head_init(&skqueue); | 677 | skb_queue_head_init(&skqueue); |
682 | 678 | ||
683 | spin_lock_irqsave(&priv->lock, flags); | 679 | spin_lock_irq(&priv->lock); |
684 | set_bit(IPOIB_FLAG_OPER_UP, &p->flags); | 680 | set_bit(IPOIB_FLAG_OPER_UP, &p->flags); |
685 | if (p->neigh) | 681 | if (p->neigh) |
686 | while ((skb = __skb_dequeue(&p->neigh->queue))) | 682 | while ((skb = __skb_dequeue(&p->neigh->queue))) |
687 | __skb_queue_tail(&skqueue, skb); | 683 | __skb_queue_tail(&skqueue, skb); |
688 | spin_unlock_irqrestore(&priv->lock, flags); | 684 | spin_unlock_irq(&priv->lock); |
689 | 685 | ||
690 | while ((skb = __skb_dequeue(&skqueue))) { | 686 | while ((skb = __skb_dequeue(&skqueue))) { |
691 | skb->dev = p->dev; | 687 | skb->dev = p->dev; |
@@ -895,7 +891,6 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, | |||
895 | struct ipoib_dev_priv *priv = netdev_priv(tx->dev); | 891 | struct ipoib_dev_priv *priv = netdev_priv(tx->dev); |
896 | struct net_device *dev = priv->dev; | 892 | struct net_device *dev = priv->dev; |
897 | struct ipoib_neigh *neigh; | 893 | struct ipoib_neigh *neigh; |
898 | unsigned long flags; | ||
899 | int ret; | 894 | int ret; |
900 | 895 | ||
901 | switch (event->event) { | 896 | switch (event->event) { |
@@ -914,7 +909,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, | |||
914 | case IB_CM_REJ_RECEIVED: | 909 | case IB_CM_REJ_RECEIVED: |
915 | case IB_CM_TIMEWAIT_EXIT: | 910 | case IB_CM_TIMEWAIT_EXIT: |
916 | ipoib_dbg(priv, "CM error %d.\n", event->event); | 911 | ipoib_dbg(priv, "CM error %d.\n", event->event); |
917 | spin_lock_irqsave(&priv->tx_lock, flags); | 912 | spin_lock_irq(&priv->tx_lock); |
918 | spin_lock(&priv->lock); | 913 | spin_lock(&priv->lock); |
919 | neigh = tx->neigh; | 914 | neigh = tx->neigh; |
920 | 915 | ||
@@ -934,7 +929,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, | |||
934 | } | 929 | } |
935 | 930 | ||
936 | spin_unlock(&priv->lock); | 931 | spin_unlock(&priv->lock); |
937 | spin_unlock_irqrestore(&priv->tx_lock, flags); | 932 | spin_unlock_irq(&priv->tx_lock); |
938 | break; | 933 | break; |
939 | default: | 934 | default: |
940 | break; | 935 | break; |
@@ -1023,21 +1018,20 @@ static void ipoib_cm_tx_reap(struct work_struct *work) | |||
1023 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, | 1018 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, |
1024 | cm.reap_task); | 1019 | cm.reap_task); |
1025 | struct ipoib_cm_tx *p; | 1020 | struct ipoib_cm_tx *p; |
1026 | unsigned long flags; | ||
1027 | 1021 | ||
1028 | spin_lock_irqsave(&priv->tx_lock, flags); | 1022 | spin_lock_irq(&priv->tx_lock); |
1029 | spin_lock(&priv->lock); | 1023 | spin_lock(&priv->lock); |
1030 | while (!list_empty(&priv->cm.reap_list)) { | 1024 | while (!list_empty(&priv->cm.reap_list)) { |
1031 | p = list_entry(priv->cm.reap_list.next, typeof(*p), list); | 1025 | p = list_entry(priv->cm.reap_list.next, typeof(*p), list); |
1032 | list_del(&p->list); | 1026 | list_del(&p->list); |
1033 | spin_unlock(&priv->lock); | 1027 | spin_unlock(&priv->lock); |
1034 | spin_unlock_irqrestore(&priv->tx_lock, flags); | 1028 | spin_unlock_irq(&priv->tx_lock); |
1035 | ipoib_cm_tx_destroy(p); | 1029 | ipoib_cm_tx_destroy(p); |
1036 | spin_lock_irqsave(&priv->tx_lock, flags); | 1030 | spin_lock_irq(&priv->tx_lock); |
1037 | spin_lock(&priv->lock); | 1031 | spin_lock(&priv->lock); |
1038 | } | 1032 | } |
1039 | spin_unlock(&priv->lock); | 1033 | spin_unlock(&priv->lock); |
1040 | spin_unlock_irqrestore(&priv->tx_lock, flags); | 1034 | spin_unlock_irq(&priv->tx_lock); |
1041 | } | 1035 | } |
1042 | 1036 | ||
1043 | static void ipoib_cm_skb_reap(struct work_struct *work) | 1037 | static void ipoib_cm_skb_reap(struct work_struct *work) |
@@ -1046,15 +1040,14 @@ static void ipoib_cm_skb_reap(struct work_struct *work) | |||
1046 | cm.skb_task); | 1040 | cm.skb_task); |
1047 | struct net_device *dev = priv->dev; | 1041 | struct net_device *dev = priv->dev; |
1048 | struct sk_buff *skb; | 1042 | struct sk_buff *skb; |
1049 | unsigned long flags; | ||
1050 | 1043 | ||
1051 | unsigned mtu = priv->mcast_mtu; | 1044 | unsigned mtu = priv->mcast_mtu; |
1052 | 1045 | ||
1053 | spin_lock_irqsave(&priv->tx_lock, flags); | 1046 | spin_lock_irq(&priv->tx_lock); |
1054 | spin_lock(&priv->lock); | 1047 | spin_lock(&priv->lock); |
1055 | while ((skb = skb_dequeue(&priv->cm.skb_queue))) { | 1048 | while ((skb = skb_dequeue(&priv->cm.skb_queue))) { |
1056 | spin_unlock(&priv->lock); | 1049 | spin_unlock(&priv->lock); |
1057 | spin_unlock_irqrestore(&priv->tx_lock, flags); | 1050 | spin_unlock_irq(&priv->tx_lock); |
1058 | if (skb->protocol == htons(ETH_P_IP)) | 1051 | if (skb->protocol == htons(ETH_P_IP)) |
1059 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); | 1052 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); |
1060 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 1053 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
@@ -1062,11 +1055,11 @@ static void ipoib_cm_skb_reap(struct work_struct *work) | |||
1062 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); | 1055 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); |
1063 | #endif | 1056 | #endif |
1064 | dev_kfree_skb_any(skb); | 1057 | dev_kfree_skb_any(skb); |
1065 | spin_lock_irqsave(&priv->tx_lock, flags); | 1058 | spin_lock_irq(&priv->tx_lock); |
1066 | spin_lock(&priv->lock); | 1059 | spin_lock(&priv->lock); |
1067 | } | 1060 | } |
1068 | spin_unlock(&priv->lock); | 1061 | spin_unlock(&priv->lock); |
1069 | spin_unlock_irqrestore(&priv->tx_lock, flags); | 1062 | spin_unlock_irq(&priv->tx_lock); |
1070 | } | 1063 | } |
1071 | 1064 | ||
1072 | void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb, | 1065 | void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb, |
@@ -1088,9 +1081,8 @@ static void ipoib_cm_stale_task(struct work_struct *work) | |||
1088 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, | 1081 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, |
1089 | cm.stale_task.work); | 1082 | cm.stale_task.work); |
1090 | struct ipoib_cm_rx *p; | 1083 | struct ipoib_cm_rx *p; |
1091 | unsigned long flags; | ||
1092 | 1084 | ||
1093 | spin_lock_irqsave(&priv->lock, flags); | 1085 | spin_lock_irq(&priv->lock); |
1094 | while (!list_empty(&priv->cm.passive_ids)) { | 1086 | while (!list_empty(&priv->cm.passive_ids)) { |
1095 | /* List is sorted by LRU, start from tail, | 1087 | /* List is sorted by LRU, start from tail, |
1096 | * stop when we see a recently used entry */ | 1088 | * stop when we see a recently used entry */ |
@@ -1098,13 +1090,13 @@ static void ipoib_cm_stale_task(struct work_struct *work) | |||
1098 | if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT)) | 1090 | if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT)) |
1099 | break; | 1091 | break; |
1100 | list_del_init(&p->list); | 1092 | list_del_init(&p->list); |
1101 | spin_unlock_irqrestore(&priv->lock, flags); | 1093 | spin_unlock_irq(&priv->lock); |
1102 | ib_destroy_cm_id(p->id); | 1094 | ib_destroy_cm_id(p->id); |
1103 | ib_destroy_qp(p->qp); | 1095 | ib_destroy_qp(p->qp); |
1104 | kfree(p); | 1096 | kfree(p); |
1105 | spin_lock_irqsave(&priv->lock, flags); | 1097 | spin_lock_irq(&priv->lock); |
1106 | } | 1098 | } |
1107 | spin_unlock_irqrestore(&priv->lock, flags); | 1099 | spin_unlock_irq(&priv->lock); |
1108 | } | 1100 | } |
1109 | 1101 | ||
1110 | 1102 | ||
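
Every ipoib_cm.c locking hunk above is the same transformation: these paths run in process or workqueue context where interrupts are known to be enabled, so spin_lock_irq()/spin_unlock_irq() can replace the irqsave/irqrestore pair and the flags local disappears. The drain loops keep their drop-and-reacquire shape because the destroy calls may sleep. A condensed kernel-style sketch of that loop (struct and destroy helper invented for illustration):

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct conn { struct list_head list; };

        void conn_destroy(struct conn *p);      /* may sleep: call unlocked */

        static void drain_list(spinlock_t *lock, struct list_head *head)
        {
                struct conn *p;

                spin_lock_irq(lock);            /* process context: irqs were on */
                while (!list_empty(head)) {
                        p = list_entry(head->next, struct conn, list);
                        list_del_init(&p->list);
                        spin_unlock_irq(lock);  /* drop the lock to sleep */
                        conn_destroy(p);
                        spin_lock_irq(lock);    /* retake before the next peek */
                }
                spin_unlock_irq(lock);
        }
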
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index ba0ee5cf2ad7..1bdb9101911a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -172,8 +172,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
172 | struct sk_buff *skb; | 172 | struct sk_buff *skb; |
173 | u64 addr; | 173 | u64 addr; |
174 | 174 | ||
175 | ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n", | 175 | ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", |
176 | wr_id, wc->opcode, wc->status); | 176 | wr_id, wc->status); |
177 | 177 | ||
178 | if (unlikely(wr_id >= ipoib_recvq_size)) { | 178 | if (unlikely(wr_id >= ipoib_recvq_size)) { |
179 | ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n", | 179 | ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n", |
@@ -216,7 +216,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
216 | if (wc->slid != priv->local_lid || | 216 | if (wc->slid != priv->local_lid || |
217 | wc->src_qp != priv->qp->qp_num) { | 217 | wc->src_qp != priv->qp->qp_num) { |
218 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; | 218 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; |
219 | skb->mac.raw = skb->data; | 219 | skb_reset_mac_header(skb); |
220 | skb_pull(skb, IPOIB_ENCAP_LEN); | 220 | skb_pull(skb, IPOIB_ENCAP_LEN); |
221 | 221 | ||
222 | dev->last_rx = jiffies; | 222 | dev->last_rx = jiffies; |
@@ -245,8 +245,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) | |||
245 | struct ipoib_tx_buf *tx_req; | 245 | struct ipoib_tx_buf *tx_req; |
246 | unsigned long flags; | 246 | unsigned long flags; |
247 | 247 | ||
248 | ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n", | 248 | ipoib_dbg_data(priv, "send completion: id %d, status: %d\n", |
249 | wr_id, wc->opcode, wc->status); | 249 | wr_id, wc->status); |
250 | 250 | ||
251 | if (unlikely(wr_id >= ipoib_sendq_size)) { | 251 | if (unlikely(wr_id >= ipoib_sendq_size)) { |
252 | ipoib_warn(priv, "send completion event with wrid %d (> %d)\n", | 252 | ipoib_warn(priv, "send completion event with wrid %d (> %d)\n", |
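
The debug-print edits in ipoib_ib.c (and in ipoib_cm.c above) follow from a work-completion convention: on an error status, fields beyond wr_id, status, and vendor_err are not guaranteed valid, so unconditionally printing wc->opcode could log junk. Where the opcode matters, test the status first -- a reduced sketch:

        #include <stdio.h>

        enum wc_status { WC_SUCCESS, WC_ERROR };        /* reduced */
        struct wc { unsigned long wr_id; enum wc_status status; int opcode; };

        static void log_completion(const struct wc *wc)
        {
                if (wc->status != WC_SUCCESS) {
                        /* opcode (and most other fields) may be junk here */
                        printf("id %lu failed, status %d\n", wc->wr_id, wc->status);
                        return;
                }
                printf("id %lu ok, opcode %d\n", wc->wr_id, wc->opcode);
        }
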
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index f2a40ae8e7d0..b4c380c5a3ba 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -395,14 +395,10 @@ static void path_rec_completion(int status, | |||
395 | skb_queue_head_init(&skqueue); | 395 | skb_queue_head_init(&skqueue); |
396 | 396 | ||
397 | if (!status) { | 397 | if (!status) { |
398 | struct ib_ah_attr av = { | 398 | struct ib_ah_attr av; |
399 | .dlid = be16_to_cpu(pathrec->dlid), | 399 | |
400 | .sl = pathrec->sl, | 400 | if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av)) |
401 | .port_num = priv->port, | 401 | ah = ipoib_create_ah(dev, priv->pd, &av); |
402 | .static_rate = pathrec->rate | ||
403 | }; | ||
404 | |||
405 | ah = ipoib_create_ah(dev, priv->pd, &av); | ||
406 | } | 402 | } |
407 | 403 | ||
408 | spin_lock_irqsave(&priv->lock, flags); | 404 | spin_lock_irqsave(&priv->lock, flags); |
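
path_rec_completion() now delegates address-handle construction to ib_init_ah_from_path(), which derives the full attribute set (including GRH fields the old four-field initializer never touched) and can fail, so a bad path record no longer yields a half-initialized AH. The shape of the replacement, with invented reduced types:

        struct ah;                              /* opaque address handle */
        struct ah_attr { int dlid, sl, port_num, static_rate; /* + GRH... */ };
        struct path_rec { int dlid, sl, rate; /* + GIDs, hop limit, ... */ };

        int init_ah_from_path(int port, const struct path_rec *rec,
                              struct ah_attr *av);      /* 0 on success */
        struct ah *create_ah(const struct ah_attr *av);

        /* Build the AH only if every attribute could be derived. */
        static struct ah *ah_from_path(int port, const struct path_rec *rec)
        {
                struct ah_attr av;

                if (init_ah_from_path(port, rec, &av))
                        return NULL;            /* bad path: no half-built AH */
                return create_ah(&av);
        }
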
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c index e3e5c1399076..ee2b0b9f8f46 100644 --- a/drivers/isdn/act2000/module.c +++ b/drivers/isdn/act2000/module.c | |||
@@ -442,7 +442,7 @@ act2000_sendbuf(act2000_card *card, int channel, int ack, struct sk_buff *skb) | |||
442 | return 0; | 442 | return 0; |
443 | } | 443 | } |
444 | skb_reserve(xmit_skb, 19); | 444 | skb_reserve(xmit_skb, 19); |
445 | memcpy(skb_put(xmit_skb, len), skb->data, len); | 445 | skb_copy_from_linear_data(skb, skb_put(xmit_skb, len), len); |
446 | } else { | 446 | } else { |
447 | xmit_skb = skb_clone(skb, GFP_ATOMIC); | 447 | xmit_skb = skb_clone(skb, GFP_ATOMIC); |
448 | if (!xmit_skb) { | 448 | if (!xmit_skb) { |
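
This and the remaining ISDN hunks below are one mechanical conversion: open-coded memcpy(dst, skb->data + off, len) becomes skb_copy_from_linear_data() or skb_copy_from_linear_data_offset(), with the hycapi hunk also using the _to_ direction. The helpers document the copy direction and keep call sites correct if the linear-area representation ever changes; their behaviour is equivalent to the sketch below (the real definitions live in linux/skbuff.h):

        #include <string.h>

        struct sk_buff { unsigned char *data; unsigned int len; };

        /* Copy out of the linear area, starting at skb->data. */
        static void copy_from_linear(const struct sk_buff *skb, void *to,
                                     unsigned int len)
        {
                memcpy(to, skb->data, len);
        }

        /* Same, but starting 'offset' bytes into the linear area. */
        static void copy_from_linear_offset(const struct sk_buff *skb,
                                            unsigned int offset, void *to,
                                            unsigned int len)
        {
                memcpy(to, skb->data + offset, len);
        }

        /* And the opposite direction, as the hycapi hunk uses. */
        static void copy_to_linear_offset(struct sk_buff *skb, unsigned int offset,
                                          const void *from, unsigned int len)
        {
                memcpy(skb->data + offset, from, len);
        }
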
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 2baef349c12d..c8e1c357cec8 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c | |||
@@ -652,7 +652,7 @@ static int write_modem(struct cardstate *cs) | |||
652 | * transmit data | 652 | * transmit data |
653 | */ | 653 | */ |
654 | count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); | 654 | count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); |
655 | memcpy(ucs->bulk_out_buffer, bcs->tx_skb->data, count); | 655 | skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count); |
656 | skb_pull(bcs->tx_skb, count); | 656 | skb_pull(bcs->tx_skb, count); |
657 | atomic_set(&ucs->busy, 1); | 657 | atomic_set(&ucs->busy, 1); |
658 | gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count); | 658 | gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count); |
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c index 1e2d38e3d68c..428872b653e9 100644 --- a/drivers/isdn/hardware/avm/b1dma.c +++ b/drivers/isdn/hardware/avm/b1dma.c | |||
@@ -404,7 +404,8 @@ static void b1dma_dispatch_tx(avmcard *card) | |||
404 | printk(KERN_DEBUG "tx: put 0x%x len=%d\n", | 404 | printk(KERN_DEBUG "tx: put 0x%x len=%d\n", |
405 | skb->data[2], txlen); | 405 | skb->data[2], txlen); |
406 | #endif | 406 | #endif |
407 | memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2); | 407 | skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf, |
408 | skb->len - 2); | ||
408 | } | 409 | } |
409 | txlen = (txlen + 3) & ~3; | 410 | txlen = (txlen + 3) & ~3; |
410 | 411 | ||
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c index 6f5efa8d78cb..d58f927e766a 100644 --- a/drivers/isdn/hardware/avm/c4.c +++ b/drivers/isdn/hardware/avm/c4.c | |||
@@ -457,7 +457,8 @@ static void c4_dispatch_tx(avmcard *card) | |||
457 | printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n", | 457 | printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n", |
458 | card->name, skb->data[2], txlen); | 458 | card->name, skb->data[2], txlen); |
459 | #endif | 459 | #endif |
460 | memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2); | 460 | skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf, |
461 | skb->len - 2); | ||
461 | } | 462 | } |
462 | txlen = (txlen + 3) & ~3; | 463 | txlen = (txlen + 3) & ~3; |
463 | 464 | ||
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c index ae377e812775..1642dca988a1 100644 --- a/drivers/isdn/hisax/elsa_ser.c +++ b/drivers/isdn/hisax/elsa_ser.c | |||
@@ -254,14 +254,16 @@ write_modem(struct BCState *bcs) { | |||
254 | count = len; | 254 | count = len; |
255 | if (count > MAX_MODEM_BUF - fp) { | 255 | if (count > MAX_MODEM_BUF - fp) { |
256 | count = MAX_MODEM_BUF - fp; | 256 | count = MAX_MODEM_BUF - fp; |
257 | memcpy(cs->hw.elsa.transbuf + fp, bcs->tx_skb->data, count); | 257 | skb_copy_from_linear_data(bcs->tx_skb, |
258 | cs->hw.elsa.transbuf + fp, count); | ||
258 | skb_pull(bcs->tx_skb, count); | 259 | skb_pull(bcs->tx_skb, count); |
259 | cs->hw.elsa.transcnt += count; | 260 | cs->hw.elsa.transcnt += count; |
260 | ret = count; | 261 | ret = count; |
261 | count = len - count; | 262 | count = len - count; |
262 | fp = 0; | 263 | fp = 0; |
263 | } | 264 | } |
264 | memcpy((cs->hw.elsa.transbuf + fp), bcs->tx_skb->data, count); | 265 | skb_copy_from_linear_data(bcs->tx_skb, |
266 | cs->hw.elsa.transbuf + fp, count); | ||
265 | skb_pull(bcs->tx_skb, count); | 267 | skb_pull(bcs->tx_skb, count); |
266 | cs->hw.elsa.transcnt += count; | 268 | cs->hw.elsa.transcnt += count; |
267 | ret += count; | 269 | ret += count; |
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c index cd3b5ad53491..3446f249d675 100644 --- a/drivers/isdn/hisax/isdnl2.c +++ b/drivers/isdn/hisax/isdnl2.c | |||
@@ -1293,7 +1293,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) | |||
1293 | oskb = skb; | 1293 | oskb = skb; |
1294 | skb = alloc_skb(oskb->len + i, GFP_ATOMIC); | 1294 | skb = alloc_skb(oskb->len + i, GFP_ATOMIC); |
1295 | memcpy(skb_put(skb, i), header, i); | 1295 | memcpy(skb_put(skb, i), header, i); |
1296 | memcpy(skb_put(skb, oskb->len), oskb->data, oskb->len); | 1296 | skb_copy_from_linear_data(oskb, |
1297 | skb_put(skb, oskb->len), oskb->len); | ||
1297 | dev_kfree_skb(oskb); | 1298 | dev_kfree_skb(oskb); |
1298 | } | 1299 | } |
1299 | st->l2.l2l1(st, PH_PULL | INDICATION, skb); | 1300 | st->l2.l2l1(st, PH_PULL | INDICATION, skb); |
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c index b2ae4ec1e49e..f85450146bdc 100644 --- a/drivers/isdn/hysdn/hycapi.c +++ b/drivers/isdn/hysdn/hycapi.c | |||
@@ -398,8 +398,9 @@ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) | |||
398 | _len = CAPIMSG_LEN(skb->data); | 398 | _len = CAPIMSG_LEN(skb->data); |
399 | if (_len > 22) { | 399 | if (_len > 22) { |
400 | _len2 = _len - 22; | 400 | _len2 = _len - 22; |
401 | memcpy(msghead, skb->data, 22); | 401 | skb_copy_from_linear_data(skb, msghead, 22); |
402 | memcpy(skb->data + _len2, msghead, 22); | 402 | skb_copy_to_linear_data_offset(skb, _len2, |
403 | msghead, 22); | ||
403 | skb_pull(skb, _len2); | 404 | skb_pull(skb, _len2); |
404 | CAPIMSG_SETLEN(skb->data, 22); | 405 | CAPIMSG_SETLEN(skb->data, 22); |
405 | retval = capilib_data_b3_req(&cinfo->ncci_head, | 406 | retval = capilib_data_b3_req(&cinfo->ncci_head, |
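
The hycapi hunk is doing something slightly clever: for a long DATA_B3 message it saves the first 22 header bytes, rewrites them immediately in front of the payload, then skb_pull()s the gap -- shrinking the header to 22 bytes in place without moving the payload. An assumed-equivalent standalone sketch:

        #include <string.h>

        struct sk_buff { unsigned char *data; unsigned int len; };

        /* Simplified skb_pull(): advance the data pointer, shrink the length. */
        static unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
        {
                skb->data += len;
                skb->len -= len;
                return skb->data;
        }

        /*
         * Shrink a message header from hdr_len down to 22 bytes in place,
         * leaving the payload untouched: save the first 22 bytes, rewrite
         * them just before the payload, then pull the now-dead gap.
         */
        static void shrink_header(struct sk_buff *skb, unsigned int hdr_len)
        {
                unsigned char head[22];
                unsigned int excess = hdr_len - 22;     /* header bytes to drop */

                memcpy(head, skb->data, 22);
                memcpy(skb->data + excess, head, 22);
                skb_pull(skb, excess);          /* data now points at the header */
        }
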
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c index 557d96c78a62..cfa8fa5e44ab 100644 --- a/drivers/isdn/hysdn/hysdn_net.c +++ b/drivers/isdn/hysdn/hysdn_net.c | |||
@@ -214,8 +214,6 @@ hysdn_rx_netpkt(hysdn_card * card, unsigned char *buf, unsigned short len) | |||
214 | lp->stats.rx_dropped++; | 214 | lp->stats.rx_dropped++; |
215 | return; | 215 | return; |
216 | } | 216 | } |
217 | skb->dev = &lp->netdev; | ||
218 | |||
219 | /* copy the data */ | 217 | /* copy the data */ |
220 | memcpy(skb_put(skb, len), buf, len); | 218 | memcpy(skb_put(skb, len), buf, len); |
221 | 219 | ||
diff --git a/drivers/isdn/hysdn/hysdn_sched.c b/drivers/isdn/hysdn/hysdn_sched.c index b7b5aa4748a0..81db4a190d41 100644 --- a/drivers/isdn/hysdn/hysdn_sched.c +++ b/drivers/isdn/hysdn/hysdn_sched.c | |||
@@ -113,7 +113,8 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf, | |||
113 | (skb = hysdn_tx_netget(card)) != NULL) | 113 | (skb = hysdn_tx_netget(card)) != NULL) |
114 | { | 114 | { |
115 | if (skb->len <= maxlen) { | 115 | if (skb->len <= maxlen) { |
116 | memcpy(buf, skb->data, skb->len); /* copy the packet to the buffer */ | 116 | /* copy the packet to the buffer */ |
117 | skb_copy_from_linear_data(skb, buf, skb->len); | ||
117 | *len = skb->len; | 118 | *len = skb->len; |
118 | *chan = CHAN_NDIS_DATA; | 119 | *chan = CHAN_NDIS_DATA; |
119 | card->net_tx_busy = 1; /* we are busy sending network data */ | 120 | card->net_tx_busy = 1; /* we are busy sending network data */ |
@@ -126,7 +127,7 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf, | |||
126 | ((skb = hycapi_tx_capiget(card)) != NULL) ) | 127 | ((skb = hycapi_tx_capiget(card)) != NULL) ) |
127 | { | 128 | { |
128 | if (skb->len <= maxlen) { | 129 | if (skb->len <= maxlen) { |
129 | memcpy(buf, skb->data, skb->len); | 130 | skb_copy_from_linear_data(skb, buf, skb->len); |
130 | *len = skb->len; | 131 | *len = skb->len; |
131 | *chan = CHAN_CAPI; | 132 | *chan = CHAN_CAPI; |
132 | hycapi_tx_capiack(card); | 133 | hycapi_tx_capiack(card); |
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 9c926e41b114..c97330b19877 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c | |||
@@ -829,7 +829,7 @@ isdn_readbchan(int di, int channel, u_char * buf, u_char * fp, int len, wait_que | |||
829 | dflag = 0; | 829 | dflag = 0; |
830 | } | 830 | } |
831 | count_put = count_pull; | 831 | count_put = count_pull; |
832 | memcpy(cp, skb->data, count_put); | 832 | skb_copy_from_linear_data(skb, cp, count_put); |
833 | cp += count_put; | 833 | cp += count_put; |
834 | len -= count_put; | 834 | len -= count_put; |
835 | #ifdef CONFIG_ISDN_AUDIO | 835 | #ifdef CONFIG_ISDN_AUDIO |
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 838b3734e2b6..aa83277aba74 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c | |||
@@ -872,7 +872,8 @@ typedef struct { | |||
872 | static void | 872 | static void |
873 | isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp) | 873 | isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp) |
874 | { | 874 | { |
875 | u_char *p = skb->nh.raw; /* hopefully, this was set correctly */ | 875 | /* hopefully, this was set correctly */ |
876 | const u_char *p = skb_network_header(skb); | ||
876 | unsigned short proto = ntohs(skb->protocol); | 877 | unsigned short proto = ntohs(skb->protocol); |
877 | int data_ofs; | 878 | int data_ofs; |
878 | ip_ports *ipp; | 879 | ip_ports *ipp; |
@@ -880,7 +881,7 @@ isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp) | |||
880 | 881 | ||
881 | addinfo[0] = '\0'; | 882 | addinfo[0] = '\0'; |
882 | /* This check stolen from 2.1.72 dev_queue_xmit_nit() */ | 883 | /* This check stolen from 2.1.72 dev_queue_xmit_nit() */ |
883 | if (skb->nh.raw < skb->data || skb->nh.raw >= skb->tail) { | 884 | if (p < skb->data || skb->network_header >= skb->tail) { |
884 | /* fall back to old isdn_net_log_packet method() */ | 885 | /* fall back to old isdn_net_log_packet method() */ |
885 | char * buf = skb->data; | 886 | char * buf = skb->data; |
886 | 887 | ||
@@ -1121,7 +1122,7 @@ isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev) | |||
1121 | if (!skb) | 1122 | if (!skb) |
1122 | return; | 1123 | return; |
1123 | if (lp->p_encap == ISDN_NET_ENCAP_ETHER) { | 1124 | if (lp->p_encap == ISDN_NET_ENCAP_ETHER) { |
1124 | int pullsize = (ulong)skb->nh.raw - (ulong)skb->data - ETH_HLEN; | 1125 | const int pullsize = skb_network_offset(skb) - ETH_HLEN; |
1125 | if (pullsize > 0) { | 1126 | if (pullsize > 0) { |
1126 | printk(KERN_DEBUG "isdn_net: Pull junk %d\n", pullsize); | 1127 | printk(KERN_DEBUG "isdn_net: Pull junk %d\n", pullsize); |
1127 | skb_pull(skb, pullsize); | 1128 | skb_pull(skb, pullsize); |
@@ -1366,7 +1367,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
1366 | struct ethhdr *eth; | 1367 | struct ethhdr *eth; |
1367 | unsigned char *rawp; | 1368 | unsigned char *rawp; |
1368 | 1369 | ||
1369 | skb->mac.raw = skb->data; | 1370 | skb_reset_mac_header(skb); |
1370 | skb_pull(skb, ETH_HLEN); | 1371 | skb_pull(skb, ETH_HLEN); |
1371 | eth = eth_hdr(skb); | 1372 | eth = eth_hdr(skb); |
1372 | 1373 | ||
@@ -1786,7 +1787,7 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb) | |||
1786 | } | 1787 | } |
1787 | skb->dev = ndev; | 1788 | skb->dev = ndev; |
1788 | skb->pkt_type = PACKET_HOST; | 1789 | skb->pkt_type = PACKET_HOST; |
1789 | skb->mac.raw = skb->data; | 1790 | skb_reset_mac_header(skb); |
1790 | #ifdef ISDN_DEBUG_NET_DUMP | 1791 | #ifdef ISDN_DEBUG_NET_DUMP |
1791 | isdn_dumppkt("R:", skb->data, skb->len, 40); | 1792 | isdn_dumppkt("R:", skb->data, skb->len, 40); |
1792 | #endif | 1793 | #endif |
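The isdn_net changes swap direct skb->nh.raw and skb->mac.raw manipulation for the header accessors this kernel release introduces. On 64-bit kernels the underlying fields become offsets rather than pointers, which is exactly why the accessors exist; in their simpler non-offset form they read roughly as:

```c
/* Sketch of the 2.6.22-era header accessors (non-offset variant). */
static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}
```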
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 1b2df80c3bce..387392cb3d68 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c | |||
@@ -1100,7 +1100,8 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff | |||
1100 | goto drop_packet; | 1100 | goto drop_packet; |
1101 | } | 1101 | } |
1102 | skb_put(skb, skb_old->len + 128); | 1102 | skb_put(skb, skb_old->len + 128); |
1103 | memcpy(skb->data, skb_old->data, skb_old->len); | 1103 | skb_copy_from_linear_data(skb_old, skb->data, |
1104 | skb_old->len); | ||
1104 | if (net_dev->local->ppp_slot < 0) { | 1105 | if (net_dev->local->ppp_slot < 0) { |
1105 | printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n", | 1106 | printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n", |
1106 | __FUNCTION__, net_dev->local->ppp_slot); | 1107 | __FUNCTION__, net_dev->local->ppp_slot); |
@@ -1167,7 +1168,7 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff | |||
1167 | mlp->huptimer = 0; | 1168 | mlp->huptimer = 0; |
1168 | #endif /* CONFIG_IPPP_FILTER */ | 1169 | #endif /* CONFIG_IPPP_FILTER */ |
1169 | skb->dev = dev; | 1170 | skb->dev = dev; |
1170 | skb->mac.raw = skb->data; | 1171 | skb_reset_mac_header(skb); |
1171 | netif_rx(skb); | 1172 | netif_rx(skb); |
1172 | /* net_dev->local->stats.rx_packets++; done in isdn_net.c */ | 1173 | /* net_dev->local->stats.rx_packets++; done in isdn_net.c */ |
1173 | return; | 1174 | return; |
@@ -1902,7 +1903,9 @@ void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp, | |||
1902 | while( from != to ) { | 1903 | while( from != to ) { |
1903 | unsigned int len = from->len - MP_HEADER_LEN; | 1904 | unsigned int len = from->len - MP_HEADER_LEN; |
1904 | 1905 | ||
1905 | memcpy(skb_put(skb,len), from->data+MP_HEADER_LEN, len); | 1906 | skb_copy_from_linear_data_offset(from, MP_HEADER_LEN, |
1907 | skb_put(skb,len), | ||
1908 | len); | ||
1906 | frag = from->next; | 1909 | frag = from->next; |
1907 | isdn_ppp_mp_free_skb(mp, from); | 1910 | isdn_ppp_mp_free_skb(mp, from); |
1908 | from = frag; | 1911 | from = frag; |
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index e3add27dd0e1..e93ad59f60bf 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c | |||
@@ -415,7 +415,8 @@ isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card * card) | |||
415 | spin_lock_irqsave(&card->isdnloop_lock, flags); | 415 | spin_lock_irqsave(&card->isdnloop_lock, flags); |
416 | nskb = dev_alloc_skb(skb->len); | 416 | nskb = dev_alloc_skb(skb->len); |
417 | if (nskb) { | 417 | if (nskb) { |
418 | memcpy(skb_put(nskb, len), skb->data, len); | 418 | skb_copy_from_linear_data(skb, |
419 | skb_put(nskb, len), len); | ||
419 | skb_queue_tail(&card->bqueue[channel], nskb); | 420 | skb_queue_tail(&card->bqueue[channel], nskb); |
420 | dev_kfree_skb(skb); | 421 | dev_kfree_skb(skb); |
421 | } else | 422 | } else |
diff --git a/drivers/isdn/pcbit/capi.c b/drivers/isdn/pcbit/capi.c index 47c59e95898d..7b55e151f1b0 100644 --- a/drivers/isdn/pcbit/capi.c +++ b/drivers/isdn/pcbit/capi.c | |||
@@ -429,8 +429,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan, | |||
429 | if (!(info->data.setup.CallingPN = kmalloc(len - count + 1, GFP_ATOMIC))) | 429 | if (!(info->data.setup.CallingPN = kmalloc(len - count + 1, GFP_ATOMIC))) |
430 | return -1; | 430 | return -1; |
431 | 431 | ||
432 | memcpy(info->data.setup.CallingPN, skb->data + count + 1, | 432 | skb_copy_from_linear_data_offset(skb, count + 1, |
433 | len - count); | 433 | info->data.setup.CallingPN, |
434 | len - count); | ||
434 | info->data.setup.CallingPN[len - count] = 0; | 435 | info->data.setup.CallingPN[len - count] = 0; |
435 | 436 | ||
436 | } | 437 | } |
@@ -457,8 +458,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan, | |||
457 | if (!(info->data.setup.CalledPN = kmalloc(len - count + 1, GFP_ATOMIC))) | 458 | if (!(info->data.setup.CalledPN = kmalloc(len - count + 1, GFP_ATOMIC))) |
458 | return -1; | 459 | return -1; |
459 | 460 | ||
460 | memcpy(info->data.setup.CalledPN, skb->data + count + 1, | 461 | skb_copy_from_linear_data_offset(skb, count + 1, |
461 | len - count); | 462 | info->data.setup.CalledPN, |
463 | len - count); | ||
462 | info->data.setup.CalledPN[len - count] = 0; | 464 | info->data.setup.CalledPN[len - count] = 0; |
463 | 465 | ||
464 | } | 466 | } |
@@ -539,7 +541,7 @@ int capi_decode_conn_actv_ind(struct pcbit_chan * chan, struct sk_buff *skb) | |||
539 | 541 | ||
540 | #ifdef DEBUG | 542 | #ifdef DEBUG |
541 | if (len > 1 && len < 31) { | 543 | if (len > 1 && len < 31) { |
542 | memcpy(str, skb->data + 2, len - 1); | 544 | skb_copy_from_linear_data_offset(skb, 2, str, len - 1); |
543 | str[len] = 0; | 545 | str[len] = 0; |
544 | printk(KERN_DEBUG "Connected Party Number: %s\n", str); | 546 | printk(KERN_DEBUG "Connected Party Number: %s\n", str); |
545 | } | 547 | } |
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index 76e9c36597eb..6a5ab409c4e7 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c | |||
@@ -174,7 +174,7 @@ static unsigned short dvb_net_eth_type_trans(struct sk_buff *skb, | |||
174 | struct ethhdr *eth; | 174 | struct ethhdr *eth; |
175 | unsigned char *rawp; | 175 | unsigned char *rawp; |
176 | 176 | ||
177 | skb->mac.raw=skb->data; | 177 | skb_reset_mac_header(skb); |
178 | skb_pull(skb,dev->hard_header_len); | 178 | skb_pull(skb,dev->hard_header_len); |
179 | eth = eth_hdr(skb); | 179 | eth = eth_hdr(skb); |
180 | 180 | ||
@@ -600,6 +600,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len ) | |||
600 | /* Check CRC32, we've got it in our skb already. */ | 600 | /* Check CRC32, we've got it in our skb already. */ |
601 | unsigned short ulen = htons(priv->ule_sndu_len); | 601 | unsigned short ulen = htons(priv->ule_sndu_len); |
602 | unsigned short utype = htons(priv->ule_sndu_type); | 602 | unsigned short utype = htons(priv->ule_sndu_type); |
603 | const u8 *tail; | ||
603 | struct kvec iov[3] = { | 604 | struct kvec iov[3] = { |
604 | { &ulen, sizeof ulen }, | 605 | { &ulen, sizeof ulen }, |
605 | { &utype, sizeof utype }, | 606 | { &utype, sizeof utype }, |
@@ -613,10 +614,11 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len ) | |||
613 | } | 614 | } |
614 | 615 | ||
615 | ule_crc = iov_crc32(ule_crc, iov, 3); | 616 | ule_crc = iov_crc32(ule_crc, iov, 3); |
616 | expected_crc = *((u8 *)priv->ule_skb->tail - 4) << 24 | | 617 | tail = skb_tail_pointer(priv->ule_skb); |
617 | *((u8 *)priv->ule_skb->tail - 3) << 16 | | 618 | expected_crc = *(tail - 4) << 24 | |
618 | *((u8 *)priv->ule_skb->tail - 2) << 8 | | 619 | *(tail - 3) << 16 | |
619 | *((u8 *)priv->ule_skb->tail - 1); | 620 | *(tail - 2) << 8 | |
621 | *(tail - 1); | ||
620 | if (ule_crc != expected_crc) { | 622 | if (ule_crc != expected_crc) { |
621 | printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n", | 623 | printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n", |
622 | priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0); | 624 | priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0); |
@@ -695,7 +697,9 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len ) | |||
695 | } | 697 | } |
696 | else | 698 | else |
697 | { | 699 | { |
698 | memcpy(dest_addr, priv->ule_skb->data, ETH_ALEN); | 700 | skb_copy_from_linear_data(priv->ule_skb, |
701 | dest_addr, | ||
702 | ETH_ALEN); | ||
699 | skb_pull(priv->ule_skb, ETH_ALEN); | 703 | skb_pull(priv->ule_skb, ETH_ALEN); |
700 | } | 704 | } |
701 | } | 705 | } |
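dvb_net reads the received CRC32 back from the last four bytes of the SNDU. The tail is now fetched once through skb_tail_pointer() instead of casting skb->tail four times, since on 64-bit kernels skb->tail is stored as an offset from skb->head rather than a pointer. Non-offset sketch:

```c
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;	/* offset builds: skb->head + skb->tail */
}
```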
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index b691292ff599..7dd34bd28efc 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c | |||
@@ -714,6 +714,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) | |||
714 | LANSendRequest_t *pSendReq; | 714 | LANSendRequest_t *pSendReq; |
715 | SGETransaction32_t *pTrans; | 715 | SGETransaction32_t *pTrans; |
716 | SGESimple64_t *pSimple; | 716 | SGESimple64_t *pSimple; |
717 | const unsigned char *mac; | ||
717 | dma_addr_t dma; | 718 | dma_addr_t dma; |
718 | unsigned long flags; | 719 | unsigned long flags; |
719 | int ctx; | 720 | int ctx; |
@@ -753,7 +754,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) | |||
753 | /* Set the mac.raw pointer, since this apparently isn't getting | 754 | /* Set the mac.raw pointer, since this apparently isn't getting |
754 | * done before we get the skb. Pull the data pointer past the mac data. | 755 | * done before we get the skb. Pull the data pointer past the mac data. |
755 | */ | 756 | */ |
756 | skb->mac.raw = skb->data; | 757 | skb_reset_mac_header(skb); |
757 | skb_pull(skb, 12); | 758 | skb_pull(skb, 12); |
758 | 759 | ||
759 | dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len, | 760 | dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len, |
@@ -784,6 +785,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) | |||
784 | // IOC_AND_NETDEV_NAMES_s_s(dev), | 785 | // IOC_AND_NETDEV_NAMES_s_s(dev), |
785 | // ctx, skb, skb->data)); | 786 | // ctx, skb, skb->data)); |
786 | 787 | ||
788 | mac = skb_mac_header(skb); | ||
787 | #ifdef QLOGIC_NAA_WORKAROUND | 789 | #ifdef QLOGIC_NAA_WORKAROUND |
788 | { | 790 | { |
789 | struct NAA_Hosed *nh; | 791 | struct NAA_Hosed *nh; |
@@ -793,12 +795,12 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) | |||
793 | drops. */ | 795 | drops. */ |
794 | read_lock_irq(&bad_naa_lock); | 796 | read_lock_irq(&bad_naa_lock); |
795 | for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) { | 797 | for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) { |
796 | if ((nh->ieee[0] == skb->mac.raw[0]) && | 798 | if ((nh->ieee[0] == mac[0]) && |
797 | (nh->ieee[1] == skb->mac.raw[1]) && | 799 | (nh->ieee[1] == mac[1]) && |
798 | (nh->ieee[2] == skb->mac.raw[2]) && | 800 | (nh->ieee[2] == mac[2]) && |
799 | (nh->ieee[3] == skb->mac.raw[3]) && | 801 | (nh->ieee[3] == mac[3]) && |
800 | (nh->ieee[4] == skb->mac.raw[4]) && | 802 | (nh->ieee[4] == mac[4]) && |
801 | (nh->ieee[5] == skb->mac.raw[5])) { | 803 | (nh->ieee[5] == mac[5])) { |
802 | cur_naa = nh->NAA; | 804 | cur_naa = nh->NAA; |
803 | dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value " | 805 | dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value " |
804 | "= %04x.\n", cur_naa)); | 806 | "= %04x.\n", cur_naa)); |
@@ -810,12 +812,12 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) | |||
810 | #endif | 812 | #endif |
811 | 813 | ||
812 | pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | | 814 | pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | |
813 | (skb->mac.raw[0] << 8) | | 815 | (mac[0] << 8) | |
814 | (skb->mac.raw[1] << 0)); | 816 | (mac[1] << 0)); |
815 | pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) | | 817 | pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) | |
816 | (skb->mac.raw[3] << 16) | | 818 | (mac[3] << 16) | |
817 | (skb->mac.raw[4] << 8) | | 819 | (mac[4] << 8) | |
818 | (skb->mac.raw[5] << 0)); | 820 | (mac[5] << 0)); |
819 | 821 | ||
820 | pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2]; | 822 | pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2]; |
821 | 823 | ||
@@ -930,7 +932,7 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg) | |||
930 | pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, | 932 | pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, |
931 | priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); | 933 | priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); |
932 | 934 | ||
933 | memcpy(skb_put(skb, len), old_skb->data, len); | 935 | skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); |
934 | 936 | ||
935 | pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, | 937 | pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, |
936 | priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); | 938 | priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); |
@@ -1091,7 +1093,7 @@ mpt_lan_receive_post_reply(struct net_device *dev, | |||
1091 | priv->RcvCtl[ctx].dma, | 1093 | priv->RcvCtl[ctx].dma, |
1092 | priv->RcvCtl[ctx].len, | 1094 | priv->RcvCtl[ctx].len, |
1093 | PCI_DMA_FROMDEVICE); | 1095 | PCI_DMA_FROMDEVICE); |
1094 | memcpy(skb_put(skb, l), old_skb->data, l); | 1096 | skb_copy_from_linear_data(old_skb, skb_put(skb, l), l); |
1095 | 1097 | ||
1096 | pci_dma_sync_single_for_device(mpt_dev->pcidev, | 1098 | pci_dma_sync_single_for_device(mpt_dev->pcidev, |
1097 | priv->RcvCtl[ctx].dma, | 1099 | priv->RcvCtl[ctx].dma, |
@@ -1120,7 +1122,7 @@ mpt_lan_receive_post_reply(struct net_device *dev, | |||
1120 | priv->RcvCtl[ctx].len, | 1122 | priv->RcvCtl[ctx].len, |
1121 | PCI_DMA_FROMDEVICE); | 1123 | PCI_DMA_FROMDEVICE); |
1122 | 1124 | ||
1123 | memcpy(skb_put(skb, len), old_skb->data, len); | 1125 | skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); |
1124 | 1126 | ||
1125 | pci_dma_sync_single_for_device(mpt_dev->pcidev, | 1127 | pci_dma_sync_single_for_device(mpt_dev->pcidev, |
1126 | priv->RcvCtl[ctx].dma, | 1128 | priv->RcvCtl[ctx].dma, |
@@ -1549,7 +1551,7 @@ mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
1549 | struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data; | 1551 | struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data; |
1550 | struct fcllc *fcllc; | 1552 | struct fcllc *fcllc; |
1551 | 1553 | ||
1552 | skb->mac.raw = skb->data; | 1554 | skb_reset_mac_header(skb); |
1553 | skb_pull(skb, sizeof(struct mpt_lan_ohdr)); | 1555 | skb_pull(skb, sizeof(struct mpt_lan_ohdr)); |
1554 | 1556 | ||
1555 | if (fch->dtype == htons(0xffff)) { | 1557 | if (fch->dtype == htons(0xffff)) { |
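mptlan previously indexed skb->mac.raw[] six separate times while building the transaction details; caching the pointer once via skb_mac_header() is clearer and avoids the repeated field loads. Sketch of the accessor (non-offset variant):

```c
static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}
```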
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index fedf9b7eae5d..c1b47db29bd2 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig | |||
@@ -287,4 +287,6 @@ source "drivers/mtd/nand/Kconfig" | |||
287 | 287 | ||
288 | source "drivers/mtd/onenand/Kconfig" | 288 | source "drivers/mtd/onenand/Kconfig" |
289 | 289 | ||
290 | source "drivers/mtd/ubi/Kconfig" | ||
291 | |||
290 | endif # MTD | 292 | endif # MTD |
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index c130e6261adf..92055405cb30 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile | |||
@@ -28,3 +28,5 @@ nftl-objs := nftlcore.o nftlmount.o | |||
28 | inftl-objs := inftlcore.o inftlmount.o | 28 | inftl-objs := inftlcore.o inftlmount.o |
29 | 29 | ||
30 | obj-y += chips/ maps/ devices/ nand/ onenand/ | 30 | obj-y += chips/ maps/ devices/ nand/ onenand/ |
31 | |||
32 | obj-$(CONFIG_MTD_UBI) += ubi/ | ||
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c index 4db2055cee31..001af7f7ddda 100644 --- a/drivers/mtd/maps/sun_uflash.c +++ b/drivers/mtd/maps/sun_uflash.c | |||
@@ -39,7 +39,7 @@ MODULE_VERSION("2.0"); | |||
39 | 39 | ||
40 | static LIST_HEAD(device_list); | 40 | static LIST_HEAD(device_list); |
41 | struct uflash_dev { | 41 | struct uflash_dev { |
42 | char *name; /* device name */ | 42 | const char *name; /* device name */ |
43 | struct map_info map; /* mtd map info */ | 43 | struct map_info map; /* mtd map info */ |
44 | struct mtd_info *mtd; /* mtd info */ | 44 | struct mtd_info *mtd; /* mtd info */ |
45 | }; | 45 | }; |
@@ -80,7 +80,7 @@ int uflash_devinit(struct linux_ebus_device *edev, struct device_node *dp) | |||
80 | 80 | ||
81 | up->name = of_get_property(dp, "model", NULL); | 81 | up->name = of_get_property(dp, "model", NULL); |
82 | if (up->name && 0 < strlen(up->name)) | 82 | if (up->name && 0 < strlen(up->name)) |
83 | up->map.name = up->name; | 83 | up->map.name = (char *)up->name; |
84 | 84 | ||
85 | up->map.phys = res->start; | 85 | up->map.phys = res->start; |
86 | 86 | ||
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig new file mode 100644 index 000000000000..b9daf159a4a7 --- /dev/null +++ b/drivers/mtd/ubi/Kconfig | |||
@@ -0,0 +1,58 @@ | |||
1 | # drivers/mtd/ubi/Kconfig | ||
2 | |||
3 | menu "UBI - Unsorted block images" | ||
4 | depends on MTD | ||
5 | |||
6 | config MTD_UBI | ||
7 | tristate "Enable UBI" | ||
8 | depends on MTD | ||
9 | select CRC32 | ||
10 | help | ||
11 | UBI is a software layer above the MTD layer which provides LVM-like | ||
12 | logical volumes on top of MTD devices, hides some complexities of | ||
13 | flash chips such as wear and bad blocks, and provides some other | ||
14 | useful capabilities. Please consult the MTD web site for more details | ||
15 | (www.linux-mtd.infradead.org). | ||
16 | |||
17 | config MTD_UBI_WL_THRESHOLD | ||
18 | int "UBI wear-leveling threshold" | ||
19 | default 4096 | ||
20 | range 2 65536 | ||
21 | depends on MTD_UBI | ||
22 | help | ||
23 | This parameter defines the maximum difference between the highest | ||
24 | erase counter value and the lowest erase counter value of eraseblocks | ||
25 | of UBI devices. When this threshold is exceeded, UBI starts performing | ||
26 | wear leveling by moving data from eraseblocks with low erase | ||
27 | counters to eraseblocks with high erase counters. Leave the default | ||
28 | value if unsure. | ||
29 | |||
30 | config MTD_UBI_BEB_RESERVE | ||
31 | int "Percentage of reserved eraseblocks for bad eraseblocks handling" | ||
32 | default 1 | ||
33 | range 0 25 | ||
34 | depends on MTD_UBI | ||
35 | help | ||
36 | If the MTD device may contain bad eraseblocks (e.g. NAND flash), UBI | ||
37 | reserves some physical eraseblocks to handle new bad eraseblocks. For | ||
38 | example, if a flash physical eraseblock becomes bad, UBI uses these | ||
39 | reserved physical eraseblocks to relocate the bad one. This option | ||
40 | specifies how many physical eraseblocks will be reserved for bad | ||
41 | eraseblock handling (as a percentage of the total number of good | ||
42 | flash eraseblocks). If the underlying flash cannot develop bad | ||
43 | eraseblocks (e.g. NOR flash), this value is ignored and nothing is | ||
44 | reserved. Leave the default value if unsure. | ||
45 | |||
46 | config MTD_UBI_GLUEBI | ||
47 | bool "Emulate MTD devices" | ||
48 | default n | ||
49 | depends on MTD_UBI | ||
50 | help | ||
51 | This option enables MTD device emulation on top of UBI volumes: for | ||
52 | each UBI volume an MTD device is created, and all I/O to this MTD | ||
53 | device is redirected to the UBI volume. This is handy to make | ||
54 | MTD-oriented software (like JFFS2) work on top of UBI. Do not enable | ||
55 | this if no legacy software will be used. | ||
56 | |||
57 | source "drivers/mtd/ubi/Kconfig.debug" | ||
58 | endmenu | ||
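To make MTD_UBI_BEB_RESERVE concrete: it is a percentage of the good eraseblocks, rounded down. A minimal sketch of the implied arithmetic (illustrative only, not code from this patch; the in-tree calculation lives elsewhere in the UBI code):

```c
/* Hypothetical illustration of the BEB reserve percentage.
 * Example: 2048 good 128 KiB PEBs at the default 1% reserve
 * set aside 2048 * 1 / 100 = 20 PEBs for bad-block handling. */
static int beb_reserve_pebs(int good_peb_count, int reserve_percent)
{
	return good_peb_count * reserve_percent / 100;
}
```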
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug new file mode 100644 index 000000000000..1e2ee22edeff --- /dev/null +++ b/drivers/mtd/ubi/Kconfig.debug | |||
@@ -0,0 +1,104 @@ | |||
1 | comment "UBI debugging options" | ||
2 | depends on MTD_UBI | ||
3 | |||
4 | config MTD_UBI_DEBUG | ||
5 | bool "UBI debugging" | ||
6 | depends on SYSFS | ||
7 | depends on MTD_UBI | ||
8 | select DEBUG_FS | ||
9 | select KALLSYMS_ALL | ||
10 | help | ||
11 | This option enables UBI debugging. | ||
12 | |||
13 | config MTD_UBI_DEBUG_MSG | ||
14 | bool "UBI debugging messages" | ||
15 | depends on MTD_UBI_DEBUG | ||
16 | default n | ||
17 | help | ||
18 | This option enables UBI debugging messages. | ||
19 | |||
20 | config MTD_UBI_DEBUG_PARANOID | ||
21 | bool "Extra self-checks" | ||
22 | default n | ||
23 | depends on MTD_UBI_DEBUG | ||
24 | help | ||
25 | This option enables extra checks in UBI code. Note this slows UBI down | ||
26 | significantly. | ||
27 | |||
28 | config MTD_UBI_DEBUG_DISABLE_BGT | ||
29 | bool "Do not enable the UBI background thread" | ||
30 | depends on MTD_UBI_DEBUG | ||
31 | default n | ||
32 | help | ||
33 | This option switches the background thread off by default. The thread | ||
34 | may also be enabled/disabled via UBI sysfs. | ||
35 | |||
36 | config MTD_UBI_DEBUG_USERSPACE_IO | ||
37 | bool "Direct user-space write/erase support" | ||
38 | default n | ||
39 | depends on MTD_UBI_DEBUG | ||
40 | help | ||
41 | By default, users cannot directly write and erase individual | ||
42 | eraseblocks of dynamic volumes, and have to use the update operation | ||
43 | instead. This option enables this capability - it is very useful for | ||
44 | debugging and testing. | ||
45 | |||
46 | config MTD_UBI_DEBUG_EMULATE_BITFLIPS | ||
47 | bool "Emulate flash bit-flips" | ||
48 | depends on MTD_UBI_DEBUG | ||
49 | default n | ||
50 | help | ||
51 | This option emulates bit-flips with probability 1/50, which in turn | ||
52 | causes scrubbing. Useful for debugging and stressing UBI. | ||
53 | |||
54 | config MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES | ||
55 | bool "Emulate flash write failures" | ||
56 | depends on MTD_UBI_DEBUG | ||
57 | default n | ||
58 | help | ||
59 | This option emulates write failures with probability 1/100. Useful for | ||
60 | debugging and testing how UBI handles errors. | ||
61 | |||
62 | config MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES | ||
63 | bool "Emulate flash erase failures" | ||
64 | depends on MTD_UBI_DEBUG | ||
65 | default n | ||
66 | help | ||
67 | This option emulates erase failures with probability 1/100. Useful for | ||
68 | debugging and testing how UBI handles errors. | ||
69 | |||
70 | menu "Additional UBI debugging messages" | ||
71 | depends on MTD_UBI_DEBUG | ||
72 | |||
73 | config MTD_UBI_DEBUG_MSG_BLD | ||
74 | bool "Additional UBI initialization and build messages" | ||
75 | default n | ||
76 | depends on MTD_UBI_DEBUG | ||
77 | help | ||
78 | This option enables detailed UBI initialization and device build | ||
79 | debugging messages. | ||
80 | |||
81 | config MTD_UBI_DEBUG_MSG_EBA | ||
82 | bool "Eraseblock association unit messages" | ||
83 | default n | ||
84 | depends on MTD_UBI_DEBUG | ||
85 | help | ||
86 | This option enables debugging messages from the UBI eraseblock | ||
87 | association unit. | ||
88 | |||
89 | config MTD_UBI_DEBUG_MSG_WL | ||
90 | bool "Wear-leveling unit messages" | ||
91 | default n | ||
92 | depends on MTD_UBI_DEBUG | ||
93 | help | ||
94 | This option enables debugging messages from the UBI wear-leveling | ||
95 | unit. | ||
96 | |||
97 | config MTD_UBI_DEBUG_MSG_IO | ||
98 | bool "Input/output unit messages" | ||
99 | default n | ||
100 | depends on MTD_UBI_DEBUG | ||
101 | help | ||
102 | This option enables debugging messages from the UBI input/output unit. | ||
103 | |||
104 | endmenu # UBI debugging messages | ||
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile new file mode 100644 index 000000000000..dd834e04151b --- /dev/null +++ b/drivers/mtd/ubi/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | obj-$(CONFIG_MTD_UBI) += ubi.o | ||
2 | |||
3 | ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o | ||
4 | ubi-y += misc.o | ||
5 | |||
6 | ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o | ||
7 | ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o | ||
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c new file mode 100644 index 000000000000..555d594d1811 --- /dev/null +++ b/drivers/mtd/ubi/build.c | |||
@@ -0,0 +1,848 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * Copyright (c) Nokia Corporation, 2007 | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
13 | * the GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | * | ||
19 | * Author: Artem Bityutskiy (Битюцкий Артём), | ||
20 | * Frank Haverkamp | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * This file includes UBI initialization and building of UBI devices. At the | ||
25 | * moment UBI devices may only be added while UBI is initialized, but dynamic | ||
26 | * device add/remove functionality is planned. Also, at the moment we only | ||
27 | * attach UBI devices by scanning, which will become a bottleneck when flashes | ||
28 | reach a certain large size. Then one may improve UBI and add other methods. | ||
29 | */ | ||
30 | |||
31 | #include <linux/err.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/moduleparam.h> | ||
34 | #include <linux/stringify.h> | ||
35 | #include <linux/stat.h> | ||
36 | #include "ubi.h" | ||
37 | |||
38 | /* Maximum length of the 'mtd=' parameter */ | ||
39 | #define MTD_PARAM_LEN_MAX 64 | ||
40 | |||
41 | /** | ||
42 | * struct mtd_dev_param - MTD device parameter description data structure. | ||
43 | * @name: MTD device name or number string | ||
44 | * @vid_hdr_offs: VID header offset | ||
45 | * @data_offs: data offset | ||
46 | */ | ||
47 | struct mtd_dev_param | ||
48 | { | ||
49 | char name[MTD_PARAM_LEN_MAX]; | ||
50 | int vid_hdr_offs; | ||
51 | int data_offs; | ||
52 | }; | ||
53 | |||
54 | /* Numbers of elements set in the @mtd_dev_param array */ | ||
55 | static int mtd_devs = 0; | ||
56 | |||
57 | /* MTD devices specification parameters */ | ||
58 | static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; | ||
59 | |||
60 | /* Number of UBI devices in system */ | ||
61 | int ubi_devices_cnt; | ||
62 | |||
63 | /* All UBI devices in system */ | ||
64 | struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; | ||
65 | |||
66 | /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ | ||
67 | struct class *ubi_class; | ||
68 | |||
69 | /* "Show" method for files in '/<sysfs>/class/ubi/' */ | ||
70 | static ssize_t ubi_version_show(struct class *class, char *buf) | ||
71 | { | ||
72 | return sprintf(buf, "%d\n", UBI_VERSION); | ||
73 | } | ||
74 | |||
75 | /* UBI version attribute ('/<sysfs>/class/ubi/version') */ | ||
76 | static struct class_attribute ubi_version = | ||
77 | __ATTR(version, S_IRUGO, ubi_version_show, NULL); | ||
78 | |||
79 | static ssize_t dev_attribute_show(struct device *dev, | ||
80 | struct device_attribute *attr, char *buf); | ||
81 | |||
82 | /* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */ | ||
83 | static struct device_attribute dev_eraseblock_size = | ||
84 | __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL); | ||
85 | static struct device_attribute dev_avail_eraseblocks = | ||
86 | __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL); | ||
87 | static struct device_attribute dev_total_eraseblocks = | ||
88 | __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL); | ||
89 | static struct device_attribute dev_volumes_count = | ||
90 | __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL); | ||
91 | static struct device_attribute dev_max_ec = | ||
92 | __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL); | ||
93 | static struct device_attribute dev_reserved_for_bad = | ||
94 | __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL); | ||
95 | static struct device_attribute dev_bad_peb_count = | ||
96 | __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL); | ||
97 | static struct device_attribute dev_max_vol_count = | ||
98 | __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL); | ||
99 | static struct device_attribute dev_min_io_size = | ||
100 | __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); | ||
101 | static struct device_attribute dev_bgt_enabled = | ||
102 | __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); | ||
103 | |||
104 | /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */ | ||
105 | static ssize_t dev_attribute_show(struct device *dev, | ||
106 | struct device_attribute *attr, char *buf) | ||
107 | { | ||
108 | const struct ubi_device *ubi; | ||
109 | |||
110 | ubi = container_of(dev, struct ubi_device, dev); | ||
111 | if (attr == &dev_eraseblock_size) | ||
112 | return sprintf(buf, "%d\n", ubi->leb_size); | ||
113 | else if (attr == &dev_avail_eraseblocks) | ||
114 | return sprintf(buf, "%d\n", ubi->avail_pebs); | ||
115 | else if (attr == &dev_total_eraseblocks) | ||
116 | return sprintf(buf, "%d\n", ubi->good_peb_count); | ||
117 | else if (attr == &dev_volumes_count) | ||
118 | return sprintf(buf, "%d\n", ubi->vol_count); | ||
119 | else if (attr == &dev_max_ec) | ||
120 | return sprintf(buf, "%d\n", ubi->max_ec); | ||
121 | else if (attr == &dev_reserved_for_bad) | ||
122 | return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); | ||
123 | else if (attr == &dev_bad_peb_count) | ||
124 | return sprintf(buf, "%d\n", ubi->bad_peb_count); | ||
125 | else if (attr == &dev_max_vol_count) | ||
126 | return sprintf(buf, "%d\n", ubi->vtbl_slots); | ||
127 | else if (attr == &dev_min_io_size) | ||
128 | return sprintf(buf, "%d\n", ubi->min_io_size); | ||
129 | else if (attr == &dev_bgt_enabled) | ||
130 | return sprintf(buf, "%d\n", ubi->thread_enabled); | ||
131 | else | ||
132 | BUG(); | ||
133 | |||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | /* Fake "release" method for UBI devices */ | ||
138 | static void dev_release(struct device *dev) { } | ||
139 | |||
140 | /** | ||
141 | * ubi_sysfs_init - initialize sysfs for an UBI device. | ||
142 | * @ubi: UBI device description object | ||
143 | * | ||
144 | * This function returns zero in case of success and a negative error code in | ||
145 | * case of failure. | ||
146 | */ | ||
147 | static int ubi_sysfs_init(struct ubi_device *ubi) | ||
148 | { | ||
149 | int err; | ||
150 | |||
151 | ubi->dev.release = dev_release; | ||
152 | ubi->dev.devt = MKDEV(ubi->major, 0); | ||
153 | ubi->dev.class = ubi_class; | ||
154 | sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); | ||
155 | err = device_register(&ubi->dev); | ||
156 | if (err) | ||
157 | goto out; | ||
158 | |||
159 | err = device_create_file(&ubi->dev, &dev_eraseblock_size); | ||
160 | if (err) | ||
161 | goto out_unregister; | ||
162 | err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); | ||
163 | if (err) | ||
164 | goto out_eraseblock_size; | ||
165 | err = device_create_file(&ubi->dev, &dev_total_eraseblocks); | ||
166 | if (err) | ||
167 | goto out_avail_eraseblocks; | ||
168 | err = device_create_file(&ubi->dev, &dev_volumes_count); | ||
169 | if (err) | ||
170 | goto out_total_eraseblocks; | ||
171 | err = device_create_file(&ubi->dev, &dev_max_ec); | ||
172 | if (err) | ||
173 | goto out_volumes_count; | ||
174 | err = device_create_file(&ubi->dev, &dev_reserved_for_bad); | ||
175 | if (err) | ||
176 | goto out_volumes_max_ec; | ||
177 | err = device_create_file(&ubi->dev, &dev_bad_peb_count); | ||
178 | if (err) | ||
179 | goto out_reserved_for_bad; | ||
180 | err = device_create_file(&ubi->dev, &dev_max_vol_count); | ||
181 | if (err) | ||
182 | goto out_bad_peb_count; | ||
183 | err = device_create_file(&ubi->dev, &dev_min_io_size); | ||
184 | if (err) | ||
185 | goto out_max_vol_count; | ||
186 | err = device_create_file(&ubi->dev, &dev_bgt_enabled); | ||
187 | if (err) | ||
188 | goto out_min_io_size; | ||
189 | |||
190 | return 0; | ||
191 | |||
192 | out_min_io_size: | ||
193 | device_remove_file(&ubi->dev, &dev_min_io_size); | ||
194 | out_max_vol_count: | ||
195 | device_remove_file(&ubi->dev, &dev_max_vol_count); | ||
196 | out_bad_peb_count: | ||
197 | device_remove_file(&ubi->dev, &dev_bad_peb_count); | ||
198 | out_reserved_for_bad: | ||
199 | device_remove_file(&ubi->dev, &dev_reserved_for_bad); | ||
200 | out_volumes_max_ec: | ||
201 | device_remove_file(&ubi->dev, &dev_max_ec); | ||
202 | out_volumes_count: | ||
203 | device_remove_file(&ubi->dev, &dev_volumes_count); | ||
204 | out_total_eraseblocks: | ||
205 | device_remove_file(&ubi->dev, &dev_total_eraseblocks); | ||
206 | out_avail_eraseblocks: | ||
207 | device_remove_file(&ubi->dev, &dev_avail_eraseblocks); | ||
208 | out_eraseblock_size: | ||
209 | device_remove_file(&ubi->dev, &dev_eraseblock_size); | ||
210 | out_unregister: | ||
211 | device_unregister(&ubi->dev); | ||
212 | out: | ||
213 | ubi_err("failed to initialize sysfs for %s", ubi->ubi_name); | ||
214 | return err; | ||
215 | } | ||
216 | |||
217 | /** | ||
218 | * ubi_sysfs_close - close sysfs for an UBI device. | ||
219 | * @ubi: UBI device description object | ||
220 | */ | ||
221 | static void ubi_sysfs_close(struct ubi_device *ubi) | ||
222 | { | ||
223 | device_remove_file(&ubi->dev, &dev_bgt_enabled); | ||
224 | device_remove_file(&ubi->dev, &dev_min_io_size); | ||
225 | device_remove_file(&ubi->dev, &dev_max_vol_count); | ||
226 | device_remove_file(&ubi->dev, &dev_bad_peb_count); | ||
227 | device_remove_file(&ubi->dev, &dev_reserved_for_bad); | ||
228 | device_remove_file(&ubi->dev, &dev_max_ec); | ||
229 | device_remove_file(&ubi->dev, &dev_volumes_count); | ||
230 | device_remove_file(&ubi->dev, &dev_total_eraseblocks); | ||
231 | device_remove_file(&ubi->dev, &dev_avail_eraseblocks); | ||
232 | device_remove_file(&ubi->dev, &dev_eraseblock_size); | ||
233 | device_unregister(&ubi->dev); | ||
234 | } | ||
235 | |||
236 | /** | ||
237 | * kill_volumes - destroy all volumes. | ||
238 | * @ubi: UBI device description object | ||
239 | */ | ||
240 | static void kill_volumes(struct ubi_device *ubi) | ||
241 | { | ||
242 | int i; | ||
243 | |||
244 | for (i = 0; i < ubi->vtbl_slots; i++) | ||
245 | if (ubi->volumes[i]) | ||
246 | ubi_free_volume(ubi, i); | ||
247 | } | ||
248 | |||
249 | /** | ||
250 | * uif_init - initialize user interfaces for an UBI device. | ||
251 | * @ubi: UBI device description object | ||
252 | * | ||
253 | * This function returns zero in case of success and a negative error code in | ||
254 | * case of failure. | ||
255 | */ | ||
256 | static int uif_init(struct ubi_device *ubi) | ||
257 | { | ||
258 | int i, err; | ||
259 | dev_t dev; | ||
260 | |||
261 | mutex_init(&ubi->vtbl_mutex); | ||
262 | spin_lock_init(&ubi->volumes_lock); | ||
263 | |||
264 | sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); | ||
265 | |||
266 | /* | ||
267 | * Major numbers for the UBI character devices are allocated | ||
268 | * dynamically. Major numbers of volume character devices are | ||
269 | * equivalent to ones of the corresponding UBI character device. Minor | ||
270 | * numbers of UBI character devices are 0, while minor numbers of | ||
271 | * volume character devices start from 1. Thus, we allocate one major | ||
272 | * number and ubi->vtbl_slots + 1 minor numbers. | ||
273 | */ | ||
274 | err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name); | ||
275 | if (err) { | ||
276 | ubi_err("cannot register UBI character devices"); | ||
277 | return err; | ||
278 | } | ||
279 | |||
280 | cdev_init(&ubi->cdev, &ubi_cdev_operations); | ||
281 | ubi->major = MAJOR(dev); | ||
282 | dbg_msg("%s major is %u", ubi->ubi_name, ubi->major); | ||
283 | ubi->cdev.owner = THIS_MODULE; | ||
284 | |||
285 | dev = MKDEV(ubi->major, 0); | ||
286 | err = cdev_add(&ubi->cdev, dev, 1); | ||
287 | if (err) { | ||
288 | ubi_err("cannot add character device %s", ubi->ubi_name); | ||
289 | goto out_unreg; | ||
290 | } | ||
291 | |||
292 | err = ubi_sysfs_init(ubi); | ||
293 | if (err) | ||
294 | goto out_cdev; | ||
295 | |||
296 | for (i = 0; i < ubi->vtbl_slots; i++) | ||
297 | if (ubi->volumes[i]) { | ||
298 | err = ubi_add_volume(ubi, i); | ||
299 | if (err) | ||
300 | goto out_volumes; | ||
301 | } | ||
302 | |||
303 | return 0; | ||
304 | |||
305 | out_volumes: | ||
306 | kill_volumes(ubi); | ||
307 | ubi_sysfs_close(ubi); | ||
308 | out_cdev: | ||
309 | cdev_del(&ubi->cdev); | ||
310 | out_unreg: | ||
311 | unregister_chrdev_region(MKDEV(ubi->major, 0), | ||
312 | ubi->vtbl_slots + 1); | ||
313 | return err; | ||
314 | } | ||
315 | |||
316 | /** | ||
317 | * uif_close - close user interfaces for an UBI device. | ||
318 | * @ubi: UBI device description object | ||
319 | */ | ||
320 | static void uif_close(struct ubi_device *ubi) | ||
321 | { | ||
322 | kill_volumes(ubi); | ||
323 | ubi_sysfs_close(ubi); | ||
324 | cdev_del(&ubi->cdev); | ||
325 | unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1); | ||
326 | } | ||
327 | |||
328 | /** | ||
329 | * attach_by_scanning - attach an MTD device using scanning method. | ||
330 | * @ubi: UBI device descriptor | ||
331 | * | ||
332 | * This function returns zero in case of success and a negative error code in | ||
333 | * case of failure. | ||
334 | * | ||
335 | * Note, currently this is the only method to attach UBI devices. Hopefully in | ||
336 | * the future we'll have more scalable attaching methods and avoid full media | ||
337 | * scanning. But even in this case scanning will be needed as a fall-back | ||
338 | * attaching method if there are some on-flash table corruptions. | ||
339 | */ | ||
340 | static int attach_by_scanning(struct ubi_device *ubi) | ||
341 | { | ||
342 | int err; | ||
343 | struct ubi_scan_info *si; | ||
344 | |||
345 | si = ubi_scan(ubi); | ||
346 | if (IS_ERR(si)) | ||
347 | return PTR_ERR(si); | ||
348 | |||
349 | ubi->bad_peb_count = si->bad_peb_count; | ||
350 | ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count; | ||
351 | ubi->max_ec = si->max_ec; | ||
352 | ubi->mean_ec = si->mean_ec; | ||
353 | |||
354 | err = ubi_read_volume_table(ubi, si); | ||
355 | if (err) | ||
356 | goto out_si; | ||
357 | |||
358 | err = ubi_wl_init_scan(ubi, si); | ||
359 | if (err) | ||
360 | goto out_vtbl; | ||
361 | |||
362 | err = ubi_eba_init_scan(ubi, si); | ||
363 | if (err) | ||
364 | goto out_wl; | ||
365 | |||
366 | ubi_scan_destroy_si(si); | ||
367 | return 0; | ||
368 | |||
369 | out_wl: | ||
370 | ubi_wl_close(ubi); | ||
371 | out_vtbl: | ||
372 | kfree(ubi->vtbl); | ||
373 | out_si: | ||
374 | ubi_scan_destroy_si(si); | ||
375 | return err; | ||
376 | } | ||
377 | |||
378 | /** | ||
379 | * io_init - initialize I/O unit for a given UBI device. | ||
380 | * @ubi: UBI device description object | ||
381 | * | ||
382 | * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are | ||
383 | * assumed: | ||
384 | * o EC header is always at offset zero - this cannot be changed; | ||
385 | * o VID header starts just after the EC header at the closest address | ||
386 | * aligned to @io->@hdrs_min_io_size; | ||
387 | * o data starts just after the VID header at the closest address aligned to | ||
388 | * @io->@min_io_size | ||
389 | * | ||
390 | * This function returns zero in case of success and a negative error code in | ||
391 | * case of failure. | ||
392 | */ | ||
393 | static int io_init(struct ubi_device *ubi) | ||
394 | { | ||
395 | if (ubi->mtd->numeraseregions != 0) { | ||
396 | /* | ||
397 | * Some flashes have several erase regions. Different regions | ||
398 | * may have different eraseblock size and other | ||
399 | characteristics. It looks like multi-region flashes mostly | ||
400 | * have one "main" region and one or more small regions to | ||
401 | * store boot loader code or boot parameters or whatever. I | ||
402 | * guess we should just pick the largest region. But this is | ||
403 | * not implemented. | ||
404 | */ | ||
405 | ubi_err("multiple regions, not implemented"); | ||
406 | return -EINVAL; | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * Note, in this implementation we support MTD devices with 0x7FFFFFFF | ||
411 | * physical eraseblocks maximum. | ||
412 | */ | ||
413 | |||
414 | ubi->peb_size = ubi->mtd->erasesize; | ||
415 | ubi->peb_count = ubi->mtd->size / ubi->mtd->erasesize; | ||
416 | ubi->flash_size = ubi->mtd->size; | ||
417 | |||
418 | if (ubi->mtd->block_isbad && ubi->mtd->block_markbad) | ||
419 | ubi->bad_allowed = 1; | ||
420 | |||
421 | ubi->min_io_size = ubi->mtd->writesize; | ||
422 | ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; | ||
423 | |||
424 | /* Make sure minimal I/O unit is power of 2 */ | ||
425 | if (ubi->min_io_size == 0 || | ||
426 | (ubi->min_io_size & (ubi->min_io_size - 1))) { | ||
427 | ubi_err("bad min. I/O unit"); | ||
428 | return -EINVAL; | ||
429 | } | ||
430 | |||
431 | ubi_assert(ubi->hdrs_min_io_size > 0); | ||
432 | ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size); | ||
433 | ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0); | ||
434 | |||
435 | /* Calculate default aligned sizes of EC and VID headers */ | ||
436 | ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); | ||
437 | ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); | ||
438 | |||
439 | dbg_msg("min_io_size %d", ubi->min_io_size); | ||
440 | dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size); | ||
441 | dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize); | ||
442 | dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize); | ||
443 | |||
444 | if (ubi->vid_hdr_offset == 0) | ||
445 | /* Default offset */ | ||
446 | ubi->vid_hdr_offset = ubi->vid_hdr_aloffset = | ||
447 | ubi->ec_hdr_alsize; | ||
448 | else { | ||
449 | ubi->vid_hdr_aloffset = ubi->vid_hdr_offset & | ||
450 | ~(ubi->hdrs_min_io_size - 1); | ||
451 | ubi->vid_hdr_shift = ubi->vid_hdr_offset - | ||
452 | ubi->vid_hdr_aloffset; | ||
453 | } | ||
454 | |||
455 | /* Similar for the data offset */ | ||
456 | if (ubi->leb_start == 0) { | ||
457 | ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize; | ||
458 | ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); | ||
459 | } | ||
460 | |||
461 | dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); | ||
462 | dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); | ||
463 | dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift); | ||
464 | dbg_msg("leb_start %d", ubi->leb_start); | ||
465 | |||
466 | /* The shift must be aligned to 32-bit boundary */ | ||
467 | if (ubi->vid_hdr_shift % 4) { | ||
468 | ubi_err("unaligned VID header shift %d", | ||
469 | ubi->vid_hdr_shift); | ||
470 | return -EINVAL; | ||
471 | } | ||
472 | |||
473 | /* Check sanity */ | ||
474 | if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || | ||
475 | ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || | ||
476 | ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || | ||
477 | ubi->leb_start % ubi->min_io_size) { | ||
478 | ubi_err("bad VID header (%d) or data offsets (%d)", | ||
479 | ubi->vid_hdr_offset, ubi->leb_start); | ||
480 | return -EINVAL; | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * It may happen that EC and VID headers are situated in one minimal | ||
485 | * I/O unit. In this case we can only accept this UBI image in | ||
486 | * read-only mode. | ||
487 | */ | ||
488 | if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { | ||
489 | ubi_warn("EC and VID headers are in the same minimal I/O unit, " | ||
490 | "switch to read-only mode"); | ||
491 | ubi->ro_mode = 1; | ||
492 | } | ||
493 | |||
494 | ubi->leb_size = ubi->peb_size - ubi->leb_start; | ||
495 | |||
496 | if (!(ubi->mtd->flags & MTD_WRITEABLE)) { | ||
497 | ubi_msg("MTD device %d is write-protected, attach in " | ||
498 | "read-only mode", ubi->mtd->index); | ||
499 | ubi->ro_mode = 1; | ||
500 | } | ||
501 | |||
502 | dbg_msg("leb_size %d", ubi->leb_size); | ||
503 | dbg_msg("ro_mode %d", ubi->ro_mode); | ||
504 | |||
505 | /* | ||
506 | * Note, ideally, we have to initialize ubi->bad_peb_count here. But | ||
507 | * unfortunately, MTD does not provide this information. We should loop | ||
508 | * over all physical eraseblocks and invoke mtd->block_is_bad() for | ||
509 | * each physical eraseblock. So, we leave ubi->bad_peb_count | ||
510 | * uninitialized and initialize it after scanning. | ||
511 | */ | ||
512 | |||
513 | return 0; | ||
514 | } | ||
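A worked example of the defaults io_init() computes, assuming a NAND part with 128 KiB physical eraseblocks, 2048-byte pages and 512-byte subpages (UBI_EC_HDR_SIZE and UBI_VID_HDR_SIZE are both 64 bytes, as the BUILD_BUG_ONs in ubi_init() below assert):

```c
/* Illustrative arithmetic only, not code from this patch:
 *   min_io_size      = 2048                           (writesize)
 *   hdrs_min_io_size = 2048 >> 2              = 512   (subpage size)
 *   ec_hdr_alsize    = ALIGN(64, 512)         = 512
 *   vid_hdr_alsize   = ALIGN(64, 512)         = 512
 *   vid_hdr_offset   = 512           (defaults to ec_hdr_alsize)
 *   leb_start        = ALIGN(512 + 512, 2048) = 2048
 *   leb_size         = 131072 - 2048          = 129024 bytes per LEB
 */
```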
515 | |||
516 | /** | ||
517 | * attach_mtd_dev - attach an MTD device. | ||
518 | * @mtd_dev: MTD device name or number string | ||
519 | * @vid_hdr_offset: VID header offset | ||
520 | * @data_offset: data offset | ||
521 | * | ||
522 | * This function attaches an MTD device to UBI. It first treats @mtd_dev as the | ||
523 | * MTD device name, and tries to open it by this name. If that fails, | ||
524 | * it tries to convert @mtd_dev to an integer and open the MTD device by its | ||
525 | * number. Returns zero in case of success and a negative error code in case of | ||
526 | * failure. | ||
527 | */ | ||
528 | static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, | ||
529 | int data_offset) | ||
530 | { | ||
531 | struct ubi_device *ubi; | ||
532 | struct mtd_info *mtd; | ||
533 | int i, err; | ||
534 | |||
535 | mtd = get_mtd_device_nm(mtd_dev); | ||
536 | if (IS_ERR(mtd)) { | ||
537 | int mtd_num; | ||
538 | char *endp; | ||
539 | |||
540 | if (PTR_ERR(mtd) != -ENODEV) | ||
541 | return PTR_ERR(mtd); | ||
542 | |||
543 | /* | ||
544 | * Probably this is not an MTD device name but an MTD device number - | ||
545 | * check this out. | ||
546 | */ | ||
547 | mtd_num = simple_strtoul(mtd_dev, &endp, 0); | ||
548 | if (*endp != '\0' || mtd_dev == endp) { | ||
549 | ubi_err("incorrect MTD device: \"%s\"", mtd_dev); | ||
550 | return -ENODEV; | ||
551 | } | ||
552 | |||
553 | mtd = get_mtd_device(NULL, mtd_num); | ||
554 | if (IS_ERR(mtd)) | ||
555 | return PTR_ERR(mtd); | ||
556 | } | ||
557 | |||
558 | /* Check if we already have the same MTD device attached */ | ||
559 | for (i = 0; i < ubi_devices_cnt; i++) | ||
560 | if (ubi_devices[i]->mtd->index == mtd->index) { | ||
561 | ubi_err("mtd%d is already attached to ubi%d", | ||
562 | mtd->index, i); | ||
563 | err = -EINVAL; | ||
564 | goto out_mtd; | ||
565 | } | ||
566 | |||
567 | ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device), | ||
568 | GFP_KERNEL); | ||
569 | if (!ubi) { | ||
570 | err = -ENOMEM; | ||
571 | goto out_mtd; | ||
572 | } | ||
573 | |||
574 | ubi->ubi_num = ubi_devices_cnt; | ||
575 | ubi->mtd = mtd; | ||
576 | |||
577 | dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d", | ||
578 | ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset); | ||
579 | |||
580 | ubi->vid_hdr_offset = vid_hdr_offset; | ||
581 | ubi->leb_start = data_offset; | ||
582 | err = io_init(ubi); | ||
583 | if (err) | ||
584 | goto out_free; | ||
585 | |||
586 | err = attach_by_scanning(ubi); | ||
587 | if (err) { | ||
588 | dbg_err("failed to attach by scanning, error %d", err); | ||
589 | goto out_free; | ||
590 | } | ||
591 | |||
592 | err = uif_init(ubi); | ||
593 | if (err) | ||
594 | goto out_detach; | ||
595 | |||
596 | ubi_devices_cnt += 1; | ||
597 | |||
598 | ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt); | ||
599 | ubi_msg("MTD device name: \"%s\"", ubi->mtd->name); | ||
600 | ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); | ||
601 | ubi_msg("physical eraseblock size: %d bytes (%d KiB)", | ||
602 | ubi->peb_size, ubi->peb_size >> 10); | ||
603 | ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size); | ||
604 | ubi_msg("number of good PEBs: %d", ubi->good_peb_count); | ||
605 | ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count); | ||
606 | ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size); | ||
607 | ubi_msg("VID header offset: %d (aligned %d)", | ||
608 | ubi->vid_hdr_offset, ubi->vid_hdr_aloffset); | ||
609 | ubi_msg("data offset: %d", ubi->leb_start); | ||
610 | ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots); | ||
611 | ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD); | ||
612 | ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT); | ||
613 | ubi_msg("number of user volumes: %d", | ||
614 | ubi->vol_count - UBI_INT_VOL_COUNT); | ||
615 | ubi_msg("available PEBs: %d", ubi->avail_pebs); | ||
616 | ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs); | ||
617 | ubi_msg("number of PEBs reserved for bad PEB handling: %d", | ||
618 | ubi->beb_rsvd_pebs); | ||
619 | ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); | ||
620 | |||
621 | /* Enable the background thread */ | ||
622 | if (!DBG_DISABLE_BGT) { | ||
623 | ubi->thread_enabled = 1; | ||
624 | wake_up_process(ubi->bgt_thread); | ||
625 | } | ||
626 | |||
627 | return 0; | ||
628 | |||
629 | out_detach: | ||
630 | ubi_eba_close(ubi); | ||
631 | ubi_wl_close(ubi); | ||
632 | kfree(ubi->vtbl); | ||
633 | out_free: | ||
634 | kfree(ubi); | ||
635 | out_mtd: | ||
636 | put_mtd_device(mtd); | ||
637 | ubi_devices[ubi_devices_cnt] = NULL; | ||
638 | return err; | ||
639 | } | ||
640 | |||
641 | /** | ||
642 | * detach_mtd_dev - detach an MTD device. | ||
643 | * @ubi: UBI device description object | ||
644 | */ | ||
645 | static void detach_mtd_dev(struct ubi_device *ubi) | ||
646 | { | ||
647 | int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index; | ||
648 | |||
649 | dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); | ||
650 | uif_close(ubi); | ||
651 | ubi_eba_close(ubi); | ||
652 | ubi_wl_close(ubi); | ||
653 | kfree(ubi->vtbl); | ||
654 | put_mtd_device(ubi->mtd); | ||
655 | kfree(ubi_devices[ubi_num]); | ||
656 | ubi_devices[ubi_num] = NULL; | ||
657 | ubi_devices_cnt -= 1; | ||
658 | ubi_assert(ubi_devices_cnt >= 0); | ||
659 | ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num); | ||
660 | } | ||
661 | |||
662 | static int __init ubi_init(void) | ||
663 | { | ||
664 | int err, i, k; | ||
665 | |||
666 | /* Ensure that EC and VID headers have correct size */ | ||
667 | BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64); | ||
668 | BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); | ||
669 | |||
670 | if (mtd_devs > UBI_MAX_DEVICES) { | ||
671 | printk("UBI error: too many MTD devices, maximum is %d\n", | ||
672 | UBI_MAX_DEVICES); | ||
673 | return -EINVAL; | ||
674 | } | ||
675 | |||
676 | ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); | ||
677 | if (IS_ERR(ubi_class)) | ||
678 | return PTR_ERR(ubi_class); | ||
679 | |||
680 | err = class_create_file(ubi_class, &ubi_version); | ||
681 | if (err) | ||
682 | goto out_class; | ||
683 | |||
684 | /* Attach MTD devices */ | ||
685 | for (i = 0; i < mtd_devs; i++) { | ||
686 | struct mtd_dev_param *p = &mtd_dev_param[i]; | ||
687 | |||
688 | cond_resched(); | ||
689 | |||
690 | if (!p->name) { | ||
691 | dbg_err("empty name"); | ||
692 | err = -EINVAL; | ||
693 | goto out_detach; | ||
694 | } | ||
695 | |||
696 | err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs); | ||
697 | if (err) | ||
698 | goto out_detach; | ||
699 | } | ||
700 | |||
701 | return 0; | ||
702 | |||
703 | out_detach: | ||
704 | for (k = 0; k < i; k++) | ||
705 | detach_mtd_dev(ubi_devices[k]); | ||
706 | class_remove_file(ubi_class, &ubi_version); | ||
707 | out_class: | ||
708 | class_destroy(ubi_class); | ||
709 | return err; | ||
710 | } | ||
711 | module_init(ubi_init); | ||
712 | |||
713 | static void __exit ubi_exit(void) | ||
714 | { | ||
715 | int i, n = ubi_devices_cnt; | ||
716 | |||
717 | for (i = 0; i < n; i++) | ||
718 | detach_mtd_dev(ubi_devices[i]); | ||
719 | class_remove_file(ubi_class, &ubi_version); | ||
720 | class_destroy(ubi_class); | ||
721 | } | ||
722 | module_exit(ubi_exit); | ||
723 | |||
724 | /** | ||
725 | * bytes_str_to_int - convert a string representing a number of bytes to an | ||
726 | * integer. | ||
727 | * @str: the string to convert | ||
728 | * | ||
729 | * This function returns the resulting positive integer in case of success and a | ||
730 | * negative error code in case of failure. | ||
731 | */ | ||
732 | static int __init bytes_str_to_int(const char *str) | ||
733 | { | ||
734 | char *endp; | ||
735 | unsigned long result; | ||
736 | |||
737 | result = simple_strtoul(str, &endp, 0); | ||
738 | if (str == endp || result >= INT_MAX) { | ||
739 | printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", str); | ||
740 | return -EINVAL; | ||
741 | } | ||
742 | |||
743 | switch (*endp) { | ||
744 | case 'G': | ||
745 | result *= 1024; | ||
746 | case 'M': | ||
747 | result *= 1024; | ||
748 | case 'K': | ||
749 | case 'k': | ||
750 | result *= 1024; | ||
751 | if (endp[1] == 'i' && (endp[2] == '\0' || | ||
752 | endp[2] == 'B' || endp[2] == 'b')) | ||
753 | endp += 2; | ||
754 | case '\0': | ||
755 | break; | ||
756 | default: | ||
757 | printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", str); | ||
758 | return -EINVAL; | ||
759 | } | ||
760 | |||
761 | return result; | ||
762 | } | ||
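The suffix handling above relies on C switch fall-through: 'G' multiplies by 1024 three times, 'M' twice, and 'K'/'k' once. A minimal userspace sketch of the same logic, for illustration only (not part of the commit; it omits the "i"/"iB" suffix validation the kernel version performs):

    #include <stdio.h>
    #include <stdlib.h>

    static long bytes_str_to_long(const char *str)
    {
            char *endp;
            long result = strtol(str, &endp, 0);

            if (endp == str || result < 0)
                    return -1;
            switch (*endp) {
            case 'G': result *= 1024;       /* fall through */
            case 'M': result *= 1024;       /* fall through */
            case 'K':
            case 'k': result *= 1024;
                    break;
            case '\0':
                    break;
            default:
                    return -1;
            }
            return result;
    }

    int main(void)
    {
            /* prints: 2048 1048576 512 */
            printf("%ld %ld %ld\n", bytes_str_to_long("2K"),
                   bytes_str_to_long("1M"), bytes_str_to_long("512"));
            return 0;
    }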
763 | |||
764 | /** | ||
765 | * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter. | ||
766 | * @val: the parameter value to parse | ||
767 | * @kp: not used | ||
768 | * | ||
769 | * This function returns zero in case of success and a negative error code in | ||
770 | * case of error. | ||
771 | */ | ||
772 | static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) | ||
773 | { | ||
774 | int i, len; | ||
775 | struct mtd_dev_param *p; | ||
776 | char buf[MTD_PARAM_LEN_MAX]; | ||
777 | char *pbuf = &buf[0]; | ||
778 | char *tokens[3] = {NULL, NULL, NULL}; | ||
779 | |||
780 | if (mtd_devs == UBI_MAX_DEVICES) { | ||
781 | printk(KERN_ERR "UBI error: too many parameters, max. is %d\n", | ||
782 | UBI_MAX_DEVICES); | ||
783 | return -EINVAL; | ||
784 | } | ||
785 | |||
786 | len = strnlen(val, MTD_PARAM_LEN_MAX); | ||
787 | if (len == MTD_PARAM_LEN_MAX) { | ||
788 | printk(KERN_ERR "UBI error: parameter \"%s\" is too long, max. is %d\n", | ||
789 | val, MTD_PARAM_LEN_MAX); | ||
790 | return -EINVAL; | ||
791 | } | ||
792 | |||
793 | if (len == 0) { | ||
794 | printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - ignored\n"); | ||
795 | return 0; | ||
796 | } | ||
797 | |||
798 | strcpy(buf, val); | ||
799 | |||
800 | /* Get rid of the final newline */ | ||
801 | if (buf[len - 1] == '\n') | ||
802 | buf[len - 1] = 0; | ||
803 | |||
804 | for (i = 0; i < 3; i++) | ||
805 | tokens[i] = strsep(&pbuf, ","); | ||
806 | |||
807 | if (pbuf) { | ||
808 | printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n", val); | ||
809 | return -EINVAL; | ||
810 | } | ||
811 | |||
812 | if (!tokens[0] || *tokens[0] == '\0') | ||
813 | return -EINVAL; | ||
814 | |||
815 | p = &mtd_dev_param[mtd_devs]; | ||
816 | strcpy(&p->name[0], tokens[0]); | ||
817 | |||
818 | if (tokens[1]) | ||
819 | p->vid_hdr_offs = bytes_str_to_int(tokens[1]); | ||
820 | if (tokens[2]) | ||
821 | p->data_offs = bytes_str_to_int(tokens[2]); | ||
822 | |||
823 | if (p->vid_hdr_offs < 0) | ||
824 | return p->vid_hdr_offs; | ||
825 | if (p->data_offs < 0) | ||
826 | return p->data_offs; | ||
827 | |||
828 | mtd_devs += 1; | ||
829 | return 0; | ||
830 | } | ||
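For reference, the strsep() loop above splits the parameter value at commas into at most three tokens. A hedged userspace demonstration (assuming a glibc/BSD environment where strsep() is available):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[] = "content,1984,2048";
            char *pbuf = buf, *tokens[3] = { NULL, NULL, NULL };
            int i;

            for (i = 0; i < 3; i++)
                    tokens[i] = strsep(&pbuf, ",");
            /* prints: content / 1984 / 2048 */
            for (i = 0; i < 3; i++)
                    printf("token %d: %s\n", i, tokens[i] ? tokens[i] : "(none)");
            return 0;
    }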
831 | |||
832 | module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); | ||
833 | MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " | ||
834 | "mtd=<name|num>[,<vid_hdr_offs>,<data_offs>]. " | ||
835 | "Multiple \"mtd\" parameters may be specified.\n" | ||
836 | "MTD devices may be specified by their number or name. " | ||
837 | "Optional \"vid_hdr_offs\" and \"data_offs\" parameters " | ||
838 | "specify UBI VID header position and data starting " | ||
839 | "position to be used by UBI.\n" | ||
840 | "Example: mtd=content,1984,2048 mtd=4 - attach MTD device " | ||
841 | "with name content using VID header offset 1984 and data " | ||
842 | "start 2048, and MTD device number 4 using default " | ||
843 | "offsets"); | ||
844 | |||
845 | MODULE_VERSION(__stringify(UBI_VERSION)); | ||
846 | MODULE_DESCRIPTION("UBI - Unsorted Block Images"); | ||
847 | MODULE_AUTHOR("Artem Bityutskiy"); | ||
848 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c new file mode 100644 index 000000000000..6612eb79bf17 --- /dev/null +++ b/drivers/mtd/ubi/cdev.c | |||
@@ -0,0 +1,722 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
12 | * the GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * This file includes implementation of UBI character device operations. | ||
23 | * | ||
24 | * There are two kinds of character devices in UBI: UBI character devices and | ||
25 | * UBI volume character devices. UBI character devices allow users to | ||
26 | * manipulate whole volumes: create, remove, and re-size them. Volume character | ||
27 | * devices provide volume I/O capabilities. | ||
28 | * | ||
29 | * Major and minor numbers are assigned dynamically to both UBI and volume | ||
30 | * character devices. | ||
31 | */ | ||
32 | |||
33 | #include <linux/module.h> | ||
34 | #include <linux/stat.h> | ||
35 | #include <linux/ioctl.h> | ||
36 | #include <linux/capability.h> | ||
37 | #include <mtd/ubi-user.h> | ||
38 | #include <asm/uaccess.h> | ||
39 | #include <asm/div64.h> | ||
40 | #include "ubi.h" | ||
41 | |||
42 | /* | ||
43 | * Maximum sequence numbers of UBI and volume character device IOCTLs (direct | ||
44 | * logical eraseblock erase is a debug-only feature). | ||
45 | */ | ||
46 | #define UBI_CDEV_IOC_MAX_SEQ 2 | ||
47 | #ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO | ||
48 | #define VOL_CDEV_IOC_MAX_SEQ 1 | ||
49 | #else | ||
50 | #define VOL_CDEV_IOC_MAX_SEQ 2 | ||
51 | #endif | ||
52 | |||
53 | /** | ||
54 | * major_to_device - get UBI device object by character device major number. | ||
55 | * @major: major number | ||
56 | * | ||
57 | * This function returns a pointer to the UBI device object. | ||
58 | */ | ||
59 | static struct ubi_device *major_to_device(int major) | ||
60 | { | ||
61 | int i; | ||
62 | |||
63 | for (i = 0; i < ubi_devices_cnt; i++) | ||
64 | if (ubi_devices[i] && ubi_devices[i]->major == major) | ||
65 | return ubi_devices[i]; | ||
66 | BUG(); | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * get_exclusive - get exclusive access to an UBI volume. | ||
71 | * @desc: volume descriptor | ||
72 | * | ||
73 | * This function changes UBI volume open mode to "exclusive". Returns previous | ||
74 | * mode value (positive integer) in case of success and a negative error code | ||
75 | * in case of failure. | ||
76 | */ | ||
77 | static int get_exclusive(struct ubi_volume_desc *desc) | ||
78 | { | ||
79 | int users, err; | ||
80 | struct ubi_volume *vol = desc->vol; | ||
81 | |||
82 | spin_lock(&vol->ubi->volumes_lock); | ||
83 | users = vol->readers + vol->writers + vol->exclusive; | ||
84 | ubi_assert(users > 0); | ||
85 | if (users > 1) { | ||
86 | dbg_err("%d users for volume %d", users, vol->vol_id); | ||
87 | err = -EBUSY; | ||
88 | } else { | ||
89 | vol->readers = vol->writers = 0; | ||
90 | vol->exclusive = 1; | ||
91 | err = desc->mode; | ||
92 | desc->mode = UBI_EXCLUSIVE; | ||
93 | } | ||
94 | spin_unlock(&vol->ubi->volumes_lock); | ||
95 | |||
96 | return err; | ||
97 | } | ||
98 | |||
99 | /** | ||
100 | * revoke_exclusive - revoke exclusive mode. | ||
101 | * @desc: volume descriptor | ||
102 | * @mode: new mode to switch to | ||
103 | */ | ||
104 | static void revoke_exclusive(struct ubi_volume_desc *desc, int mode) | ||
105 | { | ||
106 | struct ubi_volume *vol = desc->vol; | ||
107 | |||
108 | spin_lock(&vol->ubi->volumes_lock); | ||
109 | ubi_assert(vol->readers == 0 && vol->writers == 0); | ||
110 | ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE); | ||
111 | vol->exclusive = 0; | ||
112 | if (mode == UBI_READONLY) | ||
113 | vol->readers = 1; | ||
114 | else if (mode == UBI_READWRITE) | ||
115 | vol->writers = 1; | ||
116 | else | ||
117 | vol->exclusive = 1; | ||
118 | spin_unlock(&vol->ubi->volumes_lock); | ||
119 | |||
120 | desc->mode = mode; | ||
121 | } | ||
122 | |||
123 | static int vol_cdev_open(struct inode *inode, struct file *file) | ||
124 | { | ||
125 | struct ubi_volume_desc *desc; | ||
126 | const struct ubi_device *ubi = major_to_device(imajor(inode)); | ||
127 | int vol_id = iminor(inode) - 1; | ||
128 | int mode; | ||
129 | |||
130 | if (file->f_mode & FMODE_WRITE) | ||
131 | mode = UBI_READWRITE; | ||
132 | else | ||
133 | mode = UBI_READONLY; | ||
134 | |||
135 | dbg_msg("open volume %d, mode %d", vol_id, mode); | ||
136 | |||
137 | desc = ubi_open_volume(ubi->ubi_num, vol_id, mode); | ||
138 | if (IS_ERR(desc)) | ||
139 | return PTR_ERR(desc); | ||
140 | |||
141 | file->private_data = desc; | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static int vol_cdev_release(struct inode *inode, struct file *file) | ||
146 | { | ||
147 | struct ubi_volume_desc *desc = file->private_data; | ||
148 | struct ubi_volume *vol = desc->vol; | ||
149 | |||
150 | dbg_msg("release volume %d, mode %d", vol->vol_id, desc->mode); | ||
151 | |||
152 | if (vol->updating) { | ||
153 | ubi_warn("update of volume %d not finished, volume is damaged", | ||
154 | vol->vol_id); | ||
155 | vol->updating = 0; | ||
156 | kfree(vol->upd_buf); | ||
157 | } | ||
158 | |||
159 | ubi_close_volume(desc); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin) | ||
164 | { | ||
165 | struct ubi_volume_desc *desc = file->private_data; | ||
166 | struct ubi_volume *vol = desc->vol; | ||
167 | loff_t new_offset; | ||
168 | |||
169 | if (vol->updating) { | ||
170 | /* Update is in progress, seeking is prohibited */ | ||
171 | dbg_err("updating"); | ||
172 | return -EBUSY; | ||
173 | } | ||
174 | |||
175 | switch (origin) { | ||
176 | case 0: /* SEEK_SET */ | ||
177 | new_offset = offset; | ||
178 | break; | ||
179 | case 1: /* SEEK_CUR */ | ||
180 | new_offset = file->f_pos + offset; | ||
181 | break; | ||
182 | case 2: /* SEEK_END */ | ||
183 | new_offset = vol->used_bytes + offset; | ||
184 | break; | ||
185 | default: | ||
186 | return -EINVAL; | ||
187 | } | ||
188 | |||
189 | if (new_offset < 0 || new_offset > vol->used_bytes) { | ||
190 | dbg_err("bad seek %lld", new_offset); | ||
191 | return -EINVAL; | ||
192 | } | ||
193 | |||
194 | dbg_msg("seek volume %d, offset %lld, origin %d, new offset %lld", | ||
195 | vol->vol_id, offset, origin, new_offset); | ||
196 | |||
197 | file->f_pos = new_offset; | ||
198 | return new_offset; | ||
199 | } | ||
200 | |||
201 | static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, | ||
202 | loff_t *offp) | ||
203 | { | ||
204 | struct ubi_volume_desc *desc = file->private_data; | ||
205 | struct ubi_volume *vol = desc->vol; | ||
206 | struct ubi_device *ubi = vol->ubi; | ||
207 | int err, lnum, off, len, vol_id = desc->vol->vol_id, tbuf_size; | ||
208 | size_t count_save = count; | ||
209 | void *tbuf; | ||
210 | uint64_t tmp; | ||
211 | |||
212 | dbg_msg("read %zd bytes from offset %lld of volume %d", | ||
213 | count, *offp, vol_id); | ||
214 | |||
215 | if (vol->updating) { | ||
216 | dbg_err("updating"); | ||
217 | return -EBUSY; | ||
218 | } | ||
219 | if (vol->upd_marker) { | ||
220 | dbg_err("damaged volume, update marker is set"); | ||
221 | return -EBADF; | ||
222 | } | ||
223 | if (*offp == vol->used_bytes || count == 0) | ||
224 | return 0; | ||
225 | |||
226 | if (vol->corrupted) | ||
227 | dbg_msg("read from corrupted volume %d", vol_id); | ||
228 | |||
229 | if (*offp + count > vol->used_bytes) | ||
230 | count_save = count = vol->used_bytes - *offp; | ||
231 | |||
232 | tbuf_size = vol->usable_leb_size; | ||
233 | if (count < tbuf_size) | ||
234 | tbuf_size = ALIGN(count, ubi->min_io_size); | ||
235 | tbuf = kmalloc(tbuf_size, GFP_KERNEL); | ||
236 | if (!tbuf) | ||
237 | return -ENOMEM; | ||
238 | |||
239 | len = count > tbuf_size ? tbuf_size : count; | ||
240 | |||
241 | tmp = *offp; | ||
242 | off = do_div(tmp, vol->usable_leb_size); | ||
243 | lnum = tmp; | ||
244 | |||
245 | do { | ||
246 | cond_resched(); | ||
247 | |||
248 | if (off + len >= vol->usable_leb_size) | ||
249 | len = vol->usable_leb_size - off; | ||
250 | |||
251 | err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0); | ||
252 | if (err) | ||
253 | break; | ||
254 | |||
255 | off += len; | ||
256 | if (off == vol->usable_leb_size) { | ||
257 | lnum += 1; | ||
258 | off -= vol->usable_leb_size; | ||
259 | } | ||
260 | |||
261 | count -= len; | ||
262 | *offp += len; | ||
263 | |||
264 | err = copy_to_user(buf, tbuf, len); | ||
265 | if (err) { | ||
266 | err = -EFAULT; | ||
267 | break; | ||
268 | } | ||
269 | |||
270 | buf += len; | ||
271 | len = count > tbuf_size ? tbuf_size : count; | ||
272 | } while (count); | ||
273 | |||
274 | kfree(tbuf); | ||
275 | return err ? err : count_save - count; | ||
276 | } | ||
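The read loop above turns the file position into a (logical eraseblock, offset) pair with do_div(): the remainder is the offset inside the LEB and the quotient is the LEB number. A worked userspace sketch of the same arithmetic (the usable_leb_size value is an example; it is device-dependent):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t offp = 300000;
            uint32_t leb_size = 126976;             /* example value */
            uint32_t off = offp % leb_size;         /* do_div() returns the remainder */
            uint64_t lnum = offp / leb_size;        /* ... and leaves the quotient */

            /* prints: lnum=2 off=46048 (300000 = 2 * 126976 + 46048) */
            printf("lnum=%llu off=%u\n", (unsigned long long)lnum, off);
            return 0;
    }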
277 | |||
278 | #ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO | ||
279 | |||
280 | /* | ||
281 | * This function allows one to write directly to dynamic UBI volumes, without | ||
282 | * issuing the volume update operation. Available only as a debugging feature. | ||
283 | * Very useful for testing UBI. | ||
284 | */ | ||
285 | static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, | ||
286 | size_t count, loff_t *offp) | ||
287 | { | ||
288 | struct ubi_volume_desc *desc = file->private_data; | ||
289 | struct ubi_volume *vol = desc->vol; | ||
290 | struct ubi_device *ubi = vol->ubi; | ||
291 | int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0; | ||
292 | size_t count_save = count; | ||
293 | char *tbuf; | ||
294 | uint64_t tmp; | ||
295 | |||
296 | dbg_msg("requested: write %zd bytes to offset %lld of volume %u", | ||
297 | count, *offp, desc->vol->vol_id); | ||
298 | |||
299 | if (vol->vol_type == UBI_STATIC_VOLUME) | ||
300 | return -EROFS; | ||
301 | |||
302 | tmp = *offp; | ||
303 | off = do_div(tmp, vol->usable_leb_size); | ||
304 | lnum = tmp; | ||
305 | |||
306 | if (off % ubi->min_io_size) { | ||
307 | dbg_err("unaligned position"); | ||
308 | return -EINVAL; | ||
309 | } | ||
310 | |||
311 | if (*offp + count > vol->used_bytes) | ||
312 | count_save = count = vol->used_bytes - *offp; | ||
313 | |||
314 | /* We can write only in fractions of the minimum I/O unit */ | ||
315 | if (count % ubi->min_io_size) { | ||
316 | dbg_err("unaligned write length"); | ||
317 | return -EINVAL; | ||
318 | } | ||
319 | |||
320 | tbuf_size = vol->usable_leb_size; | ||
321 | if (count < tbuf_size) | ||
322 | tbuf_size = ALIGN(count, ubi->min_io_size); | ||
323 | tbuf = kmalloc(tbuf_size, GFP_KERNEL); | ||
324 | if (!tbuf) | ||
325 | return -ENOMEM; | ||
326 | |||
327 | len = count > tbuf_size ? tbuf_size : count; | ||
328 | |||
329 | while (count) { | ||
330 | cond_resched(); | ||
331 | |||
332 | if (off + len >= vol->usable_leb_size) | ||
333 | len = vol->usable_leb_size - off; | ||
334 | |||
335 | err = copy_from_user(tbuf, buf, len); | ||
336 | if (err) { | ||
337 | err = -EFAULT; | ||
338 | break; | ||
339 | } | ||
340 | |||
341 | err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len, | ||
342 | UBI_UNKNOWN); | ||
343 | if (err) | ||
344 | break; | ||
345 | |||
346 | off += len; | ||
347 | if (off == vol->usable_leb_size) { | ||
348 | lnum += 1; | ||
349 | off -= vol->usable_leb_size; | ||
350 | } | ||
351 | |||
352 | count -= len; | ||
353 | *offp += len; | ||
354 | buf += len; | ||
355 | len = count > tbuf_size ? tbuf_size : count; | ||
356 | } | ||
357 | |||
358 | kfree(tbuf); | ||
359 | return err ? err : count_save - count; | ||
360 | } | ||
361 | |||
362 | #else | ||
363 | #define vol_cdev_direct_write(file, buf, count, offp) -EPERM | ||
364 | #endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */ | ||
365 | |||
366 | static ssize_t vol_cdev_write(struct file *file, const char __user *buf, | ||
367 | size_t count, loff_t *offp) | ||
368 | { | ||
369 | int err = 0; | ||
370 | struct ubi_volume_desc *desc = file->private_data; | ||
371 | struct ubi_volume *vol = desc->vol; | ||
372 | struct ubi_device *ubi = vol->ubi; | ||
373 | |||
374 | if (!vol->updating) | ||
375 | return vol_cdev_direct_write(file, buf, count, offp); | ||
376 | |||
377 | err = ubi_more_update_data(ubi, vol->vol_id, buf, count); | ||
378 | if (err < 0) { | ||
379 | ubi_err("cannot write %zd bytes of update data", count); | ||
380 | return err; | ||
381 | } | ||
382 | |||
383 | if (err) { | ||
384 | /* | ||
385 | * Update is finished, @err contains number of actually written | ||
386 | * bytes now. | ||
387 | */ | ||
388 | count = err; | ||
389 | |||
390 | err = ubi_check_volume(ubi, vol->vol_id); | ||
391 | if (err < 0) | ||
392 | return err; | ||
393 | |||
394 | if (err) { | ||
395 | ubi_warn("volume %d on UBI device %d is corrupted", | ||
396 | vol->vol_id, ubi->ubi_num); | ||
397 | vol->corrupted = 1; | ||
398 | } | ||
399 | vol->checked = 1; | ||
400 | revoke_exclusive(desc, UBI_READWRITE); | ||
401 | } | ||
402 | |||
403 | *offp += count; | ||
404 | return count; | ||
405 | } | ||
406 | |||
407 | static int vol_cdev_ioctl(struct inode *inode, struct file *file, | ||
408 | unsigned int cmd, unsigned long arg) | ||
409 | { | ||
410 | int err = 0; | ||
411 | struct ubi_volume_desc *desc = file->private_data; | ||
412 | struct ubi_volume *vol = desc->vol; | ||
413 | struct ubi_device *ubi = vol->ubi; | ||
414 | void __user *argp = (void __user *)arg; | ||
415 | |||
416 | if (_IOC_NR(cmd) > VOL_CDEV_IOC_MAX_SEQ || | ||
417 | _IOC_TYPE(cmd) != UBI_VOL_IOC_MAGIC) | ||
418 | return -ENOTTY; | ||
419 | |||
420 | if (_IOC_DIR(cmd) & _IOC_READ) | ||
421 | err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd)); | ||
422 | else if (_IOC_DIR(cmd) & _IOC_WRITE) | ||
423 | err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd)); | ||
424 | if (err) | ||
425 | return -EFAULT; | ||
426 | |||
427 | switch (cmd) { | ||
428 | |||
429 | /* Volume update command */ | ||
430 | case UBI_IOCVOLUP: | ||
431 | { | ||
432 | int64_t bytes, rsvd_bytes; | ||
433 | |||
434 | if (!capable(CAP_SYS_RESOURCE)) { | ||
435 | err = -EPERM; | ||
436 | break; | ||
437 | } | ||
438 | |||
439 | err = copy_from_user(&bytes, argp, sizeof(int64_t)); | ||
440 | if (err) { | ||
441 | err = -EFAULT; | ||
442 | break; | ||
443 | } | ||
444 | |||
445 | if (desc->mode == UBI_READONLY) { | ||
446 | err = -EROFS; | ||
447 | break; | ||
448 | } | ||
449 | |||
450 | rsvd_bytes = vol->reserved_pebs * (ubi->leb_size-vol->data_pad); | ||
451 | if (bytes < 0 || bytes > rsvd_bytes) { | ||
452 | err = -EINVAL; | ||
453 | break; | ||
454 | } | ||
455 | |||
456 | err = get_exclusive(desc); | ||
457 | if (err < 0) | ||
458 | break; | ||
459 | |||
460 | err = ubi_start_update(ubi, vol->vol_id, bytes); | ||
461 | if (bytes == 0) | ||
462 | revoke_exclusive(desc, UBI_READWRITE); | ||
463 | |||
464 | file->f_pos = 0; | ||
465 | break; | ||
466 | } | ||
467 | |||
468 | #ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO | ||
469 | /* Logical eraseblock erasure command */ | ||
470 | case UBI_IOCEBER: | ||
471 | { | ||
472 | int32_t lnum; | ||
473 | |||
474 | err = __get_user(lnum, (__user int32_t *)argp); | ||
475 | if (err) { | ||
476 | err = -EFAULT; | ||
477 | break; | ||
478 | } | ||
479 | |||
480 | if (desc->mode == UBI_READONLY) { | ||
481 | err = -EROFS; | ||
482 | break; | ||
483 | } | ||
484 | |||
485 | if (lnum < 0 || lnum >= vol->reserved_pebs) { | ||
486 | err = -EINVAL; | ||
487 | break; | ||
488 | } | ||
489 | |||
490 | if (vol->vol_type != UBI_DYNAMIC_VOLUME) { | ||
491 | err = -EROFS; | ||
492 | break; | ||
493 | } | ||
494 | |||
495 | dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); | ||
496 | err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum); | ||
497 | if (err) | ||
498 | break; | ||
499 | |||
500 | err = ubi_wl_flush(ubi); | ||
501 | break; | ||
502 | } | ||
503 | #endif | ||
504 | |||
505 | default: | ||
506 | err = -ENOTTY; | ||
507 | break; | ||
508 | } | ||
509 | |||
510 | return err; | ||
511 | } | ||
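Taken together, UBI_IOCVOLUP and vol_cdev_write() implement the volume update protocol: the ioctl declares the image size as an int64_t, then the image itself is delivered with ordinary write() calls. A hedged userspace sketch (the device node path is hypothetical and error handling is minimal):

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <mtd/ubi-user.h>

    static int update_volume(const char *node, const void *image, int64_t bytes)
    {
            int fd = open(node, O_RDWR);

            if (fd < 0)
                    return -1;
            /* declare the update size, then stream the new contents */
            if (ioctl(fd, UBI_IOCVOLUP, &bytes) < 0 ||
                write(fd, image, bytes) != bytes) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }

    /* e.g. update_volume("/dev/ubi0_0", buf, len); */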
512 | |||
513 | /** | ||
514 | * verify_mkvol_req - verify volume creation request. | ||
515 | * @ubi: UBI device description object | ||
516 | * @req: the request to check | ||
517 | * | ||
518 | * This function returns zero if the request is correct, and %-EINVAL if not. | ||
519 | */ | ||
520 | static int verify_mkvol_req(const struct ubi_device *ubi, | ||
521 | const struct ubi_mkvol_req *req) | ||
522 | { | ||
523 | int n, err = -EINVAL; | ||
524 | |||
525 | if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 || | ||
526 | req->name_len < 0) | ||
527 | goto bad; | ||
528 | |||
529 | if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) && | ||
530 | req->vol_id != UBI_VOL_NUM_AUTO) | ||
531 | goto bad; | ||
532 | |||
533 | if (req->alignment == 0) | ||
534 | goto bad; | ||
535 | |||
536 | if (req->bytes == 0) | ||
537 | goto bad; | ||
538 | |||
539 | if (req->vol_type != UBI_DYNAMIC_VOLUME && | ||
540 | req->vol_type != UBI_STATIC_VOLUME) | ||
541 | goto bad; | ||
542 | |||
543 | if (req->alignment > ubi->leb_size) | ||
544 | goto bad; | ||
545 | |||
546 | n = req->alignment % ubi->min_io_size; | ||
547 | if (req->alignment != 1 && n) | ||
548 | goto bad; | ||
549 | |||
550 | if (req->name_len > UBI_VOL_NAME_MAX) { | ||
551 | err = -ENAMETOOLONG; | ||
552 | goto bad; | ||
553 | } | ||
554 | |||
555 | return 0; | ||
556 | |||
557 | bad: | ||
558 | dbg_err("bad volume creation request"); | ||
559 | ubi_dbg_dump_mkvol_req(req); | ||
560 | return err; | ||
561 | } | ||
562 | |||
563 | /** | ||
564 | * verify_rsvol_req - verify volume re-size request. | ||
565 | * @ubi: UBI device description object | ||
566 | * @req: the request to check | ||
567 | * | ||
568 | * This function returns zero if the request is correct, and %-EINVAL if not. | ||
569 | */ | ||
570 | static int verify_rsvol_req(const struct ubi_device *ubi, | ||
571 | const struct ubi_rsvol_req *req) | ||
572 | { | ||
573 | if (req->bytes <= 0) | ||
574 | return -EINVAL; | ||
575 | |||
576 | if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) | ||
577 | return -EINVAL; | ||
578 | |||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | static int ubi_cdev_ioctl(struct inode *inode, struct file *file, | ||
583 | unsigned int cmd, unsigned long arg) | ||
584 | { | ||
585 | int err = 0; | ||
586 | struct ubi_device *ubi; | ||
587 | struct ubi_volume_desc *desc; | ||
588 | void __user *argp = (void __user *)arg; | ||
589 | |||
590 | if (_IOC_NR(cmd) > UBI_CDEV_IOC_MAX_SEQ || | ||
591 | _IOC_TYPE(cmd) != UBI_IOC_MAGIC) | ||
592 | return -ENOTTY; | ||
593 | |||
594 | if (_IOC_DIR(cmd) & _IOC_READ) | ||
595 | err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd)); | ||
596 | else if (_IOC_DIR(cmd) & _IOC_WRITE) | ||
597 | err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd)); | ||
598 | if (err) | ||
599 | return -EFAULT; | ||
600 | |||
601 | if (!capable(CAP_SYS_RESOURCE)) | ||
602 | return -EPERM; | ||
603 | |||
604 | ubi = major_to_device(imajor(inode)); | ||
605 | if (IS_ERR(ubi)) | ||
606 | return PTR_ERR(ubi); | ||
607 | |||
608 | switch (cmd) { | ||
609 | /* Create volume command */ | ||
610 | case UBI_IOCMKVOL: | ||
611 | { | ||
612 | struct ubi_mkvol_req req; | ||
613 | |||
614 | dbg_msg("create volume"); | ||
615 | err = __copy_from_user(&req, argp, | ||
616 | sizeof(struct ubi_mkvol_req)); | ||
617 | if (err) { | ||
618 | err = -EFAULT; | ||
619 | break; | ||
620 | } | ||
621 | |||
622 | err = verify_mkvol_req(ubi, &req); | ||
623 | if (err) | ||
624 | break; | ||
625 | |||
626 | req.name[req.name_len] = '\0'; | ||
627 | |||
628 | err = ubi_create_volume(ubi, &req); | ||
629 | if (err) | ||
630 | break; | ||
631 | |||
632 | err = __put_user(req.vol_id, (__user int32_t *)argp); | ||
633 | if (err) | ||
634 | err = -EFAULT; | ||
635 | |||
636 | break; | ||
637 | } | ||
638 | |||
639 | /* Remove volume command */ | ||
640 | case UBI_IOCRMVOL: | ||
641 | { | ||
642 | int vol_id; | ||
643 | |||
644 | dbg_msg("remove volume"); | ||
645 | err = __get_user(vol_id, (__user int32_t *)argp); | ||
646 | if (err) { | ||
647 | err = -EFAULT; | ||
648 | break; | ||
649 | } | ||
650 | |||
651 | desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); | ||
652 | if (IS_ERR(desc)) { | ||
653 | err = PTR_ERR(desc); | ||
654 | break; | ||
655 | } | ||
656 | |||
657 | err = ubi_remove_volume(desc); | ||
658 | if (err) | ||
659 | ubi_close_volume(desc); | ||
660 | |||
661 | break; | ||
662 | } | ||
663 | |||
664 | /* Re-size volume command */ | ||
665 | case UBI_IOCRSVOL: | ||
666 | { | ||
667 | int pebs; | ||
668 | uint64_t tmp; | ||
669 | struct ubi_rsvol_req req; | ||
670 | |||
671 | dbg_msg("re-size volume"); | ||
672 | err = __copy_from_user(&req, argp, | ||
673 | sizeof(struct ubi_rsvol_req)); | ||
674 | if (err) { | ||
675 | err = -EFAULT; | ||
676 | break; | ||
677 | } | ||
678 | |||
679 | err = verify_rsvol_req(ubi, &req); | ||
680 | if (err) | ||
681 | break; | ||
682 | |||
683 | desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE); | ||
684 | if (IS_ERR(desc)) { | ||
685 | err = PTR_ERR(desc); | ||
686 | break; | ||
687 | } | ||
688 | |||
689 | tmp = req.bytes; | ||
690 | pebs = !!do_div(tmp, desc->vol->usable_leb_size); | ||
691 | pebs += tmp; | ||
692 | |||
693 | err = ubi_resize_volume(desc, pebs); | ||
694 | ubi_close_volume(desc); | ||
695 | break; | ||
696 | } | ||
697 | |||
698 | default: | ||
699 | err = -ENOTTY; | ||
700 | break; | ||
701 | } | ||
702 | |||
703 | return err; | ||
704 | } | ||
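A hedged sketch of driving UBI_IOCMKVOL from user space, using only the request fields the handler and verify_mkvol_req() actually inspect (the full struct ubi_mkvol_req layout is in mtd/ubi-user.h; "/dev/ubi0" is a hypothetical node name):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <mtd/ubi-user.h>

    static int make_volume(void)
    {
            int fd, err;
            struct ubi_mkvol_req req;

            fd = open("/dev/ubi0", O_RDWR);
            if (fd < 0)
                    return -1;

            memset(&req, 0, sizeof(req));
            req.vol_id = UBI_VOL_NUM_AUTO;  /* let UBI assign the ID */
            req.alignment = 1;
            req.bytes = 1024 * 1024;
            req.vol_type = UBI_DYNAMIC_VOLUME;
            strcpy(req.name, "test");
            req.name_len = strlen(req.name);

            err = ioctl(fd, UBI_IOCMKVOL, &req);
            close(fd);
            /* on success the handler wrote the assigned ID back into vol_id */
            return err ? -1 : req.vol_id;
    }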
705 | |||
706 | /* UBI character device operations */ | ||
707 | struct file_operations ubi_cdev_operations = { | ||
708 | .owner = THIS_MODULE, | ||
709 | .ioctl = ubi_cdev_ioctl, | ||
710 | .llseek = no_llseek | ||
711 | }; | ||
712 | |||
713 | /* UBI volume character device operations */ | ||
714 | struct file_operations ubi_vol_cdev_operations = { | ||
715 | .owner = THIS_MODULE, | ||
716 | .open = vol_cdev_open, | ||
717 | .release = vol_cdev_release, | ||
718 | .llseek = vol_cdev_llseek, | ||
719 | .read = vol_cdev_read, | ||
720 | .write = vol_cdev_write, | ||
721 | .ioctl = vol_cdev_ioctl | ||
722 | }; | ||
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c new file mode 100644 index 000000000000..86364221fafe --- /dev/null +++ b/drivers/mtd/ubi/debug.c | |||
@@ -0,0 +1,224 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
12 | * the GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * Here we keep all the UBI debugging stuff which should normally be disabled | ||
23 | * and compiled-out, but it is extremely helpful when hunting bugs or doing big | ||
24 | * changes. | ||
25 | */ | ||
26 | |||
27 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG | ||
28 | |||
29 | #include "ubi.h" | ||
30 | |||
31 | /** | ||
32 | * ubi_dbg_dump_ec_hdr - dump an erase counter header. | ||
33 | * @ec_hdr: the erase counter header to dump | ||
34 | */ | ||
35 | void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) | ||
36 | { | ||
37 | dbg_msg("erase counter header dump:"); | ||
38 | dbg_msg("magic %#08x", ubi32_to_cpu(ec_hdr->magic)); | ||
39 | dbg_msg("version %d", (int)ec_hdr->version); | ||
40 | dbg_msg("ec %llu", (long long)ubi64_to_cpu(ec_hdr->ec)); | ||
41 | dbg_msg("vid_hdr_offset %d", ubi32_to_cpu(ec_hdr->vid_hdr_offset)); | ||
42 | dbg_msg("data_offset %d", ubi32_to_cpu(ec_hdr->data_offset)); | ||
43 | dbg_msg("hdr_crc %#08x", ubi32_to_cpu(ec_hdr->hdr_crc)); | ||
44 | dbg_msg("erase counter header hexdump:"); | ||
45 | ubi_dbg_hexdump(ec_hdr, UBI_EC_HDR_SIZE); | ||
46 | } | ||
47 | |||
48 | /** | ||
49 | * ubi_dbg_dump_vid_hdr - dump a volume identifier header. | ||
50 | * @vid_hdr: the volume identifier header to dump | ||
51 | */ | ||
52 | void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) | ||
53 | { | ||
54 | dbg_msg("volume identifier header dump:"); | ||
55 | dbg_msg("magic %08x", ubi32_to_cpu(vid_hdr->magic)); | ||
56 | dbg_msg("version %d", (int)vid_hdr->version); | ||
57 | dbg_msg("vol_type %d", (int)vid_hdr->vol_type); | ||
58 | dbg_msg("copy_flag %d", (int)vid_hdr->copy_flag); | ||
59 | dbg_msg("compat %d", (int)vid_hdr->compat); | ||
60 | dbg_msg("vol_id %d", ubi32_to_cpu(vid_hdr->vol_id)); | ||
61 | dbg_msg("lnum %d", ubi32_to_cpu(vid_hdr->lnum)); | ||
62 | dbg_msg("leb_ver %u", ubi32_to_cpu(vid_hdr->leb_ver)); | ||
63 | dbg_msg("data_size %d", ubi32_to_cpu(vid_hdr->data_size)); | ||
64 | dbg_msg("used_ebs %d", ubi32_to_cpu(vid_hdr->used_ebs)); | ||
65 | dbg_msg("data_pad %d", ubi32_to_cpu(vid_hdr->data_pad)); | ||
66 | dbg_msg("sqnum %llu", | ||
67 | (unsigned long long)ubi64_to_cpu(vid_hdr->sqnum)); | ||
68 | dbg_msg("hdr_crc %08x", ubi32_to_cpu(vid_hdr->hdr_crc)); | ||
69 | ubi_dbg_hexdump(vid_hdr, UBI_VID_HDR_SIZE); | ||
70 | } | ||
71 | |||
72 | /** | ||
73 | * ubi_dbg_dump_vol_info- dump volume information. | ||
74 | * @vol: UBI volume description object | ||
75 | */ | ||
76 | void ubi_dbg_dump_vol_info(const struct ubi_volume *vol) | ||
77 | { | ||
78 | dbg_msg("volume information dump:"); | ||
79 | dbg_msg("vol_id %d", vol->vol_id); | ||
80 | dbg_msg("reserved_pebs %d", vol->reserved_pebs); | ||
81 | dbg_msg("alignment %d", vol->alignment); | ||
82 | dbg_msg("data_pad %d", vol->data_pad); | ||
83 | dbg_msg("vol_type %d", vol->vol_type); | ||
84 | dbg_msg("name_len %d", vol->name_len); | ||
85 | dbg_msg("usable_leb_size %d", vol->usable_leb_size); | ||
86 | dbg_msg("used_ebs %d", vol->used_ebs); | ||
87 | dbg_msg("used_bytes %lld", vol->used_bytes); | ||
88 | dbg_msg("last_eb_bytes %d", vol->last_eb_bytes); | ||
89 | dbg_msg("corrupted %d", vol->corrupted); | ||
90 | dbg_msg("upd_marker %d", vol->upd_marker); | ||
91 | |||
92 | if (vol->name_len <= UBI_VOL_NAME_MAX && | ||
93 | strnlen(vol->name, vol->name_len + 1) == vol->name_len) { | ||
94 | dbg_msg("name %s", vol->name); | ||
95 | } else { | ||
96 | dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c", | ||
97 | vol->name[0], vol->name[1], vol->name[2], | ||
98 | vol->name[3], vol->name[4]); | ||
99 | } | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object. | ||
104 | * @r: the object to dump | ||
105 | * @idx: volume table index | ||
106 | */ | ||
107 | void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) | ||
108 | { | ||
109 | int name_len = ubi16_to_cpu(r->name_len); | ||
110 | |||
111 | dbg_msg("volume table record %d dump:", idx); | ||
112 | dbg_msg("reserved_pebs %d", ubi32_to_cpu(r->reserved_pebs)); | ||
113 | dbg_msg("alignment %d", ubi32_to_cpu(r->alignment)); | ||
114 | dbg_msg("data_pad %d", ubi32_to_cpu(r->data_pad)); | ||
115 | dbg_msg("vol_type %d", (int)r->vol_type); | ||
116 | dbg_msg("upd_marker %d", (int)r->upd_marker); | ||
117 | dbg_msg("name_len %d", name_len); | ||
118 | |||
119 | if (r->name[0] == '\0') { | ||
120 | dbg_msg("name NULL"); | ||
121 | return; | ||
122 | } | ||
123 | |||
124 | if (name_len <= UBI_VOL_NAME_MAX && | ||
125 | strnlen(&r->name[0], name_len + 1) == name_len) { | ||
126 | dbg_msg("name %s", &r->name[0]); | ||
127 | } else { | ||
128 | dbg_msg("1st 5 characters of the name: %c%c%c%c%c", | ||
129 | r->name[0], r->name[1], r->name[2], r->name[3], | ||
130 | r->name[4]); | ||
131 | } | ||
132 | dbg_msg("crc %#08x", ubi32_to_cpu(r->crc)); | ||
133 | } | ||
134 | |||
135 | /** | ||
136 | * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object. | ||
137 | * @sv: the object to dump | ||
138 | */ | ||
139 | void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) | ||
140 | { | ||
141 | dbg_msg("volume scanning information dump:"); | ||
142 | dbg_msg("vol_id %d", sv->vol_id); | ||
143 | dbg_msg("highest_lnum %d", sv->highest_lnum); | ||
144 | dbg_msg("leb_count %d", sv->leb_count); | ||
145 | dbg_msg("compat %d", sv->compat); | ||
146 | dbg_msg("vol_type %d", sv->vol_type); | ||
147 | dbg_msg("used_ebs %d", sv->used_ebs); | ||
148 | dbg_msg("last_data_size %d", sv->last_data_size); | ||
149 | dbg_msg("data_pad %d", sv->data_pad); | ||
150 | } | ||
151 | |||
152 | /** | ||
153 | * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object. | ||
154 | * @seb: the object to dump | ||
155 | * @type: object type: 0 - not corrupted, 1 - corrupted | ||
156 | */ | ||
157 | void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type) | ||
158 | { | ||
159 | dbg_msg("eraseblock scanning information dump:"); | ||
160 | dbg_msg("ec %d", seb->ec); | ||
161 | dbg_msg("pnum %d", seb->pnum); | ||
162 | if (type == 0) { | ||
163 | dbg_msg("lnum %d", seb->lnum); | ||
164 | dbg_msg("scrub %d", seb->scrub); | ||
165 | dbg_msg("sqnum %llu", seb->sqnum); | ||
166 | dbg_msg("leb_ver %u", seb->leb_ver); | ||
167 | } | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object. | ||
172 | * @req: the object to dump | ||
173 | */ | ||
174 | void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) | ||
175 | { | ||
176 | char nm[17]; | ||
177 | |||
178 | dbg_msg("volume creation request dump:"); | ||
179 | dbg_msg("vol_id %d", req->vol_id); | ||
180 | dbg_msg("alignment %d", req->alignment); | ||
181 | dbg_msg("bytes %lld", (long long)req->bytes); | ||
182 | dbg_msg("vol_type %d", req->vol_type); | ||
183 | dbg_msg("name_len %d", req->name_len); | ||
184 | |||
185 | memcpy(nm, req->name, 16); | ||
186 | nm[16] = 0; | ||
187 | dbg_msg("the 1st 16 characters of the name: %s", nm); | ||
188 | } | ||
189 | |||
190 | #define BYTES_PER_LINE 32 | ||
191 | |||
192 | /** | ||
193 | * ubi_dbg_hexdump - dump a buffer. | ||
194 | * @ptr: the buffer to dump | ||
195 | * @size: buffer size; rounded up to a multiple of 4 bytes | ||
196 | */ | ||
197 | void ubi_dbg_hexdump(const void *ptr, int size) | ||
198 | { | ||
199 | int i, k = 0, rows, columns; | ||
200 | const uint8_t *p = ptr; | ||
201 | |||
202 | size = ALIGN(size, 4); | ||
203 | rows = size / BYTES_PER_LINE + !!(size % BYTES_PER_LINE); | ||
204 | for (i = 0; i < rows; i++) { | ||
205 | int j; | ||
206 | |||
207 | cond_resched(); | ||
208 | columns = min(size - k, BYTES_PER_LINE) / 4; | ||
209 | if (columns == 0) | ||
210 | break; | ||
211 | printk(KERN_DEBUG "%5d: ", i * BYTES_PER_LINE); | ||
212 | for (j = 0; j < columns; j++) { | ||
213 | int n, N; | ||
214 | |||
215 | N = size - k > 4 ? 4 : size - k; | ||
216 | for (n = 0; n < N; n++) | ||
217 | printk("%02x", p[k++]); | ||
218 | printk(" "); | ||
219 | } | ||
220 | printk("\n"); | ||
221 | } | ||
222 | } | ||
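A worked example of the layout math above: for size = 40 and BYTES_PER_LINE = 32, the dump needs two rows; row 0 prints eight 4-byte groups and row 1 the remaining two. An illustrative userspace check:

    #include <stdio.h>

    int main(void)
    {
            int size = 40, bpl = 32, k = 0, i;
            int rows = size / bpl + !!(size % bpl);

            for (i = 0; i < rows; i++) {
                    int columns = ((size - k) < bpl ? size - k : bpl) / 4;

                    /* prints: row 0: 8 groups, row 1: 2 groups */
                    printf("row %d: %d groups\n", i, columns);
                    k += columns * 4;
            }
            return 0;
    }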
223 | |||
224 | #endif /* CONFIG_MTD_UBI_DEBUG_MSG */ | ||
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h new file mode 100644 index 000000000000..f816ad9a36c0 --- /dev/null +++ b/drivers/mtd/ubi/debug.h | |||
@@ -0,0 +1,161 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
12 | * the GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
19 | */ | ||
20 | |||
21 | #ifndef __UBI_DEBUG_H__ | ||
22 | #define __UBI_DEBUG_H__ | ||
23 | |||
24 | #ifdef CONFIG_MTD_UBI_DEBUG | ||
25 | #include <linux/random.h> | ||
26 | |||
27 | #define ubi_assert(expr) BUG_ON(!(expr)) | ||
28 | #define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) | ||
29 | #else | ||
30 | #define ubi_assert(expr) ({}) | ||
31 | #define dbg_err(fmt, ...) ({}) | ||
32 | #endif | ||
33 | |||
34 | #ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT | ||
35 | #define DBG_DISABLE_BGT 1 | ||
36 | #else | ||
37 | #define DBG_DISABLE_BGT 0 | ||
38 | #endif | ||
39 | |||
40 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG | ||
41 | /* Generic debugging message */ | ||
42 | #define dbg_msg(fmt, ...) \ | ||
43 | printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__) | ||
44 | |||
45 | #define ubi_dbg_dump_stack() dump_stack() | ||
46 | |||
47 | struct ubi_ec_hdr; | ||
48 | struct ubi_vid_hdr; | ||
49 | struct ubi_volume; | ||
50 | struct ubi_vtbl_record; | ||
51 | struct ubi_scan_volume; | ||
52 | struct ubi_scan_leb; | ||
53 | struct ubi_mkvol_req; | ||
54 | |||
55 | void ubi_dbg_print(int type, const char *func, const char *fmt, ...); | ||
56 | void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr); | ||
57 | void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr); | ||
58 | void ubi_dbg_dump_vol_info(const struct ubi_volume *vol); | ||
59 | void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx); | ||
60 | void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv); | ||
61 | void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); | ||
62 | void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); | ||
63 | void ubi_dbg_hexdump(const void *buf, int size); | ||
64 | |||
65 | #else | ||
66 | |||
67 | #define dbg_msg(fmt, ...) ({}) | ||
68 | #define ubi_dbg_dump_stack() ({}) | ||
69 | #define ubi_dbg_print(func, fmt, ...) ({}) | ||
70 | #define ubi_dbg_dump_ec_hdr(ec_hdr) ({}) | ||
71 | #define ubi_dbg_dump_vid_hdr(vid_hdr) ({}) | ||
72 | #define ubi_dbg_dump_vol_info(vol) ({}) | ||
73 | #define ubi_dbg_dump_vtbl_record(r, idx) ({}) | ||
74 | #define ubi_dbg_dump_sv(sv) ({}) | ||
75 | #define ubi_dbg_dump_seb(seb, type) ({}) | ||
76 | #define ubi_dbg_dump_mkvol_req(req) ({}) | ||
77 | #define ubi_dbg_hexdump(buf, size) ({}) | ||
78 | |||
79 | #endif /* CONFIG_MTD_UBI_DEBUG_MSG */ | ||
80 | |||
81 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA | ||
82 | /* Messages from the eraseblock association unit */ | ||
83 | #define dbg_eba(fmt, ...) \ | ||
84 | printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \ | ||
85 | ##__VA_ARGS__) | ||
86 | #else | ||
87 | #define dbg_eba(fmt, ...) ({}) | ||
88 | #endif | ||
89 | |||
90 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL | ||
91 | /* Messages from the wear-leveling unit */ | ||
92 | #define dbg_wl(fmt, ...) \ | ||
93 | printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \ | ||
94 | ##__VA_ARGS__) | ||
95 | #else | ||
96 | #define dbg_wl(fmt, ...) ({}) | ||
97 | #endif | ||
98 | |||
99 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO | ||
100 | /* Messages from the input/output unit */ | ||
101 | #define dbg_io(fmt, ...) \ | ||
102 | printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \ | ||
103 | ##__VA_ARGS__) | ||
104 | #else | ||
105 | #define dbg_io(fmt, ...) ({}) | ||
106 | #endif | ||
107 | |||
108 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD | ||
109 | /* Initialization and build messages */ | ||
110 | #define dbg_bld(fmt, ...) \ | ||
111 | printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \ | ||
112 | ##__VA_ARGS__) | ||
113 | #else | ||
114 | #define dbg_bld(fmt, ...) ({}) | ||
115 | #endif | ||
116 | |||
117 | #ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS | ||
118 | /** | ||
119 | * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip. | ||
120 | * | ||
121 | * Returns non-zero if a bit-flip should be emulated, otherwise returns zero. | ||
122 | */ | ||
123 | static inline int ubi_dbg_is_bitflip(void) | ||
124 | { | ||
125 | return !(random32() % 200); | ||
126 | } | ||
127 | #else | ||
128 | #define ubi_dbg_is_bitflip() 0 | ||
129 | #endif | ||
130 | |||
131 | #ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES | ||
132 | /** | ||
133 | * ubi_dbg_is_write_failure - if it is time to emulate a write failure. | ||
134 | * | ||
135 | * Returns non-zero if a write failure should be emulated, otherwise returns | ||
136 | * zero. | ||
137 | */ | ||
138 | static inline int ubi_dbg_is_write_failure(void) | ||
139 | { | ||
140 | return !(random32() % 500); | ||
141 | } | ||
142 | #else | ||
143 | #define ubi_dbg_is_write_failure() 0 | ||
144 | #endif | ||
145 | |||
146 | #ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES | ||
147 | /** | ||
148 | * ubi_dbg_is_erase_failure - if it is time to emulate an erase failure. | ||
149 | * | ||
150 | * Returns non-zero if an erase failure should be emulated, otherwise returns | ||
151 | * zero. | ||
152 | */ | ||
153 | static inline int ubi_dbg_is_erase_failure(void) | ||
154 | { | ||
155 | return !(random32() % 400); | ||
156 | } | ||
157 | #else | ||
158 | #define ubi_dbg_is_erase_failure() 0 | ||
159 | #endif | ||
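The three emulation helpers above fire pseudo-randomly: on average one bit-flip per 200 reads, one write failure per 500 writes, and one erase failure per 400 erases. A hedged userspace check of the 1-in-N pattern:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            int i, hits = 0;

            srandom(1);
            for (i = 0; i < 200000; i++)
                    hits += !(random() % 200);  /* same test shape as ubi_dbg_is_bitflip() */

            /* expect roughly 1000 hits (200000 / 200) */
            printf("%d hits in 200000 tries\n", hits);
            return 0;
    }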
160 | |||
161 | #endif /* !__UBI_DEBUG_H__ */ | ||
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c new file mode 100644 index 000000000000..d847ee1da3d9 --- /dev/null +++ b/drivers/mtd/ubi/eba.c | |||
@@ -0,0 +1,1241 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
12 | * the GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * The UBI Eraseblock Association (EBA) unit. | ||
23 | * | ||
24 | * This unit is responsible for I/O to/from logical eraseblocks. | ||
25 | * | ||
26 | * Although in this implementation the EBA table is fully kept and managed in | ||
27 | * RAM, which limits scalability, it might be (partially) maintained on | ||
28 | * flash in future implementations. | ||
29 | * | ||
30 | * The EBA unit implements per-logical eraseblock locking. Before accessing a | ||
31 | * logical eraseblock it is locked for reading or writing. The per-logical | ||
32 | * eraseblock locking is implemented by means of the lock tree. The lock tree | ||
33 | * is an RB-tree which refers all the currently locked logical eraseblocks. The | ||
34 | * lock tree elements are &struct ltree_entry objects. They are indexed by | ||
35 | * (@vol_id, @lnum) pairs. | ||
36 | * | ||
37 | * EBA also maintains the global sequence counter which is incremented each | ||
38 | * time a logical eraseblock is mapped to a physical eraseblock and it is | ||
39 | * stored in the volume identifier header. This means that each VID header has | ||
40 | * a unique sequence number. The sequence number is only increased and we | ||
41 | * assume 64 bits is enough for it to never overflow. | ||
42 | */ | ||
43 | |||
44 | #include <linux/slab.h> | ||
45 | #include <linux/crc32.h> | ||
46 | #include <linux/err.h> | ||
47 | #include "ubi.h" | ||
48 | |||
49 | /** | ||
50 | * struct ltree_entry - an entry in the lock tree. | ||
51 | * @rb: links RB-tree nodes | ||
52 | * @vol_id: volume ID of the locked logical eraseblock | ||
53 | * @lnum: locked logical eraseblock number | ||
54 | * @users: how many tasks are using this logical eraseblock or wait for it | ||
55 | * @mutex: read/write mutex to implement read/write access serialization to | ||
56 | * the (@vol_id, @lnum) logical eraseblock | ||
57 | * | ||
58 | * When a logical eraseblock is locked, a corresponding &struct ltree_entry | ||
59 | * object is inserted into the lock tree (@ubi->ltree). | ||
60 | */ | ||
61 | struct ltree_entry { | ||
62 | struct rb_node rb; | ||
63 | int vol_id; | ||
64 | int lnum; | ||
65 | int users; | ||
66 | struct rw_semaphore mutex; | ||
67 | }; | ||
68 | |||
69 | /* Slab cache for lock-tree entries */ | ||
70 | static struct kmem_cache *ltree_slab; | ||
71 | |||
72 | /** | ||
73 | * next_sqnum - get next sequence number. | ||
74 | * @ubi: UBI device description object | ||
75 | * | ||
76 | * This function returns the next sequence number to use, which is just the current | ||
77 | * global sequence counter value. It also increases the global sequence | ||
78 | * counter. | ||
79 | */ | ||
80 | static unsigned long long next_sqnum(struct ubi_device *ubi) | ||
81 | { | ||
82 | unsigned long long sqnum; | ||
83 | |||
84 | spin_lock(&ubi->ltree_lock); | ||
85 | sqnum = ubi->global_sqnum++; | ||
86 | spin_unlock(&ubi->ltree_lock); | ||
87 | |||
88 | return sqnum; | ||
89 | } | ||
90 | |||
91 | /** | ||
92 | * ubi_get_compat - get compatibility flags of a volume. | ||
93 | * @ubi: UBI device description object | ||
94 | * @vol_id: volume ID | ||
95 | * | ||
96 | * This function returns compatibility flags for an internal volume. User | ||
97 | * volumes have no compatibility flags, so %0 is returned. | ||
98 | */ | ||
99 | static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) | ||
100 | { | ||
101 | if (vol_id == UBI_LAYOUT_VOL_ID) | ||
102 | return UBI_LAYOUT_VOLUME_COMPAT; | ||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | /** | ||
107 | * ltree_lookup - look up the lock tree. | ||
108 | * @ubi: UBI device description object | ||
109 | * @vol_id: volume ID | ||
110 | * @lnum: logical eraseblock number | ||
111 | * | ||
112 | * This function returns a pointer to the corresponding &struct ltree_entry | ||
113 | * object if the logical eraseblock is locked and %NULL if it is not. | ||
114 | * @ubi->ltree_lock has to be locked. | ||
115 | */ | ||
116 | static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, | ||
117 | int lnum) | ||
118 | { | ||
119 | struct rb_node *p; | ||
120 | |||
121 | p = ubi->ltree.rb_node; | ||
122 | while (p) { | ||
123 | struct ltree_entry *le; | ||
124 | |||
125 | le = rb_entry(p, struct ltree_entry, rb); | ||
126 | |||
127 | if (vol_id < le->vol_id) | ||
128 | p = p->rb_left; | ||
129 | else if (vol_id > le->vol_id) | ||
130 | p = p->rb_right; | ||
131 | else { | ||
132 | if (lnum < le->lnum) | ||
133 | p = p->rb_left; | ||
134 | else if (lnum > le->lnum) | ||
135 | p = p->rb_right; | ||
136 | else | ||
137 | return le; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | return NULL; | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * ltree_add_entry - add new entry to the lock tree. | ||
146 | * @ubi: UBI device description object | ||
147 | * @vol_id: volume ID | ||
148 | * @lnum: logical eraseblock number | ||
149 | * | ||
150 | * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the | ||
151 | * lock tree. If such an entry is already there, its usage counter is increased. | ||
152 | * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation | ||
153 | * failed. | ||
154 | */ | ||
155 | static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id, | ||
156 | int lnum) | ||
157 | { | ||
158 | struct ltree_entry *le, *le1, *le_free; | ||
159 | |||
160 | le = kmem_cache_alloc(ltree_slab, GFP_KERNEL); | ||
161 | if (!le) | ||
162 | return ERR_PTR(-ENOMEM); | ||
163 | |||
164 | le->vol_id = vol_id; | ||
165 | le->lnum = lnum; | ||
166 | |||
167 | spin_lock(&ubi->ltree_lock); | ||
168 | le1 = ltree_lookup(ubi, vol_id, lnum); | ||
169 | |||
170 | if (le1) { | ||
171 | /* | ||
172 | * This logical eraseblock is already locked. The newly | ||
173 | * allocated lock entry is not needed. | ||
174 | */ | ||
175 | le_free = le; | ||
176 | le = le1; | ||
177 | } else { | ||
178 | struct rb_node **p, *parent = NULL; | ||
179 | |||
180 | /* | ||
181 | * No lock entry, add the newly allocated one to the | ||
182 | * @ubi->ltree RB-tree. | ||
183 | */ | ||
184 | le_free = NULL; | ||
185 | |||
186 | p = &ubi->ltree.rb_node; | ||
187 | while (*p) { | ||
188 | parent = *p; | ||
189 | le1 = rb_entry(parent, struct ltree_entry, rb); | ||
190 | |||
191 | if (vol_id < le1->vol_id) | ||
192 | p = &(*p)->rb_left; | ||
193 | else if (vol_id > le1->vol_id) | ||
194 | p = &(*p)->rb_right; | ||
195 | else { | ||
196 | ubi_assert(lnum != le1->lnum); | ||
197 | if (lnum < le1->lnum) | ||
198 | p = &(*p)->rb_left; | ||
199 | else | ||
200 | p = &(*p)->rb_right; | ||
201 | } | ||
202 | } | ||
203 | |||
204 | rb_link_node(&le->rb, parent, p); | ||
205 | rb_insert_color(&le->rb, &ubi->ltree); | ||
206 | } | ||
207 | le->users += 1; | ||
208 | spin_unlock(&ubi->ltree_lock); | ||
209 | |||
210 | if (le_free) | ||
211 | kmem_cache_free(ltree_slab, le_free); | ||
212 | |||
213 | return le; | ||
214 | } | ||
215 | |||
216 | /** | ||
217 | * leb_read_lock - lock logical eraseblock for reading. | ||
218 | * @ubi: UBI device description object | ||
219 | * @vol_id: volume ID | ||
220 | * @lnum: logical eraseblock number | ||
221 | * | ||
222 | * This function locks a logical eraseblock for reading. Returns zero in case | ||
223 | * of success and a negative error code in case of failure. | ||
224 | */ | ||
225 | static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) | ||
226 | { | ||
227 | struct ltree_entry *le; | ||
228 | |||
229 | le = ltree_add_entry(ubi, vol_id, lnum); | ||
230 | if (IS_ERR(le)) | ||
231 | return PTR_ERR(le); | ||
232 | down_read(&le->mutex); | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | /** | ||
237 | * leb_read_unlock - unlock logical eraseblock. | ||
238 | * @ubi: UBI device description object | ||
239 | * @vol_id: volume ID | ||
240 | * @lnum: logical eraseblock number | ||
241 | */ | ||
242 | static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) | ||
243 | { | ||
244 | int free = 0; | ||
245 | struct ltree_entry *le; | ||
246 | |||
247 | spin_lock(&ubi->ltree_lock); | ||
248 | le = ltree_lookup(ubi, vol_id, lnum); | ||
249 | le->users -= 1; | ||
250 | ubi_assert(le->users >= 0); | ||
251 | if (le->users == 0) { | ||
252 | rb_erase(&le->rb, &ubi->ltree); | ||
253 | free = 1; | ||
254 | } | ||
255 | spin_unlock(&ubi->ltree_lock); | ||
256 | |||
257 | up_read(&le->mutex); | ||
258 | if (free) | ||
259 | kmem_cache_free(ltree_slab, le); | ||
260 | } | ||
261 | |||
262 | /** | ||
263 | * leb_write_lock - lock logical eraseblock for writing. | ||
264 | * @ubi: UBI device description object | ||
265 | * @vol_id: volume ID | ||
266 | * @lnum: logical eraseblock number | ||
267 | * | ||
268 | * This function locks a logical eraseblock for writing. Returns zero in case | ||
269 | * of success and a negative error code in case of failure. | ||
270 | */ | ||
271 | static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) | ||
272 | { | ||
273 | struct ltree_entry *le; | ||
274 | |||
275 | le = ltree_add_entry(ubi, vol_id, lnum); | ||
276 | if (IS_ERR(le)) | ||
277 | return PTR_ERR(le); | ||
278 | down_write(&le->mutex); | ||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | /** | ||
283 | * leb_write_unlock - unlock logical eraseblock. | ||
284 | * @ubi: UBI device description object | ||
285 | * @vol_id: volume ID | ||
286 | * @lnum: logical eraseblock number | ||
287 | */ | ||
288 | static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) | ||
289 | { | ||
290 | int free; | ||
291 | struct ltree_entry *le; | ||
292 | |||
293 | spin_lock(&ubi->ltree_lock); | ||
294 | le = ltree_lookup(ubi, vol_id, lnum); | ||
295 | le->users -= 1; | ||
296 | ubi_assert(le->users >= 0); | ||
297 | if (le->users == 0) { | ||
298 | rb_erase(&le->rb, &ubi->ltree); | ||
299 | free = 1; | ||
300 | } else | ||
301 | free = 0; | ||
302 | spin_unlock(&ubi->ltree_lock); | ||
303 | |||
304 | up_write(&le->mutex); | ||
305 | if (free) | ||
306 | kmem_cache_free(ltree_slab, le); | ||
307 | } | ||
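Every leb_read_lock()/leb_write_lock() must be paired with the matching unlock, which also releases the ltree entry once the last user drops it. An illustrative (hypothetical) caller, following the pattern the EBA entry points in this file use:

    static int example_read(struct ubi_device *ubi, int vol_id, int lnum)
    {
            int err;

            err = leb_read_lock(ubi, vol_id, lnum);
            if (err)
                    return err;
            /* ... safely access the mapped physical eraseblock here ... */
            leb_read_unlock(ubi, vol_id, lnum);
            return 0;
    }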
308 | |||
309 | /** | ||
310 | * ubi_eba_unmap_leb - un-map logical eraseblock. | ||
311 | * @ubi: UBI device description object | ||
312 | * @vol_id: volume ID | ||
313 | * @lnum: logical eraseblock number | ||
314 | * | ||
315 | * This function un-maps logical eraseblock @lnum and schedules corresponding | ||
316 | * physical eraseblock for erasure. Returns zero in case of success and a | ||
317 | * negative error code in case of failure. | ||
318 | */ | ||
319 | int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum) | ||
320 | { | ||
321 | int idx = vol_id2idx(ubi, vol_id), err, pnum; | ||
322 | struct ubi_volume *vol = ubi->volumes[idx]; | ||
323 | |||
324 | if (ubi->ro_mode) | ||
325 | return -EROFS; | ||
326 | |||
327 | err = leb_write_lock(ubi, vol_id, lnum); | ||
328 | if (err) | ||
329 | return err; | ||
330 | |||
331 | pnum = vol->eba_tbl[lnum]; | ||
332 | if (pnum < 0) | ||
333 | /* This logical eraseblock is already unmapped */ | ||
334 | goto out_unlock; | ||
335 | |||
336 | dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); | ||
337 | |||
338 | vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED; | ||
339 | err = ubi_wl_put_peb(ubi, pnum, 0); | ||
340 | |||
341 | out_unlock: | ||
342 | leb_write_unlock(ubi, vol_id, lnum); | ||
343 | return err; | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * ubi_eba_read_leb - read data. | ||
348 | * @ubi: UBI device description object | ||
349 | * @vol_id: volume ID | ||
350 | * @lnum: logical eraseblock number | ||
351 | * @buf: buffer to store the read data | ||
352 | * @offset: offset from where to read | ||
353 | * @len: how many bytes to read | ||
354 | * @check: data CRC check flag | ||
355 | * | ||
356 | * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF | ||
357 | * bytes. The @check flag only makes sense for static volumes and forces | ||
358 | * eraseblock data CRC checking. | ||
359 | * | ||
360 | * In case of success this function returns zero. In case of a static volume, | ||
361 | * if the data CRC mismatches, %-EBADMSG is returned. %-EBADMSG may also be | ||
362 | * returned for any volume type if an ECC error was detected by the MTD device | ||
363 | * driver. Other negative error codes may be returned in case of other errors. | ||
364 | */ | ||
365 | int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, | ||
366 | int offset, int len, int check) | ||
367 | { | ||
368 | int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id); | ||
369 | struct ubi_vid_hdr *vid_hdr; | ||
370 | struct ubi_volume *vol = ubi->volumes[idx]; | ||
371 | uint32_t crc, crc1; | ||
372 | |||
373 | err = leb_read_lock(ubi, vol_id, lnum); | ||
374 | if (err) | ||
375 | return err; | ||
376 | |||
377 | pnum = vol->eba_tbl[lnum]; | ||
378 | if (pnum < 0) { | ||
379 | /* | ||
380 | * The logical eraseblock is not mapped, fill the whole buffer | ||
381 | * with 0xFF bytes. The exception is static volumes for which | ||
382 | * it is an error to read unmapped logical eraseblocks. | ||
383 | */ | ||
384 | dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)", | ||
385 | len, offset, vol_id, lnum); | ||
386 | leb_read_unlock(ubi, vol_id, lnum); | ||
387 | ubi_assert(vol->vol_type != UBI_STATIC_VOLUME); | ||
388 | memset(buf, 0xFF, len); | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d", | ||
393 | len, offset, vol_id, lnum, pnum); | ||
394 | |||
395 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) | ||
396 | check = 0; | ||
397 | |||
398 | retry: | ||
399 | if (check) { | ||
400 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | ||
401 | if (!vid_hdr) { | ||
402 | err = -ENOMEM; | ||
403 | goto out_unlock; | ||
404 | } | ||
405 | |||
406 | err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); | ||
407 | if (err && err != UBI_IO_BITFLIPS) { | ||
408 | if (err > 0) { | ||
409 | /* | ||
410 | * The header is either absent or corrupted. | ||
411 | * The former case means there is a bug - | ||
412 | * switch to read-only mode just in case. | ||
413 | * The latter case means a real corruption - we | ||
414 | * may try to recover data. FIXME: but this is | ||
415 | * not implemented. | ||
416 | */ | ||
417 | if (err == UBI_IO_BAD_VID_HDR) { | ||
418 | ubi_warn("bad VID header at PEB %d, LEB" | ||
419 | "%d:%d", pnum, vol_id, lnum); | ||
420 | err = -EBADMSG; | ||
421 | } else | ||
422 | ubi_ro_mode(ubi); | ||
423 | } | ||
424 | goto out_free; | ||
425 | } else if (err == UBI_IO_BITFLIPS) | ||
426 | scrub = 1; | ||
427 | |||
428 | ubi_assert(lnum < ubi32_to_cpu(vid_hdr->used_ebs)); | ||
429 | ubi_assert(len == ubi32_to_cpu(vid_hdr->data_size)); | ||
430 | |||
431 | crc = ubi32_to_cpu(vid_hdr->data_crc); | ||
432 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
433 | } | ||
434 | |||
435 | err = ubi_io_read_data(ubi, buf, pnum, offset, len); | ||
436 | if (err) { | ||
437 | if (err == UBI_IO_BITFLIPS) { | ||
438 | scrub = 1; | ||
439 | err = 0; | ||
440 | } else if (err == -EBADMSG) { | ||
441 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) | ||
442 | goto out_unlock; | ||
443 | scrub = 1; | ||
444 | if (!check) { | ||
445 | ubi_msg("force data checking"); | ||
446 | check = 1; | ||
447 | goto retry; | ||
448 | } | ||
449 | } else | ||
450 | goto out_unlock; | ||
451 | } | ||
452 | |||
453 | if (check) { | ||
454 | crc1 = crc32(UBI_CRC32_INIT, buf, len); | ||
455 | if (crc1 != crc) { | ||
456 | ubi_warn("CRC error: calculated %#08x, must be %#08x", | ||
457 | crc1, crc); | ||
458 | err = -EBADMSG; | ||
459 | goto out_unlock; | ||
460 | } | ||
461 | } | ||
462 | |||
463 | if (scrub) | ||
464 | err = ubi_wl_scrub_peb(ubi, pnum); | ||
465 | |||
466 | leb_read_unlock(ubi, vol_id, lnum); | ||
467 | return err; | ||
468 | |||
469 | out_free: | ||
470 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
471 | out_unlock: | ||
472 | leb_read_unlock(ubi, vol_id, lnum); | ||
473 | return err; | ||
474 | } | ||
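
When @check is set, the function compares the data CRC stored in the VID header with one computed over the bytes just read. Below is a self-contained user-space sketch of that comparison; it assumes the kernel crc32() semantics (reflected polynomial 0xEDB88320, initial value UBI_CRC32_INIT = 0xFFFFFFFF, no final inversion), an assumption worth checking against the kernel's lib/crc32.c.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* bitwise CRC-32, matching the assumed kernel crc32() behaviour */
static uint32_t crc32_ubi(uint32_t crc, const void *buf, size_t len)
{
        const uint8_t *p = buf;
        int i;

        while (len--) {
                crc ^= *p++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0xEDB88320U & -(crc & 1));
        }
        return crc;
}

int main(void)
{
        unsigned char data[64];
        uint32_t stored, crc1;

        memset(data, 0xA5, sizeof(data));
        /* pretend this came from vid_hdr->data_crc at write time */
        stored = crc32_ubi(0xFFFFFFFFU, data, sizeof(data));

        data[5] ^= 0x01;                  /* simulate flash corruption */
        crc1 = crc32_ubi(0xFFFFFFFFU, data, sizeof(data));
        if (crc1 != stored)
                printf("CRC error: calculated %#08x, must be %#08x\n",
                       crc1, stored);
        return 0;
}
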
475 | |||
476 | /** | ||
477 | * recover_peb - recover from write failure. | ||
478 | * @ubi: UBI device description object | ||
479 | * @pnum: the physical eraseblock to recover | ||
480 | * @vol_id: volume ID | ||
481 | * @lnum: logical eraseblock number | ||
482 | * @buf: data which was not written because of the write failure | ||
483 | * @offset: offset of the failed write | ||
484 | * @len: how many bytes should have been written | ||
485 | * | ||
486 | * This function is called in case of a write failure and moves all good data | ||
487 | * from the potentially bad physical eraseblock to a good physical eraseblock. | ||
488 | * This function also writes the data which was not written due to the failure. | ||
489 | * Returns new physical eraseblock number in case of success, and a negative | ||
490 | * error code in case of failure. | ||
491 | */ | ||
492 | static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, | ||
493 | const void *buf, int offset, int len) | ||
494 | { | ||
495 | int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; | ||
496 | struct ubi_volume *vol = ubi->volumes[idx]; | ||
497 | struct ubi_vid_hdr *vid_hdr; | ||
498 | unsigned char *new_buf; | ||
499 | |||
500 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | ||
501 | if (!vid_hdr) { | ||
502 | return -ENOMEM; | ||
503 | } | ||
504 | |||
505 | retry: | ||
506 | new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN); | ||
507 | if (new_pnum < 0) { | ||
508 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
509 | return new_pnum; | ||
510 | } | ||
511 | |||
512 | ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum); | ||
513 | |||
514 | err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); | ||
515 | if (err && err != UBI_IO_BITFLIPS) { | ||
516 | if (err > 0) | ||
517 | err = -EIO; | ||
518 | goto out_put; | ||
519 | } | ||
520 | |||
521 | vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); | ||
522 | err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); | ||
523 | if (err) | ||
524 | goto write_error; | ||
525 | |||
526 | data_size = offset + len; | ||
527 | new_buf = kmalloc(data_size, GFP_KERNEL); | ||
528 | if (!new_buf) { | ||
529 | err = -ENOMEM; | ||
530 | goto out_put; | ||
531 | } | ||
532 | memset(new_buf + offset, 0xFF, len); | ||
533 | |||
534 | /* Read everything before the area where the write failure happened */ | ||
535 | if (offset > 0) { | ||
536 | err = ubi_io_read_data(ubi, new_buf, pnum, 0, offset); | ||
537 | if (err && err != UBI_IO_BITFLIPS) { | ||
538 | kfree(new_buf); | ||
539 | goto out_put; | ||
540 | } | ||
541 | } | ||
542 | |||
543 | memcpy(new_buf + offset, buf, len); | ||
544 | |||
545 | err = ubi_io_write_data(ubi, new_buf, new_pnum, 0, data_size); | ||
546 | if (err) { | ||
547 | kfree(new_buf); | ||
548 | goto write_error; | ||
549 | } | ||
550 | |||
551 | kfree(new_buf); | ||
552 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
553 | |||
554 | vol->eba_tbl[lnum] = new_pnum; | ||
555 | ubi_wl_put_peb(ubi, pnum, 1); | ||
556 | |||
557 | ubi_msg("data was successfully recovered"); | ||
558 | return 0; | ||
559 | |||
560 | out_put: | ||
561 | ubi_wl_put_peb(ubi, new_pnum, 1); | ||
562 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
563 | return err; | ||
564 | |||
565 | write_error: | ||
566 | /* | ||
567 | * Bad luck? This physical eraseblock is bad too? Crud. Let's try to | ||
568 | * get another one. | ||
569 | */ | ||
570 | ubi_warn("failed to write to PEB %d", new_pnum); | ||
571 | ubi_wl_put_peb(ubi, new_pnum, 1); | ||
572 | if (++tries > UBI_IO_RETRIES) { | ||
573 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
574 | return err; | ||
575 | } | ||
576 | ubi_msg("try again"); | ||
577 | goto retry; | ||
578 | } | ||
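
The heart of recover_peb() is assembling a complete data area for the replacement PEB: everything below @offset is salvaged from the failing PEB, and the chunk that never reached the flash comes from @buf. Here is a user-space sketch of just that assembly step, with flash I/O replaced by memcpy().

#include <stdlib.h>
#include <string.h>

static unsigned char *build_recovery_buf(const unsigned char *old_peb,
                                         const unsigned char *buf,
                                         int offset, int len)
{
        unsigned char *new_buf = malloc(offset + len);

        if (!new_buf)
                return NULL;
        /* everything before the failed write comes from the old PEB */
        memcpy(new_buf, old_peb, offset);
        /* the chunk that never made it to flash comes from the caller */
        memcpy(new_buf + offset, buf, len);
        return new_buf;                   /* written to the new PEB, then freed */
}

int main(void)
{
        unsigned char old[256], chunk[64], *fixed;

        memset(old, 0x11, sizeof(old));
        memset(chunk, 0x22, sizeof(chunk));
        fixed = build_recovery_buf(old, chunk, 128, sizeof(chunk));
        if (!fixed)
                return 1;
        free(fixed);
        return 0;
}
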
579 | |||
580 | /** | ||
581 | * ubi_eba_write_leb - write data to dynamic volume. | ||
582 | * @ubi: UBI device description object | ||
583 | * @vol_id: volume ID | ||
584 | * @lnum: logical eraseblock number | ||
585 | * @buf: the data to write | ||
586 | * @offset: offset within the logical eraseblock where to write | ||
587 | * @len: how many bytes to write | ||
588 | * @dtype: data type | ||
589 | * | ||
590 | * This function writes data to logical eraseblock @lnum of a dynamic volume | ||
591 | * @vol_id. Returns zero in case of success and a negative error code in case | ||
592 | * of failure. In case of error, it is possible that something was still | ||
593 | * written to the flash media, but it may be some garbage. | ||
594 | */ | ||
595 | int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, | ||
596 | const void *buf, int offset, int len, int dtype) | ||
597 | { | ||
598 | int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0; | ||
599 | struct ubi_volume *vol = ubi->volumes[idx]; | ||
600 | struct ubi_vid_hdr *vid_hdr; | ||
601 | |||
602 | if (ubi->ro_mode) | ||
603 | return -EROFS; | ||
604 | |||
605 | err = leb_write_lock(ubi, vol_id, lnum); | ||
606 | if (err) | ||
607 | return err; | ||
608 | |||
609 | pnum = vol->eba_tbl[lnum]; | ||
610 | if (pnum >= 0) { | ||
611 | dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d", | ||
612 | len, offset, vol_id, lnum, pnum); | ||
613 | |||
614 | err = ubi_io_write_data(ubi, buf, pnum, offset, len); | ||
615 | if (err) { | ||
616 | ubi_warn("failed to write data to PEB %d", pnum); | ||
617 | if (err == -EIO && ubi->bad_allowed) | ||
618 | err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len); | ||
619 | if (err) | ||
620 | ubi_ro_mode(ubi); | ||
621 | } | ||
622 | leb_write_unlock(ubi, vol_id, lnum); | ||
623 | return err; | ||
624 | } | ||
625 | |||
626 | /* | ||
627 | * The logical eraseblock is not mapped. We have to get a free physical | ||
628 | * eraseblock and write the volume identifier header there first. | ||
629 | */ | ||
630 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | ||
631 | if (!vid_hdr) { | ||
632 | leb_write_unlock(ubi, vol_id, lnum); | ||
633 | return -ENOMEM; | ||
634 | } | ||
635 | |||
636 | vid_hdr->vol_type = UBI_VID_DYNAMIC; | ||
637 | vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); | ||
638 | vid_hdr->vol_id = cpu_to_ubi32(vol_id); | ||
639 | vid_hdr->lnum = cpu_to_ubi32(lnum); | ||
640 | vid_hdr->compat = ubi_get_compat(ubi, vol_id); | ||
641 | vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad); | ||
642 | |||
643 | retry: | ||
644 | pnum = ubi_wl_get_peb(ubi, dtype); | ||
645 | if (pnum < 0) { | ||
646 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
647 | leb_write_unlock(ubi, vol_id, lnum); | ||
648 | return pnum; | ||
649 | } | ||
650 | |||
651 | dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d", | ||
652 | len, offset, vol_id, lnum, pnum); | ||
653 | |||
654 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); | ||
655 | if (err) { | ||
656 | ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", | ||
657 | vol_id, lnum, pnum); | ||
658 | goto write_error; | ||
659 | } | ||
660 | |||
661 | err = ubi_io_write_data(ubi, buf, pnum, offset, len); | ||
662 | if (err) { | ||
663 | ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, " | ||
664 | "PEB %d", len, offset, vol_id, lnum, pnum); | ||
665 | goto write_error; | ||
666 | } | ||
667 | |||
668 | vol->eba_tbl[lnum] = pnum; | ||
669 | |||
670 | leb_write_unlock(ubi, vol_id, lnum); | ||
671 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
672 | return 0; | ||
673 | |||
674 | write_error: | ||
675 | if (err != -EIO || !ubi->bad_allowed) { | ||
676 | ubi_ro_mode(ubi); | ||
677 | leb_write_unlock(ubi, vol_id, lnum); | ||
678 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
679 | return err; | ||
680 | } | ||
681 | |||
682 | /* | ||
683 | * Fortunately, this is the first write operation to this physical | ||
684 | * eraseblock, so just put it and request a new one. We assume that if | ||
685 | * this physical eraseblock went bad, the erase code will handle that. | ||
686 | */ | ||
687 | err = ubi_wl_put_peb(ubi, pnum, 1); | ||
688 | if (err || ++tries > UBI_IO_RETRIES) { | ||
689 | ubi_ro_mode(ubi); | ||
690 | leb_write_unlock(ubi, vol_id, lnum); | ||
691 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
692 | return err; | ||
693 | } | ||
694 | |||
695 | vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); | ||
696 | ubi_msg("try another PEB"); | ||
697 | goto retry; | ||
698 | } | ||
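
Both this function and recover_peb() share the same error-handling shape: on -EIO with bad-PEB handling allowed, return the PEB for torture and erasure, bump a bounded retry counter, and try a fresh PEB under a new sequence number. A stripped-down user-space skeleton of that loop follows; get_peb() and flash_write() are stand-ins, and the cap of 3 is only an assumed value for UBI_IO_RETRIES.

#include <errno.h>
#include <stdio.h>

#define IO_RETRIES 3                      /* assumed UBI_IO_RETRIES value */

static int get_peb(void)         { static int p = 100; return p++; }
static int flash_write(int pnum) { return pnum < 102 ? -EIO : 0; }

static int write_with_retries(void)
{
        int err, pnum, tries = 0;

retry:
        pnum = get_peb();
        err = flash_write(pnum);
        if (err == -EIO && ++tries <= IO_RETRIES) {
                /* the PEB went bad under us - put it and try another */
                printf("failed to write to PEB %d, try another PEB\n", pnum);
                goto retry;               /* fresh PEB, fresh sqnum */
        }
        return err;                       /* 0, or give up after the cap */
}

int main(void)
{
        return write_with_retries();
}
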
699 | |||
700 | /** | ||
701 | * ubi_eba_write_leb_st - write data to static volume. | ||
702 | * @ubi: UBI device description object | ||
703 | * @vol_id: volume ID | ||
704 | * @lnum: logical eraseblock number | ||
705 | * @buf: data to write | ||
706 | * @len: how many bytes to write | ||
707 | * @dtype: data type | ||
708 | * @used_ebs: how many logical eraseblocks will this volume contain | ||
709 | * | ||
710 | * This function writes data to logical eraseblock @lnum of static volume | ||
711 | * @vol_id. The @used_ebs argument should contain the total number of logical | ||
712 | * eraseblocks in this static volume. | ||
713 | * | ||
714 | * When writing to the last logical eraseblock, the @len argument doesn't have | ||
715 | * to be aligned to the minimal I/O unit size. Instead, it has to be equal to | ||
716 | * the real data size, although the @buf buffer still has to be padded up to | ||
717 | * the aligned size. In all other cases, @len has to be aligned. | ||
718 | * | ||
719 | * It is prohibited to write more than once to logical eraseblocks of static | ||
720 | * volumes. This function returns zero in case of success and a negative error | ||
721 | * code in case of failure. | ||
722 | */ | ||
723 | int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, | ||
724 | const void *buf, int len, int dtype, int used_ebs) | ||
725 | { | ||
726 | int err, pnum, tries = 0, data_size = len; | ||
727 | int idx = vol_id2idx(ubi, vol_id); | ||
728 | struct ubi_volume *vol = ubi->volumes[idx]; | ||
729 | struct ubi_vid_hdr *vid_hdr; | ||
730 | uint32_t crc; | ||
731 | |||
732 | if (ubi->ro_mode) | ||
733 | return -EROFS; | ||
734 | |||
735 | if (lnum == used_ebs - 1) | ||
736 | /* If this is the last LEB @len may be unaligned */ | ||
737 | len = ALIGN(data_size, ubi->min_io_size); | ||
738 | else | ||
739 | ubi_assert(len % ubi->min_io_size == 0); | ||
740 | |||
741 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | ||
742 | if (!vid_hdr) | ||
743 | return -ENOMEM; | ||
744 | |||
745 | err = leb_write_lock(ubi, vol_id, lnum); | ||
746 | if (err) { | ||
747 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
748 | return err; | ||
749 | } | ||
750 | |||
751 | vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); | ||
752 | vid_hdr->vol_id = cpu_to_ubi32(vol_id); | ||
753 | vid_hdr->lnum = cpu_to_ubi32(lnum); | ||
754 | vid_hdr->compat = ubi_get_compat(ubi, vol_id); | ||
755 | vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad); | ||
756 | |||
757 | crc = crc32(UBI_CRC32_INIT, buf, data_size); | ||
758 | vid_hdr->vol_type = UBI_VID_STATIC; | ||
759 | vid_hdr->data_size = cpu_to_ubi32(data_size); | ||
760 | vid_hdr->used_ebs = cpu_to_ubi32(used_ebs); | ||
761 | vid_hdr->data_crc = cpu_to_ubi32(crc); | ||
762 | |||
763 | retry: | ||
764 | pnum = ubi_wl_get_peb(ubi, dtype); | ||
765 | if (pnum < 0) { | ||
766 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
767 | leb_write_unlock(ubi, vol_id, lnum); | ||
768 | return pnum; | ||
769 | } | ||
770 | |||
771 | dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d", | ||
772 | len, vol_id, lnum, pnum, used_ebs); | ||
773 | |||
774 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); | ||
775 | if (err) { | ||
776 | ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", | ||
777 | vol_id, lnum, pnum); | ||
778 | goto write_error; | ||
779 | } | ||
780 | |||
781 | err = ubi_io_write_data(ubi, buf, pnum, 0, len); | ||
782 | if (err) { | ||
783 | ubi_warn("failed to write %d bytes of data to PEB %d", | ||
784 | len, pnum); | ||
785 | goto write_error; | ||
786 | } | ||
787 | |||
788 | ubi_assert(vol->eba_tbl[lnum] < 0); | ||
789 | vol->eba_tbl[lnum] = pnum; | ||
790 | |||
791 | leb_write_unlock(ubi, vol_id, lnum); | ||
792 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
793 | return 0; | ||
794 | |||
795 | write_error: | ||
796 | if (err != -EIO || !ubi->bad_allowed) { | ||
797 | /* | ||
798 | * This flash device does not admit of bad eraseblocks or | ||
799 | * something nasty and unexpected happened. Switch to read-only | ||
800 | * mode just in case. | ||
801 | */ | ||
802 | ubi_ro_mode(ubi); | ||
803 | leb_write_unlock(ubi, vol_id, lnum); | ||
804 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
805 | return err; | ||
806 | } | ||
807 | |||
808 | err = ubi_wl_put_peb(ubi, pnum, 1); | ||
809 | if (err || ++tries > UBI_IO_RETRIES) { | ||
810 | ubi_ro_mode(ubi); | ||
811 | leb_write_unlock(ubi, vol_id, lnum); | ||
812 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
813 | return err; | ||
814 | } | ||
815 | |||
816 | vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); | ||
817 | ubi_msg("try another PEB"); | ||
818 | goto retry; | ||
819 | } | ||
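
The last-LEB rule above is plain round-up arithmetic: @len carries the real data size, and the flash write is padded to the minimal I/O unit with ALIGN(). A short user-space illustration; ALIGN() is the usual kernel round-up idiom and the sizes are made up.

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        int min_io_size = 512;            /* e.g. one NAND sub-page */
        int data_size = 1300;             /* real bytes in the last LEB */
        int len = ALIGN(data_size, min_io_size);

        /* 1300 data bytes go out as three full 512-byte units (1536) */
        printf("data_size %d -> write length %d\n", data_size, len);
        return 0;
}
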
820 | |||
821 | /** | ||
822 | * ubi_eba_atomic_leb_change - change logical eraseblock atomically. | ||
823 | * @ubi: UBI device description object | ||
824 | * @vol_id: volume ID | ||
825 | * @lnum: logical eraseblock number | ||
826 | * @buf: data to write | ||
827 | * @len: how many bytes to write | ||
828 | * @dtype: data type | ||
829 | * | ||
830 | * This function changes the contents of a logical eraseblock atomically. @buf | ||
831 | * has to contain the new logical eraseblock data, and @len the length of the | ||
832 | * data, which has to be aligned. This function guarantees that in case of an | ||
833 | * unclean reboot the old contents are preserved. Returns zero in case of | ||
834 | * success and a negative error code in case of failure. | ||
835 | */ | ||
836 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, | ||
837 | const void *buf, int len, int dtype) | ||
838 | { | ||
839 | int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id); | ||
840 | struct ubi_volume *vol = ubi->volumes[idx]; | ||
841 | struct ubi_vid_hdr *vid_hdr; | ||
842 | uint32_t crc; | ||
843 | |||
844 | if (ubi->ro_mode) | ||
845 | return -EROFS; | ||
846 | |||
847 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | ||
848 | if (!vid_hdr) | ||
849 | return -ENOMEM; | ||
850 | |||
851 | err = leb_write_lock(ubi, vol_id, lnum); | ||
852 | if (err) { | ||
853 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
854 | return err; | ||
855 | } | ||
856 | |||
857 | vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); | ||
858 | vid_hdr->vol_id = cpu_to_ubi32(vol_id); | ||
859 | vid_hdr->lnum = cpu_to_ubi32(lnum); | ||
860 | vid_hdr->compat = ubi_get_compat(ubi, vol_id); | ||
861 | vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad); | ||
862 | |||
863 | crc = crc32(UBI_CRC32_INIT, buf, len); | ||
864 | vid_hdr->vol_type = UBI_VID_STATIC; | ||
865 | vid_hdr->data_size = cpu_to_ubi32(len); | ||
866 | vid_hdr->copy_flag = 1; | ||
867 | vid_hdr->data_crc = cpu_to_ubi32(crc); | ||
868 | |||
869 | retry: | ||
870 | pnum = ubi_wl_get_peb(ubi, dtype); | ||
871 | if (pnum < 0) { | ||
872 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
873 | leb_write_unlock(ubi, vol_id, lnum); | ||
874 | return pnum; | ||
875 | } | ||
876 | |||
877 | dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d", | ||
878 | vol_id, lnum, vol->eba_tbl[lnum], pnum); | ||
879 | |||
880 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); | ||
881 | if (err) { | ||
882 | ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", | ||
883 | vol_id, lnum, pnum); | ||
884 | goto write_error; | ||
885 | } | ||
886 | |||
887 | err = ubi_io_write_data(ubi, buf, pnum, 0, len); | ||
888 | if (err) { | ||
889 | ubi_warn("failed to write %d bytes of data to PEB %d", | ||
890 | len, pnum); | ||
891 | goto write_error; | ||
892 | } | ||
893 | |||
894 | err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1); | ||
895 | if (err) { | ||
896 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
897 | leb_write_unlock(ubi, vol_id, lnum); | ||
898 | return err; | ||
899 | } | ||
900 | |||
901 | vol->eba_tbl[lnum] = pnum; | ||
902 | leb_write_unlock(ubi, vol_id, lnum); | ||
903 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
904 | return 0; | ||
905 | |||
906 | write_error: | ||
907 | if (err != -EIO || !ubi->bad_allowed) { | ||
908 | /* | ||
909 | * This flash device does not admit of bad eraseblocks or | ||
910 | * something nasty and unexpected happened. Switch to read-only | ||
911 | * mode just in case. | ||
912 | */ | ||
913 | ubi_ro_mode(ubi); | ||
914 | leb_write_unlock(ubi, vol_id, lnum); | ||
915 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
916 | return err; | ||
917 | } | ||
918 | |||
919 | err = ubi_wl_put_peb(ubi, pnum, 1); | ||
920 | if (err || ++tries > UBI_IO_RETRIES) { | ||
921 | ubi_ro_mode(ubi); | ||
922 | leb_write_unlock(ubi, vol_id, lnum); | ||
923 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
924 | return err; | ||
925 | } | ||
926 | |||
927 | vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); | ||
928 | ubi_msg("try another PEB"); | ||
929 | goto retry; | ||
930 | } | ||
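
What makes the change atomic is ordering rather than locking: the new PEB is made completely valid (VID header with copy_flag and data CRC, then the data) before the mapping is switched and the old PEB is released, so an unclean reboot before the switch still finds the old copy, and the sequence numbers let the next attach pick the newer one. A user-space sketch of that ordering, with faked flash operations:

#include <stdio.h>

static int eba_tbl[1] = { 7 };            /* LEB 0 currently lives on PEB 7 */

static int write_vid_hdr(int pnum) { return 0; }     /* stand-in */
static int write_data(int pnum)    { return 0; }     /* stand-in */
static void put_peb(int pnum)      { printf("PEB %d to erasure\n", pnum); }

static int atomic_leb_change(int lnum, int new_pnum)
{
        /* 1. complete the new copy before touching the mapping */
        if (write_vid_hdr(new_pnum) || write_data(new_pnum))
                return -1;                /* old mapping untouched, still valid */

        /* 2. only now release the old PEB and switch the mapping */
        put_peb(eba_tbl[lnum]);
        eba_tbl[lnum] = new_pnum;
        return 0;
}

int main(void)
{
        return atomic_leb_change(0, 42);
}
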
931 | |||
932 | /** | ||
933 | * ltree_entry_ctor - lock tree entries slab cache constructor. | ||
934 | * @obj: the lock-tree entry to construct | ||
935 | * @cache: the lock tree entry slab cache | ||
936 | * @flags: constructor flags | ||
937 | */ | ||
938 | static void ltree_entry_ctor(void *obj, struct kmem_cache *cache, | ||
939 | unsigned long flags) | ||
940 | { | ||
941 | struct ltree_entry *le = obj; | ||
942 | |||
943 | if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) != | ||
944 | SLAB_CTOR_CONSTRUCTOR) | ||
945 | return; | ||
946 | |||
947 | le->users = 0; | ||
948 | init_rwsem(&le->mutex); | ||
949 | } | ||
950 | |||
951 | /** | ||
952 | * ubi_eba_copy_leb - copy logical eraseblock. | ||
953 | * @ubi: UBI device description object | ||
954 | * @from: physical eraseblock number from where to copy | ||
955 | * @to: physical eraseblock number where to copy | ||
956 | * @vid_hdr: VID header of the @from physical eraseblock | ||
957 | * | ||
958 | * This function copies logical eraseblock from physical eraseblock @from to | ||
959 | * physical eraseblock @to. The @vid_hdr buffer may be changed by this | ||
960 | * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation | ||
961 | * was canceled because bit-flips were detected at the target PEB, and a | ||
962 | * negative error code in case of failure. | ||
963 | */ | ||
964 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | ||
965 | struct ubi_vid_hdr *vid_hdr) | ||
966 | { | ||
967 | int err, vol_id, lnum, data_size, aldata_size, pnum, idx; | ||
968 | struct ubi_volume *vol; | ||
969 | uint32_t crc; | ||
970 | void *buf, *buf1 = NULL; | ||
971 | |||
972 | vol_id = ubi32_to_cpu(vid_hdr->vol_id); | ||
973 | lnum = ubi32_to_cpu(vid_hdr->lnum); | ||
974 | |||
975 | dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); | ||
976 | |||
977 | if (vid_hdr->vol_type == UBI_VID_STATIC) { | ||
978 | data_size = ubi32_to_cpu(vid_hdr->data_size); | ||
979 | aldata_size = ALIGN(data_size, ubi->min_io_size); | ||
980 | } else | ||
981 | data_size = aldata_size = | ||
982 | ubi->leb_size - ubi32_to_cpu(vid_hdr->data_pad); | ||
983 | |||
984 | buf = kmalloc(aldata_size, GFP_KERNEL); | ||
985 | if (!buf) | ||
986 | return -ENOMEM; | ||
987 | |||
988 | /* | ||
989 | * We do not want anybody to write to this logical eraseblock while we | ||
990 | * are moving it, so we lock it. | ||
991 | */ | ||
992 | err = leb_write_lock(ubi, vol_id, lnum); | ||
993 | if (err) { | ||
994 | kfree(buf); | ||
995 | return err; | ||
996 | } | ||
997 | |||
998 | /* | ||
999 | * But the logical eraseblock might have been put (unmapped) by this | ||
1000 | * time. If so, cancel the operation. | ||
1001 | */ | ||
1002 | idx = vol_id2idx(ubi, vol_id); | ||
1003 | |||
1004 | /* | ||
1005 | * We may race with volume deletion/re-size, so we have to hold | ||
1006 | * @ubi->volumes_lock. | ||
1007 | */ | ||
1008 | spin_lock(&ubi->volumes_lock); | ||
1009 | vol = ubi->volumes[idx]; | ||
1010 | if (!vol) { | ||
1011 | dbg_eba("volume %d was removed meanwhile", vol_id); | ||
1012 | spin_unlock(&ubi->volumes_lock); | ||
1013 | goto out_unlock; | ||
1014 | } | ||
1015 | |||
1016 | pnum = vol->eba_tbl[lnum]; | ||
1017 | if (pnum != from) { | ||
1018 | dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " | ||
1019 | "PEB %d, cancel", vol_id, lnum, from, pnum); | ||
1020 | spin_unlock(&ubi->volumes_lock); | ||
1021 | goto out_unlock; | ||
1022 | } | ||
1023 | spin_unlock(&ubi->volumes_lock); | ||
1024 | |||
1025 | /* OK, now the LEB is locked and we can safely start moving it */ | ||
1026 | |||
1027 | dbg_eba("read %d bytes of data", aldata_size); | ||
1028 | err = ubi_io_read_data(ubi, buf, from, 0, aldata_size); | ||
1029 | if (err && err != UBI_IO_BITFLIPS) { | ||
1030 | ubi_warn("error %d while reading data from PEB %d", | ||
1031 | err, from); | ||
1032 | goto out_unlock; | ||
1033 | } | ||
1034 | |||
1035 | /* | ||
1036 | * Now we have to calculate how much data we have to copy. In | ||
1037 | * case of a static volume it is fairly easy - the VID header contains | ||
1038 | * the data size. In case of a dynamic volume it is more difficult - we | ||
1039 | * have to read the contents, cut the 0xFF bytes from the end and copy | ||
1040 | * only the first part. We must do this to avoid writing 0xFF bytes as | ||
1041 | * it may have some side-effects. It is also important not to include | ||
1042 | * those 0xFFs in the CRC because they may later be filled | ||
1043 | * by data. | ||
1044 | */ | ||
1045 | if (vid_hdr->vol_type == UBI_VID_DYNAMIC) | ||
1046 | aldata_size = data_size = | ||
1047 | ubi_calc_data_len(ubi, buf, data_size); | ||
1048 | |||
1049 | cond_resched(); | ||
1050 | crc = crc32(UBI_CRC32_INIT, buf, data_size); | ||
1051 | cond_resched(); | ||
1052 | |||
1053 | /* | ||
1054 | * It may turn out to be that the whole @from physical eraseblock | ||
1055 | * contains only 0xFF bytes. Then we only have to write the VID header | ||
1056 | * and no data. This also means we should not set | ||
1057 | * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc. | ||
1058 | */ | ||
1059 | if (data_size > 0) { | ||
1060 | vid_hdr->copy_flag = 1; | ||
1061 | vid_hdr->data_size = cpu_to_ubi32(data_size); | ||
1062 | vid_hdr->data_crc = cpu_to_ubi32(crc); | ||
1063 | } | ||
1064 | vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); | ||
1065 | |||
1066 | err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); | ||
1067 | if (err) | ||
1068 | goto out_unlock; | ||
1069 | |||
1070 | cond_resched(); | ||
1071 | |||
1072 | /* Read the VID header back and check if it was written correctly */ | ||
1073 | err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); | ||
1074 | if (err) { | ||
1075 | if (err != UBI_IO_BITFLIPS) | ||
1076 | ubi_warn("cannot read VID header back from PEB %d", to); | ||
1077 | goto out_unlock; | ||
1078 | } | ||
1079 | |||
1080 | if (data_size > 0) { | ||
1081 | err = ubi_io_write_data(ubi, buf, to, 0, aldata_size); | ||
1082 | if (err) | ||
1083 | goto out_unlock; | ||
1084 | |||
1085 | /* | ||
1086 | * We've written the data and are going to read it back to make | ||
1087 | * sure it was written correctly. | ||
1088 | */ | ||
1089 | buf1 = kmalloc(aldata_size, GFP_KERNEL); | ||
1090 | if (!buf1) { | ||
1091 | err = -ENOMEM; | ||
1092 | goto out_unlock; | ||
1093 | } | ||
1094 | |||
1095 | cond_resched(); | ||
1096 | |||
1097 | err = ubi_io_read_data(ubi, buf1, to, 0, aldata_size); | ||
1098 | if (err) { | ||
1099 | if (err != UBI_IO_BITFLIPS) | ||
1100 | ubi_warn("cannot read data back from PEB %d", | ||
1101 | to); | ||
1102 | goto out_unlock; | ||
1103 | } | ||
1104 | |||
1105 | cond_resched(); | ||
1106 | |||
1107 | if (memcmp(buf, buf1, aldata_size)) { | ||
1108 | ubi_warn("read data back from PEB %d - it is different", | ||
1109 | to); | ||
1110 | goto out_unlock; | ||
1111 | } | ||
1112 | } | ||
1113 | |||
1114 | ubi_assert(vol->eba_tbl[lnum] == from); | ||
1115 | vol->eba_tbl[lnum] = to; | ||
1116 | |||
1117 | leb_write_unlock(ubi, vol_id, lnum); | ||
1118 | kfree(buf); | ||
1119 | kfree(buf1); | ||
1120 | |||
1121 | return 0; | ||
1122 | |||
1123 | out_unlock: | ||
1124 | leb_write_unlock(ubi, vol_id, lnum); | ||
1125 | kfree(buf); | ||
1126 | kfree(buf1); | ||
1127 | return err; | ||
1128 | } | ||
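
For dynamic volumes the copy trims the erased tail before computing the CRC, as the comment inside the function explains. Below is a user-space analogue of what ubi_calc_data_len() is assumed to do: scan back past trailing 0xFF bytes, then round up to the minimal I/O unit.

#include <stdio.h>
#include <string.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static int calc_data_len(const unsigned char *buf, int len, int min_io)
{
        /* drop the erased (0xFF) tail - it must not go into the CRC */
        while (len > 0 && buf[len - 1] == 0xFF)
                len -= 1;
        return ALIGN(len, min_io);
}

int main(void)
{
        unsigned char leb[2048];

        memset(leb, 0xFF, sizeof(leb));
        memcpy(leb, "payload", 7);        /* 7 real bytes, rest erased */
        /* prints 512: one minimal I/O unit covers the real data */
        printf("copy %d bytes\n", calc_data_len(leb, sizeof(leb), 512));
        return 0;
}
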
1129 | |||
1130 | /** | ||
1131 | * ubi_eba_init_scan - initialize the EBA unit using scanning information. | ||
1132 | * @ubi: UBI device description object | ||
1133 | * @si: scanning information | ||
1134 | * | ||
1135 | * This function returns zero in case of success and a negative error code in | ||
1136 | * case of failure. | ||
1137 | */ | ||
1138 | int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) | ||
1139 | { | ||
1140 | int i, j, err, num_volumes; | ||
1141 | struct ubi_scan_volume *sv; | ||
1142 | struct ubi_volume *vol; | ||
1143 | struct ubi_scan_leb *seb; | ||
1144 | struct rb_node *rb; | ||
1145 | |||
1146 | dbg_eba("initialize EBA unit"); | ||
1147 | |||
1148 | spin_lock_init(&ubi->ltree_lock); | ||
1149 | ubi->ltree = RB_ROOT; | ||
1150 | |||
1151 | if (ubi_devices_cnt == 0) { | ||
1152 | ltree_slab = kmem_cache_create("ubi_ltree_slab", | ||
1153 | sizeof(struct ltree_entry), 0, | ||
1154 | 0, &ltree_entry_ctor, NULL); | ||
1155 | if (!ltree_slab) | ||
1156 | return -ENOMEM; | ||
1157 | } | ||
1158 | |||
1159 | ubi->global_sqnum = si->max_sqnum + 1; | ||
1160 | num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; | ||
1161 | |||
1162 | for (i = 0; i < num_volumes; i++) { | ||
1163 | vol = ubi->volumes[i]; | ||
1164 | if (!vol) | ||
1165 | continue; | ||
1166 | |||
1167 | cond_resched(); | ||
1168 | |||
1169 | vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), | ||
1170 | GFP_KERNEL); | ||
1171 | if (!vol->eba_tbl) { | ||
1172 | err = -ENOMEM; | ||
1173 | goto out_free; | ||
1174 | } | ||
1175 | |||
1176 | for (j = 0; j < vol->reserved_pebs; j++) | ||
1177 | vol->eba_tbl[j] = UBI_LEB_UNMAPPED; | ||
1178 | |||
1179 | sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i)); | ||
1180 | if (!sv) | ||
1181 | continue; | ||
1182 | |||
1183 | ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) { | ||
1184 | if (seb->lnum >= vol->reserved_pebs) | ||
1185 | /* | ||
1186 | * This may happen in case of an unclean reboot during re-size. | ||
1187 | */ | ||
1188 | ubi_scan_move_to_list(sv, seb, &si->erase); | ||
1189 | else | ||
1190 | vol->eba_tbl[seb->lnum] = seb->pnum; | ||
1191 | } | ||
1192 | } | ||
1193 | |||
1194 | if (ubi->bad_allowed) { | ||
1195 | ubi_calculate_reserved(ubi); | ||
1196 | |||
1197 | if (ubi->avail_pebs < ubi->beb_rsvd_level) { | ||
1198 | /* Not enough free physical eraseblocks */ | ||
1199 | ubi->beb_rsvd_pebs = ubi->avail_pebs; | ||
1200 | ubi_warn("cannot reserve enough PEBs for bad PEB " | ||
1201 | "handling, reserved %d, need %d", | ||
1202 | ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); | ||
1203 | } else | ||
1204 | ubi->beb_rsvd_pebs = ubi->beb_rsvd_level; | ||
1205 | |||
1206 | ubi->avail_pebs -= ubi->beb_rsvd_pebs; | ||
1207 | ubi->rsvd_pebs += ubi->beb_rsvd_pebs; | ||
1208 | } | ||
1209 | |||
1210 | dbg_eba("EBA unit is initialized"); | ||
1211 | return 0; | ||
1212 | |||
1213 | out_free: | ||
1214 | for (i = 0; i < num_volumes; i++) { | ||
1215 | if (!ubi->volumes[i]) | ||
1216 | continue; | ||
1217 | kfree(ubi->volumes[i]->eba_tbl); | ||
1218 | } | ||
1219 | if (ubi_devices_cnt == 0) | ||
1220 | kmem_cache_destroy(ltree_slab); | ||
1221 | return err; | ||
1222 | } | ||
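
The bad-PEB reserve computed at the end of initialization is clamped to what is actually available. The same bookkeeping, restated as a small user-space program with all numbers made up:

#include <stdio.h>

int main(void)
{
        int avail_pebs = 10, rsvd_pebs = 0;
        int beb_rsvd_level = 20;          /* desired reserve size */
        int beb_rsvd_pebs;

        if (avail_pebs < beb_rsvd_level) {
                /* not enough free PEBs - reserve whatever is there */
                beb_rsvd_pebs = avail_pebs;
                printf("reserved %d, need %d\n",
                       beb_rsvd_pebs, beb_rsvd_level);
        } else
                beb_rsvd_pebs = beb_rsvd_level;

        avail_pebs -= beb_rsvd_pebs;
        rsvd_pebs += beb_rsvd_pebs;
        printf("avail %d, reserved %d\n", avail_pebs, rsvd_pebs);
        return 0;
}
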
1223 | |||
1224 | /** | ||
1225 | * ubi_eba_close - close EBA unit. | ||
1226 | * @ubi: UBI device description object | ||
1227 | */ | ||
1228 | void ubi_eba_close(const struct ubi_device *ubi) | ||
1229 | { | ||
1230 | int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; | ||
1231 | |||
1232 | dbg_eba("close EBA unit"); | ||
1233 | |||
1234 | for (i = 0; i < num_volumes; i++) { | ||
1235 | if (!ubi->volumes[i]) | ||
1236 | continue; | ||
1237 | kfree(ubi->volumes[i]->eba_tbl); | ||
1238 | } | ||
1239 | if (ubi_devices_cnt == 1) | ||
1240 | kmem_cache_destroy(ltree_slab); | ||
1241 | } | ||
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c new file mode 100644 index 000000000000..fc9478d605ff --- /dev/null +++ b/drivers/mtd/ubi/gluebi.c | |||
@@ -0,0 +1,323 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
12 | * the GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Author: Artem Bityutskiy (Битюцкий Артём), Joern Engel | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * This file includes implementation of fake MTD devices for each UBI volume. | ||
23 | * This sounds strange, but it is in fact quite useful to make MTD-oriented | ||
24 | * software (including all the legacy software) work on top of UBI. | ||
25 | * | ||
26 | * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit | ||
27 | * size (mtd->writesize) is equivalent to the UBI minimal I/O unit. The | ||
28 | * eraseblock size is equivalent to the logical eraseblock size of the volume. | ||
29 | */ | ||
30 | |||
31 | #include <asm/div64.h> | ||
32 | #include "ubi.h" | ||
33 | |||
34 | /** | ||
35 | * gluebi_get_device - get MTD device reference. | ||
36 | * @mtd: the MTD device description object | ||
37 | * | ||
38 | * This function is called every time the MTD device is being opened and | ||
39 | * implements the MTD get_device() operation. Returns zero in case of success | ||
40 | * and a negative error code in case of failure. | ||
41 | */ | ||
42 | static int gluebi_get_device(struct mtd_info *mtd) | ||
43 | { | ||
44 | struct ubi_volume *vol; | ||
45 | |||
46 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); | ||
47 | |||
48 | /* | ||
49 | * We do not introduce locks for gluebi reference count because the | ||
50 | * get_device()/put_device() calls are already serialized at MTD. | ||
51 | */ | ||
52 | if (vol->gluebi_refcount > 0) { | ||
53 | /* | ||
54 | * The MTD device is already referenced and this is just one | ||
55 | * more reference. MTD allows many users to open the same | ||
56 | * volume simultaneously and does not distinguish between | ||
57 | * readers/writers/exclusive openers as UBI does. So we do not | ||
58 | * open the UBI volume again - just increase the reference | ||
59 | * counter and return. | ||
60 | */ | ||
61 | vol->gluebi_refcount += 1; | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * This is the first reference to this UBI volume via the MTD device | ||
67 | * interface. Open the corresponding volume in read-write mode. | ||
68 | */ | ||
69 | vol->gluebi_desc = ubi_open_volume(vol->ubi->ubi_num, vol->vol_id, | ||
70 | UBI_READWRITE); | ||
71 | if (IS_ERR(vol->gluebi_desc)) | ||
72 | return PTR_ERR(vol->gluebi_desc); | ||
73 | vol->gluebi_refcount += 1; | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | /** | ||
78 | * gluebi_put_device - put MTD device reference. | ||
79 | * @mtd: the MTD device description object | ||
80 | * | ||
81 | * This function is called every time the MTD device is being put. It | ||
82 | * closes the underlying UBI volume when the last reference is dropped. | ||
83 | */ | ||
84 | static void gluebi_put_device(struct mtd_info *mtd) | ||
85 | { | ||
86 | struct ubi_volume *vol; | ||
87 | |||
88 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); | ||
89 | vol->gluebi_refcount -= 1; | ||
90 | ubi_assert(vol->gluebi_refcount >= 0); | ||
91 | if (vol->gluebi_refcount == 0) | ||
92 | ubi_close_volume(vol->gluebi_desc); | ||
93 | } | ||
94 | |||
95 | /** | ||
96 | * gluebi_read - read operation of emulated MTD devices. | ||
97 | * @mtd: MTD device description object | ||
98 | * @from: absolute offset from where to read | ||
99 | * @len: how many bytes to read | ||
100 | * @retlen: count of read bytes is returned here | ||
101 | * @buf: buffer to store the read data | ||
102 | * | ||
103 | * This function returns zero in case of success and a negative error code in | ||
104 | * case of failure. | ||
105 | */ | ||
106 | static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, | ||
107 | size_t *retlen, unsigned char *buf) | ||
108 | { | ||
109 | int err = 0, lnum, offs, total_read; | ||
110 | struct ubi_volume *vol; | ||
111 | struct ubi_device *ubi; | ||
112 | uint64_t tmp = from; | ||
113 | |||
114 | dbg_msg("read %zd bytes from offset %lld", len, from); | ||
115 | |||
116 | if (len < 0 || from < 0 || from + len > mtd->size) | ||
117 | return -EINVAL; | ||
118 | |||
119 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); | ||
120 | ubi = vol->ubi; | ||
121 | |||
122 | offs = do_div(tmp, mtd->erasesize); | ||
123 | lnum = tmp; | ||
124 | |||
125 | total_read = len; | ||
126 | while (total_read) { | ||
127 | size_t to_read = mtd->erasesize - offs; | ||
128 | |||
129 | if (to_read > total_read) | ||
130 | to_read = total_read; | ||
131 | |||
132 | err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs, | ||
133 | to_read, 0); | ||
134 | if (err) | ||
135 | break; | ||
136 | |||
137 | lnum += 1; | ||
138 | offs = 0; | ||
139 | total_read -= to_read; | ||
140 | buf += to_read; | ||
141 | } | ||
142 | |||
143 | *retlen = len - total_read; | ||
144 | return err; | ||
145 | } | ||
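
The address arithmetic shared by gluebi_read() and gluebi_write() splits an absolute MTD offset into a (LEB number, in-LEB offset) pair and walks the request one eraseblock at a time. Here is a user-space rendering with do_div() replaced by plain C99 64-bit division; the sizes are arbitrary.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t from = 300000;           /* absolute offset into the MTD */
        size_t len = 200000;              /* request length */
        uint32_t erasesize = 131072;      /* one LEB, 128 KiB */

        int lnum = (int)(from / erasesize);
        size_t offs = (size_t)(from % erasesize);

        while (len) {
                size_t chunk = erasesize - offs;

                if (chunk > len)
                        chunk = len;
                printf("LEB %d: %zu bytes at offset %zu\n",
                       lnum, chunk, offs);
                lnum += 1;
                offs = 0;                 /* later LEBs start at offset 0 */
                len -= chunk;
        }
        return 0;
}
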
146 | |||
147 | /** | ||
148 | * gluebi_write - write operation of emulated MTD devices. | ||
149 | * @mtd: MTD device description object | ||
150 | * @to: absolute offset where to write | ||
151 | * @len: how many bytes to write | ||
152 | * @retlen: count of written bytes is returned here | ||
153 | * @buf: buffer with data to write | ||
154 | * | ||
155 | * This function returns zero in case of success and a negative error code in | ||
156 | * case of failure. | ||
157 | */ | ||
158 | static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
159 | size_t *retlen, const u_char *buf) | ||
160 | { | ||
161 | int err = 0, lnum, offs, total_written; | ||
162 | struct ubi_volume *vol; | ||
163 | struct ubi_device *ubi; | ||
164 | uint64_t tmp = to; | ||
165 | |||
166 | dbg_msg("write %zd bytes to offset %lld", len, to); | ||
167 | |||
168 | if (len < 0 || to < 0 || len + to > mtd->size) | ||
169 | return -EINVAL; | ||
170 | |||
171 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); | ||
172 | ubi = vol->ubi; | ||
173 | |||
174 | if (ubi->ro_mode) | ||
175 | return -EROFS; | ||
176 | |||
177 | offs = do_div(tmp, mtd->erasesize); | ||
178 | lnum = tmp; | ||
179 | |||
180 | if (len % mtd->writesize || offs % mtd->writesize) | ||
181 | return -EINVAL; | ||
182 | |||
183 | total_written = len; | ||
184 | while (total_written) { | ||
185 | size_t to_write = mtd->erasesize - offs; | ||
186 | |||
187 | if (to_write > total_written) | ||
188 | to_write = total_written; | ||
189 | |||
190 | err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs, | ||
191 | to_write, UBI_UNKNOWN); | ||
192 | if (err) | ||
193 | break; | ||
194 | |||
195 | lnum += 1; | ||
196 | offs = 0; | ||
197 | total_written -= to_write; | ||
198 | buf += to_write; | ||
199 | } | ||
200 | |||
201 | *retlen = len - total_written; | ||
202 | return err; | ||
203 | } | ||
204 | |||
205 | /** | ||
206 | * gluebi_erase - erase operation of emulated MTD devices. | ||
207 | * @mtd: the MTD device description object | ||
208 | * @instr: the erase operation description | ||
209 | * | ||
210 | * This function calls the erase callback when finishes. Returns zero in case | ||
211 | * of success and a negative error code in case of failure. | ||
212 | */ | ||
213 | static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) | ||
214 | { | ||
215 | int err, i, lnum, count; | ||
216 | struct ubi_volume *vol; | ||
217 | struct ubi_device *ubi; | ||
218 | |||
219 | dbg_msg("erase %u bytes at offset %u", instr->len, instr->addr); | ||
220 | |||
221 | if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) | ||
222 | return -EINVAL; | ||
223 | |||
224 | if (instr->len < 0 || instr->addr + instr->len > mtd->size) | ||
225 | return -EINVAL; | ||
226 | |||
227 | if (instr->addr % mtd->writesize || instr->len % mtd->writesize) | ||
228 | return -EINVAL; | ||
229 | |||
230 | lnum = instr->addr / mtd->erasesize; | ||
231 | count = instr->len / mtd->erasesize; | ||
232 | |||
233 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); | ||
234 | ubi = vol->ubi; | ||
235 | |||
236 | if (ubi->ro_mode) | ||
237 | return -EROFS; | ||
238 | |||
239 | for (i = 0; i < count; i++) { | ||
240 | err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i); | ||
241 | if (err) | ||
242 | goto out_err; | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * MTD erase operations are synchronous, so we have to make sure the | ||
247 | * physical eraseblock is wiped out. | ||
248 | */ | ||
249 | err = ubi_wl_flush(ubi); | ||
250 | if (err) | ||
251 | goto out_err; | ||
252 | |||
253 | instr->state = MTD_ERASE_DONE; | ||
254 | mtd_erase_callback(instr); | ||
255 | return 0; | ||
256 | |||
257 | out_err: | ||
258 | instr->state = MTD_ERASE_FAILED; | ||
259 | instr->fail_addr = lnum * mtd->erasesize; | ||
260 | return err; | ||
261 | } | ||
262 | |||
263 | /** | ||
264 | * ubi_create_gluebi - initialize gluebi for an UBI volume. | ||
265 | * @ubi: UBI device description object | ||
266 | * @vol: volume description object | ||
267 | * | ||
268 | * This function is called when an UBI volume is created in order to create | ||
269 | * the corresponding fake MTD device. Returns zero in case of success and a | ||
270 | * negative error code in case of failure. | ||
271 | */ | ||
272 | int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol) | ||
273 | { | ||
274 | struct mtd_info *mtd = &vol->gluebi_mtd; | ||
275 | |||
276 | mtd->name = kmemdup(vol->name, vol->name_len + 1, GFP_KERNEL); | ||
277 | if (!mtd->name) | ||
278 | return -ENOMEM; | ||
279 | |||
280 | mtd->type = MTD_UBIVOLUME; | ||
281 | if (!ubi->ro_mode) | ||
282 | mtd->flags = MTD_WRITEABLE; | ||
283 | mtd->writesize = ubi->min_io_size; | ||
284 | mtd->owner = THIS_MODULE; | ||
285 | mtd->size = vol->usable_leb_size * vol->reserved_pebs; | ||
286 | mtd->erasesize = vol->usable_leb_size; | ||
287 | mtd->read = gluebi_read; | ||
288 | mtd->write = gluebi_write; | ||
289 | mtd->erase = gluebi_erase; | ||
290 | mtd->get_device = gluebi_get_device; | ||
291 | mtd->put_device = gluebi_put_device; | ||
292 | |||
293 | if (add_mtd_device(mtd)) { | ||
294 | ubi_err("cannot not add MTD device\n"); | ||
295 | kfree(mtd->name); | ||
296 | return -ENFILE; | ||
297 | } | ||
298 | |||
299 | dbg_msg("added mtd%d (\"%s\"), size %u, EB size %u", | ||
300 | mtd->index, mtd->name, mtd->size, mtd->erasesize); | ||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | /** | ||
305 | * ubi_destroy_gluebi - close gluebi for an UBI volume. | ||
306 | * @vol: volume description object | ||
307 | * | ||
308 | * This function is called when an UBI volume is removed in order to remove | ||
309 | * the corresponding fake MTD device. Returns zero in case of success and a | ||
310 | * negative error code in case of failure. | ||
311 | */ | ||
312 | int ubi_destroy_gluebi(struct ubi_volume *vol) | ||
313 | { | ||
314 | int err; | ||
315 | struct mtd_info *mtd = &vol->gluebi_mtd; | ||
316 | |||
317 | dbg_msg("remove mtd%d", mtd->index); | ||
318 | err = del_mtd_device(mtd); | ||
319 | if (err) | ||
320 | return err; | ||
321 | kfree(mtd->name); | ||
322 | return 0; | ||
323 | } | ||
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c new file mode 100644 index 000000000000..438914d05151 --- /dev/null +++ b/drivers/mtd/ubi/io.c | |||
@@ -0,0 +1,1259 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * Copyright (c) Nokia Corporation, 2006, 2007 | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
13 | * the GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | * | ||
19 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
20 | */ | ||
21 | |||
22 | /* | ||
23 | * UBI input/output unit. | ||
24 | * | ||
25 | * This unit provides a uniform way to work with all kinds of underlying | ||
26 | * MTD devices. It also implements handy functions for reading and writing UBI | ||
27 | * headers. | ||
28 | * | ||
29 | * We are trying to have a paranoid mindset and not to trust what we read | ||
30 | * from the flash media in order to be more secure and robust. So this unit | ||
31 | * validates every single header it reads from the flash media. | ||
32 | * | ||
33 | * Some words about how the eraseblock headers are stored. | ||
34 | * | ||
35 | * The erase counter header is always stored at offset zero. By default, the | ||
36 | * VID header is stored after the EC header at the closest aligned offset | ||
37 | * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID | ||
38 | * header at the closest aligned offset. But this default layout may be | ||
39 | * changed. For example, for different reasons (e.g., optimization) UBI may be | ||
40 | * asked to put the VID header at a further offset, and even at an unaligned | ||
41 | * offset. Of course, if the offset of the VID header is unaligned, UBI adds | ||
42 | * proper padding in front of it. Data offset may also be changed but it has to | ||
43 | * be aligned. | ||
44 | * | ||
45 | * About minimal I/O units. In general, UBI assumes flash device model where | ||
46 | * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1, | ||
47 | * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the | ||
48 | * @ubi->mtd->writesize field. But as an exception, UBI admits of using another | ||
49 | * (smaller) minimal I/O unit size for EC and VID headers to make it possible | ||
50 | * to do different optimizations. | ||
51 | * | ||
52 | * This is extremely useful in case of NAND flashes which admit of several | ||
53 | * write operations to one NAND page. In this case UBI can fit EC and VID | ||
54 | * headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal | ||
55 | * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still | ||
56 | * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI | ||
57 | * users. | ||
58 | * | ||
59 | * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so | ||
60 | * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID | ||
61 | * headers. | ||
62 | * | ||
63 | * Q: why not just treat the sub-page as a minimal I/O unit of this flash | ||
64 | * device, e.g., make @ubi->min_io_size = 512 in the example above? | ||
65 | * | ||
66 | * A: because when writing a sub-page, MTD still writes a full 2K page but the | ||
67 | * bytes which are not relevant to the sub-page are 0xFF. So, basically, writing | ||
68 | * 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page. Thus, we | ||
69 | * prefer to use sub-pages only for EC and VID headers. | ||
70 | * | ||
71 | * As it was noted above, the VID header may start at a non-aligned offset. | ||
72 | * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page, | ||
73 | * the VID header may reside at offset 1984 which is the last 64 bytes of the | ||
74 | * last sub-page (EC header is always at offset zero). This causes some | ||
75 | * difficulties when reading and writing VID headers. | ||
76 | * | ||
77 | * Suppose we have a 64-byte buffer and we read a VID header into it. We change | ||
78 | * the data and want to write this VID header out. As we can only write in | ||
79 | * 512-byte chunks, we have to allocate one more buffer and copy our VID header | ||
80 | * to offset 448 of this buffer. | ||
81 | * | ||
82 | * The I/O unit does the following trick in order to avoid this extra copy. | ||
83 | * It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID header | ||
84 | * and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. When the | ||
85 | * VID header is being written out, it shifts the VID header pointer back and | ||
86 | * writes the whole sub-page. | ||
87 | */ | ||
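
The buffer trick described above is plain pointer arithmetic. Below is a user-space sketch using the 2KiB-page example from the comment - a 512-byte sub-page holding a 64-byte VID header, hence a shift of 448; the variable names mirror @ubi->vid_hdr_alsize and @ubi->vid_hdr_shift, and the concrete numbers are only illustrative.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int vid_hdr_alsize = 512;         /* whole sub-page */
        int vid_hdr_shift = 448;          /* 512 minus the 64-byte header */
        unsigned char *p, *vid_hdr;

        p = calloc(1, vid_hdr_alsize);
        if (!p)
                return 1;

        /* callers get a pointer into the buffer and fill the header there */
        vid_hdr = p + vid_hdr_shift;

        /* on write, shift back and write out the whole sub-page */
        printf("write %d bytes starting at %p\n",
               vid_hdr_alsize, (void *)(vid_hdr - vid_hdr_shift));
        free(p);
        return 0;
}
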
88 | |||
89 | #include <linux/crc32.h> | ||
90 | #include <linux/err.h> | ||
91 | #include "ubi.h" | ||
92 | |||
93 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | ||
94 | static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum); | ||
95 | static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum); | ||
96 | static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum, | ||
97 | const struct ubi_ec_hdr *ec_hdr); | ||
98 | static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); | ||
99 | static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, | ||
100 | const struct ubi_vid_hdr *vid_hdr); | ||
101 | static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum, | ||
102 | int offset, int len); | ||
103 | #else | ||
104 | #define paranoid_check_not_bad(ubi, pnum) 0 | ||
105 | #define paranoid_check_peb_ec_hdr(ubi, pnum) 0 | ||
106 | #define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0 | ||
107 | #define paranoid_check_peb_vid_hdr(ubi, pnum) 0 | ||
108 | #define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0 | ||
109 | #define paranoid_check_all_ff(ubi, pnum, offset, len) 0 | ||
110 | #endif | ||
111 | |||
112 | /** | ||
113 | * ubi_io_read - read data from a physical eraseblock. | ||
114 | * @ubi: UBI device description object | ||
115 | * @buf: buffer where to store the read data | ||
116 | * @pnum: physical eraseblock number to read from | ||
117 | * @offset: offset within the physical eraseblock from where to read | ||
118 | * @len: how many bytes to read | ||
119 | * | ||
120 | * This function reads data from offset @offset of physical eraseblock @pnum | ||
121 | * and stores the read data in the @buf buffer. The following return codes are | ||
122 | * possible: | ||
123 | * | ||
124 | * o %0 if all the requested data were successfully read; | ||
125 | * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but | ||
126 | * correctable bit-flips were detected; this is harmless but may indicate | ||
127 | * that this eraseblock may become bad soon (but does not have to); | ||
128 | * o %-EBADMSG if the MTD subsystem reported a data integrity problem, | ||
129 | * for example it can be an ECC error in case of NAND; this most | ||
130 | * probably means that the data is corrupted; | ||
131 | * o %-EIO if some I/O error occurred; | ||
132 | * o other negative error codes in case of other errors. | ||
133 | */ | ||
134 | int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, | ||
135 | int len) | ||
136 | { | ||
137 | int err, retries = 0; | ||
138 | size_t read; | ||
139 | loff_t addr; | ||
140 | |||
141 | dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset); | ||
142 | |||
143 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | ||
144 | ubi_assert(offset >= 0 && offset + len <= ubi->peb_size); | ||
145 | ubi_assert(len > 0); | ||
146 | |||
147 | err = paranoid_check_not_bad(ubi, pnum); | ||
148 | if (err) | ||
149 | return err > 0 ? -EINVAL : err; | ||
150 | |||
151 | addr = (loff_t)pnum * ubi->peb_size + offset; | ||
152 | retry: | ||
153 | err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); | ||
154 | if (err) { | ||
155 | if (err == -EUCLEAN) { | ||
156 | /* | ||
157 | * -EUCLEAN is reported if there was a bit-flip which | ||
158 | * was corrected, so this is harmless. | ||
159 | */ | ||
160 | ubi_msg("fixable bit-flip detected at PEB %d", pnum); | ||
161 | ubi_assert(len == read); | ||
162 | return UBI_IO_BITFLIPS; | ||
163 | } | ||
164 | |||
165 | if (read != len && retries++ < UBI_IO_RETRIES) { | ||
166 | dbg_io("error %d while reading %d bytes from PEB %d:%d, " | ||
167 | "read only %zd bytes, retry", | ||
168 | err, len, pnum, offset, read); | ||
169 | yield(); | ||
170 | goto retry; | ||
171 | } | ||
172 | |||
173 | ubi_err("error %d while reading %d bytes from PEB %d:%d, " | ||
174 | "read %zd bytes", err, len, pnum, offset, read); | ||
175 | ubi_dbg_dump_stack(); | ||
176 | } else { | ||
177 | ubi_assert(len == read); | ||
178 | |||
179 | if (ubi_dbg_is_bitflip()) { | ||
180 | dbg_msg("bit-flip (emulated)"); | ||
181 | err = UBI_IO_BITFLIPS; | ||
182 | } | ||
183 | } | ||
184 | |||
185 | return err; | ||
186 | } | ||
187 | |||
188 | /** | ||
189 | * ubi_io_write - write data to a physical eraseblock. | ||
190 | * @ubi: UBI device description object | ||
191 | * @buf: buffer with the data to write | ||
192 | * @pnum: physical eraseblock number to write to | ||
193 | * @offset: offset within the physical eraseblock where to write | ||
194 | * @len: how many bytes to write | ||
195 | * | ||
196 | * This function writes @len bytes of data from buffer @buf to offset @offset | ||
197 | * of physical eraseblock @pnum. If all the data were successfully written, | ||
198 | * zero is returned. If an error occurred, this function returns a negative | ||
199 | * error code. If %-EIO is returned, the physical eraseblock most probably went | ||
200 | * bad. | ||
201 | * | ||
202 | * Note, in case of an error, it is possible that something was still written | ||
203 | * to the flash media, but it may be some garbage. | ||
204 | */ | ||
205 | int ubi_io_write(const struct ubi_device *ubi, const void *buf, int pnum, | ||
206 | int offset, int len) | ||
207 | { | ||
208 | int err; | ||
209 | size_t written; | ||
210 | loff_t addr; | ||
211 | |||
212 | dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset); | ||
213 | |||
214 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | ||
215 | ubi_assert(offset >= 0 && offset + len <= ubi->peb_size); | ||
216 | ubi_assert(offset % ubi->hdrs_min_io_size == 0); | ||
217 | ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0); | ||
218 | |||
219 | if (ubi->ro_mode) { | ||
220 | ubi_err("read-only mode"); | ||
221 | return -EROFS; | ||
222 | } | ||
223 | |||
224 | /* The below has to be compiled out if paranoid checks are disabled */ | ||
225 | |||
226 | err = paranoid_check_not_bad(ubi, pnum); | ||
227 | if (err) | ||
228 | return err > 0 ? -EINVAL : err; | ||
229 | |||
230 | /* The area we are writing to has to contain all 0xFF bytes */ | ||
231 | err = paranoid_check_all_ff(ubi, pnum, offset, len); | ||
232 | if (err) | ||
233 | return err > 0 ? -EINVAL : err; | ||
234 | |||
235 | if (offset >= ubi->leb_start) { | ||
236 | /* | ||
237 | * We write to the data area of the physical eraseblock. Make | ||
238 | * sure it has valid EC and VID headers. | ||
239 | */ | ||
240 | err = paranoid_check_peb_ec_hdr(ubi, pnum); | ||
241 | if (err) | ||
242 | return err > 0 ? -EINVAL : err; | ||
243 | err = paranoid_check_peb_vid_hdr(ubi, pnum); | ||
244 | if (err) | ||
245 | return err > 0 ? -EINVAL : err; | ||
246 | } | ||
247 | |||
248 | if (ubi_dbg_is_write_failure()) { | ||
249 | dbg_err("cannot write %d bytes to PEB %d:%d " | ||
250 | "(emulated)", len, pnum, offset); | ||
251 | ubi_dbg_dump_stack(); | ||
252 | return -EIO; | ||
253 | } | ||
254 | |||
255 | addr = (loff_t)pnum * ubi->peb_size + offset; | ||
256 | err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf); | ||
257 | if (err) { | ||
258 | ubi_err("error %d while writing %d bytes to PEB %d:%d, written" | ||
259 | " %zd bytes", err, len, pnum, offset, written); | ||
260 | ubi_dbg_dump_stack(); | ||
261 | } else | ||
262 | ubi_assert(written == len); | ||
263 | |||
264 | return err; | ||
265 | } | ||
266 | |||
267 | /** | ||
268 | * erase_callback - MTD erasure call-back. | ||
269 | * @ei: MTD erase information object. | ||
270 | * | ||
271 | * Note, even though the MTD erase interface is asynchronous, all the current | ||
272 | * implementations are synchronous anyway. | ||
273 | */ | ||
274 | static void erase_callback(struct erase_info *ei) | ||
275 | { | ||
276 | wake_up_interruptible((wait_queue_head_t *)ei->priv); | ||
277 | } | ||
278 | |||
279 | /** | ||
280 | * do_sync_erase - synchronously erase a physical eraseblock. | ||
281 | * @ubi: UBI device description object | ||
282 | * @pnum: the physical eraseblock number to erase | ||
283 | * | ||
284 | * This function synchronously erases physical eraseblock @pnum and returns | ||
285 | * zero in case of success and a negative error code in case of failure. If | ||
286 | * %-EIO is returned, the physical eraseblock most probably went bad. | ||
287 | */ | ||
288 | static int do_sync_erase(const struct ubi_device *ubi, int pnum) | ||
289 | { | ||
290 | int err, retries = 0; | ||
291 | struct erase_info ei; | ||
292 | wait_queue_head_t wq; | ||
293 | |||
294 | dbg_io("erase PEB %d", pnum); | ||
295 | |||
296 | retry: | ||
297 | init_waitqueue_head(&wq); | ||
298 | memset(&ei, 0, sizeof(struct erase_info)); | ||
299 | |||
300 | ei.mtd = ubi->mtd; | ||
301 | ei.addr = pnum * ubi->peb_size; | ||
302 | ei.len = ubi->peb_size; | ||
303 | ei.callback = erase_callback; | ||
304 | ei.priv = (unsigned long)&wq; | ||
305 | |||
306 | err = ubi->mtd->erase(ubi->mtd, &ei); | ||
307 | if (err) { | ||
308 | if (retries++ < UBI_IO_RETRIES) { | ||
309 | dbg_io("error %d while erasing PEB %d, retry", | ||
310 | err, pnum); | ||
311 | yield(); | ||
312 | goto retry; | ||
313 | } | ||
314 | ubi_err("cannot erase PEB %d, error %d", pnum, err); | ||
315 | ubi_dbg_dump_stack(); | ||
316 | return err; | ||
317 | } | ||
318 | |||
319 | err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE || | ||
320 | ei.state == MTD_ERASE_FAILED); | ||
321 | if (err) { | ||
322 | ubi_err("interrupted PEB %d erasure", pnum); | ||
323 | return -EINTR; | ||
324 | } | ||
325 | |||
326 | if (ei.state == MTD_ERASE_FAILED) { | ||
327 | if (retries++ < UBI_IO_RETRIES) { | ||
328 | dbg_io("error while erasing PEB %d, retry", pnum); | ||
329 | yield(); | ||
330 | goto retry; | ||
331 | } | ||
332 | ubi_err("cannot erase PEB %d", pnum); | ||
333 | ubi_dbg_dump_stack(); | ||
334 | return -EIO; | ||
335 | } | ||
336 | |||
337 | err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size); | ||
338 | if (err) | ||
339 | return err > 0 ? -EINVAL : err; | ||
340 | |||
341 | if (ubi_dbg_is_erase_failure()) { | ||
342 | dbg_err("cannot erase PEB %d (emulated)", pnum); | ||
343 | return -EIO; | ||
344 | } | ||
345 | |||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | /** | ||
350 | * check_pattern - check if buffer contains only a certain byte pattern. | ||
351 | * @buf: buffer to check | ||
352 | * @patt: the pattern to check | ||
353 | * @size: buffer size in bytes | ||
354 | * | ||
355 | * This function returns %1 if there are only @patt bytes in @buf, and %0 if | ||
356 | * something else was also found. | ||
357 | */ | ||
358 | static int check_pattern(const void *buf, uint8_t patt, int size) | ||
359 | { | ||
360 | int i; | ||
361 | |||
362 | for (i = 0; i < size; i++) | ||
363 | if (((const uint8_t *)buf)[i] != patt) | ||
364 | return 0; | ||
365 | return 1; | ||
366 | } | ||
367 | |||
368 | /* Patterns to write to a physical eraseblock when torturing it */ | ||
369 | static uint8_t patterns[] = {0xa5, 0x5a, 0x0}; | ||
370 | |||
371 | /** | ||
372 | * torture_peb - test a supposedly bad physical eraseblock. | ||
373 | * @ubi: UBI device description object | ||
374 | * @pnum: the physical eraseblock number to test | ||
375 | * | ||
376 | * This function returns %-EIO if the physical eraseblock did not pass the | ||
377 | * test, a positive number of erase operations done if the test was | ||
378 | * successfully passed, and other negative error codes in case of other errors. | ||
379 | */ | ||
380 | static int torture_peb(const struct ubi_device *ubi, int pnum) | ||
381 | { | ||
382 | void *buf; | ||
383 | int err, i, patt_count; | ||
384 | |||
385 | buf = kmalloc(ubi->peb_size, GFP_KERNEL); | ||
386 | if (!buf) | ||
387 | return -ENOMEM; | ||
388 | |||
389 | patt_count = ARRAY_SIZE(patterns); | ||
390 | ubi_assert(patt_count > 0); | ||
391 | |||
392 | for (i = 0; i < patt_count; i++) { | ||
393 | err = do_sync_erase(ubi, pnum); | ||
394 | if (err) | ||
395 | goto out; | ||
396 | |||
397 | /* Make sure the PEB contains only 0xFF bytes */ | ||
398 | err = ubi_io_read(ubi, buf, pnum, 0, ubi->peb_size); | ||
399 | if (err) | ||
400 | goto out; | ||
401 | |||
402 | err = check_pattern(buf, 0xFF, ubi->peb_size); | ||
403 | if (err == 0) { | ||
404 | ubi_err("erased PEB %d, but a non-0xFF byte found", | ||
405 | pnum); | ||
406 | err = -EIO; | ||
407 | goto out; | ||
408 | } | ||
409 | |||
410 | /* Write a pattern and check it */ | ||
411 | memset(buf, patterns[i], ubi->peb_size); | ||
412 | err = ubi_io_write(ubi, buf, pnum, 0, ubi->peb_size); | ||
413 | if (err) | ||
414 | goto out; | ||
415 | |||
416 | memset(buf, ~patterns[i], ubi->peb_size); | ||
417 | err = ubi_io_read(ubi, buf, pnum, 0, ubi->peb_size); | ||
418 | if (err) | ||
419 | goto out; | ||
420 | |||
421 | err = check_pattern(buf, patterns[i], ubi->peb_size); | ||
422 | if (err == 0) { | ||
423 | ubi_err("pattern %x checking failed for PEB %d", | ||
424 | patterns[i], pnum); | ||
425 | err = -EIO; | ||
426 | goto out; | ||
427 | } | ||
428 | } | ||
429 | |||
430 | err = patt_count; | ||
431 | |||
432 | out: | ||
433 | if (err == UBI_IO_BITFLIPS || err == -EBADMSG) | ||
434 | /* | ||
435 | * If a bit-flip or data integrity error was detected, the test | ||
436 | * has not passed because it happened on a freshly erased | ||
437 | * physical eraseblock, which means something is wrong with it. | ||
438 | */ | ||
439 | err = -EIO; | ||
440 | kfree(buf); | ||
441 | return err; | ||
442 | } | ||
443 | |||
444 | /** | ||
445 | * ubi_io_sync_erase - synchronously erase a physical eraseblock. | ||
446 | * @ubi: UBI device description object | ||
447 | * @pnum: physical eraseblock number to erase | ||
448 | * @torture: if this physical eraseblock has to be tortured | ||
449 | * | ||
450 | * This function synchronously erases physical eraseblock @pnum. If @torture | ||
451 | * flag is not zero, the physical eraseblock is checked by means of writing | ||
452 | * different patterns to it and reading them back. If the torturing is enabled, | ||
453 | * the physical eraseblock is erased more than once. | ||
454 | * | ||
455 | * This function returns the number of erasures made in case of success, %-EIO | ||
456 | * if the erasure failed or the torturing test failed, and other negative error | ||
457 | * codes in case of other errors. Note, %-EIO means that the physical | ||
458 | * eraseblock is bad. | ||
459 | */ | ||
460 | int ubi_io_sync_erase(const struct ubi_device *ubi, int pnum, int torture) | ||
461 | { | ||
462 | int err, ret = 0; | ||
463 | |||
464 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | ||
465 | |||
466 | err = paranoid_check_not_bad(ubi, pnum); | ||
467 | if (err != 0) | ||
468 | return err > 0 ? -EINVAL : err; | ||
469 | |||
470 | if (ubi->ro_mode) { | ||
471 | ubi_err("read-only mode"); | ||
472 | return -EROFS; | ||
473 | } | ||
474 | |||
475 | if (torture) { | ||
476 | ret = torture_peb(ubi, pnum); | ||
477 | if (ret < 0) | ||
478 | return ret; | ||
479 | } | ||
480 | |||
481 | err = do_sync_erase(ubi, pnum); | ||
482 | if (err) | ||
483 | return err; | ||
484 | |||
485 | return ret + 1; | ||
486 | } | ||
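/*
 * A minimal usage sketch (not part of the original code): how a caller,
 * e.g. the wear-levelling unit, might consume the ubi_io_sync_erase()
 * return value. The helper name and the erase-counter bookkeeping are
 * illustrative assumptions, not UBI API.
 */
static int erase_and_account(struct ubi_device *ubi, int pnum,
			     long long *ec, int torture)
{
	int err;

	err = ubi_io_sync_erase(ubi, pnum, torture);
	if (err > 0) {
		/* The positive return is the number of erasures made */
		*ec += err;
		return 0;
	}
	if (err == -EIO)
		/* The PEB went bad - mark it if the flash supports this */
		ubi_io_mark_bad(ubi, pnum);
	return err;
}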
487 | |||
488 | /** | ||
489 | * ubi_io_is_bad - check if a physical eraseblock is bad. | ||
490 | * @ubi: UBI device description object | ||
491 | * @pnum: the physical eraseblock number to check | ||
492 | * | ||
493 | * This function returns a positive number if the physical eraseblock is bad, | ||
494 | * zero if not, and a negative error code if an error occurred. | ||
495 | */ | ||
496 | int ubi_io_is_bad(const struct ubi_device *ubi, int pnum) | ||
497 | { | ||
498 | struct mtd_info *mtd = ubi->mtd; | ||
499 | |||
500 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | ||
501 | |||
502 | if (ubi->bad_allowed) { | ||
503 | int ret; | ||
504 | |||
505 | ret = mtd->block_isbad(mtd, (loff_t)pnum * ubi->peb_size); | ||
506 | if (ret < 0) | ||
507 | ubi_err("error %d while checking if PEB %d is bad", | ||
508 | ret, pnum); | ||
509 | else if (ret) | ||
510 | dbg_io("PEB %d is bad", pnum); | ||
511 | return ret; | ||
512 | } | ||
513 | |||
514 | return 0; | ||
515 | } | ||
516 | |||
517 | /** | ||
518 | * ubi_io_mark_bad - mark a physical eraseblock as bad. | ||
519 | * @ubi: UBI device description object | ||
520 | * @pnum: the physical eraseblock number to mark | ||
521 | * | ||
522 | * This function returns zero in case of success and a negative error code in | ||
523 | * case of failure. | ||
524 | */ | ||
525 | int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum) | ||
526 | { | ||
527 | int err; | ||
528 | struct mtd_info *mtd = ubi->mtd; | ||
529 | |||
530 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | ||
531 | |||
532 | if (ubi->ro_mode) { | ||
533 | ubi_err("read-only mode"); | ||
534 | return -EROFS; | ||
535 | } | ||
536 | |||
537 | if (!ubi->bad_allowed) | ||
538 | return 0; | ||
539 | |||
540 | err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size); | ||
541 | if (err) | ||
542 | ubi_err("cannot mark PEB %d bad, error %d", pnum, err); | ||
543 | return err; | ||
544 | } | ||
545 | |||
546 | /** | ||
547 | * validate_ec_hdr - validate an erase counter header. | ||
548 | * @ubi: UBI device description object | ||
549 | * @ec_hdr: the erase counter header to check | ||
550 | * | ||
551 | * This function returns zero if the erase counter header is OK, and %1 if | ||
552 | * not. | ||
553 | */ | ||
554 | static int validate_ec_hdr(const struct ubi_device *ubi, | ||
555 | const struct ubi_ec_hdr *ec_hdr) | ||
556 | { | ||
557 | long long ec; | ||
558 | int vid_hdr_offset, leb_start; | ||
559 | |||
560 | ec = ubi64_to_cpu(ec_hdr->ec); | ||
561 | vid_hdr_offset = ubi32_to_cpu(ec_hdr->vid_hdr_offset); | ||
562 | leb_start = ubi32_to_cpu(ec_hdr->data_offset); | ||
563 | |||
564 | if (ec_hdr->version != UBI_VERSION) { | ||
565 | ubi_err("node with incompatible UBI version found: " | ||
566 | "this UBI version is %d, image version is %d", | ||
567 | UBI_VERSION, (int)ec_hdr->version); | ||
568 | goto bad; | ||
569 | } | ||
570 | |||
571 | if (vid_hdr_offset != ubi->vid_hdr_offset) { | ||
572 | ubi_err("bad VID header offset %d, expected %d", | ||
573 | vid_hdr_offset, ubi->vid_hdr_offset); | ||
574 | goto bad; | ||
575 | } | ||
576 | |||
577 | if (leb_start != ubi->leb_start) { | ||
578 | ubi_err("bad data offset %d, expected %d", | ||
579 | leb_start, ubi->leb_start); | ||
580 | goto bad; | ||
581 | } | ||
582 | |||
583 | if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) { | ||
584 | ubi_err("bad erase counter %lld", ec); | ||
585 | goto bad; | ||
586 | } | ||
587 | |||
588 | return 0; | ||
589 | |||
590 | bad: | ||
591 | ubi_err("bad EC header"); | ||
592 | ubi_dbg_dump_ec_hdr(ec_hdr); | ||
593 | ubi_dbg_dump_stack(); | ||
594 | return 1; | ||
595 | } | ||
596 | |||
597 | /** | ||
598 | * ubi_io_read_ec_hdr - read and check an erase counter header. | ||
599 | * @ubi: UBI device description object | ||
600 | * @pnum: physical eraseblock to read from | ||
601 | * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter | ||
602 | * header | ||
603 | * @verbose: be verbose if the header is corrupted or was not found | ||
604 | * | ||
605 | * This function reads erase counter header from physical eraseblock @pnum and | ||
606 | * stores it in @ec_hdr. This function also checks CRC checksum of the read | ||
607 | * erase counter header. The following codes may be returned: | ||
608 | * | ||
609 | * o %0 if the CRC checksum is correct and the header was successfully read; | ||
610 | * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected | ||
611 | * and corrected by the flash driver; this is harmless but may indicate that | ||
612 | * this eraseblock may become bad soon (though it may not); | ||
613 | * o %UBI_IO_BAD_EC_HDR if the erase counter header is corrupted (a CRC error); | ||
614 | * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty; | ||
615 | * o a negative error code in case of failure. | ||
616 | */ | ||
617 | int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum, | ||
618 | struct ubi_ec_hdr *ec_hdr, int verbose) | ||
619 | { | ||
620 | int err, read_err = 0; | ||
621 | uint32_t crc, magic, hdr_crc; | ||
622 | |||
623 | dbg_io("read EC header from PEB %d", pnum); | ||
624 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | ||
625 | |||
626 | err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); | ||
627 | if (err) { | ||
628 | if (err != UBI_IO_BITFLIPS && err != -EBADMSG) | ||
629 | return err; | ||
630 | |||
631 | /* | ||
632 | * We read all the data, but either a correctable bit-flip | ||
633 | * occurred, or MTD reported a data integrity error, | ||
634 | * like an ECC error in case of NAND. The former is harmless, | ||
635 | * the latter may mean that the read data is corrupted. But we | ||
636 | * have a CRC check-sum and we will detect this. If the EC | ||
637 | * header is still OK, we just report this as there was a | ||
638 | * bit-flip. | ||
639 | */ | ||
640 | read_err = err; | ||
641 | } | ||
642 | |||
643 | magic = ubi32_to_cpu(ec_hdr->magic); | ||
644 | if (magic != UBI_EC_HDR_MAGIC) { | ||
645 | /* | ||
646 | * The magic field is wrong. Let's check if we have read all | ||
647 | * 0xFF. If yes, this physical eraseblock is assumed to be | ||
648 | * empty. | ||
649 | * | ||
650 | * But if there was a read error, we do not test it for all | ||
651 | * 0xFFs. Even if it does contain all 0xFFs, this error | ||
652 | * indicates that something is still wrong with this physical | ||
653 | * eraseblock and we anyway cannot treat it as empty. | ||
654 | */ | ||
655 | if (read_err != -EBADMSG && | ||
656 | check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { | ||
657 | /* The physical eraseblock is supposedly empty */ | ||
658 | |||
659 | /* | ||
660 | * The below is just a paranoid check, it has to be | ||
661 | * compiled out if paranoid checks are disabled. | ||
662 | */ | ||
663 | err = paranoid_check_all_ff(ubi, pnum, 0, | ||
664 | ubi->peb_size); | ||
665 | if (err) | ||
666 | return err > 0 ? UBI_IO_BAD_EC_HDR : err; | ||
667 | |||
668 | if (verbose) | ||
669 | ubi_warn("no EC header found at PEB %d, " | ||
670 | "only 0xFF bytes", pnum); | ||
671 | return UBI_IO_PEB_EMPTY; | ||
672 | } | ||
673 | |||
674 | /* | ||
675 | * This is not a valid erase counter header, and these are not | ||
676 | * 0xFF bytes. Report that the header is corrupted. | ||
677 | */ | ||
678 | if (verbose) { | ||
679 | ubi_warn("bad magic number at PEB %d: %08x instead of " | ||
680 | "%08x", pnum, magic, UBI_EC_HDR_MAGIC); | ||
681 | ubi_dbg_dump_ec_hdr(ec_hdr); | ||
682 | } | ||
683 | return UBI_IO_BAD_EC_HDR; | ||
684 | } | ||
685 | |||
686 | crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); | ||
687 | hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc); | ||
688 | |||
689 | if (hdr_crc != crc) { | ||
690 | if (verbose) { | ||
691 | ubi_warn("bad EC header CRC at PEB %d, calculated %#08x," | ||
692 | " read %#08x", pnum, crc, hdr_crc); | ||
693 | ubi_dbg_dump_ec_hdr(ec_hdr); | ||
694 | } | ||
695 | return UBI_IO_BAD_EC_HDR; | ||
696 | } | ||
697 | |||
698 | /* And of course validate what has just been read from the media */ | ||
699 | err = validate_ec_hdr(ubi, ec_hdr); | ||
700 | if (err) { | ||
701 | ubi_err("validation failed for PEB %d", pnum); | ||
702 | return -EINVAL; | ||
703 | } | ||
704 | |||
705 | return read_err ? UBI_IO_BITFLIPS : 0; | ||
706 | } | ||
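/*
 * A minimal dispatch sketch (not part of the original code) showing how a
 * scanning caller might act on each ubi_io_read_ec_hdr() return code
 * listed above; the actions in the comments are assumptions about the
 * caller, not mandated by this function.
 */
static int scan_ec_hdr(const struct ubi_device *ubi, int pnum,
		       struct ubi_ec_hdr *ec_hdr)
{
	int err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 1);

	switch (err) {
	case 0:
		break;		/* valid header, use ec_hdr->ec */
	case UBI_IO_BITFLIPS:
		break;		/* valid, but schedule this PEB for scrubbing */
	case UBI_IO_PEB_EMPTY:
		break;		/* free PEB, assign a mean erase counter */
	case UBI_IO_BAD_EC_HDR:
		break;		/* corrupted, erase and re-initialize */
	default:
		return err;	/* real I/O error */
	}
	return 0;
}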
707 | |||
708 | /** | ||
709 | * ubi_io_write_ec_hdr - write an erase counter header. | ||
710 | * @ubi: UBI device description object | ||
711 | * @pnum: physical eraseblock to write to | ||
712 | * @ec_hdr: the erase counter header to write | ||
713 | * | ||
714 | * This function writes erase counter header described by @ec_hdr to physical | ||
715 | * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so | ||
716 | * the caller does not have to fill them. Callers must only fill the @ec_hdr->ec | ||
717 | * field. | ||
718 | * | ||
719 | * This function returns zero in case of success and a negative error code in | ||
720 | * case of failure. If %-EIO is returned, the physical eraseblock most probably | ||
721 | * went bad. | ||
722 | */ | ||
723 | int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum, | ||
724 | struct ubi_ec_hdr *ec_hdr) | ||
725 | { | ||
726 | int err; | ||
727 | uint32_t crc; | ||
728 | |||
729 | dbg_io("write EC header to PEB %d", pnum); | ||
730 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | ||
731 | |||
732 | ec_hdr->magic = cpu_to_ubi32(UBI_EC_HDR_MAGIC); | ||
733 | ec_hdr->version = UBI_VERSION; | ||
734 | ec_hdr->vid_hdr_offset = cpu_to_ubi32(ubi->vid_hdr_offset); | ||
735 | ec_hdr->data_offset = cpu_to_ubi32(ubi->leb_start); | ||
736 | crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); | ||
737 | ec_hdr->hdr_crc = cpu_to_ubi32(crc); | ||
738 | |||
739 | err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr); | ||
740 | if (err) | ||
741 | return -EINVAL; | ||
742 | |||
743 | err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize); | ||
744 | return err; | ||
745 | } | ||
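/*
 * A minimal caller sketch (not part of the original code): as noted above,
 * only @ec_hdr->ec has to be filled before the call. It assumes the
 * cpu_to_ubi64() helper paired with the ubi64_to_cpu() used earlier in
 * this file.
 */
static int write_fresh_ec_hdr(const struct ubi_device *ubi, int pnum,
			      long long ec)
{
	struct ubi_ec_hdr *ec_hdr;
	int err;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	/* The only field the caller sets; the rest is filled in above */
	ec_hdr->ec = cpu_to_ubi64(ec);
	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	kfree(ec_hdr);
	return err;
}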
746 | |||
747 | /** | ||
748 | * validate_vid_hdr - validate a volume identifier header. | ||
749 | * @ubi: UBI device description object | ||
750 | * @vid_hdr: the volume identifier header to check | ||
751 | * | ||
752 | * This function checks the data stored in the volume identifier header | ||
753 | * @vid_hdr. Returns zero if the VID header is OK and %1 if not. | ||
754 | */ | ||
755 | static int validate_vid_hdr(const struct ubi_device *ubi, | ||
756 | const struct ubi_vid_hdr *vid_hdr) | ||
757 | { | ||
758 | int vol_type = vid_hdr->vol_type; | ||
759 | int copy_flag = vid_hdr->copy_flag; | ||
760 | int vol_id = ubi32_to_cpu(vid_hdr->vol_id); | ||
761 | int lnum = ubi32_to_cpu(vid_hdr->lnum); | ||
762 | int compat = vid_hdr->compat; | ||
763 | int data_size = ubi32_to_cpu(vid_hdr->data_size); | ||
764 | int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs); | ||
765 | int data_pad = ubi32_to_cpu(vid_hdr->data_pad); | ||
766 | int data_crc = ubi32_to_cpu(vid_hdr->data_crc); | ||
767 | int usable_leb_size = ubi->leb_size - data_pad; | ||
768 | |||
769 | if (copy_flag != 0 && copy_flag != 1) { | ||
770 | dbg_err("bad copy_flag"); | ||
771 | goto bad; | ||
772 | } | ||
773 | |||
774 | if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 || | ||
775 | data_pad < 0) { | ||
776 | dbg_err("negative values"); | ||
777 | goto bad; | ||
778 | } | ||
779 | |||
780 | if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) { | ||
781 | dbg_err("bad vol_id"); | ||
782 | goto bad; | ||
783 | } | ||
784 | |||
785 | if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) { | ||
786 | dbg_err("bad compat"); | ||
787 | goto bad; | ||
788 | } | ||
789 | |||
790 | if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE && | ||
791 | compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE && | ||
792 | compat != UBI_COMPAT_REJECT) { | ||
793 | dbg_err("bad compat"); | ||
794 | goto bad; | ||
795 | } | ||
796 | |||
797 | if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { | ||
798 | dbg_err("bad vol_type"); | ||
799 | goto bad; | ||
800 | } | ||
801 | |||
802 | if (data_pad >= ubi->leb_size / 2) { | ||
803 | dbg_err("bad data_pad"); | ||
804 | goto bad; | ||
805 | } | ||
806 | |||
807 | if (vol_type == UBI_VID_STATIC) { | ||
808 | /* | ||
809 | * Although from a high-level point of view static volumes may | ||
810 | * contain zero bytes of data, no VID header can contain | ||
811 | * zeros in these fields, because empty volumes do not have | ||
812 | * mapped logical eraseblocks. | ||
813 | */ | ||
814 | if (used_ebs == 0) { | ||
815 | dbg_err("zero used_ebs"); | ||
816 | goto bad; | ||
817 | } | ||
818 | if (data_size == 0) { | ||
819 | dbg_err("zero data_size"); | ||
820 | goto bad; | ||
821 | } | ||
822 | if (lnum < used_ebs - 1) { | ||
823 | if (data_size != usable_leb_size) { | ||
824 | dbg_err("bad data_size"); | ||
825 | goto bad; | ||
826 | } | ||
827 | } else if (lnum == used_ebs - 1) { | ||
828 | if (data_size == 0) { | ||
829 | dbg_err("bad data_size at last LEB"); | ||
830 | goto bad; | ||
831 | } | ||
832 | } else { | ||
833 | dbg_err("too high lnum"); | ||
834 | goto bad; | ||
835 | } | ||
836 | } else { | ||
837 | if (copy_flag == 0) { | ||
838 | if (data_crc != 0) { | ||
839 | dbg_err("non-zero data CRC"); | ||
840 | goto bad; | ||
841 | } | ||
842 | if (data_size != 0) { | ||
843 | dbg_err("non-zero data_size"); | ||
844 | goto bad; | ||
845 | } | ||
846 | } else { | ||
847 | if (data_size == 0) { | ||
848 | dbg_err("zero data_size of copy"); | ||
849 | goto bad; | ||
850 | } | ||
851 | } | ||
852 | if (used_ebs != 0) { | ||
853 | dbg_err("bad used_ebs"); | ||
854 | goto bad; | ||
855 | } | ||
856 | } | ||
857 | |||
858 | return 0; | ||
859 | |||
860 | bad: | ||
861 | ubi_err("bad VID header"); | ||
862 | ubi_dbg_dump_vid_hdr(vid_hdr); | ||
863 | ubi_dbg_dump_stack(); | ||
864 | return 1; | ||
865 | } | ||
866 | |||
867 | /** | ||
868 | * ubi_io_read_vid_hdr - read and check a volume identifier header. | ||
869 | * @ubi: UBI device description object | ||
870 | * @pnum: physical eraseblock number to read from | ||
871 | * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume | ||
872 | * identifier header | ||
873 | * @verbose: be verbose if the header is corrupted or wasn't found | ||
874 | * | ||
875 | * This function reads the volume identifier header from physical eraseblock | ||
876 | * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read | ||
877 | * volume identifier header. The following codes may be returned: | ||
878 | * | ||
879 | * o %0 if the CRC checksum is correct and the header was successfully read; | ||
880 | * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected | ||
881 | * and corrected by the flash driver; this is harmless but may indicate that | ||
882 | * this eraseblock may become bad soon; | ||
883 | * o %UBI_IO_BAD_VID_HDR if the volume identifier header is corrupted (a CRC | ||
884 | * error detected); | ||
885 | * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID | ||
886 | * header there); | ||
887 | * o a negative error code in case of failure. | ||
888 | */ | ||
889 | int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum, | ||
890 | struct ubi_vid_hdr *vid_hdr, int verbose) | ||
891 | { | ||
892 | int err, read_err = 0; | ||
893 | uint32_t crc, magic, hdr_crc; | ||
894 | void *p; | ||
895 | |||
896 | dbg_io("read VID header from PEB %d", pnum); | ||
897 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | ||
898 | |||
899 | p = (char *)vid_hdr - ubi->vid_hdr_shift; | ||
900 | err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, | ||
901 | ubi->vid_hdr_alsize); | ||
902 | if (err) { | ||
903 | if (err != UBI_IO_BITFLIPS && err != -EBADMSG) | ||
904 | return err; | ||
905 | |||
906 | /* | ||
907 | * We read all the data, but either a correctable bit-flip | ||
908 | * occurred, or MTD reported a data integrity error, | ||
909 | * like an ECC error in case of NAND. The former is harmless, | ||
910 | * the latter may mean the read data is corrupted. But we have a | ||
911 | * CRC check-sum and we will identify this. If the VID header is | ||
912 | * still OK, we just report this as there was a bit-flip. | ||
913 | */ | ||
914 | read_err = err; | ||
915 | } | ||
916 | |||
917 | magic = ubi32_to_cpu(vid_hdr->magic); | ||
918 | if (magic != UBI_VID_HDR_MAGIC) { | ||
919 | /* | ||
920 | * If we have read all 0xFF bytes, the VID header probably does | ||
921 | * not exist and the physical eraseblock is assumed to be free. | ||
922 | * | ||
923 | * But if there was a read error, we do not test the data for | ||
924 | * 0xFFs. Even if it does contain all 0xFFs, this error | ||
925 | * indicates that something is still wrong with this physical | ||
926 | * eraseblock and it cannot be regarded as free. | ||
927 | */ | ||
928 | if (read_err != -EBADMSG && | ||
929 | check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { | ||
930 | /* The physical eraseblock is supposedly free */ | ||
931 | |||
932 | /* | ||
933 | * The below is just a paranoid check, it has to be | ||
934 | * compiled out if paranoid checks are disabled. | ||
935 | */ | ||
936 | err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start, | ||
937 | ubi->leb_size); | ||
938 | if (err) | ||
939 | return err > 0 ? UBI_IO_BAD_VID_HDR : err; | ||
940 | |||
941 | if (verbose) | ||
942 | ubi_warn("no VID header found at PEB %d, " | ||
943 | "only 0xFF bytes", pnum); | ||
944 | return UBI_IO_PEB_FREE; | ||
945 | } | ||
946 | |||
947 | /* | ||
948 | * This is not a valid VID header, and these are not 0xFF | ||
949 | * bytes. Report that the header is corrupted. | ||
950 | */ | ||
951 | if (verbose) { | ||
952 | ubi_warn("bad magic number at PEB %d: %08x instead of " | ||
953 | "%08x", pnum, magic, UBI_VID_HDR_MAGIC); | ||
954 | ubi_dbg_dump_vid_hdr(vid_hdr); | ||
955 | } | ||
956 | return UBI_IO_BAD_VID_HDR; | ||
957 | } | ||
958 | |||
959 | crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); | ||
960 | hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc); | ||
961 | |||
962 | if (hdr_crc != crc) { | ||
963 | if (verbose) { | ||
964 | ubi_warn("bad CRC at PEB %d, calculated %#08x, " | ||
965 | "read %#08x", pnum, crc, hdr_crc); | ||
966 | ubi_dbg_dump_vid_hdr(vid_hdr); | ||
967 | } | ||
968 | return UBI_IO_BAD_VID_HDR; | ||
969 | } | ||
970 | |||
971 | /* Validate the VID header that we have just read */ | ||
972 | err = validate_vid_hdr(ubi, vid_hdr); | ||
973 | if (err) { | ||
974 | ubi_err("validation failed for PEB %d", pnum); | ||
975 | return -EINVAL; | ||
976 | } | ||
977 | |||
978 | return read_err ? UBI_IO_BITFLIPS : 0; | ||
979 | } | ||
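/*
 * A minimal sketch (not part of the original code): the VID header return
 * codes mirror the EC header ones, with %UBI_IO_PEB_FREE meaning the PEB
 * carries an EC header but no VID header yet. The helper is illustrative.
 */
static int peb_is_mapped(const struct ubi_device *ubi, int pnum,
			 struct ubi_vid_hdr *vid_hdr)
{
	int err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 0);

	if (err == 0 || err == UBI_IO_BITFLIPS)
		return 1;	/* a valid VID header is present */
	if (err == UBI_IO_PEB_FREE)
		return 0;	/* free PEB: EC header only */
	return err;		/* bad header or I/O error */
}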
980 | |||
981 | /** | ||
982 | * ubi_io_write_vid_hdr - write a volume identifier header. | ||
983 | * @ubi: UBI device description object | ||
984 | * @pnum: the physical eraseblock number to write to | ||
985 | * @vid_hdr: the volume identifier header to write | ||
986 | * | ||
987 | * This function writes the volume identifier header described by @vid_hdr to | ||
988 | * physical eraseblock @pnum. This function automatically fills the | ||
989 | * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates | ||
990 | * header CRC checksum and stores it at vid_hdr->hdr_crc. | ||
991 | * | ||
992 | * This function returns zero in case of success and a negative error code in | ||
993 | * case of failure. If %-EIO is returned, the physical eraseblock probably went | ||
994 | * bad. | ||
995 | */ | ||
996 | int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum, | ||
997 | struct ubi_vid_hdr *vid_hdr) | ||
998 | { | ||
999 | int err; | ||
1000 | uint32_t crc; | ||
1001 | void *p; | ||
1002 | |||
1003 | dbg_io("write VID header to PEB %d", pnum); | ||
1004 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | ||
1005 | |||
1006 | err = paranoid_check_peb_ec_hdr(ubi, pnum); | ||
1007 | if (err) | ||
1008 | return err > 0 ? -EINVAL : err; | ||
1009 | |||
1010 | vid_hdr->magic = cpu_to_ubi32(UBI_VID_HDR_MAGIC); | ||
1011 | vid_hdr->version = UBI_VERSION; | ||
1012 | crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); | ||
1013 | vid_hdr->hdr_crc = cpu_to_ubi32(crc); | ||
1014 | |||
1015 | err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr); | ||
1016 | if (err) | ||
1017 | return -EINVAL; | ||
1018 | |||
1019 | p = (char *)vid_hdr - ubi->vid_hdr_shift; | ||
1020 | err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, | ||
1021 | ubi->vid_hdr_alsize); | ||
1022 | return err; | ||
1023 | } | ||
1024 | |||
1025 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | ||
1026 | |||
1027 | /** | ||
1028 | * paranoid_check_not_bad - ensure that a physical eraseblock is not bad. | ||
1029 | * @ubi: UBI device description object | ||
1030 | * @pnum: physical eraseblock number to check | ||
1031 | * | ||
1032 | * This function returns zero if the physical eraseblock is good, a positive | ||
1033 | * number if it is bad and a negative error code if an error occurred. | ||
1034 | */ | ||
1035 | static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum) | ||
1036 | { | ||
1037 | int err; | ||
1038 | |||
1039 | err = ubi_io_is_bad(ubi, pnum); | ||
1040 | if (!err) | ||
1041 | return err; | ||
1042 | |||
1043 | ubi_err("paranoid check failed for PEB %d", pnum); | ||
1044 | ubi_dbg_dump_stack(); | ||
1045 | return err; | ||
1046 | } | ||
1047 | |||
1048 | /** | ||
1049 | * paranoid_check_ec_hdr - check if an erase counter header is all right. | ||
1050 | * @ubi: UBI device description object | ||
1051 | * @pnum: physical eraseblock number the erase counter header belongs to | ||
1052 | * @ec_hdr: the erase counter header to check | ||
1053 | * | ||
1054 | * This function returns zero if the erase counter header contains valid | ||
1055 | * values, and %1 if not. | ||
1056 | */ | ||
1057 | static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum, | ||
1058 | const struct ubi_ec_hdr *ec_hdr) | ||
1059 | { | ||
1060 | int err; | ||
1061 | uint32_t magic; | ||
1062 | |||
1063 | magic = ubi32_to_cpu(ec_hdr->magic); | ||
1064 | if (magic != UBI_EC_HDR_MAGIC) { | ||
1065 | ubi_err("bad magic %#08x, must be %#08x", | ||
1066 | magic, UBI_EC_HDR_MAGIC); | ||
1067 | goto fail; | ||
1068 | } | ||
1069 | |||
1070 | err = validate_ec_hdr(ubi, ec_hdr); | ||
1071 | if (err) { | ||
1072 | ubi_err("paranoid check failed for PEB %d", pnum); | ||
1073 | goto fail; | ||
1074 | } | ||
1075 | |||
1076 | return 0; | ||
1077 | |||
1078 | fail: | ||
1079 | ubi_dbg_dump_ec_hdr(ec_hdr); | ||
1080 | ubi_dbg_dump_stack(); | ||
1081 | return 1; | ||
1082 | } | ||
1083 | |||
1084 | /** | ||
1085 | * paranoid_check_peb_ec_hdr - check that the erase counter header of a | ||
1086 | * physical eraseblock is in-place and is all right. | ||
1087 | * @ubi: UBI device description object | ||
1088 | * @pnum: the physical eraseblock number to check | ||
1089 | * | ||
1090 | * This function returns zero if the erase counter header is all right, %1 if | ||
1091 | * not, and a negative error code if an error occurred. | ||
1092 | */ | ||
1093 | static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum) | ||
1094 | { | ||
1095 | int err; | ||
1096 | uint32_t crc, hdr_crc; | ||
1097 | struct ubi_ec_hdr *ec_hdr; | ||
1098 | |||
1099 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
1100 | if (!ec_hdr) | ||
1101 | return -ENOMEM; | ||
1102 | |||
1103 | err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); | ||
1104 | if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) | ||
1105 | goto exit; | ||
1106 | |||
1107 | crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); | ||
1108 | hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc); | ||
1109 | if (hdr_crc != crc) { | ||
1110 | ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc); | ||
1111 | ubi_err("paranoid check failed for PEB %d", pnum); | ||
1112 | ubi_dbg_dump_ec_hdr(ec_hdr); | ||
1113 | ubi_dbg_dump_stack(); | ||
1114 | err = 1; | ||
1115 | goto exit; | ||
1116 | } | ||
1117 | |||
1118 | err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr); | ||
1119 | |||
1120 | exit: | ||
1121 | kfree(ec_hdr); | ||
1122 | return err; | ||
1123 | } | ||
1124 | |||
1125 | /** | ||
1126 | * paranoid_check_vid_hdr - check that a volume identifier header is all right. | ||
1127 | * @ubi: UBI device description object | ||
1128 | * @pnum: physical eraseblock number the volume identifier header belongs to | ||
1129 | * @vid_hdr: the volume identifier header to check | ||
1130 | * | ||
1131 | * This function returns zero if the volume identifier header is all right, and | ||
1132 | * %1 if not. | ||
1133 | */ | ||
1134 | static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, | ||
1135 | const struct ubi_vid_hdr *vid_hdr) | ||
1136 | { | ||
1137 | int err; | ||
1138 | uint32_t magic; | ||
1139 | |||
1140 | magic = ubi32_to_cpu(vid_hdr->magic); | ||
1141 | if (magic != UBI_VID_HDR_MAGIC) { | ||
1142 | ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x", | ||
1143 | magic, pnum, UBI_VID_HDR_MAGIC); | ||
1144 | goto fail; | ||
1145 | } | ||
1146 | |||
1147 | err = validate_vid_hdr(ubi, vid_hdr); | ||
1148 | if (err) | ||
1149 | goto fail; | ||
1152 | |||
1153 | return err; | ||
1154 | |||
1155 | fail: | ||
1156 | ubi_err("paranoid check failed for PEB %d", pnum); | ||
1157 | ubi_dbg_dump_vid_hdr(vid_hdr); | ||
1158 | ubi_dbg_dump_stack(); | ||
1159 | return 1; | ||
1160 | } | ||
1162 | |||
1163 | /** | ||
1164 | * paranoid_check_peb_vid_hdr - check that the volume identifier header of a | ||
1165 | * physical eraseblock is in-place and is all right. | ||
1166 | * @ubi: UBI device description object | ||
1167 | * @pnum: the physical eraseblock number to check | ||
1168 | * | ||
1169 | * This function returns zero if the volume identifier header is all right, | ||
1170 | * %1 if not, and a negative error code if an error occurred. | ||
1171 | */ | ||
1172 | static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) | ||
1173 | { | ||
1174 | int err; | ||
1175 | uint32_t crc, hdr_crc; | ||
1176 | struct ubi_vid_hdr *vid_hdr; | ||
1177 | void *p; | ||
1178 | |||
1179 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | ||
1180 | if (!vid_hdr) | ||
1181 | return -ENOMEM; | ||
1182 | |||
1183 | p = (char *)vid_hdr - ubi->vid_hdr_shift; | ||
1184 | err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, | ||
1185 | ubi->vid_hdr_alsize); | ||
1186 | if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) | ||
1187 | goto exit; | ||
1188 | |||
1189 | crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); | ||
1190 | hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc); | ||
1191 | if (hdr_crc != crc) { | ||
1192 | ubi_err("bad VID header CRC at PEB %d, calculated %#08x, " | ||
1193 | "read %#08x", pnum, crc, hdr_crc); | ||
1194 | ubi_err("paranoid check failed for PEB %d", pnum); | ||
1195 | ubi_dbg_dump_vid_hdr(vid_hdr); | ||
1196 | ubi_dbg_dump_stack(); | ||
1197 | err = 1; | ||
1198 | goto exit; | ||
1199 | } | ||
1200 | |||
1201 | err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr); | ||
1202 | |||
1203 | exit: | ||
1204 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
1205 | return err; | ||
1206 | } | ||
1207 | |||
1208 | /** | ||
1209 | * paranoid_check_all_ff - check that a region of flash is empty. | ||
1210 | * @ubi: UBI device description object | ||
1211 | * @pnum: the physical eraseblock number to check | ||
1212 | * @offset: the starting offset within the physical eraseblock to check | ||
1213 | * @len: the length of the region to check | ||
1214 | * | ||
1215 | * This function returns zero if only 0xFF bytes are present at offset | ||
1216 | * @offset of the physical eraseblock @pnum, %1 if not, and a negative error | ||
1217 | * code if an error occurred. | ||
1218 | */ | ||
1219 | static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum, | ||
1220 | int offset, int len) | ||
1221 | { | ||
1222 | size_t read; | ||
1223 | int err; | ||
1224 | void *buf; | ||
1225 | loff_t addr = (loff_t)pnum * ubi->peb_size + offset; | ||
1226 | |||
1227 | buf = kzalloc(len, GFP_KERNEL); | ||
1228 | if (!buf) | ||
1229 | return -ENOMEM; | ||
1230 | |||
1231 | err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); | ||
1232 | if (err && err != -EUCLEAN) { | ||
1233 | ubi_err("error %d while reading %d bytes from PEB %d:%d, " | ||
1234 | "read %zd bytes", err, len, pnum, offset, read); | ||
1235 | goto error; | ||
1236 | } | ||
1237 | |||
1238 | err = check_pattern(buf, 0xFF, len); | ||
1239 | if (err == 0) { | ||
1240 | ubi_err("flash region at PEB %d:%d, length %d does not " | ||
1241 | "contain all 0xFF bytes", pnum, offset, len); | ||
1242 | goto fail; | ||
1243 | } | ||
1244 | |||
1245 | kfree(buf); | ||
1246 | return 0; | ||
1247 | |||
1248 | fail: | ||
1249 | ubi_err("paranoid check failed for PEB %d", pnum); | ||
1250 | dbg_msg("hex dump of the %d-%d region", offset, offset + len); | ||
1251 | ubi_dbg_hexdump(buf, len); | ||
1252 | err = 1; | ||
1253 | error: | ||
1254 | ubi_dbg_dump_stack(); | ||
1255 | kfree(buf); | ||
1256 | return err; | ||
1257 | } | ||
1258 | |||
1259 | #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ | ||
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c new file mode 100644 index 000000000000..d352c4575c3d --- /dev/null +++ b/drivers/mtd/ubi/kapi.c | |||
@@ -0,0 +1,575 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
12 | * the GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
19 | */ | ||
20 | |||
21 | /* This file mostly implements UBI kernel API functions */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/err.h> | ||
25 | #include <asm/div64.h> | ||
26 | #include "ubi.h" | ||
27 | |||
28 | /** | ||
29 | * ubi_get_device_info - get information about UBI device. | ||
30 | * @ubi_num: UBI device number | ||
31 | * @di: the information is stored here | ||
32 | * | ||
33 | * This function returns %0 in case of success and %-ENODEV if there is no | ||
34 | * such UBI device. | ||
35 | */ | ||
36 | int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) | ||
37 | { | ||
38 | const struct ubi_device *ubi; | ||
39 | |||
40 | if (!try_module_get(THIS_MODULE)) | ||
41 | return -ENODEV; | ||
42 | |||
43 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || | ||
44 | !ubi_devices[ubi_num]) { | ||
45 | module_put(THIS_MODULE); | ||
46 | return -ENODEV; | ||
47 | } | ||
48 | |||
49 | ubi = ubi_devices[ubi_num]; | ||
50 | di->ubi_num = ubi->ubi_num; | ||
51 | di->leb_size = ubi->leb_size; | ||
52 | di->min_io_size = ubi->min_io_size; | ||
53 | di->ro_mode = ubi->ro_mode; | ||
54 | di->cdev = MKDEV(ubi->major, 0); | ||
55 | module_put(THIS_MODULE); | ||
56 | return 0; | ||
57 | } | ||
58 | EXPORT_SYMBOL_GPL(ubi_get_device_info); | ||
59 | |||
60 | /** | ||
61 | * ubi_get_volume_info - get information about UBI volume. | ||
62 | * @desc: volume descriptor | ||
63 | * @vi: the information is stored here | ||
64 | */ | ||
65 | void ubi_get_volume_info(struct ubi_volume_desc *desc, | ||
66 | struct ubi_volume_info *vi) | ||
67 | { | ||
68 | const struct ubi_volume *vol = desc->vol; | ||
69 | const struct ubi_device *ubi = vol->ubi; | ||
70 | |||
71 | vi->vol_id = vol->vol_id; | ||
72 | vi->ubi_num = ubi->ubi_num; | ||
73 | vi->size = vol->reserved_pebs; | ||
74 | vi->used_bytes = vol->used_bytes; | ||
75 | vi->vol_type = vol->vol_type; | ||
76 | vi->corrupted = vol->corrupted; | ||
77 | vi->upd_marker = vol->upd_marker; | ||
78 | vi->alignment = vol->alignment; | ||
79 | vi->usable_leb_size = vol->usable_leb_size; | ||
80 | vi->name_len = vol->name_len; | ||
81 | vi->name = vol->name; | ||
82 | vi->cdev = MKDEV(ubi->major, vi->vol_id + 1); | ||
83 | } | ||
84 | EXPORT_SYMBOL_GPL(ubi_get_volume_info); | ||
85 | |||
86 | /** | ||
87 | * ubi_open_volume - open UBI volume. | ||
88 | * @ubi_num: UBI device number | ||
89 | * @vol_id: volume ID | ||
90 | * @mode: open mode | ||
91 | * | ||
92 | * The @mode parameter specifies if the volume should be opened in read-only | ||
93 | * mode, read-write mode, or exclusive mode. The exclusive mode guarantees that | ||
94 | * nobody else will be able to open this volume. UBI allows many volume | ||
95 | * readers and one writer at a time. | ||
96 | * | ||
97 | * If a static volume is being opened for the first time since boot, it will be | ||
98 | * checked by this function, which means it will be fully read and the CRC | ||
99 | * checksum of each logical eraseblock will be checked. | ||
100 | * | ||
101 | * This function returns a volume descriptor in case of success and a negative | ||
102 | * error code in case of failure. | ||
103 | */ | ||
104 | struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) | ||
105 | { | ||
106 | int err; | ||
107 | struct ubi_volume_desc *desc; | ||
108 | struct ubi_device *ubi = ubi_devices[ubi_num]; | ||
109 | struct ubi_volume *vol; | ||
110 | |||
111 | dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); | ||
112 | |||
113 | err = -ENODEV; | ||
114 | if (!try_module_get(THIS_MODULE)) | ||
115 | return ERR_PTR(err); | ||
116 | |||
117 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi) | ||
118 | goto out_put; | ||
119 | |||
120 | err = -EINVAL; | ||
121 | if (vol_id < 0 || vol_id >= ubi->vtbl_slots) | ||
122 | goto out_put; | ||
123 | if (mode != UBI_READONLY && mode != UBI_READWRITE && | ||
124 | mode != UBI_EXCLUSIVE) | ||
125 | goto out_put; | ||
126 | |||
127 | desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL); | ||
128 | if (!desc) { | ||
129 | err = -ENOMEM; | ||
130 | goto out_put; | ||
131 | } | ||
132 | |||
133 | spin_lock(&ubi->volumes_lock); | ||
134 | vol = ubi->volumes[vol_id]; | ||
135 | if (!vol) { | ||
136 | err = -ENODEV; | ||
137 | goto out_unlock; | ||
138 | } | ||
139 | |||
140 | err = -EBUSY; | ||
141 | switch (mode) { | ||
142 | case UBI_READONLY: | ||
143 | if (vol->exclusive) | ||
144 | goto out_unlock; | ||
145 | vol->readers += 1; | ||
146 | break; | ||
147 | |||
148 | case UBI_READWRITE: | ||
149 | if (vol->exclusive || vol->writers > 0) | ||
150 | goto out_unlock; | ||
151 | vol->writers += 1; | ||
152 | break; | ||
153 | |||
154 | case UBI_EXCLUSIVE: | ||
155 | if (vol->exclusive || vol->writers || vol->readers) | ||
156 | goto out_unlock; | ||
157 | vol->exclusive = 1; | ||
158 | break; | ||
159 | } | ||
160 | spin_unlock(&ubi->volumes_lock); | ||
161 | |||
162 | desc->vol = vol; | ||
163 | desc->mode = mode; | ||
164 | |||
165 | /* | ||
166 | * To prevent simultaneous checks of the same volume we use @vtbl_mutex, | ||
167 | * although that is not the purpose it was introduced for. | ||
168 | */ | ||
169 | mutex_lock(&ubi->vtbl_mutex); | ||
170 | if (!vol->checked) { | ||
171 | /* This is the first open - check the volume */ | ||
172 | err = ubi_check_volume(ubi, vol_id); | ||
173 | if (err < 0) { | ||
174 | mutex_unlock(&ubi->vtbl_mutex); | ||
175 | ubi_close_volume(desc); | ||
176 | return ERR_PTR(err); | ||
177 | } | ||
178 | if (err == 1) { | ||
179 | ubi_warn("volume %d on UBI device %d is corrupted", | ||
180 | vol_id, ubi->ubi_num); | ||
181 | vol->corrupted = 1; | ||
182 | } | ||
183 | vol->checked = 1; | ||
184 | } | ||
185 | mutex_unlock(&ubi->vtbl_mutex); | ||
186 | return desc; | ||
187 | |||
188 | out_unlock: | ||
189 | spin_unlock(&ubi->volumes_lock); | ||
190 | kfree(desc); | ||
191 | out_put: | ||
192 | module_put(THIS_MODULE); | ||
193 | return ERR_PTR(err); | ||
194 | } | ||
195 | EXPORT_SYMBOL_GPL(ubi_open_volume); | ||
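/*
 * A minimal usage sketch (not part of the original code). Failures are
 * reported via ERR_PTR(), so callers must use IS_ERR()/PTR_ERR() rather
 * than a NULL check; the device and volume numbers are example values.
 */
static struct ubi_volume_desc *open_first_volume(void)
{
	struct ubi_volume_desc *desc;

	desc = ubi_open_volume(0, 0, UBI_READWRITE);
	if (IS_ERR(desc))
		printk(KERN_ERR "cannot open volume, error %ld\n",
		       PTR_ERR(desc));
	return desc;
}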
196 | |||
197 | /** | ||
198 | * ubi_open_volume_nm - open UBI volume by name. | ||
199 | * @ubi_num: UBI device number | ||
200 | * @name: volume name | ||
201 | * @mode: open mode | ||
202 | * | ||
203 | * This function is similar to 'ubi_open_volume()', but opens a volume by name. | ||
204 | */ | ||
205 | struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, | ||
206 | int mode) | ||
207 | { | ||
208 | int i, vol_id = -1, len; | ||
209 | struct ubi_volume_desc *ret; | ||
210 | struct ubi_device *ubi; | ||
211 | |||
212 | dbg_msg("open volume %s, mode %d", name, mode); | ||
213 | |||
214 | if (!name) | ||
215 | return ERR_PTR(-EINVAL); | ||
216 | |||
217 | len = strnlen(name, UBI_VOL_NAME_MAX + 1); | ||
218 | if (len > UBI_VOL_NAME_MAX) | ||
219 | return ERR_PTR(-EINVAL); | ||
220 | |||
221 | ret = ERR_PTR(-ENODEV); | ||
222 | if (!try_module_get(THIS_MODULE)) | ||
223 | return ret; | ||
224 | |||
225 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num]) | ||
226 | goto out_put; | ||
227 | |||
228 | ubi = ubi_devices[ubi_num]; | ||
229 | |||
230 | spin_lock(&ubi->volumes_lock); | ||
231 | /* Walk all volumes of this UBI device */ | ||
232 | for (i = 0; i < ubi->vtbl_slots; i++) { | ||
233 | struct ubi_volume *vol = ubi->volumes[i]; | ||
234 | |||
235 | if (vol && len == vol->name_len && !strcmp(name, vol->name)) { | ||
236 | vol_id = i; | ||
237 | break; | ||
238 | } | ||
239 | } | ||
240 | spin_unlock(&ubi->volumes_lock); | ||
241 | |||
242 | if (vol_id < 0) | ||
243 | goto out_put; | ||
244 | |||
245 | ret = ubi_open_volume(ubi_num, vol_id, mode); | ||
246 | |||
247 | out_put: | ||
248 | module_put(THIS_MODULE); | ||
249 | return ret; | ||
250 | } | ||
251 | EXPORT_SYMBOL_GPL(ubi_open_volume_nm); | ||
252 | |||
253 | /** | ||
254 | * ubi_close_volume - close UBI volume. | ||
255 | * @desc: volume descriptor | ||
256 | */ | ||
257 | void ubi_close_volume(struct ubi_volume_desc *desc) | ||
258 | { | ||
259 | struct ubi_volume *vol = desc->vol; | ||
260 | |||
261 | dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); | ||
262 | |||
263 | spin_lock(&vol->ubi->volumes_lock); | ||
264 | switch (desc->mode) { | ||
265 | case UBI_READONLY: | ||
266 | vol->readers -= 1; | ||
267 | break; | ||
268 | case UBI_READWRITE: | ||
269 | vol->writers -= 1; | ||
270 | break; | ||
271 | case UBI_EXCLUSIVE: | ||
272 | vol->exclusive = 0; | ||
273 | } | ||
274 | spin_unlock(&vol->ubi->volumes_lock); | ||
275 | |||
276 | kfree(desc); | ||
277 | module_put(THIS_MODULE); | ||
278 | } | ||
279 | EXPORT_SYMBOL_GPL(ubi_close_volume); | ||
280 | |||
281 | /** | ||
282 | * ubi_leb_read - read data. | ||
283 | * @desc: volume descriptor | ||
284 | * @lnum: logical eraseblock number to read from | ||
285 | * @buf: buffer where to store the read data | ||
286 | * @offset: offset within the logical eraseblock to read from | ||
287 | * @len: how many bytes to read | ||
288 | * @check: whether UBI has to check the read data's CRC or not. | ||
289 | * | ||
290 | * This function reads data from offset @offset of logical eraseblock @lnum and | ||
291 | * stores the data at @buf. When reading from static volumes, @check specifies | ||
292 | * whether the data has to be checked or not. If yes, the whole logical | ||
293 | * eraseblock will be read and its CRC checksum will be checked (i.e., the CRC | ||
294 | * checksum is per-eraseblock). So checking may substantially slow down the | ||
295 | * read speed. The @check argument is ignored for dynamic volumes. | ||
296 | * | ||
297 | * In case of success, this function returns zero. In case of failure, this | ||
298 | * function returns a negative error code. | ||
299 | * | ||
300 | * %-EBADMSG error code is returned: | ||
301 | * o for both static and dynamic volumes if MTD driver has detected a data | ||
302 | * integrity problem (unrecoverable ECC checksum mismatch in case of NAND); | ||
303 | * o for static volumes in case of data CRC mismatch. | ||
304 | * | ||
305 | * If the volume is damaged because of an interrupted update this function just | ||
306 | * returns immediately with %-EBADF error code. | ||
307 | */ | ||
308 | int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, | ||
309 | int len, int check) | ||
310 | { | ||
311 | struct ubi_volume *vol = desc->vol; | ||
312 | struct ubi_device *ubi = vol->ubi; | ||
313 | int err, vol_id = vol->vol_id; | ||
314 | |||
315 | dbg_msg("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset); | ||
316 | |||
317 | if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 || | ||
318 | lnum >= vol->used_ebs || offset < 0 || len < 0 || | ||
319 | offset + len > vol->usable_leb_size) | ||
320 | return -EINVAL; | ||
321 | |||
322 | if (vol->vol_type == UBI_STATIC_VOLUME && lnum == vol->used_ebs - 1 && | ||
323 | offset + len > vol->last_eb_bytes) | ||
324 | return -EINVAL; | ||
325 | |||
326 | if (vol->upd_marker) | ||
327 | return -EBADF; | ||
328 | if (len == 0) | ||
329 | return 0; | ||
330 | |||
331 | err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check); | ||
332 | if (err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) { | ||
333 | ubi_warn("mark volume %d as corrupted", vol_id); | ||
334 | vol->corrupted = 1; | ||
335 | } | ||
336 | |||
337 | return err; | ||
338 | } | ||
339 | EXPORT_SYMBOL_GPL(ubi_leb_read); | ||
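/*
 * A minimal usage sketch (not part of the original code): reading a whole
 * LEB. For static volumes, passing @check as 1 triggers the per-eraseblock
 * CRC verification described above. @buf is assumed to hold at least
 * vi.usable_leb_size bytes.
 */
static int read_whole_leb(struct ubi_volume_desc *desc, int lnum, char *buf)
{
	struct ubi_volume_info vi;

	ubi_get_volume_info(desc, &vi);
	return ubi_leb_read(desc, lnum, buf, 0, vi.usable_leb_size, 1);
}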
340 | |||
341 | /** | ||
342 | * ubi_leb_write - write data. | ||
343 | * @desc: volume descriptor | ||
344 | * @lnum: logical eraseblock number to write to | ||
345 | * @buf: data to write | ||
346 | * @offset: offset within the logical eraseblock where to write | ||
347 | * @len: how many bytes to write | ||
348 | * @dtype: expected data type | ||
349 | * | ||
350 | * This function writes @len bytes of data from @buf to offset @offset of | ||
351 | * logical eraseblock @lnum. The @dtype argument describes expected lifetime of | ||
352 | * the data. | ||
353 | * | ||
354 | * This function takes care of physical eraseblock write failures. If a write | ||
355 | * to the physical eraseblock fails, the logical eraseblock is | ||
356 | * re-mapped to another physical eraseblock, the data is recovered, and the | ||
357 | * write finishes. UBI has a pool of reserved physical eraseblocks for this. | ||
358 | * | ||
359 | * If all the data were successfully written, zero is returned. If an error | ||
360 | * occurred and UBI has not been able to recover from it, this function returns | ||
361 | * a negative error code. Note, in case of an error, it is possible that | ||
362 | * something was still written to the flash media, but that may be some | ||
363 | * garbage. | ||
364 | * | ||
365 | * If the volume is damaged because of an interrupted update this function just | ||
366 | * returns immediately with %-EBADF code. | ||
367 | */ | ||
368 | int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, | ||
369 | int offset, int len, int dtype) | ||
370 | { | ||
371 | struct ubi_volume *vol = desc->vol; | ||
372 | struct ubi_device *ubi = vol->ubi; | ||
373 | int vol_id = vol->vol_id; | ||
374 | |||
375 | dbg_msg("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset); | ||
376 | |||
377 | if (vol_id < 0 || vol_id >= ubi->vtbl_slots) | ||
378 | return -EINVAL; | ||
379 | |||
380 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | ||
381 | return -EROFS; | ||
382 | |||
383 | if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 || | ||
384 | offset + len > vol->usable_leb_size || offset % ubi->min_io_size || | ||
385 | len % ubi->min_io_size) | ||
386 | return -EINVAL; | ||
387 | |||
388 | if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && | ||
389 | dtype != UBI_UNKNOWN) | ||
390 | return -EINVAL; | ||
391 | |||
392 | if (vol->upd_marker) | ||
393 | return -EBADF; | ||
394 | |||
395 | if (len == 0) | ||
396 | return 0; | ||
397 | |||
398 | return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype); | ||
399 | } | ||
400 | EXPORT_SYMBOL_GPL(ubi_leb_write); | ||
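/*
 * A minimal sketch (not part of the original code) of the alignment rule
 * enforced above: @offset and @len must be multiples of min_io_size, so a
 * caller typically pads its data. Padding with 0xFF (the erased state) is
 * an illustrative choice, not a requirement of the API.
 */
static int write_padded(struct ubi_volume_desc *desc, int lnum,
			const void *data, int len, int min_io_size)
{
	int err, aligned = ALIGN(len, min_io_size);
	void *buf;

	buf = kmalloc(aligned, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memset(buf, 0xFF, aligned);
	memcpy(buf, data, len);
	err = ubi_leb_write(desc, lnum, buf, 0, aligned, UBI_UNKNOWN);
	kfree(buf);
	return err;
}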
401 | |||
402 | /** | ||
403 | * ubi_leb_change - change logical eraseblock atomically. | ||
404 | * @desc: volume descriptor | ||
405 | * @lnum: logical eraseblock number to change | ||
406 | * @buf: data to write | ||
407 | * @len: how many bytes to write | ||
408 | * @dtype: expected data type | ||
409 | * | ||
410 | * This function changes the contents of a logical eraseblock atomically. @buf | ||
411 | * has to contain new logical eraseblock data, and @len - the length of the | ||
412 | * data, which has to be aligned. The length may be shorter than the logical | ||
413 | * eraseblock size, and the logical eraseblock may be appended to more times | ||
414 | * later on. This function guarantees that in case of an unclean reboot the old | ||
415 | * contents is preserved. Returns zero in case of success and a negative error | ||
416 | * code in case of failure. | ||
417 | */ | ||
418 | int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, | ||
419 | int len, int dtype) | ||
420 | { | ||
421 | struct ubi_volume *vol = desc->vol; | ||
422 | struct ubi_device *ubi = vol->ubi; | ||
423 | int vol_id = vol->vol_id; | ||
424 | |||
425 | dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum); | ||
426 | |||
427 | if (vol_id < 0 || vol_id >= ubi->vtbl_slots) | ||
428 | return -EINVAL; | ||
429 | |||
430 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | ||
431 | return -EROFS; | ||
432 | |||
433 | if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 || | ||
434 | len > vol->usable_leb_size || len % ubi->min_io_size) | ||
435 | return -EINVAL; | ||
436 | |||
437 | if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && | ||
438 | dtype != UBI_UNKNOWN) | ||
439 | return -EINVAL; | ||
440 | |||
441 | if (vol->upd_marker) | ||
442 | return -EBADF; | ||
443 | |||
444 | if (len == 0) | ||
445 | return 0; | ||
446 | |||
447 | return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype); | ||
448 | } | ||
449 | EXPORT_SYMBOL_GPL(ubi_leb_change); | ||
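/*
 * A minimal sketch (not part of the original code): atomic LEB change is
 * the primitive to use when a whole block must be replaced and an unclean
 * reboot must never expose a half-written state, e.g. an on-flash table.
 * As with ubi_leb_write(), @len must already be min_io_size-aligned.
 */
static int replace_block(struct ubi_volume_desc *desc, int lnum,
			 const void *tbl, int aligned_len)
{
	return ubi_leb_change(desc, lnum, tbl, aligned_len, UBI_UNKNOWN);
}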
450 | |||
451 | /** | ||
452 | * ubi_leb_erase - erase logical eraseblock. | ||
453 | * @desc: volume descriptor | ||
454 | * @lnum: logical eraseblock number | ||
455 | * | ||
456 | * This function un-maps logical eraseblock @lnum and synchronously erases the | ||
457 | * correspondent physical eraseblock. Returns zero in case of success and a | ||
458 | * negative error code in case of failure. | ||
459 | * | ||
460 | * If the volume is damaged because of an interrupted update this function just | ||
461 | * returns immediately with %-EBADF code. | ||
462 | */ | ||
463 | int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum) | ||
464 | { | ||
465 | struct ubi_volume *vol = desc->vol; | ||
466 | struct ubi_device *ubi = vol->ubi; | ||
467 | int err, vol_id = vol->vol_id; | ||
468 | |||
469 | dbg_msg("erase LEB %d:%d", vol_id, lnum); | ||
470 | |||
471 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | ||
472 | return -EROFS; | ||
473 | |||
474 | if (lnum < 0 || lnum >= vol->reserved_pebs) | ||
475 | return -EINVAL; | ||
476 | |||
477 | if (vol->upd_marker) | ||
478 | return -EBADF; | ||
479 | |||
480 | err = ubi_eba_unmap_leb(ubi, vol_id, lnum); | ||
481 | if (err) | ||
482 | return err; | ||
483 | |||
484 | return ubi_wl_flush(ubi); | ||
485 | } | ||
486 | EXPORT_SYMBOL_GPL(ubi_leb_erase); | ||
487 | |||
488 | /** | ||
489 | * ubi_leb_unmap - un-map logical eraseblock. | ||
490 | * @desc: volume descriptor | ||
491 | * @lnum: logical eraseblock number | ||
492 | * | ||
493 | * This function un-maps logical eraseblock @lnum and schedules the | ||
494 | * corresponding physical eraseblock for erasure, so that it will eventually be | ||
495 | * physically erased in the background. This operation is much faster than the | ||
496 | * erase operation. | ||
497 | * | ||
498 | * Unlike erase, the un-map operation does not guarantee that the logical | ||
499 | * eraseblock will contain all 0xFF bytes when UBI is initialized again. For | ||
500 | * example, if several logical eraseblocks are un-mapped, and an unclean reboot | ||
501 | * happens after this, the logical eraseblocks will not necessarily be | ||
502 | * un-mapped again when this MTD device is attached. They may actually be | ||
503 | * mapped to the same physical eraseblocks again. So, this function has to be | ||
504 | * used with care. | ||
505 | * | ||
506 | * In other words, when un-mapping a logical eraseblock, UBI does not store | ||
507 | * any information about this on the flash media, it just marks the logical | ||
508 | * eraseblock as "un-mapped" in RAM. If UBI is detached before the physical | ||
509 | * eraseblock is physically erased, it will be mapped again to the same logical | ||
510 | * eraseblock when the MTD device is attached again. | ||
511 | * | ||
512 | * The main and obvious use-case of this function is when the contents of a | ||
513 | * logical eraseblock has to be re-written. Then it is much more efficient to | ||
514 | * first un-map it, then write new data, rather than first erase it, then write | ||
515 | * new data. Note, once new data has been written to the logical eraseblock, | ||
516 | * UBI guarantees that the old contents has gone forever. In other words, if an | ||
517 | * unclean reboot happens after the logical eraseblock has been un-mapped and | ||
518 | * then written to, it will contain the last written data. | ||
519 | * | ||
520 | * This function returns zero in case of success and a negative error code in | ||
521 | * case of failure. If the volume is damaged because of an interrupted update | ||
522 | * this function just returns immediately with %-EBADF code. | ||
523 | */ | ||
524 | int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum) | ||
525 | { | ||
526 | struct ubi_volume *vol = desc->vol; | ||
527 | struct ubi_device *ubi = vol->ubi; | ||
528 | int vol_id = vol->vol_id; | ||
529 | |||
530 | dbg_msg("unmap LEB %d:%d", vol_id, lnum); | ||
531 | |||
532 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | ||
533 | return -EROFS; | ||
534 | |||
535 | if (lnum < 0 || lnum >= vol->reserved_pebs) | ||
536 | return -EINVAL; | ||
537 | |||
538 | if (vol->upd_marker) | ||
539 | return -EBADF; | ||
540 | |||
541 | return ubi_eba_unmap_leb(ubi, vol_id, lnum); | ||
542 | } | ||
543 | EXPORT_SYMBOL_GPL(ubi_leb_unmap); | ||
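To make the un-map-then-write pattern above concrete, here is a minimal caller sketch. It is illustrative only and not part of this patch; it assumes the ubi_leb_write() prototype exported by this kernel's UBI kapi (offset, length and data-type hint arguments) and the UBI_UNKNOWN data-type constant.

```c
/*
 * Hypothetical helper, for illustration only: rewrite LEB @lnum of an
 * already-open volume with @len bytes from @buf.
 */
static int rewrite_leb(struct ubi_volume_desc *desc, int lnum,
		       const void *buf, int len)
{
	int err;

	/* Cheaper than ubi_leb_erase(): the PEB is erased in background */
	err = ubi_leb_unmap(desc, lnum);
	if (err)
		return err;

	/* Once this write succeeds, the old contents are gone for good */
	return ubi_leb_write(desc, lnum, buf, 0, len, UBI_UNKNOWN);
}
```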
544 | |||
545 | /** | ||
546 | * ubi_is_mapped - check if logical eraseblock is mapped. | ||
547 | * @desc: volume descriptor | ||
548 | * @lnum: logical eraseblock number | ||
549 | * | ||
550 | * This function checks if logical eraseblock @lnum is mapped to a physical | ||
551 | * eraseblock. If a logical eraseblock is un-mapped, this does not necessarily | ||
552 | * mean it will still be un-mapped after the UBI device is re-attached. The | ||
553 | * logical eraseblock may become mapped to the physical eraseblock it was last | ||
554 | * mapped to. | ||
555 | * | ||
556 | * This function returns %1 if the LEB is mapped, %0 if not, and a negative | ||
557 | * error code in case of failure. If the volume is damaged because of an | ||
558 | * interrupted update this function just returns immediately with %-EBADF error | ||
559 | * code. | ||
560 | */ | ||
561 | int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum) | ||
562 | { | ||
563 | struct ubi_volume *vol = desc->vol; | ||
564 | |||
565 | dbg_msg("test LEB %d:%d", vol->vol_id, lnum); | ||
566 | |||
567 | if (lnum < 0 || lnum >= vol->reserved_pebs) | ||
568 | return -EINVAL; | ||
569 | |||
570 | if (vol->upd_marker) | ||
571 | return -EBADF; | ||
572 | |||
573 | return vol->eba_tbl[lnum] >= 0; | ||
574 | } | ||
575 | EXPORT_SYMBOL_GPL(ubi_is_mapped); | ||
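Because ubi_is_mapped() returns %1, %0 or a negative error code, callers can accumulate the result directly. A small illustrative sketch (not part of this patch):

```c
/* Illustrative only: count the mapped LEBs among the first @leb_count */
static int count_mapped_lebs(struct ubi_volume_desc *desc, int leb_count)
{
	int lnum, err, mapped = 0;

	for (lnum = 0; lnum < leb_count; lnum++) {
		err = ubi_is_mapped(desc, lnum);
		if (err < 0)
			return err;
		mapped += err; /* %1 if mapped, %0 if not */
	}
	return mapped;
}
```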
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c new file mode 100644 index 000000000000..38d4e6757dc7 --- /dev/null +++ b/drivers/mtd/ubi/misc.c | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
12 | * the GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
19 | */ | ||
20 | |||
21 | /* Here we keep miscellaneous functions which are used all over the UBI code */ | ||
22 | |||
23 | #include "ubi.h" | ||
24 | |||
25 | /** | ||
26 | * ubi_calc_data_len - calculate how much real data is stored in a buffer. | ||
27 | * @ubi: UBI device description object | ||
28 | * @buf: a buffer with the contents of the physical eraseblock | ||
29 | * @length: the buffer length | ||
30 | * | ||
31 | * This function calculates how much "real data" is stored in @buf and returns | ||
32 | * the length. Contiguous 0xFF bytes at the end of the buffer are not | ||
33 | * considered "real data". | ||
34 | */ | ||
35 | int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, | ||
36 | int length) | ||
37 | { | ||
38 | int i; | ||
39 | |||
40 | ubi_assert(length % ubi->min_io_size == 0); | ||
41 | |||
42 | for (i = length - 1; i >= 0; i--) | ||
43 | if (((const uint8_t *)buf)[i] != 0xFF) | ||
44 | break; | ||
45 | |||
46 | /* The resulting length must be aligned to the minimum flash I/O size */ | ||
47 | length = ALIGN(i + 1, ubi->min_io_size); | ||
48 | return length; | ||
49 | } | ||
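A worked example with assumed numbers: for min_io_size = 512 and a 1024-byte buffer, a last non-0xFF byte at offset 700 yields ALIGN(701, 512) = 1024, a last non-0xFF byte at offset 100 yields ALIGN(101, 512) = 512, and an all-0xFF buffer yields ALIGN(0, 512) = 0.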
50 | |||
51 | /** | ||
52 | * ubi_check_volume - check the contents of a static volume. | ||
53 | * @ubi: UBI device description object | ||
54 | * @vol_id: ID of the volume to check | ||
55 | * | ||
56 | * This function checks if static volume @vol_id is corrupted by fully reading | ||
57 | * it and checking data CRC. This function returns %0 if the volume is not | ||
58 | * corrupted, %1 if it is corrupted and a negative error code in case of | ||
59 | * failure. Dynamic volumes are not checked and zero is returned immediately. | ||
60 | */ | ||
61 | int ubi_check_volume(struct ubi_device *ubi, int vol_id) | ||
62 | { | ||
63 | void *buf; | ||
64 | int err = 0, i; | ||
65 | struct ubi_volume *vol = ubi->volumes[vol_id]; | ||
66 | |||
67 | if (vol->vol_type != UBI_STATIC_VOLUME) | ||
68 | return 0; | ||
69 | |||
70 | buf = kmalloc(vol->usable_leb_size, GFP_KERNEL); | ||
71 | if (!buf) | ||
72 | return -ENOMEM; | ||
73 | |||
74 | for (i = 0; i < vol->used_ebs; i++) { | ||
75 | int size; | ||
76 | |||
77 | if (i == vol->used_ebs - 1) | ||
78 | size = vol->last_eb_bytes; | ||
79 | else | ||
80 | size = vol->usable_leb_size; | ||
81 | |||
82 | err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1); | ||
83 | if (err) { | ||
84 | if (err == -EBADMSG) | ||
85 | err = 1; | ||
86 | break; | ||
87 | } | ||
88 | } | ||
89 | |||
90 | kfree(buf); | ||
91 | return err; | ||
92 | } | ||
93 | |||
94 | /** | ||
95 | * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad | ||
96 | * eraseblock handling. | ||
97 | * @ubi: UBI device description object | ||
98 | */ | ||
99 | void ubi_calculate_reserved(struct ubi_device *ubi) | ||
100 | { | ||
101 | ubi->beb_rsvd_level = ubi->good_peb_count/100; | ||
102 | ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE; | ||
103 | if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS) | ||
104 | ubi->beb_rsvd_level = MIN_RESEVED_PEBS; | ||
105 | } | ||
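For example, with assumed values good_peb_count = 4096 and CONFIG_MTD_UBI_BEB_RESERVE = 1 (per cent), beb_rsvd_level = (4096 / 100) * 1 = 40 PEBs; on a device small enough that the percentage comes out below MIN_RESEVED_PEBS, the MIN_RESEVED_PEBS floor is used instead.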
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c new file mode 100644 index 000000000000..473f3200b868 --- /dev/null +++ b/drivers/mtd/ubi/scan.c | |||
@@ -0,0 +1,1368 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
12 | * the GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * UBI scanning unit. | ||
23 | * | ||
24 | * This unit is responsible for scanning the flash media, checking UBI | ||
25 | * headers and providing complete information about the UBI flash image. | ||
26 | * | ||
27 | * The scanning information is represented by a &struct ubi_scan_info object. | ||
28 | * Information about found volumes is represented by &struct ubi_scan_volume | ||
29 | * objects which are kept in the volume RB-tree with the root at the @volumes field. | ||
30 | * The RB-tree is indexed by the volume ID. | ||
31 | * | ||
32 | * Found logical eraseblocks are represented by &struct ubi_scan_leb objects. | ||
33 | * These objects are kept in per-volume RB-trees with the root at the | ||
34 | * corresponding &struct ubi_scan_volume object. To put it differently, we keep | ||
35 | * an RB-tree of per-volume objects and each of these objects is the root of | ||
36 | * an RB-tree of per-eraseblock objects. | ||
37 | * | ||
38 | * Corrupted physical eraseblocks are put to the @corr list, free physical | ||
39 | * eraseblocks are put to the @free list, and physical eraseblocks to be | ||
40 | * erased are put to the @erase list. | ||
41 | */ | ||
42 | |||
43 | #include <linux/err.h> | ||
44 | #include <linux/crc32.h> | ||
45 | #include "ubi.h" | ||
46 | |||
47 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | ||
48 | static int paranoid_check_si(const struct ubi_device *ubi, | ||
49 | struct ubi_scan_info *si); | ||
50 | #else | ||
51 | #define paranoid_check_si(ubi, si) 0 | ||
52 | #endif | ||
53 | |||
54 | /* Temporary variables used during scanning */ | ||
55 | static struct ubi_ec_hdr *ech; | ||
56 | static struct ubi_vid_hdr *vidh; | ||
57 | |||
58 | int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec, | ||
59 | struct list_head *list) | ||
60 | { | ||
61 | struct ubi_scan_leb *seb; | ||
62 | |||
63 | if (list == &si->free) | ||
64 | dbg_bld("add to free: PEB %d, EC %d", pnum, ec); | ||
65 | else if (list == &si->erase) | ||
66 | dbg_bld("add to erase: PEB %d, EC %d", pnum, ec); | ||
67 | else if (list == &si->corr) | ||
68 | dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); | ||
69 | else if (list == &si->alien) | ||
70 | dbg_bld("add to alien: PEB %d, EC %d", pnum, ec); | ||
71 | else | ||
72 | BUG(); | ||
73 | |||
74 | seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL); | ||
75 | if (!seb) | ||
76 | return -ENOMEM; | ||
77 | |||
78 | seb->pnum = pnum; | ||
79 | seb->ec = ec; | ||
80 | list_add_tail(&seb->u.list, list); | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * commit_to_mean_value - commit intermediate results to the final mean erase | ||
86 | * counter value. | ||
87 | * @si: scanning information | ||
88 | * | ||
89 | * This is a helper function which calculates a partial erase counter mean | ||
90 | * value and adds it to the resulting mean value. As we can work only in | ||
91 | * integer arithmetic and we want to calculate the mean value of erase counter | ||
92 | * accurately, we first sum erase counter values in @si->ec_sum variable and | ||
93 | * count these components in @si->ec_count. If this temporary @si->ec_sum is | ||
94 | * going to overflow, we calculate the partial mean value | ||
95 | * (@si->ec_sum/@si->ec_count) and add it to @si->mean_ec. | ||
96 | */ | ||
97 | static void commit_to_mean_value(struct ubi_scan_info *si) | ||
98 | { | ||
99 | /* Round to nearest: test the remainder before dividing */ | ||
100 | if (si->ec_sum % si->ec_count >= si->ec_count / 2) | ||
101 | si->mean_ec += 1; | ||
102 | si->mean_ec += si->ec_sum / si->ec_count; | ||
103 | } | ||
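A worked example with assumed values: for si->ec_sum = 1000 and si->ec_count = 400, the quotient is 2 with remainder 200, and since 200 >= 400 / 2 the result is rounded up, so si->mean_ec grows by 3.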
104 | |||
105 | /** | ||
106 | * validate_vid_hdr - check that volume identifier header is correct and | ||
107 | * consistent. | ||
108 | * @vid_hdr: the volume identifier header to check | ||
109 | * @sv: information about the volume this logical eraseblock belongs to | ||
110 | * @pnum: physical eraseblock number the VID header came from | ||
111 | * | ||
112 | * This function checks that data stored in @vid_hdr is consistent. Returns | ||
113 | * non-zero if an inconsistency was found and zero if not. | ||
114 | * | ||
115 | * Note, UBI does sanity check of everything it reads from the flash media. | ||
116 | * Most of the checks are done in the I/O unit. Here we check that the | ||
117 | * information in the VID header is consistent with the information in other VID | ||
118 | * headers of the same volume. | ||
119 | */ | ||
120 | static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr, | ||
121 | const struct ubi_scan_volume *sv, int pnum) | ||
122 | { | ||
123 | int vol_type = vid_hdr->vol_type; | ||
124 | int vol_id = ubi32_to_cpu(vid_hdr->vol_id); | ||
125 | int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs); | ||
126 | int data_pad = ubi32_to_cpu(vid_hdr->data_pad); | ||
127 | |||
128 | if (sv->leb_count != 0) { | ||
129 | int sv_vol_type; | ||
130 | |||
131 | /* | ||
132 | * This is not the first logical eraseblock belonging to this | ||
133 | * volume. Ensure that the data in its VID header is consistent | ||
134 | * with the data in previous logical eraseblock headers. | ||
135 | */ | ||
136 | |||
137 | if (vol_id != sv->vol_id) { | ||
138 | dbg_err("inconsistent vol_id"); | ||
139 | goto bad; | ||
140 | } | ||
141 | |||
142 | if (sv->vol_type == UBI_STATIC_VOLUME) | ||
143 | sv_vol_type = UBI_VID_STATIC; | ||
144 | else | ||
145 | sv_vol_type = UBI_VID_DYNAMIC; | ||
146 | |||
147 | if (vol_type != sv_vol_type) { | ||
148 | dbg_err("inconsistent vol_type"); | ||
149 | goto bad; | ||
150 | } | ||
151 | |||
152 | if (used_ebs != sv->used_ebs) { | ||
153 | dbg_err("inconsistent used_ebs"); | ||
154 | goto bad; | ||
155 | } | ||
156 | |||
157 | if (data_pad != sv->data_pad) { | ||
158 | dbg_err("inconsistent data_pad"); | ||
159 | goto bad; | ||
160 | } | ||
161 | } | ||
162 | |||
163 | return 0; | ||
164 | |||
165 | bad: | ||
166 | ubi_err("inconsistent VID header at PEB %d", pnum); | ||
167 | ubi_dbg_dump_vid_hdr(vid_hdr); | ||
168 | ubi_dbg_dump_sv(sv); | ||
169 | return -EINVAL; | ||
170 | } | ||
171 | |||
172 | /** | ||
173 | * add_volume - add volume to the scanning information. | ||
174 | * @si: scanning information | ||
175 | * @vol_id: ID of the volume to add | ||
176 | * @pnum: physical eraseblock number | ||
177 | * @vid_hdr: volume identifier header | ||
178 | * | ||
179 | * If the volume corresponding to the @vid_hdr logical eraseblock is already | ||
180 | * present in the scanning information, this function does nothing. Otherwise | ||
181 | * it adds the corresponding volume to the scanning information. Returns a pointer | ||
182 | * to the scanning volume object in case of success and a negative error code | ||
183 | * in case of failure. | ||
184 | */ | ||
185 | static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id, | ||
186 | int pnum, | ||
187 | const struct ubi_vid_hdr *vid_hdr) | ||
188 | { | ||
189 | struct ubi_scan_volume *sv; | ||
190 | struct rb_node **p = &si->volumes.rb_node, *parent = NULL; | ||
191 | |||
192 | ubi_assert(vol_id == ubi32_to_cpu(vid_hdr->vol_id)); | ||
193 | |||
194 | /* Walk the volume RB-tree to look if this volume is already present */ | ||
195 | while (*p) { | ||
196 | parent = *p; | ||
197 | sv = rb_entry(parent, struct ubi_scan_volume, rb); | ||
198 | |||
199 | if (vol_id == sv->vol_id) | ||
200 | return sv; | ||
201 | |||
202 | if (vol_id > sv->vol_id) | ||
203 | p = &(*p)->rb_left; | ||
204 | else | ||
205 | p = &(*p)->rb_right; | ||
206 | } | ||
207 | |||
208 | /* The volume is absent - add it */ | ||
209 | sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL); | ||
210 | if (!sv) | ||
211 | return ERR_PTR(-ENOMEM); | ||
212 | |||
213 | sv->highest_lnum = sv->leb_count = 0; | ||
215 | sv->vol_id = vol_id; | ||
216 | sv->root = RB_ROOT; | ||
217 | sv->used_ebs = ubi32_to_cpu(vid_hdr->used_ebs); | ||
218 | sv->data_pad = ubi32_to_cpu(vid_hdr->data_pad); | ||
219 | sv->compat = vid_hdr->compat; | ||
220 | sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME | ||
221 | : UBI_STATIC_VOLUME; | ||
222 | if (vol_id > si->highest_vol_id) | ||
223 | si->highest_vol_id = vol_id; | ||
224 | |||
225 | rb_link_node(&sv->rb, parent, p); | ||
226 | rb_insert_color(&sv->rb, &si->volumes); | ||
227 | si->vols_found += 1; | ||
228 | dbg_bld("added volume %d", vol_id); | ||
229 | return sv; | ||
230 | } | ||
231 | |||
232 | /** | ||
233 | * compare_lebs - find out which logical eraseblock is newer. | ||
234 | * @ubi: UBI device description object | ||
235 | * @seb: first logical eraseblock to compare | ||
236 | * @pnum: physical eraseblock number of the second logical eraseblock to | ||
237 | * compare | ||
238 | * @vid_hdr: volume identifier header of the second logical eraseblock | ||
239 | * | ||
240 | * This function compares 2 copies of a LEB and reports which one is newer. In | ||
241 | * case of success this function returns a positive value, in case of failure, a | ||
242 | * negative error code is returned. The success return codes use the following | ||
243 | * bits: | ||
244 | * o bit 0 is cleared: the first PEB (described by @seb) is newer than the | ||
245 | * second PEB (described by @pnum and @vid_hdr); | ||
246 | * o bit 0 is set: the second PEB is newer; | ||
247 | * o bit 1 is cleared: no bit-flips were detected in the newer LEB; | ||
248 | * o bit 1 is set: bit-flips were detected in the newer LEB; | ||
249 | * o bit 2 is cleared: the older LEB is not corrupted; | ||
250 | * o bit 2 is set: the older LEB is corrupted. | ||
251 | */ | ||
252 | static int compare_lebs(const struct ubi_device *ubi, | ||
253 | const struct ubi_scan_leb *seb, int pnum, | ||
254 | const struct ubi_vid_hdr *vid_hdr) | ||
255 | { | ||
256 | void *buf; | ||
257 | int len, err, second_is_newer, bitflips = 0, corrupted = 0; | ||
258 | uint32_t data_crc, crc; | ||
259 | struct ubi_vid_hdr *vidh = NULL; | ||
260 | unsigned long long sqnum2 = ubi64_to_cpu(vid_hdr->sqnum); | ||
261 | |||
262 | if (seb->sqnum == 0 && sqnum2 == 0) { | ||
263 | long long abs, v1 = seb->leb_ver, v2 = ubi32_to_cpu(vid_hdr->leb_ver); | ||
264 | |||
265 | /* | ||
266 | * UBI constantly increases the logical eraseblock version | ||
267 | * number and it can overflow. Thus, we have to bear in mind | ||
268 | * that versions that are close to %0xFFFFFFFF are less than | ||
269 | * versions that are close to %0. | ||
270 | * | ||
271 | * The UBI WL unit guarantees that the number of pending tasks | ||
272 | * is not greater than %0x7FFFFFFF. So, if the difference | ||
273 | * between any two versions is greater than or equal to | ||
274 | * %0x7FFFFFFF, there was an overflow and the logical | ||
275 | * eraseblock with lower version is actually newer than the one | ||
276 | * with higher version. | ||
277 | * | ||
278 | * FIXME: but this is anyway obsolete and will be removed at | ||
279 | * some point. | ||
280 | */ | ||
281 | |||
282 | dbg_bld("using old crappy leb_ver stuff"); | ||
283 | |||
284 | abs = v1 - v2; | ||
285 | if (abs < 0) | ||
286 | abs = -abs; | ||
287 | |||
288 | if (abs < 0x7FFFFFFF) | ||
289 | /* Non-overflow situation */ | ||
290 | second_is_newer = (v2 > v1); | ||
291 | else | ||
292 | second_is_newer = (v2 < v1); | ||
293 | } else | ||
294 | /* Obviously the LEB with lower sequence counter is older */ | ||
295 | second_is_newer = sqnum2 > seb->sqnum; | ||
296 | |||
297 | /* | ||
298 | * Now we know which copy is newer. If the copy flag of the PEB with | ||
299 | * newer version is not set, then we just return, otherwise we have to | ||
300 | * check data CRC. For the second PEB we already have the VID header, | ||
301 | * for the first one - we'll need to re-read it from flash. | ||
302 | * | ||
303 | * FIXME: this may be optimized so that we wouldn't read twice. | ||
304 | */ | ||
305 | |||
306 | if (second_is_newer) { | ||
307 | if (!vid_hdr->copy_flag) { | ||
308 | /* It is not a copy, so it is newer */ | ||
309 | dbg_bld("second PEB %d is newer, copy_flag is unset", | ||
310 | pnum); | ||
311 | return 1; | ||
312 | } | ||
313 | } else { | ||
314 | pnum = seb->pnum; | ||
315 | |||
316 | vidh = ubi_zalloc_vid_hdr(ubi); | ||
317 | if (!vidh) | ||
318 | return -ENOMEM; | ||
319 | |||
320 | err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); | ||
321 | if (err) { | ||
322 | if (err == UBI_IO_BITFLIPS) | ||
323 | bitflips = 1; | ||
324 | else { | ||
325 | dbg_err("VID of PEB %d header is bad, but it " | ||
326 | "was OK earlier", pnum); | ||
327 | if (err > 0) | ||
328 | err = -EIO; | ||
329 | |||
330 | goto out_free_vidh; | ||
331 | } | ||
332 | } | ||
333 | |||
334 | if (!vidh->copy_flag) { | ||
335 | /* It is not a copy, so it is newer */ | ||
336 | dbg_bld("first PEB %d is newer, copy_flag is unset", | ||
337 | pnum); | ||
338 | err = bitflips << 1; | ||
339 | goto out_free_vidh; | ||
340 | } | ||
341 | |||
342 | vid_hdr = vidh; | ||
343 | } | ||
344 | |||
345 | /* Read the data of the copy and check the CRC */ | ||
346 | |||
347 | len = ubi32_to_cpu(vid_hdr->data_size); | ||
348 | buf = kmalloc(len, GFP_KERNEL); | ||
349 | if (!buf) { | ||
350 | err = -ENOMEM; | ||
351 | goto out_free_vidh; | ||
352 | } | ||
353 | |||
354 | err = ubi_io_read_data(ubi, buf, pnum, 0, len); | ||
355 | if (err && err != UBI_IO_BITFLIPS) | ||
356 | goto out_free_buf; | ||
357 | |||
358 | data_crc = ubi32_to_cpu(vid_hdr->data_crc); | ||
359 | crc = crc32(UBI_CRC32_INIT, buf, len); | ||
360 | if (crc != data_crc) { | ||
361 | dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x", | ||
362 | pnum, crc, data_crc); | ||
363 | corrupted = 1; | ||
364 | bitflips = 0; | ||
365 | second_is_newer = !second_is_newer; | ||
366 | } else { | ||
367 | dbg_bld("PEB %d CRC is OK", pnum); | ||
368 | bitflips = !!err; | ||
369 | } | ||
370 | |||
371 | kfree(buf); | ||
372 | ubi_free_vid_hdr(ubi, vidh); | ||
373 | |||
374 | if (second_is_newer) | ||
375 | dbg_bld("second PEB %d is newer, copy_flag is set", pnum); | ||
376 | else | ||
377 | dbg_bld("first PEB %d is newer, copy_flag is set", pnum); | ||
378 | |||
379 | return second_is_newer | (bitflips << 1) | (corrupted << 2); | ||
380 | |||
381 | out_free_buf: | ||
382 | kfree(buf); | ||
383 | out_free_vidh: | ||
384 | ubi_free_vid_hdr(ubi, vidh); | ||
385 | ubi_assert(err < 0); | ||
386 | return err; | ||
387 | } | ||
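A sketch of how a caller can decode the result bits (mirroring the use in ubi_scan_add_used() below; the helper and its names are illustrative only):

```c
/* Illustrative only: unpack the compare_lebs() success value */
static void decode_cmp_res(int cmp_res, int *second_is_newer,
			   int *need_scrub, int *older_corrupted)
{
	*second_is_newer = cmp_res & 1;     /* bit 0: which copy is newer */
	*need_scrub = !!(cmp_res & 2);      /* bit 1: bit-flips in newer copy */
	*older_corrupted = !!(cmp_res & 4); /* bit 2: older copy is corrupted */
}
```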
388 | |||
389 | /** | ||
390 | * ubi_scan_add_used - add information about a physical eraseblock to the | ||
391 | * scanning information. | ||
392 | * @ubi: UBI device description object | ||
393 | * @si: scanning information | ||
394 | * @pnum: the physical eraseblock number | ||
395 | * @ec: erase counter | ||
396 | * @vid_hdr: the volume identifier header | ||
397 | * @bitflips: if bit-flips were detected when this physical eraseblock was read | ||
398 | * | ||
399 | * This function returns zero in case of success and a negative error code in | ||
400 | * case of failure. | ||
401 | */ | ||
402 | int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si, | ||
403 | int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, | ||
404 | int bitflips) | ||
405 | { | ||
406 | int err, vol_id, lnum; | ||
407 | uint32_t leb_ver; | ||
408 | unsigned long long sqnum; | ||
409 | struct ubi_scan_volume *sv; | ||
410 | struct ubi_scan_leb *seb; | ||
411 | struct rb_node **p, *parent = NULL; | ||
412 | |||
413 | vol_id = ubi32_to_cpu(vid_hdr->vol_id); | ||
414 | lnum = ubi32_to_cpu(vid_hdr->lnum); | ||
415 | sqnum = ubi64_to_cpu(vid_hdr->sqnum); | ||
416 | leb_ver = ubi32_to_cpu(vid_hdr->leb_ver); | ||
417 | |||
418 | dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d", | ||
419 | pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips); | ||
420 | |||
421 | sv = add_volume(si, vol_id, pnum, vid_hdr); | ||
422 | if (IS_ERR(sv)) | ||
423 | return PTR_ERR(sv); | ||
424 | |||
425 | /* | ||
426 | * Walk the RB-tree of logical eraseblocks of volume @vol_id to look | ||
427 | * if this is the first instance of this logical eraseblock or not. | ||
428 | */ | ||
429 | p = &sv->root.rb_node; | ||
430 | while (*p) { | ||
431 | int cmp_res; | ||
432 | |||
433 | parent = *p; | ||
434 | seb = rb_entry(parent, struct ubi_scan_leb, u.rb); | ||
435 | if (lnum != seb->lnum) { | ||
436 | if (lnum < seb->lnum) | ||
437 | p = &(*p)->rb_left; | ||
438 | else | ||
439 | p = &(*p)->rb_right; | ||
440 | continue; | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * There is already a physical eraseblock describing the same | ||
445 | * logical eraseblock present. | ||
446 | */ | ||
447 | |||
448 | dbg_bld("this LEB already exists: PEB %d, sqnum %llu, " | ||
449 | "LEB ver %u, EC %d", seb->pnum, seb->sqnum, | ||
450 | seb->leb_ver, seb->ec); | ||
451 | |||
452 | /* | ||
453 | * Make sure that the logical eraseblocks have different | ||
454 | * versions. Otherwise the image is bad. | ||
455 | */ | ||
456 | if (seb->leb_ver == leb_ver && leb_ver != 0) { | ||
457 | ubi_err("two LEBs with same version %u", leb_ver); | ||
458 | ubi_dbg_dump_seb(seb, 0); | ||
459 | ubi_dbg_dump_vid_hdr(vid_hdr); | ||
460 | return -EINVAL; | ||
461 | } | ||
462 | |||
463 | /* | ||
464 | * Make sure that the logical eraseblocks have different | ||
465 | * sequence numbers. Otherwise the image is bad. | ||
466 | * | ||
467 | * FIXME: remove 'sqnum != 0' check when leb_ver is removed. | ||
468 | */ | ||
469 | if (seb->sqnum == sqnum && sqnum != 0) { | ||
470 | ubi_err("two LEBs with same sequence number %llu", | ||
471 | sqnum); | ||
472 | ubi_dbg_dump_seb(seb, 0); | ||
473 | ubi_dbg_dump_vid_hdr(vid_hdr); | ||
474 | return -EINVAL; | ||
475 | } | ||
476 | |||
477 | /* | ||
478 | * Now we have to drop the older one and preserve the newer | ||
479 | * one. | ||
480 | */ | ||
481 | cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr); | ||
482 | if (cmp_res < 0) | ||
483 | return cmp_res; | ||
484 | |||
485 | if (cmp_res & 1) { | ||
486 | /* | ||
487 | * This logical eraseblock is newer than the one | ||
488 | * found earlier. | ||
489 | */ | ||
490 | err = validate_vid_hdr(vid_hdr, sv, pnum); | ||
491 | if (err) | ||
492 | return err; | ||
493 | |||
494 | if (cmp_res & 4) | ||
495 | err = ubi_scan_add_to_list(si, seb->pnum, | ||
496 | seb->ec, &si->corr); | ||
497 | else | ||
498 | err = ubi_scan_add_to_list(si, seb->pnum, | ||
499 | seb->ec, &si->erase); | ||
500 | if (err) | ||
501 | return err; | ||
502 | |||
503 | seb->ec = ec; | ||
504 | seb->pnum = pnum; | ||
505 | seb->scrub = ((cmp_res & 2) || bitflips); | ||
506 | seb->sqnum = sqnum; | ||
507 | seb->leb_ver = leb_ver; | ||
508 | |||
509 | if (sv->highest_lnum == lnum) | ||
510 | sv->last_data_size = | ||
511 | ubi32_to_cpu(vid_hdr->data_size); | ||
512 | |||
513 | return 0; | ||
514 | } else { | ||
515 | /* | ||
516 | * This logical eraseblock is older than the one found | ||
517 | * previously. | ||
518 | */ | ||
519 | if (cmp_res & 4) | ||
520 | return ubi_scan_add_to_list(si, pnum, ec, | ||
521 | &si->corr); | ||
522 | else | ||
523 | return ubi_scan_add_to_list(si, pnum, ec, | ||
524 | &si->erase); | ||
525 | } | ||
526 | } | ||
527 | |||
528 | /* | ||
529 | * We've met this logical eraseblock for the first time, add it to the | ||
530 | * scanning information. | ||
531 | */ | ||
532 | |||
533 | err = validate_vid_hdr(vid_hdr, sv, pnum); | ||
534 | if (err) | ||
535 | return err; | ||
536 | |||
537 | seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL); | ||
538 | if (!seb) | ||
539 | return -ENOMEM; | ||
540 | |||
541 | seb->ec = ec; | ||
542 | seb->pnum = pnum; | ||
543 | seb->lnum = lnum; | ||
544 | seb->sqnum = sqnum; | ||
545 | seb->scrub = bitflips; | ||
546 | seb->leb_ver = leb_ver; | ||
547 | |||
548 | if (sv->highest_lnum <= lnum) { | ||
549 | sv->highest_lnum = lnum; | ||
550 | sv->last_data_size = ubi32_to_cpu(vid_hdr->data_size); | ||
551 | } | ||
552 | |||
553 | if (si->max_sqnum < sqnum) | ||
554 | si->max_sqnum = sqnum; | ||
555 | |||
556 | sv->leb_count += 1; | ||
557 | rb_link_node(&seb->u.rb, parent, p); | ||
558 | rb_insert_color(&seb->u.rb, &sv->root); | ||
559 | return 0; | ||
560 | } | ||
561 | |||
562 | /** | ||
563 | * ubi_scan_find_sv - find information about a particular volume in the | ||
564 | * scanning information. | ||
565 | * @si: scanning information | ||
566 | * @vol_id: the requested volume ID | ||
567 | * | ||
568 | * This function returns a pointer to the volume description or %NULL if there | ||
569 | * are no data about this volume in the scanning information. | ||
570 | */ | ||
571 | struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si, | ||
572 | int vol_id) | ||
573 | { | ||
574 | struct ubi_scan_volume *sv; | ||
575 | struct rb_node *p = si->volumes.rb_node; | ||
576 | |||
577 | while (p) { | ||
578 | sv = rb_entry(p, struct ubi_scan_volume, rb); | ||
579 | |||
580 | if (vol_id == sv->vol_id) | ||
581 | return sv; | ||
582 | |||
583 | if (vol_id > sv->vol_id) | ||
584 | p = p->rb_left; | ||
585 | else | ||
586 | p = p->rb_right; | ||
587 | } | ||
588 | |||
589 | return NULL; | ||
590 | } | ||
591 | |||
592 | /** | ||
593 | * ubi_scan_find_seb - find information about a particular logical | ||
594 | * eraseblock in the volume scanning information. | ||
595 | * @sv: a pointer to the volume scanning information | ||
596 | * @lnum: the requested logical eraseblock | ||
597 | * | ||
598 | * This function returns a pointer to the scanning logical eraseblock or %NULL | ||
599 | * if there are no data about it in the scanning volume information. | ||
600 | */ | ||
601 | struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv, | ||
602 | int lnum) | ||
603 | { | ||
604 | struct ubi_scan_leb *seb; | ||
605 | struct rb_node *p = sv->root.rb_node; | ||
606 | |||
607 | while (p) { | ||
608 | seb = rb_entry(p, struct ubi_scan_leb, u.rb); | ||
609 | |||
610 | if (lnum == seb->lnum) | ||
611 | return seb; | ||
612 | |||
613 | if (lnum < seb->lnum) | ||
614 | p = p->rb_left; | ||
615 | else | ||
616 | p = p->rb_right; | ||
617 | } | ||
618 | |||
619 | return NULL; | ||
620 | } | ||
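Taken together, the two lookups resolve a (volume, LEB) pair. An illustrative caller sketch (the helper is hypothetical):

```c
/* Illustrative only: find scanning info for LEB @lnum of volume @vol_id */
static struct ubi_scan_leb *find_scan_leb(const struct ubi_scan_info *si,
					  int vol_id, int lnum)
{
	struct ubi_scan_volume *sv;

	sv = ubi_scan_find_sv(si, vol_id);
	if (!sv)
		return NULL;
	return ubi_scan_find_seb(sv, lnum);
}
```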
621 | |||
622 | /** | ||
623 | * ubi_scan_rm_volume - delete scanning information about a volume. | ||
624 | * @si: scanning information | ||
625 | * @sv: the volume scanning information to delete | ||
626 | */ | ||
627 | void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv) | ||
628 | { | ||
629 | struct rb_node *rb; | ||
630 | struct ubi_scan_leb *seb; | ||
631 | |||
632 | dbg_bld("remove scanning information about volume %d", sv->vol_id); | ||
633 | |||
634 | while ((rb = rb_first(&sv->root))) { | ||
635 | seb = rb_entry(rb, struct ubi_scan_leb, u.rb); | ||
636 | rb_erase(&seb->u.rb, &sv->root); | ||
637 | list_add_tail(&seb->u.list, &si->erase); | ||
638 | } | ||
639 | |||
640 | rb_erase(&sv->rb, &si->volumes); | ||
641 | kfree(sv); | ||
642 | si->vols_found -= 1; | ||
643 | } | ||
644 | |||
645 | /** | ||
646 | * ubi_scan_erase_peb - erase a physical eraseblock. | ||
647 | * @ubi: UBI device description object | ||
648 | * @si: scanning information | ||
649 | * @pnum: physical eraseblock number to erase; | ||
650 | * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown) | ||
651 | * | ||
652 | * This function erases physical eraseblock 'pnum', and writes the erase | ||
653 | * counter header to it. This function should only be used during the UBI device | ||
654 | * initialization stages, when the EBA unit has not yet been initialized. This | ||
655 | * function returns zero in case of success and a negative error code in case | ||
656 | * of failure. | ||
657 | */ | ||
658 | int ubi_scan_erase_peb(const struct ubi_device *ubi, | ||
659 | const struct ubi_scan_info *si, int pnum, int ec) | ||
660 | { | ||
661 | int err; | ||
662 | struct ubi_ec_hdr *ec_hdr; | ||
663 | |||
664 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
665 | if (!ec_hdr) | ||
666 | return -ENOMEM; | ||
667 | |||
668 | if ((long long)ec >= UBI_MAX_ERASECOUNTER) { | ||
669 | /* | ||
670 | * Erase counter overflow. Upgrade UBI and use 64-bit | ||
671 | * erase counters internally. | ||
672 | */ | ||
673 | ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec); | ||
674 | kfree(ec_hdr); | ||
675 | return -EINVAL; | ||
675 | } | ||
676 | |||
677 | ec_hdr->ec = cpu_to_ubi64(ec); | ||
678 | |||
679 | err = ubi_io_sync_erase(ubi, pnum, 0); | ||
680 | if (err < 0) | ||
681 | goto out_free; | ||
682 | |||
683 | err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr); | ||
684 | |||
685 | out_free: | ||
686 | kfree(ec_hdr); | ||
687 | return err; | ||
688 | } | ||
689 | |||
690 | /** | ||
691 | * ubi_scan_get_free_peb - get a free physical eraseblock. | ||
692 | * @ubi: UBI device description object | ||
693 | * @si: scanning information | ||
694 | * | ||
695 | * This function returns a free physical eraseblock. It is supposed to be | ||
696 | * called on the UBI initialization stages when the wear-leveling unit is not | ||
697 | * initialized yet. This function picks a physical eraseblock from one of the | ||
698 | * lists, writes the EC header if it is needed, and removes it from the list. | ||
699 | * | ||
700 | * This function returns scanning physical eraseblock information in case of | ||
701 | * success and an error code in case of failure. | ||
702 | */ | ||
703 | struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi, | ||
704 | struct ubi_scan_info *si) | ||
705 | { | ||
706 | int err = 0, i; | ||
707 | struct ubi_scan_leb *seb; | ||
708 | |||
709 | if (!list_empty(&si->free)) { | ||
710 | seb = list_entry(si->free.next, struct ubi_scan_leb, u.list); | ||
711 | list_del(&seb->u.list); | ||
712 | dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec); | ||
713 | return seb; | ||
714 | } | ||
715 | |||
716 | for (i = 0; i < 2; i++) { | ||
717 | struct list_head *head; | ||
718 | struct ubi_scan_leb *tmp_seb; | ||
719 | |||
720 | if (i == 0) | ||
721 | head = &si->erase; | ||
722 | else | ||
723 | head = &si->corr; | ||
724 | |||
725 | /* | ||
726 | * We try to erase the first physical eraseblock from the @head | ||
727 | * list and pick it if we succeed, or try to erase the | ||
728 | * next one if not. And so forth. We don't want to deal | ||
729 | * with bad eraseblocks here - they'll be handled later. | ||
730 | */ | ||
731 | list_for_each_entry_safe(seb, tmp_seb, head, u.list) { | ||
732 | if (seb->ec == UBI_SCAN_UNKNOWN_EC) | ||
733 | seb->ec = si->mean_ec; | ||
734 | |||
735 | err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1); | ||
736 | if (err) | ||
737 | continue; | ||
738 | |||
739 | seb->ec += 1; | ||
740 | list_del(&seb->u.list); | ||
741 | dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec); | ||
742 | return seb; | ||
743 | } | ||
744 | } | ||
745 | |||
746 | ubi_err("no eraseblocks found"); | ||
747 | return ERR_PTR(-ENOSPC); | ||
748 | } | ||
749 | |||
750 | /** | ||
751 | * process_eb - read UBI headers, check them and add corresponding data | ||
752 | * to the scanning information. | ||
753 | * @ubi: UBI device description object | ||
754 | * @si: scanning information | ||
755 | * @pnum: the physical eraseblock number | ||
756 | * | ||
757 | * This function returns zero if the physical eraseblock was successfully | ||
758 | * handled and a negative error code in case of failure. | ||
759 | */ | ||
760 | static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) | ||
761 | { | ||
762 | long long ec; | ||
763 | int err, bitflips = 0, vol_id, ec_corr = 0; | ||
764 | |||
765 | dbg_bld("scan PEB %d", pnum); | ||
766 | |||
767 | /* Skip bad physical eraseblocks */ | ||
768 | err = ubi_io_is_bad(ubi, pnum); | ||
769 | if (err < 0) | ||
770 | return err; | ||
771 | else if (err) { | ||
772 | /* | ||
773 | * FIXME: it is actually the duty of the I/O unit to initialize | ||
774 | * this, but MTD does not provide enough information. | ||
775 | */ | ||
776 | si->bad_peb_count += 1; | ||
777 | return 0; | ||
778 | } | ||
779 | |||
780 | err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); | ||
781 | if (err < 0) | ||
782 | return err; | ||
783 | else if (err == UBI_IO_BITFLIPS) | ||
784 | bitflips = 1; | ||
785 | else if (err == UBI_IO_PEB_EMPTY) | ||
786 | return ubi_scan_add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, | ||
787 | &si->erase); | ||
788 | else if (err == UBI_IO_BAD_EC_HDR) { | ||
789 | /* | ||
790 | * We have to also look at the VID header, possibly it is not | ||
791 | * corrupted. Set %bitflips flag in order to make this PEB be | ||
792 | * moved and EC be re-created. | ||
793 | */ | ||
794 | ec_corr = 1; | ||
795 | ec = UBI_SCAN_UNKNOWN_EC; | ||
796 | bitflips = 1; | ||
797 | } | ||
798 | |||
799 | si->is_empty = 0; | ||
800 | |||
801 | if (!ec_corr) { | ||
802 | /* Make sure UBI version is OK */ | ||
803 | if (ech->version != UBI_VERSION) { | ||
804 | ubi_err("this UBI version is %d, image version is %d", | ||
805 | UBI_VERSION, (int)ech->version); | ||
806 | return -EINVAL; | ||
807 | } | ||
808 | |||
809 | ec = ubi64_to_cpu(ech->ec); | ||
810 | if (ec > UBI_MAX_ERASECOUNTER) { | ||
811 | /* | ||
812 | * Erase counter overflow. The EC headers have 64 bits | ||
813 | * reserved, but we anyway make use of only 31 bit | ||
814 | * values, as this seems to be enough for any existing | ||
815 | * flash. Upgrade UBI and use 64-bit erase counters | ||
816 | * internally. | ||
817 | */ | ||
818 | ubi_err("erase counter overflow, max is %d", | ||
819 | UBI_MAX_ERASECOUNTER); | ||
820 | ubi_dbg_dump_ec_hdr(ech); | ||
821 | return -EINVAL; | ||
822 | } | ||
823 | } | ||
824 | |||
825 | /* OK, we're done with the EC header, let's look at the VID header */ | ||
826 | |||
827 | err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); | ||
828 | if (err < 0) | ||
829 | return err; | ||
830 | else if (err == UBI_IO_BITFLIPS) | ||
831 | bitflips = 1; | ||
832 | else if (err == UBI_IO_BAD_VID_HDR || | ||
833 | (err == UBI_IO_PEB_FREE && ec_corr)) { | ||
834 | /* VID header is corrupted */ | ||
835 | err = ubi_scan_add_to_list(si, pnum, ec, &si->corr); | ||
836 | if (err) | ||
837 | return err; | ||
838 | goto adjust_mean_ec; | ||
839 | } else if (err == UBI_IO_PEB_FREE) { | ||
840 | /* No VID header - the physical eraseblock is free */ | ||
841 | err = ubi_scan_add_to_list(si, pnum, ec, &si->free); | ||
842 | if (err) | ||
843 | return err; | ||
844 | goto adjust_mean_ec; | ||
845 | } | ||
846 | |||
847 | vol_id = ubi32_to_cpu(vidh->vol_id); | ||
848 | if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) { | ||
849 | int lnum = ubi32_to_cpu(vidh->lnum); | ||
850 | |||
851 | /* Unsupported internal volume */ | ||
852 | switch (vidh->compat) { | ||
853 | case UBI_COMPAT_DELETE: | ||
854 | ubi_msg("\"delete\" compatible internal volume %d:%d" | ||
855 | " found, remove it", vol_id, lnum); | ||
856 | err = ubi_scan_add_to_list(si, pnum, ec, &si->corr); | ||
857 | if (err) | ||
858 | return err; | ||
859 | break; | ||
860 | |||
861 | case UBI_COMPAT_RO: | ||
862 | ubi_msg("read-only compatible internal volume %d:%d" | ||
863 | " found, switch to read-only mode", | ||
864 | vol_id, lnum); | ||
865 | ubi->ro_mode = 1; | ||
866 | break; | ||
867 | |||
868 | case UBI_COMPAT_PRESERVE: | ||
869 | ubi_msg("\"preserve\" compatible internal volume %d:%d" | ||
870 | " found", vol_id, lnum); | ||
871 | err = ubi_scan_add_to_list(si, pnum, ec, &si->alien); | ||
872 | if (err) | ||
873 | return err; | ||
874 | si->alien_peb_count += 1; | ||
875 | return 0; | ||
876 | |||
877 | case UBI_COMPAT_REJECT: | ||
878 | ubi_err("incompatible internal volume %d:%d found", | ||
879 | vol_id, lnum); | ||
880 | return -EINVAL; | ||
881 | } | ||
882 | } | ||
883 | |||
884 | /* Both UBI headers seem to be fine */ | ||
885 | err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips); | ||
886 | if (err) | ||
887 | return err; | ||
888 | |||
889 | adjust_mean_ec: | ||
890 | if (!ec_corr) { | ||
891 | if (si->ec_sum + ec < ec) { | ||
892 | commit_to_mean_value(si); | ||
893 | si->ec_sum = 0; | ||
894 | si->ec_count = 0; | ||
895 | } else { | ||
896 | si->ec_sum += ec; | ||
897 | si->ec_count += 1; | ||
898 | } | ||
899 | |||
900 | if (ec > si->max_ec) | ||
901 | si->max_ec = ec; | ||
902 | if (ec < si->min_ec) | ||
903 | si->min_ec = ec; | ||
904 | } | ||
905 | |||
906 | return 0; | ||
907 | } | ||
908 | |||
909 | /** | ||
910 | * ubi_scan - scan an MTD device. | ||
911 | * @ubi: UBI device description object | ||
912 | * | ||
913 | * This function does full scanning of an MTD device and returns complete | ||
914 | * information about it. In case of failure, an error code is returned. | ||
915 | */ | ||
916 | struct ubi_scan_info *ubi_scan(struct ubi_device *ubi) | ||
917 | { | ||
918 | int err, pnum; | ||
919 | struct rb_node *rb1, *rb2; | ||
920 | struct ubi_scan_volume *sv; | ||
921 | struct ubi_scan_leb *seb; | ||
922 | struct ubi_scan_info *si; | ||
923 | |||
924 | si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL); | ||
925 | if (!si) | ||
926 | return ERR_PTR(-ENOMEM); | ||
927 | |||
928 | INIT_LIST_HEAD(&si->corr); | ||
929 | INIT_LIST_HEAD(&si->free); | ||
930 | INIT_LIST_HEAD(&si->erase); | ||
931 | INIT_LIST_HEAD(&si->alien); | ||
932 | si->volumes = RB_ROOT; | ||
933 | si->is_empty = 1; | ||
934 | |||
935 | err = -ENOMEM; | ||
936 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
937 | if (!ech) | ||
938 | goto out_si; | ||
939 | |||
940 | vidh = ubi_zalloc_vid_hdr(ubi); | ||
941 | if (!vidh) | ||
942 | goto out_ech; | ||
943 | |||
944 | for (pnum = 0; pnum < ubi->peb_count; pnum++) { | ||
945 | cond_resched(); | ||
946 | |||
947 | dbg_msg("process PEB %d", pnum); | ||
948 | err = process_eb(ubi, si, pnum); | ||
949 | if (err < 0) | ||
950 | goto out_vidh; | ||
951 | } | ||
952 | |||
953 | dbg_msg("scanning is finished"); | ||
954 | |||
955 | /* Finish mean erase counter calculations */ | ||
956 | if (si->ec_count) | ||
957 | commit_to_mean_value(si); | ||
958 | |||
959 | if (si->is_empty) | ||
960 | ubi_msg("empty MTD device detected"); | ||
961 | |||
962 | /* | ||
963 | * In case of unknown erase counter we use the mean erase counter | ||
964 | * value. | ||
965 | */ | ||
966 | ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { | ||
967 | ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) | ||
968 | if (seb->ec == UBI_SCAN_UNKNOWN_EC) | ||
969 | seb->ec = si->mean_ec; | ||
970 | } | ||
971 | |||
972 | list_for_each_entry(seb, &si->free, u.list) { | ||
973 | if (seb->ec == UBI_SCAN_UNKNOWN_EC) | ||
974 | seb->ec = si->mean_ec; | ||
975 | } | ||
976 | |||
977 | list_for_each_entry(seb, &si->corr, u.list) | ||
978 | if (seb->ec == UBI_SCAN_UNKNOWN_EC) | ||
979 | seb->ec = si->mean_ec; | ||
980 | |||
981 | list_for_each_entry(seb, &si->erase, u.list) | ||
982 | if (seb->ec == UBI_SCAN_UNKNOWN_EC) | ||
983 | seb->ec = si->mean_ec; | ||
984 | |||
985 | err = paranoid_check_si(ubi, si); | ||
986 | if (err) { | ||
987 | if (err > 0) | ||
988 | err = -EINVAL; | ||
989 | goto out_vidh; | ||
990 | } | ||
991 | |||
992 | ubi_free_vid_hdr(ubi, vidh); | ||
993 | kfree(ech); | ||
994 | |||
995 | return si; | ||
996 | |||
997 | out_vidh: | ||
998 | ubi_free_vid_hdr(ubi, vidh); | ||
999 | out_ech: | ||
1000 | kfree(ech); | ||
1001 | out_si: | ||
1002 | ubi_scan_destroy_si(si); | ||
1003 | return ERR_PTR(err); | ||
1004 | } | ||
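The expected calling convention, sketched with a hypothetical caller (real callers live elsewhere in the UBI initialization code): scan once, walk the resulting trees and lists, then destroy the scanning information.

```c
/* Hypothetical caller sketch: consume and release ubi_scan() results */
static int scan_example(struct ubi_device *ubi)
{
	struct ubi_scan_info *si;
	struct ubi_scan_volume *sv;
	struct rb_node *rb1;

	si = ubi_scan(ubi);
	if (IS_ERR(si))
		return PTR_ERR(si);

	/* Walk the volume RB-tree built during scanning */
	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
		dbg_msg("volume %d: %d LEBs found", sv->vol_id, sv->leb_count);

	ubi_scan_destroy_si(si);
	return 0;
}
```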
1005 | |||
1006 | /** | ||
1007 | * destroy_sv - free the scanning volume information | ||
1008 | * @sv: scanning volume information | ||
1009 | * | ||
1010 | * This function destroys the volume RB-tree (@sv->root) and the scanning | ||
1011 | * volume information. | ||
1012 | */ | ||
1013 | static void destroy_sv(struct ubi_scan_volume *sv) | ||
1014 | { | ||
1015 | struct ubi_scan_leb *seb; | ||
1016 | struct rb_node *this = sv->root.rb_node; | ||
1017 | |||
1018 | while (this) { | ||
1019 | if (this->rb_left) | ||
1020 | this = this->rb_left; | ||
1021 | else if (this->rb_right) | ||
1022 | this = this->rb_right; | ||
1023 | else { | ||
1024 | seb = rb_entry(this, struct ubi_scan_leb, u.rb); | ||
1025 | this = rb_parent(this); | ||
1026 | if (this) { | ||
1027 | if (this->rb_left == &seb->u.rb) | ||
1028 | this->rb_left = NULL; | ||
1029 | else | ||
1030 | this->rb_right = NULL; | ||
1031 | } | ||
1032 | |||
1033 | kfree(seb); | ||
1034 | } | ||
1035 | } | ||
1036 | kfree(sv); | ||
1037 | } | ||
1038 | |||
1039 | /** | ||
1040 | * ubi_scan_destroy_si - destroy scanning information. | ||
1041 | * @si: scanning information | ||
1042 | */ | ||
1043 | void ubi_scan_destroy_si(struct ubi_scan_info *si) | ||
1044 | { | ||
1045 | struct ubi_scan_leb *seb, *seb_tmp; | ||
1046 | struct ubi_scan_volume *sv; | ||
1047 | struct rb_node *rb; | ||
1048 | |||
1049 | list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) { | ||
1050 | list_del(&seb->u.list); | ||
1051 | kfree(seb); | ||
1052 | } | ||
1053 | list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) { | ||
1054 | list_del(&seb->u.list); | ||
1055 | kfree(seb); | ||
1056 | } | ||
1057 | list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) { | ||
1058 | list_del(&seb->u.list); | ||
1059 | kfree(seb); | ||
1060 | } | ||
1061 | list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) { | ||
1062 | list_del(&seb->u.list); | ||
1063 | kfree(seb); | ||
1064 | } | ||
1065 | |||
1066 | /* Destroy the volume RB-tree */ | ||
1067 | rb = si->volumes.rb_node; | ||
1068 | while (rb) { | ||
1069 | if (rb->rb_left) | ||
1070 | rb = rb->rb_left; | ||
1071 | else if (rb->rb_right) | ||
1072 | rb = rb->rb_right; | ||
1073 | else { | ||
1074 | sv = rb_entry(rb, struct ubi_scan_volume, rb); | ||
1075 | |||
1076 | rb = rb_parent(rb); | ||
1077 | if (rb) { | ||
1078 | if (rb->rb_left == &sv->rb) | ||
1079 | rb->rb_left = NULL; | ||
1080 | else | ||
1081 | rb->rb_right = NULL; | ||
1082 | } | ||
1083 | |||
1084 | destroy_sv(sv); | ||
1085 | } | ||
1086 | } | ||
1087 | |||
1088 | kfree(si); | ||
1089 | } | ||
1090 | |||
1091 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | ||
1092 | |||
1093 | /** | ||
1094 | * paranoid_check_si - check if the scanning information is correct and | ||
1095 | * consistent. | ||
1096 | * @ubi: UBI device description object | ||
1097 | * @si: scanning information | ||
1098 | * | ||
1099 | * This function returns zero if the scanning information is all right, %1 if | ||
1100 | * not and a negative error code if an error occurred. | ||
1101 | */ | ||
1102 | static int paranoid_check_si(const struct ubi_device *ubi, | ||
1103 | struct ubi_scan_info *si) | ||
1104 | { | ||
1105 | int pnum, err, vols_found = 0; | ||
1106 | struct rb_node *rb1, *rb2; | ||
1107 | struct ubi_scan_volume *sv; | ||
1108 | struct ubi_scan_leb *seb, *last_seb; | ||
1109 | uint8_t *buf; | ||
1110 | |||
1111 | /* | ||
1112 | * First, check that the scanning information is OK. | ||
1113 | */ | ||
1114 | ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { | ||
1115 | int leb_count = 0; | ||
1116 | |||
1117 | cond_resched(); | ||
1118 | |||
1119 | vols_found += 1; | ||
1120 | |||
1121 | if (si->is_empty) { | ||
1122 | ubi_err("bad is_empty flag"); | ||
1123 | goto bad_sv; | ||
1124 | } | ||
1125 | |||
1126 | if (sv->vol_id < 0 || sv->highest_lnum < 0 || | ||
1127 | sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 || | ||
1128 | sv->data_pad < 0 || sv->last_data_size < 0) { | ||
1129 | ubi_err("negative values"); | ||
1130 | goto bad_sv; | ||
1131 | } | ||
1132 | |||
1133 | if (sv->vol_id >= UBI_MAX_VOLUMES && | ||
1134 | sv->vol_id < UBI_INTERNAL_VOL_START) { | ||
1135 | ubi_err("bad vol_id"); | ||
1136 | goto bad_sv; | ||
1137 | } | ||
1138 | |||
1139 | if (sv->vol_id > si->highest_vol_id) { | ||
1140 | ubi_err("highest_vol_id is %d, but vol_id %d is there", | ||
1141 | si->highest_vol_id, sv->vol_id); | ||
1142 | goto out; | ||
1143 | } | ||
1144 | |||
1145 | if (sv->vol_type != UBI_DYNAMIC_VOLUME && | ||
1146 | sv->vol_type != UBI_STATIC_VOLUME) { | ||
1147 | ubi_err("bad vol_type"); | ||
1148 | goto bad_sv; | ||
1149 | } | ||
1150 | |||
1151 | if (sv->data_pad > ubi->leb_size / 2) { | ||
1152 | ubi_err("bad data_pad"); | ||
1153 | goto bad_sv; | ||
1154 | } | ||
1155 | |||
1156 | last_seb = NULL; | ||
1157 | ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { | ||
1158 | cond_resched(); | ||
1159 | |||
1160 | last_seb = seb; | ||
1161 | leb_count += 1; | ||
1162 | |||
1163 | if (seb->pnum < 0 || seb->ec < 0) { | ||
1164 | ubi_err("negative values"); | ||
1165 | goto bad_seb; | ||
1166 | } | ||
1167 | |||
1168 | if (seb->ec < si->min_ec) { | ||
1169 | ubi_err("bad si->min_ec (%d), %d found", | ||
1170 | si->min_ec, seb->ec); | ||
1171 | goto bad_seb; | ||
1172 | } | ||
1173 | |||
1174 | if (seb->ec > si->max_ec) { | ||
1175 | ubi_err("bad si->max_ec (%d), %d found", | ||
1176 | si->max_ec, seb->ec); | ||
1177 | goto bad_seb; | ||
1178 | } | ||
1179 | |||
1180 | if (seb->pnum >= ubi->peb_count) { | ||
1181 | ubi_err("too high PEB number %d, total PEBs %d", | ||
1182 | seb->pnum, ubi->peb_count); | ||
1183 | goto bad_seb; | ||
1184 | } | ||
1185 | |||
1186 | if (sv->vol_type == UBI_STATIC_VOLUME) { | ||
1187 | if (seb->lnum >= sv->used_ebs) { | ||
1188 | ubi_err("bad lnum or used_ebs"); | ||
1189 | goto bad_seb; | ||
1190 | } | ||
1191 | } else { | ||
1192 | if (sv->used_ebs != 0) { | ||
1193 | ubi_err("non-zero used_ebs"); | ||
1194 | goto bad_seb; | ||
1195 | } | ||
1196 | } | ||
1197 | |||
1198 | if (seb->lnum > sv->highest_lnum) { | ||
1199 | ubi_err("incorrect highest_lnum or lnum"); | ||
1200 | goto bad_seb; | ||
1201 | } | ||
1202 | } | ||
1203 | |||
1204 | if (sv->leb_count != leb_count) { | ||
1205 | ubi_err("bad leb_count, %d objects in the tree", | ||
1206 | leb_count); | ||
1207 | goto bad_sv; | ||
1208 | } | ||
1209 | |||
1210 | if (!last_seb) | ||
1211 | continue; | ||
1212 | |||
1213 | seb = last_seb; | ||
1214 | |||
1215 | if (seb->lnum != sv->highest_lnum) { | ||
1216 | ubi_err("bad highest_lnum"); | ||
1217 | goto bad_seb; | ||
1218 | } | ||
1219 | } | ||
1220 | |||
1221 | if (vols_found != si->vols_found) { | ||
1222 | ubi_err("bad si->vols_found %d, should be %d", | ||
1223 | si->vols_found, vols_found); | ||
1224 | goto out; | ||
1225 | } | ||
1226 | |||
1227 | /* Check that scanning information is correct */ | ||
1228 | ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { | ||
1229 | last_seb = NULL; | ||
1230 | ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { | ||
1231 | int vol_type; | ||
1232 | |||
1233 | cond_resched(); | ||
1234 | |||
1235 | last_seb = seb; | ||
1236 | |||
1237 | err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1); | ||
1238 | if (err && err != UBI_IO_BITFLIPS) { | ||
1239 | ubi_err("VID header is not OK (%d)", err); | ||
1240 | if (err > 0) | ||
1241 | err = -EIO; | ||
1242 | return err; | ||
1243 | } | ||
1244 | |||
1245 | vol_type = vidh->vol_type == UBI_VID_DYNAMIC ? | ||
1246 | UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; | ||
1247 | if (sv->vol_type != vol_type) { | ||
1248 | ubi_err("bad vol_type"); | ||
1249 | goto bad_vid_hdr; | ||
1250 | } | ||
1251 | |||
1252 | if (seb->sqnum != ubi64_to_cpu(vidh->sqnum)) { | ||
1253 | ubi_err("bad sqnum %llu", seb->sqnum); | ||
1254 | goto bad_vid_hdr; | ||
1255 | } | ||
1256 | |||
1257 | if (sv->vol_id != ubi32_to_cpu(vidh->vol_id)) { | ||
1258 | ubi_err("bad vol_id %d", sv->vol_id); | ||
1259 | goto bad_vid_hdr; | ||
1260 | } | ||
1261 | |||
1262 | if (sv->compat != vidh->compat) { | ||
1263 | ubi_err("bad compat %d", vidh->compat); | ||
1264 | goto bad_vid_hdr; | ||
1265 | } | ||
1266 | |||
1267 | if (seb->lnum != ubi32_to_cpu(vidh->lnum)) { | ||
1268 | ubi_err("bad lnum %d", seb->lnum); | ||
1269 | goto bad_vid_hdr; | ||
1270 | } | ||
1271 | |||
1272 | if (sv->used_ebs != ubi32_to_cpu(vidh->used_ebs)) { | ||
1273 | ubi_err("bad used_ebs %d", sv->used_ebs); | ||
1274 | goto bad_vid_hdr; | ||
1275 | } | ||
1276 | |||
1277 | if (sv->data_pad != ubi32_to_cpu(vidh->data_pad)) { | ||
1278 | ubi_err("bad data_pad %d", sv->data_pad); | ||
1279 | goto bad_vid_hdr; | ||
1280 | } | ||
1281 | |||
1282 | if (seb->leb_ver != ubi32_to_cpu(vidh->leb_ver)) { | ||
1283 | ubi_err("bad leb_ver %u", seb->leb_ver); | ||
1284 | goto bad_vid_hdr; | ||
1285 | } | ||
1286 | } | ||
1287 | |||
1288 | if (!last_seb) | ||
1289 | continue; | ||
1290 | |||
1291 | if (sv->highest_lnum != ubi32_to_cpu(vidh->lnum)) { | ||
1292 | ubi_err("bad highest_lnum %d", sv->highest_lnum); | ||
1293 | goto bad_vid_hdr; | ||
1294 | } | ||
1295 | |||
1296 | if (sv->last_data_size != ubi32_to_cpu(vidh->data_size)) { | ||
1297 | ubi_err("bad last_data_size %d", sv->last_data_size); | ||
1298 | goto bad_vid_hdr; | ||
1299 | } | ||
1300 | } | ||
1301 | |||
1302 | /* | ||
1303 | * Make sure that all the physical eraseblocks are in one of the lists | ||
1304 | * or trees. | ||
1305 | */ | ||
1306 | buf = kmalloc(ubi->peb_count, GFP_KERNEL); | ||
1307 | if (!buf) | ||
1308 | return -ENOMEM; | ||
1309 | |||
1310 | memset(buf, 1, ubi->peb_count); | ||
1311 | for (pnum = 0; pnum < ubi->peb_count; pnum++) { | ||
1312 | err = ubi_io_is_bad(ubi, pnum); | ||
1313 | if (err < 0) { | ||
1314 | kfree(buf); | ||
1315 | return err; | ||
1316 | } else if (err) | ||
1317 | buf[pnum] = 0; | ||
1317 | } | ||
1318 | |||
1319 | ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) | ||
1320 | ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) | ||
1321 | buf[seb->pnum] = 0; | ||
1322 | |||
1323 | list_for_each_entry(seb, &si->free, u.list) | ||
1324 | buf[seb->pnum] = 0; | ||
1325 | |||
1326 | list_for_each_entry(seb, &si->corr, u.list) | ||
1327 | buf[seb->pnum] = 0; | ||
1328 | |||
1329 | list_for_each_entry(seb, &si->erase, u.list) | ||
1330 | buf[seb->pnum] = 0; | ||
1331 | |||
1332 | list_for_each_entry(seb, &si->alien, u.list) | ||
1333 | buf[seb->pnum] = 0; | ||
1334 | |||
1335 | err = 0; | ||
1336 | for (pnum = 0; pnum < ubi->peb_count; pnum++) | ||
1337 | if (buf[pnum]) { | ||
1338 | ubi_err("PEB %d is not referred", pnum); | ||
1339 | err = 1; | ||
1340 | } | ||
1341 | |||
1342 | kfree(buf); | ||
1343 | if (err) | ||
1344 | goto out; | ||
1345 | return 0; | ||
1346 | |||
1347 | bad_seb: | ||
1348 | ubi_err("bad scanning information about LEB %d", seb->lnum); | ||
1349 | ubi_dbg_dump_seb(seb, 0); | ||
1350 | ubi_dbg_dump_sv(sv); | ||
1351 | goto out; | ||
1352 | |||
1353 | bad_sv: | ||
1354 | ubi_err("bad scanning information about volume %d", sv->vol_id); | ||
1355 | ubi_dbg_dump_sv(sv); | ||
1356 | goto out; | ||
1357 | |||
1358 | bad_vid_hdr: | ||
1359 | ubi_err("bad scanning information about volume %d", sv->vol_id); | ||
1360 | ubi_dbg_dump_sv(sv); | ||
1361 | ubi_dbg_dump_vid_hdr(vidh); | ||
1362 | |||
1363 | out: | ||
1364 | ubi_dbg_dump_stack(); | ||
1365 | return 1; | ||
1366 | } | ||
1367 | |||
1368 | #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ | ||
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h new file mode 100644 index 000000000000..3949f6192c76 --- /dev/null +++ b/drivers/mtd/ubi/scan.h | |||
@@ -0,0 +1,167 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
12 | * the GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
19 | */ | ||
20 | |||
21 | #ifndef __UBI_SCAN_H__ | ||
22 | #define __UBI_SCAN_H__ | ||
23 | |||
24 | /* The erase counter value for this physical eraseblock is unknown */ | ||
25 | #define UBI_SCAN_UNKNOWN_EC (-1) | ||
26 | |||
27 | /** | ||
28 | * struct ubi_scan_leb - scanning information about a physical eraseblock. | ||
29 | * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown) | ||
30 | * @pnum: physical eraseblock number | ||
31 | * @lnum: logical eraseblock number | ||
32 | * @scrub: if this physical eraseblock needs scrubbing | ||
33 | * @sqnum: sequence number | ||
34 | * @u: union of the RB-tree and @list links | ||
35 | * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects | ||
36 | * @u.list: link in one of the eraseblock lists | ||
37 | * @leb_ver: logical eraseblock version (obsolete) | ||
38 | * | ||
39 | * One object of this type is allocated for each physical eraseblock during | ||
40 | * scanning. | ||
41 | */ | ||
42 | struct ubi_scan_leb { | ||
43 | int ec; | ||
44 | int pnum; | ||
45 | int lnum; | ||
46 | int scrub; | ||
47 | unsigned long long sqnum; | ||
48 | union { | ||
49 | struct rb_node rb; | ||
50 | struct list_head list; | ||
51 | } u; | ||
52 | uint32_t leb_ver; | ||
53 | }; | ||
54 | |||
55 | /** | ||
56 | * struct ubi_scan_volume - scanning information about a volume. | ||
57 | * @vol_id: volume ID | ||
58 | * @highest_lnum: highest logical eraseblock number in this volume | ||
59 | * @leb_count: number of logical eraseblocks in this volume | ||
60 | * @vol_type: volume type | ||
61 | * @used_ebs: number of used logical eraseblocks in this volume (only for | ||
62 | * static volumes) | ||
63 | * @last_data_size: amount of data in the last logical eraseblock of this | ||
64 | * volume (always equivalent to the usable logical eraseblock size in case of | ||
65 | * dynamic volumes) | ||
66 | * @data_pad: how many bytes at the end of logical eraseblocks of this volume | ||
67 | * are not used (due to volume alignment) | ||
68 | * @compat: compatibility flags of this volume | ||
69 | * @rb: link in the volume RB-tree | ||
70 | * @root: root of the RB-tree containing all the eraseblocks belonging to this | ||
71 | * volume (&struct ubi_scan_leb objects) | ||
72 | * | ||
73 | * One object of this type is allocated for each volume during scanning. | ||
74 | */ | ||
75 | struct ubi_scan_volume { | ||
76 | int vol_id; | ||
77 | int highest_lnum; | ||
78 | int leb_count; | ||
79 | int vol_type; | ||
80 | int used_ebs; | ||
81 | int last_data_size; | ||
82 | int data_pad; | ||
83 | int compat; | ||
84 | struct rb_node rb; | ||
85 | struct rb_root root; | ||
86 | }; | ||
87 | |||
88 | /** | ||
89 | * struct ubi_scan_info - UBI scanning information. | ||
90 | * @volumes: root of the volume RB-tree | ||
91 | * @corr: list of corrupted physical eraseblocks | ||
92 | * @free: list of free physical eraseblocks | ||
93 | * @erase: list of physical eraseblocks which have to be erased | ||
94 | * @alien: list of physical eraseblocks which should not be used by UBI (e.g., | ||
95 | * those belonging to "preserve"-compatible internal volumes) | ||
96 | * @bad_peb_count: count of bad physical eraseblocks | ||
97 | * @vols_found: number of volumes found during scanning | ||
98 | * @highest_vol_id: highest volume ID | ||
99 | * @alien_peb_count: count of physical eraseblocks in the @alien list | ||
100 | * @is_empty: flag indicating whether the MTD device is empty or not | ||
101 | * @min_ec: lowest erase counter value | ||
102 | * @max_ec: highest erase counter value | ||
103 | * @max_sqnum: highest sequence number value | ||
104 | * @mean_ec: mean erase counter value | ||
105 | * @ec_sum: a temporary variable used when calculating @mean_ec | ||
106 | * @ec_count: a temporary variable used when calculating @mean_ec | ||
107 | * | ||
108 | * This data structure contains the results of scanning and may be used by | ||
109 | * other UBI units to build the final UBI data structures, to perform further | ||
110 | * error recovery, and so on. | ||
111 | */ | ||
112 | struct ubi_scan_info { | ||
113 | struct rb_root volumes; | ||
114 | struct list_head corr; | ||
115 | struct list_head free; | ||
116 | struct list_head erase; | ||
117 | struct list_head alien; | ||
118 | int bad_peb_count; | ||
119 | int vols_found; | ||
120 | int highest_vol_id; | ||
121 | int alien_peb_count; | ||
122 | int is_empty; | ||
123 | int min_ec; | ||
124 | int max_ec; | ||
125 | unsigned long long max_sqnum; | ||
126 | int mean_ec; | ||
127 | int ec_sum; | ||
128 | int ec_count; | ||
129 | }; | ||
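The @ec_sum and @ec_count fields are pure scratch space for deriving @mean_ec once scanning has finished. A minimal sketch of that final step, assuming the per-PEB accumulation happens in scan.c (not shown in this hunk); finish_mean_ec() is a hypothetical helper name:

	/* Hypothetical helper: derive the mean erase counter after the scan.
	 * ec_sum has accumulated the EC of every PEB with a known counter,
	 * and ec_count is how many PEBs contributed. */
	static void finish_mean_ec(struct ubi_scan_info *si)
	{
		if (si->ec_count > 0)
			si->mean_ec = si->ec_sum / si->ec_count;
	}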
130 | |||
131 | struct ubi_device; | ||
132 | struct ubi_vid_hdr; | ||
133 | |||
134 | /* | ||
135 | * ubi_scan_move_to_list - move a physical eraseblock from the volume tree to a | ||
136 | * list. | ||
137 | * | ||
138 | * @sv: volume scanning information | ||
139 | * @seb: scanning eraseblock information | ||
140 | * @list: the list to move to | ||
141 | */ | ||
142 | static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv, | ||
143 | struct ubi_scan_leb *seb, | ||
144 | struct list_head *list) | ||
145 | { | ||
146 | rb_erase(&seb->u.rb, &sv->root); | ||
147 | list_add_tail(&seb->u.list, list); | ||
148 | } | ||
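A plausible call pattern for this helper, built only from the lookup functions declared below (illustrative; the real call sites are in scan.c):

	/* Sketch: demote a scanned LEB whose erase counter is unknown to the
	 * erase list, so that it gets a fresh counter when erased. */
	struct ubi_scan_leb *seb = ubi_scan_find_seb(sv, lnum);

	if (seb && seb->ec == UBI_SCAN_UNKNOWN_EC)
		ubi_scan_move_to_list(sv, seb, &si->erase);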
149 | |||
150 | int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec, | ||
151 | struct list_head *list); | ||
152 | int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si, | ||
153 | int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, | ||
154 | int bitflips); | ||
155 | struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si, | ||
156 | int vol_id); | ||
157 | struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv, | ||
158 | int lnum); | ||
159 | void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv); | ||
160 | struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi, | ||
161 | struct ubi_scan_info *si); | ||
162 | int ubi_scan_erase_peb(const struct ubi_device *ubi, | ||
163 | const struct ubi_scan_info *si, int pnum, int ec); | ||
164 | struct ubi_scan_info *ubi_scan(struct ubi_device *ubi); | ||
165 | void ubi_scan_destroy_si(struct ubi_scan_info *si); | ||
166 | |||
167 | #endif /* !__UBI_SCAN_H__ */ | ||
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h new file mode 100644 index 000000000000..feb647f108f0 --- /dev/null +++ b/drivers/mtd/ubi/ubi.h | |||
@@ -0,0 +1,535 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * Copyright (c) Nokia Corporation, 2006, 2007 | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
13 | * the GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | * | ||
19 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
20 | */ | ||
21 | |||
22 | #ifndef __UBI_UBI_H__ | ||
23 | #define __UBI_UBI_H__ | ||
24 | |||
25 | #include <linux/init.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/list.h> | ||
28 | #include <linux/rbtree.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/wait.h> | ||
31 | #include <linux/mutex.h> | ||
32 | #include <linux/rwsem.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/fs.h> | ||
35 | #include <linux/cdev.h> | ||
36 | #include <linux/device.h> | ||
37 | #include <linux/string.h> | ||
38 | #include <linux/mtd/mtd.h> | ||
39 | |||
40 | #include <mtd/ubi-header.h> | ||
41 | #include <linux/mtd/ubi.h> | ||
42 | |||
43 | #include "scan.h" | ||
44 | #include "debug.h" | ||
45 | |||
46 | /* Maximum number of supported UBI devices */ | ||
47 | #define UBI_MAX_DEVICES 32 | ||
48 | |||
49 | /* UBI name used for character devices, sysfs, etc */ | ||
50 | #define UBI_NAME_STR "ubi" | ||
51 | |||
52 | /* Normal UBI messages */ | ||
53 | #define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__) | ||
54 | /* UBI warning messages */ | ||
55 | #define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \ | ||
56 | __FUNCTION__, ##__VA_ARGS__) | ||
57 | /* UBI error messages */ | ||
58 | #define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \ | ||
59 | __FUNCTION__, ##__VA_ARGS__) | ||
60 | |||
61 | /* Minimum number of PEBs reserved for bad PEB handling */ | ||
62 | #define MIN_RESEVED_PEBS 2 | ||
63 | |||
64 | /* Background thread name pattern */ | ||
65 | #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd" | ||
66 | |||
67 | /* This marker in the EBA table means that the LEB is un-mapped */ | ||
68 | #define UBI_LEB_UNMAPPED -1 | ||
69 | |||
70 | /* | ||
71 | * In case of errors, UBI tries to repeat the operation several times before | ||
72 | * returning an error. The constant below defines how many times UBI retries. | ||
73 | */ | ||
74 | #define UBI_IO_RETRIES 3 | ||
75 | |||
76 | /* | ||
77 | * Error codes returned by the I/O unit. | ||
78 | * | ||
79 | * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only | ||
80 | * 0xFF bytes | ||
81 | * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a | ||
82 | * valid erase counter header, and the rest are %0xFF bytes | ||
83 | * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC) | ||
84 | * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or | ||
85 | * CRC) | ||
86 | * UBI_IO_BITFLIPS: bit-flips were detected and corrected | ||
87 | */ | ||
88 | enum { | ||
89 | UBI_IO_PEB_EMPTY = 1, | ||
90 | UBI_IO_PEB_FREE, | ||
91 | UBI_IO_BAD_EC_HDR, | ||
92 | UBI_IO_BAD_VID_HDR, | ||
93 | UBI_IO_BITFLIPS | ||
94 | }; | ||
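A hedged sketch of how a caller might consume these codes when reading a VID header via ubi_io_read_vid_hdr() (declared below); classify_vid_read() is a hypothetical wrapper, not a function from this file:

	/* Sketch: treat corrected bit-flips as success but remember to
	 * schedule the PEB for scrubbing; pass everything else through. */
	static int classify_vid_read(const struct ubi_device *ubi, int pnum,
				     struct ubi_vid_hdr *vid_hdr, int *scrub)
	{
		int err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 0);

		if (err == UBI_IO_BITFLIPS) {
			*scrub = 1;
			return 0;
		}
		return err;	/* 0, another UBI_IO_* code, or -errno */
	}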
95 | |||
96 | extern int ubi_devices_cnt; | ||
97 | extern struct ubi_device *ubi_devices[]; | ||
98 | |||
99 | struct ubi_volume_desc; | ||
100 | |||
101 | /** | ||
102 | * struct ubi_volume - UBI volume description data structure. | ||
103 | * @dev: device object to make use of the Linux device model | ||
104 | * @cdev: character device object to create character device | ||
105 | * @ubi: reference to the UBI device description object | ||
106 | * @vol_id: volume ID | ||
107 | * @readers: number of users holding this volume in read-only mode | ||
108 | * @writers: number of users holding this volume in read-write mode | ||
109 | * @exclusive: whether somebody holds this volume in exclusive mode | ||
110 | * @removed: if the volume was removed | ||
111 | * @checked: if this static volume was checked | ||
112 | * | ||
113 | * @reserved_pebs: how many physical eraseblocks are reserved for this volume | ||
114 | * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) | ||
115 | * @usable_leb_size: logical eraseblock size without padding | ||
116 | * @used_ebs: how many logical eraseblocks in this volume contain data | ||
117 | * @last_eb_bytes: how many bytes are stored in the last logical eraseblock | ||
118 | * @used_bytes: how many bytes of data this volume contains | ||
119 | * @upd_marker: non-zero if the update marker is set for this volume | ||
120 | * @corrupted: non-zero if the volume is corrupted (static volumes only) | ||
121 | * @alignment: volume alignment | ||
122 | * @data_pad: how many bytes are not used at the end of physical eraseblocks to | ||
123 | * satisfy the requested alignment | ||
124 | * @name_len: volume name length | ||
125 | * @name: volume name | ||
126 | * | ||
127 | * @updating: whether the volume is being updated | ||
128 | * @upd_ebs: how many eraseblocks are expected to be updated | ||
129 | * @upd_bytes: how many bytes are expected to be received | ||
130 | * @upd_received: how many update bytes were already received | ||
131 | * @upd_buf: update buffer which is used to collect update data | ||
132 | * | ||
133 | * @eba_tbl: EBA table of this volume (LEB->PEB mapping) | ||
134 | * | ||
135 | * @gluebi_desc: gluebi UBI volume descriptor | ||
136 | * @gluebi_refcount: reference count of the gluebi MTD device | ||
137 | * @gluebi_mtd: MTD device description object of the gluebi MTD device | ||
138 | * | ||
139 | * The @corrupted field indicates that the volume's contents are corrupted. | ||
140 | * Since UBI protects only static volumes, this field is not relevant to | ||
141 | * dynamic volumes - it is the user's responsibility to ensure their data | ||
142 | * integrity. | ||
143 | * | ||
144 | * The @upd_marker flag indicates that this volume is either being updated at | ||
145 | * the moment or is damaged because of an unclean reboot. | ||
146 | */ | ||
147 | struct ubi_volume { | ||
148 | struct device dev; | ||
149 | struct cdev cdev; | ||
150 | struct ubi_device *ubi; | ||
151 | int vol_id; | ||
152 | int readers; | ||
153 | int writers; | ||
154 | int exclusive; | ||
155 | int removed; | ||
156 | int checked; | ||
157 | |||
158 | int reserved_pebs; | ||
159 | int vol_type; | ||
160 | int usable_leb_size; | ||
161 | int used_ebs; | ||
162 | int last_eb_bytes; | ||
163 | long long used_bytes; | ||
164 | int upd_marker; | ||
165 | int corrupted; | ||
166 | int alignment; | ||
167 | int data_pad; | ||
168 | int name_len; | ||
169 | char name[UBI_VOL_NAME_MAX+1]; | ||
170 | |||
171 | int updating; | ||
172 | int upd_ebs; | ||
173 | long long upd_bytes; | ||
174 | long long upd_received; | ||
175 | void *upd_buf; | ||
176 | |||
177 | int *eba_tbl; | ||
178 | |||
179 | #ifdef CONFIG_MTD_UBI_GLUEBI | ||
180 | /* Gluebi-related stuff may be compiled out */ | ||
181 | struct ubi_volume_desc *gluebi_desc; | ||
182 | int gluebi_refcount; | ||
183 | struct mtd_info gluebi_mtd; | ||
184 | #endif | ||
185 | }; | ||
186 | |||
187 | /** | ||
188 | * struct ubi_volume_desc - descriptor of the UBI volume returned when it is | ||
189 | * opened. | ||
190 | * @vol: reference to the corresponding volume description object | ||
191 | * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE) | ||
192 | */ | ||
193 | struct ubi_volume_desc { | ||
194 | struct ubi_volume *vol; | ||
195 | int mode; | ||
196 | }; | ||
197 | |||
198 | struct ubi_wl_entry; | ||
199 | |||
200 | /** | ||
201 | * struct ubi_device - UBI device description structure | ||
202 | * @dev: class device object to use the Linux device model | ||
203 | * @cdev: character device object to create character device | ||
204 | * @ubi_num: UBI device number | ||
205 | * @ubi_name: UBI device name | ||
206 | * @major: character device major number | ||
207 | * @vol_count: number of volumes in this UBI device | ||
208 | * @volumes: volumes of this UBI device | ||
209 | * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, @beb_rsvd_pebs, | ||
210 | * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers, | ||
211 | * @vol->writers, @vol->exclusive, @vol->removed and @vol->eba_tbl (the | ||
212 | * LEB->PEB mapping). | ||
213 | * | ||
214 | * @rsvd_pebs: count of reserved physical eraseblocks | ||
215 | * @avail_pebs: count of available physical eraseblocks | ||
216 | * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB | ||
217 | * handling | ||
218 | * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling | ||
219 | * | ||
220 | * @vtbl_slots: how many slots are available in the volume table | ||
221 | * @vtbl_size: size of the volume table in bytes | ||
222 | * @vtbl: in-RAM volume table copy | ||
223 | * | ||
224 | * @max_ec: current highest erase counter value | ||
225 | * @mean_ec: current mean erase counter value | ||
226 | * | ||
227 | * @global_sqnum: global sequence number | ||
228 | * @ltree_lock: protects the lock tree and @global_sqnum | ||
229 | * @ltree: the lock tree | ||
230 | * @vtbl_mutex: protects on-flash volume table | ||
231 | * | ||
232 | * @used: RB-tree of used physical eraseblocks | ||
233 | * @free: RB-tree of free physical eraseblocks | ||
234 | * @scrub: RB-tree of physical eraseblocks which need scrubbing | ||
235 | * @prot: protection trees | ||
236 | * @prot.pnum: protection tree indexed by physical eraseblock numbers | ||
237 | * @prot.aec: protection tree indexed by absolute erase counter value | ||
238 | * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, | ||
239 | * @move_to, @move_to_put, @erase_pending, @wl_scheduled, and @works | ||
240 | * fields | ||
241 | * @wl_scheduled: non-zero if the wear-leveling was scheduled | ||
242 | * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any | ||
243 | * physical eraseblock | ||
244 | * @abs_ec: absolute erase counter | ||
245 | * @move_from: physical eraseblock from where the data is being moved | ||
246 | * @move_to: physical eraseblock where the data is being moved to | ||
247 | * @move_from_put: if the "from" PEB was put | ||
248 | * @move_to_put: if the "to" PEB was put | ||
249 | * @works: list of pending works | ||
250 | * @works_count: count of pending works | ||
251 | * @bgt_thread: background thread description object | ||
252 | * @thread_enabled: if the background thread is enabled | ||
253 | * @bgt_name: background thread name | ||
254 | * | ||
255 | * @flash_size: underlying MTD device size (in bytes) | ||
256 | * @peb_count: count of physical eraseblocks on the MTD device | ||
257 | * @peb_size: physical eraseblock size | ||
258 | * @bad_peb_count: count of bad physical eraseblocks | ||
259 | * @good_peb_count: count of good physical eraseblocks | ||
260 | * @min_io_size: minimal input/output unit size of the underlying MTD device | ||
261 | * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers | ||
262 | * @ro_mode: if the UBI device is in read-only mode | ||
263 | * @leb_size: logical eraseblock size | ||
264 | * @leb_start: starting offset of logical eraseblocks within physical | ||
265 | * eraseblocks | ||
266 | * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size | ||
267 | * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size | ||
268 | * @vid_hdr_offset: starting offset of the volume identifier header (might be | ||
269 | * unaligned) | ||
270 | * @vid_hdr_aloffset: starting offset of the VID header aligned to | ||
271 | * @hdrs_min_io_size | ||
272 | * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset | ||
273 | * @bad_allowed: whether the MTD device may contain bad physical | ||
274 | * eraseblocks | ||
275 | * @mtd: MTD device descriptor | ||
276 | */ | ||
277 | struct ubi_device { | ||
278 | struct cdev cdev; | ||
279 | struct device dev; | ||
280 | int ubi_num; | ||
281 | char ubi_name[sizeof(UBI_NAME_STR)+5]; | ||
282 | int major; | ||
283 | int vol_count; | ||
284 | struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; | ||
285 | spinlock_t volumes_lock; | ||
286 | |||
287 | int rsvd_pebs; | ||
288 | int avail_pebs; | ||
289 | int beb_rsvd_pebs; | ||
290 | int beb_rsvd_level; | ||
291 | |||
292 | int vtbl_slots; | ||
293 | int vtbl_size; | ||
294 | struct ubi_vtbl_record *vtbl; | ||
295 | struct mutex vtbl_mutex; | ||
296 | |||
297 | int max_ec; | ||
298 | int mean_ec; | ||
299 | |||
300 | /* EBA unit's stuff */ | ||
301 | unsigned long long global_sqnum; | ||
302 | spinlock_t ltree_lock; | ||
303 | struct rb_root ltree; | ||
304 | |||
305 | /* Wear-leveling unit's stuff */ | ||
306 | struct rb_root used; | ||
307 | struct rb_root free; | ||
308 | struct rb_root scrub; | ||
309 | struct { | ||
310 | struct rb_root pnum; | ||
311 | struct rb_root aec; | ||
312 | } prot; | ||
313 | spinlock_t wl_lock; | ||
314 | int wl_scheduled; | ||
315 | struct ubi_wl_entry **lookuptbl; | ||
316 | unsigned long long abs_ec; | ||
317 | struct ubi_wl_entry *move_from; | ||
318 | struct ubi_wl_entry *move_to; | ||
319 | int move_from_put; | ||
320 | int move_to_put; | ||
321 | struct list_head works; | ||
322 | int works_count; | ||
323 | struct task_struct *bgt_thread; | ||
324 | int thread_enabled; | ||
325 | char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; | ||
326 | |||
327 | /* I/O unit's stuff */ | ||
328 | long long flash_size; | ||
329 | int peb_count; | ||
330 | int peb_size; | ||
331 | int bad_peb_count; | ||
332 | int good_peb_count; | ||
333 | int min_io_size; | ||
334 | int hdrs_min_io_size; | ||
335 | int ro_mode; | ||
336 | int leb_size; | ||
337 | int leb_start; | ||
338 | int ec_hdr_alsize; | ||
339 | int vid_hdr_alsize; | ||
340 | int vid_hdr_offset; | ||
341 | int vid_hdr_aloffset; | ||
342 | int vid_hdr_shift; | ||
343 | int bad_allowed; | ||
344 | struct mtd_info *mtd; | ||
345 | }; | ||
346 | |||
347 | extern struct file_operations ubi_cdev_operations; | ||
348 | extern struct file_operations ubi_vol_cdev_operations; | ||
349 | extern struct class *ubi_class; | ||
350 | |||
351 | /* vtbl.c */ | ||
352 | int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, | ||
353 | struct ubi_vtbl_record *vtbl_rec); | ||
354 | int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si); | ||
355 | |||
356 | /* vmt.c */ | ||
357 | int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); | ||
358 | int ubi_remove_volume(struct ubi_volume_desc *desc); | ||
359 | int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); | ||
360 | int ubi_add_volume(struct ubi_device *ubi, int vol_id); | ||
361 | void ubi_free_volume(struct ubi_device *ubi, int vol_id); | ||
362 | |||
363 | /* upd.c */ | ||
364 | int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes); | ||
365 | int ubi_more_update_data(struct ubi_device *ubi, int vol_id, | ||
366 | const void __user *buf, int count); | ||
367 | |||
368 | /* misc.c */ | ||
369 | int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); | ||
370 | int ubi_check_volume(struct ubi_device *ubi, int vol_id); | ||
371 | void ubi_calculate_reserved(struct ubi_device *ubi); | ||
372 | |||
373 | /* gluebi.c */ | ||
374 | #ifdef CONFIG_MTD_UBI_GLUEBI | ||
375 | int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol); | ||
376 | int ubi_destroy_gluebi(struct ubi_volume *vol); | ||
377 | #else | ||
378 | #define ubi_create_gluebi(ubi, vol) 0 | ||
379 | #define ubi_destroy_gluebi(vol) 0 | ||
380 | #endif | ||
381 | |||
382 | /* eba.c */ | ||
383 | int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum); | ||
384 | int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, | ||
385 | int offset, int len, int check); | ||
386 | int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, | ||
387 | const void *buf, int offset, int len, int dtype); | ||
388 | int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, | ||
389 | const void *buf, int len, int dtype, | ||
390 | int used_ebs); | ||
391 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, | ||
392 | const void *buf, int len, int dtype); | ||
393 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | ||
394 | struct ubi_vid_hdr *vid_hdr); | ||
395 | int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); | ||
396 | void ubi_eba_close(const struct ubi_device *ubi); | ||
397 | |||
398 | /* wl.c */ | ||
399 | int ubi_wl_get_peb(struct ubi_device *ubi, int dtype); | ||
400 | int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture); | ||
401 | int ubi_wl_flush(struct ubi_device *ubi); | ||
402 | int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); | ||
403 | int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); | ||
404 | void ubi_wl_close(struct ubi_device *ubi); | ||
405 | |||
406 | /* io.c */ | ||
407 | int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, | ||
408 | int len); | ||
409 | int ubi_io_write(const struct ubi_device *ubi, const void *buf, int pnum, | ||
410 | int offset, int len); | ||
411 | int ubi_io_sync_erase(const struct ubi_device *ubi, int pnum, int torture); | ||
412 | int ubi_io_is_bad(const struct ubi_device *ubi, int pnum); | ||
413 | int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum); | ||
414 | int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum, | ||
415 | struct ubi_ec_hdr *ec_hdr, int verbose); | ||
416 | int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum, | ||
417 | struct ubi_ec_hdr *ec_hdr); | ||
418 | int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum, | ||
419 | struct ubi_vid_hdr *vid_hdr, int verbose); | ||
420 | int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum, | ||
421 | struct ubi_vid_hdr *vid_hdr); | ||
422 | |||
423 | /* | ||
424 | * ubi_rb_for_each_entry - walk an RB-tree. | ||
425 | * @rb: a pointer to type 'struct rb_node' to use as a loop counter | ||
426 | * @pos: a pointer to RB-tree entry type to use as a loop counter | ||
427 | * @root: RB-tree's root | ||
428 | * @member: the name of the 'struct rb_node' within the RB-tree entry | ||
429 | */ | ||
430 | #define ubi_rb_for_each_entry(rb, pos, root, member) \ | ||
431 | for (rb = rb_first(root), \ | ||
432 | pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \ | ||
433 | rb; \ | ||
434 | rb = rb_next(rb), pos = container_of(rb, typeof(*pos), member)) | ||
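For example, walking every scanned eraseblock of one volume using the scan.h structures above might look like this (a sketch; sv is assumed to be a valid &struct ubi_scan_volume):

	struct rb_node *rb;
	struct ubi_scan_leb *seb;

	/* 'u.rb' is the rb_node member inside struct ubi_scan_leb */
	ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb)
		dbg_msg("PEB %d holds LEB %d", seb->pnum, seb->lnum);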
435 | |||
436 | /** | ||
437 | * ubi_zalloc_vid_hdr - allocate a volume identifier header object. | ||
438 | * @ubi: UBI device description object | ||
439 | * | ||
440 | * This function returns a pointer to the newly allocated and zero-filled | ||
441 | * volume identifier header object in case of success and %NULL in case of | ||
442 | * failure. | ||
443 | */ | ||
444 | static inline struct ubi_vid_hdr *ubi_zalloc_vid_hdr(const struct ubi_device *ubi) | ||
445 | { | ||
446 | void *vid_hdr; | ||
447 | |||
448 | vid_hdr = kzalloc(ubi->vid_hdr_alsize, GFP_KERNEL); | ||
449 | if (!vid_hdr) | ||
450 | return NULL; | ||
451 | |||
452 | /* | ||
453 | * VID headers may be stored at un-aligned flash offsets, so we shift | ||
454 | * the pointer. | ||
455 | */ | ||
456 | return vid_hdr + ubi->vid_hdr_shift; | ||
457 | } | ||
458 | |||
459 | /** | ||
460 | * ubi_free_vid_hdr - free a volume identifier header object. | ||
461 | * @ubi: UBI device description object | ||
462 | * @vid_hdr: the object to free | ||
463 | */ | ||
464 | static inline void ubi_free_vid_hdr(const struct ubi_device *ubi, | ||
465 | struct ubi_vid_hdr *vid_hdr) | ||
466 | { | ||
467 | void *p = vid_hdr; | ||
468 | |||
469 | if (!p) | ||
470 | return; | ||
471 | |||
472 | kfree(p - ubi->vid_hdr_shift); | ||
473 | } | ||
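Because the allocation shifts the returned pointer by @vid_hdr_shift, these two helpers must always be used as a pair; passing the shifted pointer straight to kfree() would corrupt the allocator. A usage sketch with error handling trimmed:

	struct ubi_vid_hdr *vid_hdr = ubi_zalloc_vid_hdr(ubi);

	if (!vid_hdr)
		return -ENOMEM;

	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 0);
	/* ... inspect vid_hdr ... */
	ubi_free_vid_hdr(ubi, vid_hdr);	/* un-shifts before kfree() */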
474 | |||
475 | /* | ||
476 | * This function is equivalent to 'ubi_io_read()', but @offset is relative to | ||
477 | * the beginning of the logical eraseblock, not to the beginning of the | ||
478 | * physical eraseblock. | ||
479 | */ | ||
480 | static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf, | ||
481 | int pnum, int offset, int len) | ||
482 | { | ||
483 | ubi_assert(offset >= 0); | ||
484 | return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len); | ||
485 | } | ||
486 | |||
487 | /* | ||
488 | * This function is equivalent to 'ubi_io_write()', but @offset is relative to | ||
489 | * the beginning of the logical eraseblock, not to the beginning of the | ||
490 | * physical eraseblock. | ||
491 | */ | ||
492 | static inline int ubi_io_write_data(const struct ubi_device *ubi, const void *buf, | ||
493 | int pnum, int offset, int len) | ||
494 | { | ||
495 | ubi_assert(offset >= 0); | ||
496 | return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len); | ||
497 | } | ||
498 | |||
499 | /** | ||
500 | * ubi_ro_mode - switch to read-only mode. | ||
501 | * @ubi: UBI device description object | ||
502 | */ | ||
503 | static inline void ubi_ro_mode(struct ubi_device *ubi) | ||
504 | { | ||
505 | ubi->ro_mode = 1; | ||
506 | ubi_warn("switch to read-only mode"); | ||
507 | } | ||
508 | |||
509 | /** | ||
510 | * vol_id2idx - get table index by volume ID. | ||
511 | * @ubi: UBI device description object | ||
512 | * @vol_id: volume ID | ||
513 | */ | ||
514 | static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id) | ||
515 | { | ||
516 | if (vol_id >= UBI_INTERNAL_VOL_START) | ||
517 | return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots; | ||
518 | else | ||
519 | return vol_id; | ||
520 | } | ||
521 | |||
522 | /** | ||
523 | * idx2vol_id - get volume ID by table index. | ||
524 | * @ubi: UBI device description object | ||
525 | * @idx: table index | ||
526 | */ | ||
527 | static inline int idx2vol_id(const struct ubi_device *ubi, int idx) | ||
528 | { | ||
529 | if (idx >= ubi->vtbl_slots) | ||
530 | return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START; | ||
531 | else | ||
532 | return idx; | ||
533 | } | ||
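The two helpers are exact inverses: user volumes occupy slots [0, @vtbl_slots) and internal volumes, whose IDs start at %UBI_INTERNAL_VOL_START, occupy the slots after them. A round-trip sketch:

	int idx = vol_id2idx(ubi, vol_id);
	struct ubi_volume *vol = ubi->volumes[idx];

	ubi_assert(idx2vol_id(ubi, idx) == vol_id);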
534 | |||
535 | #endif /* !__UBI_UBI_H__ */ | ||
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c new file mode 100644 index 000000000000..8925b977e3dc --- /dev/null +++ b/drivers/mtd/ubi/upd.c | |||
@@ -0,0 +1,348 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * Copyright (c) Nokia Corporation, 2006 | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
13 | * the GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | * | ||
19 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
20 | * | ||
21 | * Jan 2007: Alexander Schmidt, hacked per-volume update. | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file contains implementation of the volume update functionality. | ||
26 | * | ||
27 | * The update operation is based on the per-volume update marker which is | ||
28 | * stored in the volume table. The update marker is set before the update | ||
29 | * starts, and removed after the update has finished. So if the update was | ||
30 | * interrupted by an unclean reboot or for some other reason, the update | ||
31 | * marker stays on the flash media and UBI finds it the next time it attaches | ||
32 | * the MTD device. If the update marker is set for a volume, the volume is | ||
33 | * treated as damaged and most I/O operations are prohibited. Only a new update | ||
34 | * operation is allowed. | ||
35 | * | ||
36 | * Note, in general it is possible to implement the update operation as a | ||
37 | * transaction with a roll-back capability. | ||
38 | */ | ||
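In terms of the functions defined in this file, the protocol described above might be driven as follows (assumed glue code; the real caller is the character-device layer, which is not part of this hunk):

	/* Sketch: feed an update of 'bytes' bytes of user-space data at
	 * 'buf' in 'chunk'-sized pieces. A positive return value from
	 * ubi_more_update_data() means the whole update just finished. */
	long long remaining = bytes;
	int err = ubi_start_update(ubi, vol_id, bytes);

	if (err)
		return err;

	while (remaining > 0) {
		err = ubi_more_update_data(ubi, vol_id, buf, chunk);
		if (err < 0)
			return err;
		if (err > 0)
			break;		/* update complete */
		remaining -= chunk;
		buf += chunk;
	}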
39 | |||
40 | #include <linux/err.h> | ||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/div64.h> | ||
43 | #include "ubi.h" | ||
44 | |||
45 | /** | ||
46 | * set_update_marker - set update marker. | ||
47 | * @ubi: UBI device description object | ||
48 | * @vol_id: volume ID | ||
49 | * | ||
50 | * This function sets the update marker flag for volume @vol_id. Returns zero | ||
51 | * in case of success and a negative error code in case of failure. | ||
52 | */ | ||
53 | static int set_update_marker(struct ubi_device *ubi, int vol_id) | ||
54 | { | ||
55 | int err; | ||
56 | struct ubi_vtbl_record vtbl_rec; | ||
57 | struct ubi_volume *vol = ubi->volumes[vol_id]; | ||
58 | |||
59 | dbg_msg("set update marker for volume %d", vol_id); | ||
60 | |||
61 | if (vol->upd_marker) { | ||
62 | ubi_assert(ubi->vtbl[vol_id].upd_marker); | ||
63 | dbg_msg("already set"); | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); | ||
68 | vtbl_rec.upd_marker = 1; | ||
69 | |||
70 | err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); | ||
71 | vol->upd_marker = 1; | ||
72 | return err; | ||
73 | } | ||
74 | |||
75 | /** | ||
76 | * clear_update_marker - clear update marker. | ||
77 | * @ubi: UBI device description object | ||
78 | * @vol_id: volume ID | ||
79 | * @bytes: new data size in bytes | ||
80 | * | ||
81 | * This function clears the update marker for volume @vol_id, sets new volume | ||
82 | * data size and clears the "corrupted" flag (static volumes only). Returns | ||
83 | * zero in case of success and a negative error code in case of failure. | ||
84 | */ | ||
85 | static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long bytes) | ||
86 | { | ||
87 | int err; | ||
88 | uint64_t tmp; | ||
89 | struct ubi_vtbl_record vtbl_rec; | ||
90 | struct ubi_volume *vol = ubi->volumes[vol_id]; | ||
91 | |||
92 | dbg_msg("clear update marker for volume %d", vol_id); | ||
93 | |||
94 | memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); | ||
95 | ubi_assert(vol->upd_marker && vtbl_rec.upd_marker); | ||
96 | vtbl_rec.upd_marker = 0; | ||
97 | |||
98 | if (vol->vol_type == UBI_STATIC_VOLUME) { | ||
99 | vol->corrupted = 0; | ||
100 | vol->used_bytes = tmp = bytes; | ||
101 | vol->last_eb_bytes = do_div(tmp, vol->usable_leb_size); | ||
102 | vol->used_ebs = tmp; | ||
103 | if (vol->last_eb_bytes) | ||
104 | vol->used_ebs += 1; | ||
105 | else | ||
106 | vol->last_eb_bytes = vol->usable_leb_size; | ||
107 | } | ||
108 | |||
109 | err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); | ||
110 | vol->upd_marker = 0; | ||
111 | return err; | ||
112 | } | ||
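A worked example of the do_div() split above, assuming a usable LEB size of 15872 bytes and a 40000-byte update:

	/* tmp = 40000; do_div(tmp, 15872) leaves tmp = 2, returns 8256, so:
	 *   used_ebs      = 2 + 1 = 3   (non-zero remainder adds one LEB)
	 *   last_eb_bytes = 8256
	 * An update of exactly 2 * 15872 = 31744 bytes would instead give
	 * used_ebs = 2 and last_eb_bytes = 15872 (the 'else' branch). */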
113 | |||
114 | /** | ||
115 | * ubi_start_update - start volume update. | ||
116 | * @ubi: UBI device description object | ||
117 | * @vol_id: volume ID | ||
118 | * @bytes: update bytes | ||
119 | * | ||
120 | * This function starts volume update operation. If @bytes is zero, the volume | ||
121 | * is just wiped out. Returns zero in case of success and a negative error code | ||
122 | * in case of failure. | ||
123 | */ | ||
124 | int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes) | ||
125 | { | ||
126 | int i, err; | ||
127 | uint64_t tmp; | ||
128 | struct ubi_volume *vol = ubi->volumes[vol_id]; | ||
129 | |||
130 | dbg_msg("start update of volume %d, %llu bytes", vol_id, bytes); | ||
131 | vol->updating = 1; | ||
132 | |||
133 | err = set_update_marker(ubi, vol_id); | ||
134 | if (err) | ||
135 | return err; | ||
136 | |||
137 | /* Before updating - wipe out the volume */ | ||
138 | for (i = 0; i < vol->reserved_pebs; i++) { | ||
139 | err = ubi_eba_unmap_leb(ubi, vol_id, i); | ||
140 | if (err) | ||
141 | return err; | ||
142 | } | ||
143 | |||
144 | if (bytes == 0) { | ||
145 | err = clear_update_marker(ubi, vol_id, 0); | ||
146 | if (err) | ||
147 | return err; | ||
148 | err = ubi_wl_flush(ubi); | ||
149 | if (!err) | ||
150 | vol->updating = 0; | ||
151 | } | ||
152 | |||
153 | vol->upd_buf = kmalloc(ubi->leb_size, GFP_KERNEL); | ||
154 | if (!vol->upd_buf) | ||
155 | return -ENOMEM; | ||
156 | |||
157 | tmp = bytes; | ||
158 | vol->upd_ebs = !!do_div(tmp, vol->usable_leb_size); | ||
159 | vol->upd_ebs += tmp; | ||
160 | vol->upd_bytes = bytes; | ||
161 | vol->upd_received = 0; | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * write_leb - write update data. | ||
167 | * @ubi: UBI device description object | ||
168 | * @vol_id: volume ID | ||
169 | * @lnum: logical eraseblock number | ||
170 | * @buf: data to write | ||
171 | * @len: data size | ||
172 | * @used_ebs: how many logical eraseblocks will this volume contain (static | ||
173 | * volumes only) | ||
174 | * | ||
175 | * This function writes update data to corresponding logical eraseblock. In | ||
176 | * case of dynamic volume, this function checks if the data contains 0xFF bytes | ||
177 | * at the end. If yes, the 0xFF bytes are cut and not written. So if the whole | ||
178 | * buffer contains only 0xFF bytes, the LEB is left unmapped. | ||
179 | * | ||
180 | * The reason we skip the trailing 0xFF bytes for dynamic volumes is to make | ||
181 | * sure that more data may be appended to the logical eraseblock in the | ||
182 | * future. Indeed, writing 0xFF bytes to flash may have side effects after | ||
183 | * which the tail of the PEB is not writable anymore. So if one writes a | ||
184 | * file-system image to a UBI volume where 0xFFs mean free space, UBI makes | ||
185 | * sure this free space is still writable after the update. | ||
186 | * | ||
187 | * We cannot do this for static volumes: they are read-only, and we have to | ||
188 | * store the per-LEB CRC and the exact data length anyway, so the trailing | ||
189 | * bytes have to be written as-is. | ||
190 | * | ||
191 | * This function returns zero in case of success and a negative error code in | ||
192 | * case of failure. | ||
193 | */ | ||
194 | static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, | ||
195 | int len, int used_ebs) | ||
196 | { | ||
197 | int err, l; | ||
198 | struct ubi_volume *vol = ubi->volumes[vol_id]; | ||
199 | |||
200 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { | ||
201 | l = ALIGN(len, ubi->min_io_size); | ||
202 | memset(buf + len, 0xFF, l - len); | ||
203 | |||
204 | l = ubi_calc_data_len(ubi, buf, l); | ||
205 | if (l == 0) { | ||
206 | dbg_msg("all %d bytes contain 0xFF - skip", len); | ||
207 | return 0; | ||
208 | } | ||
209 | if (len != l) | ||
210 | dbg_msg("skip last %d bytes (0xFF)", len - l); | ||
211 | |||
212 | err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l, | ||
213 | UBI_UNKNOWN); | ||
214 | } else { | ||
215 | /* | ||
216 | * When writing static volume, and this is the last logical | ||
217 | * eraseblock, the length (@len) does not have to be aligned to | ||
218 | * the minimal flash I/O unit. The 'ubi_eba_write_leb_st()' | ||
219 | * function accepts exact (unaligned) length and stores it in | ||
220 | * the VID header. And it takes care of proper alignment by | ||
221 | * padding the buffer. Here we just make sure the padding will | ||
222 | * contain zeros, not random trash. | ||
223 | */ | ||
224 | memset(buf + len, 0, vol->usable_leb_size - len); | ||
225 | err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len, | ||
226 | UBI_UNKNOWN, used_ebs); | ||
227 | } | ||
228 | |||
229 | return err; | ||
230 | } | ||
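ubi_calc_data_len() itself lives in misc.c, outside this hunk; a plausible sketch of the trimming it is documented to perform here (scan back to the last non-0xFF byte, then round up to the minimal I/O unit), offered only as an illustration:

	/* Illustrative re-implementation - not the code from misc.c. */
	static int calc_data_len_sketch(const struct ubi_device *ubi,
					const void *buf, int length)
	{
		const uint8_t *p = buf;

		while (length > 0 && p[length - 1] == 0xFF)
			length -= 1;

		/* keep the write aligned to the minimal flash I/O unit */
		return ALIGN(length, ubi->min_io_size);
	}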
231 | |||
232 | /** | ||
233 | * ubi_more_update_data - write more update data. | ||
234 | * @ubi: UBI device description object | ||
235 | * @vol_id: volume ID | ||
236 | * @buf: write data (user-space memory buffer) | ||
237 | * @count: how many bytes to write | ||
237 | * | ||
238 | * This function writes more data to the volume which is being updated. It may | ||
239 | * be called an arbitrary number of times until all of the update data arrive. | ||
240 | * This function returns %0 in case of success, number of bytes written during | ||
241 | * the last call if the whole volume update was successfully finished, and a | ||
242 | * negative error code in case of failure. | ||
243 | */ | ||
244 | int ubi_more_update_data(struct ubi_device *ubi, int vol_id, | ||
245 | const void __user *buf, int count) | ||
246 | { | ||
247 | uint64_t tmp; | ||
248 | struct ubi_volume *vol = ubi->volumes[vol_id]; | ||
249 | int lnum, offs, err = 0, len, to_write = count; | ||
250 | |||
251 | dbg_msg("write %d of %lld bytes, %lld already passed", | ||
252 | count, vol->upd_bytes, vol->upd_received); | ||
253 | |||
254 | if (ubi->ro_mode) | ||
255 | return -EROFS; | ||
256 | |||
257 | tmp = vol->upd_received; | ||
258 | offs = do_div(tmp, vol->usable_leb_size); | ||
259 | lnum = tmp; | ||
260 | |||
261 | if (vol->upd_received + count > vol->upd_bytes) | ||
262 | to_write = count = vol->upd_bytes - vol->upd_received; | ||
263 | |||
264 | /* | ||
265 | * When updating volumes, we accumulate whole logical eraseblock of | ||
266 | * data and write it at once. | ||
267 | */ | ||
268 | if (offs != 0) { | ||
269 | /* | ||
270 | * This is a write to the middle of the logical eraseblock. We | ||
271 | * copy the data to our update buffer and wait for more data or | ||
272 | * flush it if the whole eraseblock is written or the update | ||
273 | * is finished. | ||
274 | */ | ||
275 | |||
276 | len = vol->usable_leb_size - offs; | ||
277 | if (len > count) | ||
278 | len = count; | ||
279 | |||
280 | err = copy_from_user(vol->upd_buf + offs, buf, len); | ||
281 | if (err) | ||
282 | return -EFAULT; | ||
283 | |||
284 | if (offs + len == vol->usable_leb_size || | ||
285 | vol->upd_received + len == vol->upd_bytes) { | ||
286 | int flush_len = offs + len; | ||
287 | |||
288 | /* | ||
289 | * OK, we gathered either the whole eraseblock or this | ||
290 | * is the last chunk, it's time to flush the buffer. | ||
291 | */ | ||
292 | ubi_assert(flush_len <= vol->usable_leb_size); | ||
293 | err = write_leb(ubi, vol_id, lnum, vol->upd_buf, | ||
294 | flush_len, vol->upd_ebs); | ||
295 | if (err) | ||
296 | return err; | ||
297 | } | ||
298 | |||
299 | vol->upd_received += len; | ||
300 | count -= len; | ||
301 | buf += len; | ||
302 | lnum += 1; | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * If we've got more to write, let's continue. At this point we know we | ||
307 | * are starting from the beginning of an eraseblock. | ||
308 | */ | ||
309 | while (count) { | ||
310 | if (count > vol->usable_leb_size) | ||
311 | len = vol->usable_leb_size; | ||
312 | else | ||
313 | len = count; | ||
314 | |||
315 | err = copy_from_user(vol->upd_buf, buf, len); | ||
316 | if (err) | ||
317 | return -EFAULT; | ||
318 | |||
319 | if (len == vol->usable_leb_size || | ||
320 | vol->upd_received + len == vol->upd_bytes) { | ||
321 | err = write_leb(ubi, vol_id, lnum, vol->upd_buf, len, | ||
322 | vol->upd_ebs); | ||
323 | if (err) | ||
324 | break; | ||
325 | } | ||
326 | |||
327 | vol->upd_received += len; | ||
328 | count -= len; | ||
329 | lnum += 1; | ||
330 | buf += len; | ||
331 | } | ||
332 | |||
333 | ubi_assert(vol->upd_received <= vol->upd_bytes); | ||
334 | if (vol->upd_received == vol->upd_bytes) { | ||
335 | /* The update is finished, clear the update marker */ | ||
336 | err = clear_update_marker(ubi, vol_id, vol->upd_bytes); | ||
337 | if (err) | ||
338 | return err; | ||
339 | err = ubi_wl_flush(ubi); | ||
340 | if (err == 0) { | ||
341 | err = to_write; | ||
342 | kfree(vol->upd_buf); | ||
343 | vol->updating = 0; | ||
344 | } | ||
345 | } | ||
346 | |||
347 | return err; | ||
348 | } | ||
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c new file mode 100644 index 000000000000..622d0d18952c --- /dev/null +++ b/drivers/mtd/ubi/vmt.c | |||
@@ -0,0 +1,809 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
12 | * the GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * This file contains implementation of volume creation, deletion, updating and | ||
23 | * resizing. | ||
24 | */ | ||
25 | |||
26 | #include <linux/err.h> | ||
27 | #include <asm/div64.h> | ||
28 | #include "ubi.h" | ||
29 | |||
30 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | ||
31 | static void paranoid_check_volumes(struct ubi_device *ubi); | ||
32 | #else | ||
33 | #define paranoid_check_volumes(ubi) | ||
34 | #endif | ||
35 | |||
36 | static ssize_t vol_attribute_show(struct device *dev, | ||
37 | struct device_attribute *attr, char *buf); | ||
38 | |||
39 | /* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */ | ||
40 | static struct device_attribute vol_reserved_ebs = | ||
41 | __ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL); | ||
42 | static struct device_attribute vol_type = | ||
43 | __ATTR(type, S_IRUGO, vol_attribute_show, NULL); | ||
44 | static struct device_attribute vol_name = | ||
45 | __ATTR(name, S_IRUGO, vol_attribute_show, NULL); | ||
46 | static struct device_attribute vol_corrupted = | ||
47 | __ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL); | ||
48 | static struct device_attribute vol_alignment = | ||
49 | __ATTR(alignment, S_IRUGO, vol_attribute_show, NULL); | ||
50 | static struct device_attribute vol_usable_eb_size = | ||
51 | __ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL); | ||
52 | static struct device_attribute vol_data_bytes = | ||
53 | __ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL); | ||
54 | static struct device_attribute vol_upd_marker = | ||
55 | __ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL); | ||
56 | |||
57 | /* | ||
58 | * "Show" method for files in '/<sysfs>/class/ubi/ubiX_Y/'. | ||
59 | * | ||
60 | * Consider a situation: | ||
61 | * A. process 1 opens a sysfs file related to volume Y, say | ||
62 | * /<sysfs>/class/ubi/ubiX_Y/reserved_ebs; | ||
63 | * B. process 2 removes volume Y; | ||
64 | * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file; | ||
65 | * | ||
66 | * What we want to do in a situation like that is to return an error when the | ||
67 | * file is read. This is done by means of the 'removed' flag of the volume and | ||
68 | * the 'volumes_lock' spinlock of the UBI device description object. | ||
69 | */ | ||
70 | static ssize_t vol_attribute_show(struct device *dev, | ||
71 | struct device_attribute *attr, char *buf) | ||
72 | { | ||
73 | int ret; | ||
74 | struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); | ||
75 | |||
76 | spin_lock(&vol->ubi->volumes_lock); | ||
77 | if (vol->removed) { | ||
78 | spin_unlock(&vol->ubi->volumes_lock); | ||
79 | return -ENODEV; | ||
80 | } | ||
81 | if (attr == &vol_reserved_ebs) | ||
82 | ret = sprintf(buf, "%d\n", vol->reserved_pebs); | ||
83 | else if (attr == &vol_type) { | ||
84 | const char *tp; | ||
85 | tp = vol->vol_type == UBI_DYNAMIC_VOLUME ? "dynamic" : "static"; | ||
86 | ret = sprintf(buf, "%s\n", tp); | ||
87 | } else if (attr == &vol_name) | ||
88 | ret = sprintf(buf, "%s\n", vol->name); | ||
89 | else if (attr == &vol_corrupted) | ||
90 | ret = sprintf(buf, "%d\n", vol->corrupted); | ||
91 | else if (attr == &vol_alignment) | ||
92 | ret = sprintf(buf, "%d\n", vol->alignment); | ||
93 | else if (attr == &vol_usable_eb_size) { | ||
94 | ret = sprintf(buf, "%d\n", vol->usable_leb_size); | ||
95 | } else if (attr == &vol_data_bytes) | ||
96 | ret = sprintf(buf, "%lld\n", vol->used_bytes); | ||
97 | else if (attr == &vol_upd_marker) | ||
98 | ret = sprintf(buf, "%d\n", vol->upd_marker); | ||
99 | else | ||
100 | BUG(); | ||
101 | spin_unlock(&vol->ubi->volumes_lock); | ||
102 | return ret; | ||
103 | } | ||
104 | |||
105 | /* Release method for volume devices */ | ||
106 | static void vol_release(struct device *dev) | ||
107 | { | ||
108 | struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); | ||
109 | ubi_assert(vol->removed); | ||
110 | kfree(vol); | ||
111 | } | ||
112 | |||
113 | /** | ||
114 | * volume_sysfs_init - initialize sysfs for new volume. | ||
115 | * @ubi: UBI device description object | ||
116 | * @vol: volume description object | ||
117 | * | ||
118 | * This function returns zero in case of success and a negative error code in | ||
119 | * case of failure. | ||
120 | * | ||
121 | * Note, this function does not free allocated resources in case of failure - | ||
122 | * the caller does it. Freeing here would trigger the release() method, and | ||
123 | * the caller's subsequent cleanup would then oops. | ||
124 | */ | ||
125 | static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol) | ||
126 | { | ||
127 | int err; | ||
128 | |||
129 | err = device_create_file(&vol->dev, &vol_reserved_ebs); | ||
130 | if (err) | ||
131 | return err; | ||
132 | err = device_create_file(&vol->dev, &vol_type); | ||
133 | if (err) | ||
134 | return err; | ||
135 | err = device_create_file(&vol->dev, &vol_name); | ||
136 | if (err) | ||
137 | return err; | ||
138 | err = device_create_file(&vol->dev, &vol_corrupted); | ||
139 | if (err) | ||
140 | return err; | ||
141 | err = device_create_file(&vol->dev, &vol_alignment); | ||
142 | if (err) | ||
143 | return err; | ||
144 | err = device_create_file(&vol->dev, &vol_usable_eb_size); | ||
145 | if (err) | ||
146 | return err; | ||
147 | err = device_create_file(&vol->dev, &vol_data_bytes); | ||
148 | if (err) | ||
149 | return err; | ||
150 | err = device_create_file(&vol->dev, &vol_upd_marker); | ||
151 | if (err) | ||
152 | return err; | ||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * volume_sysfs_close - close sysfs for a volume. | ||
158 | * @vol: volume description object | ||
159 | */ | ||
160 | static void volume_sysfs_close(struct ubi_volume *vol) | ||
161 | { | ||
162 | device_remove_file(&vol->dev, &vol_upd_marker); | ||
163 | device_remove_file(&vol->dev, &vol_data_bytes); | ||
164 | device_remove_file(&vol->dev, &vol_usable_eb_size); | ||
165 | device_remove_file(&vol->dev, &vol_alignment); | ||
166 | device_remove_file(&vol->dev, &vol_corrupted); | ||
167 | device_remove_file(&vol->dev, &vol_name); | ||
168 | device_remove_file(&vol->dev, &vol_type); | ||
169 | device_remove_file(&vol->dev, &vol_reserved_ebs); | ||
170 | device_unregister(&vol->dev); | ||
171 | } | ||
172 | |||
173 | /** | ||
174 | * ubi_create_volume - create volume. | ||
175 | * @ubi: UBI device description object | ||
176 | * @req: volume creation request | ||
177 | * | ||
178 | * This function creates the volume described by @req. If @req->vol_id is | ||
179 | * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new | ||
180 | * volume and saves it in @req->vol_id. Returns zero in case of success and a | ||
181 | * negative error code in case of failure. | ||
182 | */ | ||
183 | int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | ||
184 | { | ||
185 | int i, err, vol_id = req->vol_id; | ||
186 | struct ubi_volume *vol; | ||
187 | struct ubi_vtbl_record vtbl_rec; | ||
188 | uint64_t bytes; | ||
189 | |||
190 | if (ubi->ro_mode) | ||
191 | return -EROFS; | ||
192 | |||
193 | vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); | ||
194 | if (!vol) | ||
195 | return -ENOMEM; | ||
196 | |||
197 | spin_lock(&ubi->volumes_lock); | ||
198 | |||
199 | if (vol_id == UBI_VOL_NUM_AUTO) { | ||
200 | /* Find unused volume ID */ | ||
201 | dbg_msg("search for vacant volume ID"); | ||
202 | for (i = 0; i < ubi->vtbl_slots; i++) | ||
203 | if (!ubi->volumes[i]) { | ||
204 | vol_id = i; | ||
205 | break; | ||
206 | } | ||
207 | |||
208 | if (vol_id == UBI_VOL_NUM_AUTO) { | ||
209 | dbg_err("out of volume IDs"); | ||
210 | err = -ENFILE; | ||
211 | goto out_unlock; | ||
212 | } | ||
213 | req->vol_id = vol_id; | ||
214 | } | ||
215 | |||
216 | dbg_msg("volume ID %d, %llu bytes, type %d, name %s", | ||
217 | vol_id, (unsigned long long)req->bytes, | ||
218 | (int)req->vol_type, req->name); | ||
219 | |||
220 | /* Ensure that this volume does not exist */ | ||
221 | err = -EEXIST; | ||
222 | if (ubi->volumes[vol_id]) { | ||
223 | dbg_err("volume %d already exists", vol_id); | ||
224 | goto out_unlock; | ||
225 | } | ||
226 | |||
227 | /* Ensure that the name is unique */ | ||
228 | for (i = 0; i < ubi->vtbl_slots; i++) | ||
229 | if (ubi->volumes[i] && | ||
230 | ubi->volumes[i]->name_len == req->name_len && | ||
231 | strcmp(ubi->volumes[i]->name, req->name) == 0) { | ||
232 | dbg_err("volume \"%s\" exists (ID %d)", req->name, i); | ||
233 | goto out_unlock; | ||
234 | } | ||
235 | |||
236 | /* Calculate how many eraseblocks are requested */ | ||
237 | vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment; | ||
238 | bytes = req->bytes; | ||
239 | if (do_div(bytes, vol->usable_leb_size)) | ||
240 | vol->reserved_pebs = 1; | ||
241 | vol->reserved_pebs += bytes; | ||
242 | |||
243 | /* Reserve physical eraseblocks */ | ||
244 | if (vol->reserved_pebs > ubi->avail_pebs) { | ||
245 | dbg_err("not enough PEBs, only %d available", ubi->avail_pebs); | ||
246 | spin_unlock(&ubi->volumes_lock); | ||
247 | err = -ENOSPC; | ||
248 | goto out_unlock; | ||
249 | } | ||
250 | ubi->avail_pebs -= vol->reserved_pebs; | ||
251 | ubi->rsvd_pebs += vol->reserved_pebs; | ||
252 | |||
253 | vol->vol_id = vol_id; | ||
254 | vol->alignment = req->alignment; | ||
255 | vol->data_pad = ubi->leb_size % vol->alignment; | ||
256 | vol->vol_type = req->vol_type; | ||
257 | vol->name_len = req->name_len; | ||
258 | memcpy(vol->name, req->name, vol->name_len + 1); | ||
259 | vol->exclusive = 1; | ||
260 | vol->ubi = ubi; | ||
261 | ubi->volumes[vol_id] = vol; | ||
262 | spin_unlock(&ubi->volumes_lock); | ||
263 | |||
264 | /* | ||
265 | * Finish all pending erases because there may be some LEBs belonging | ||
266 | * to the same volume ID. | ||
267 | */ | ||
268 | err = ubi_wl_flush(ubi); | ||
269 | if (err) | ||
270 | goto out_acc; | ||
271 | |||
272 | vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL); | ||
273 | if (!vol->eba_tbl) { | ||
274 | err = -ENOMEM; | ||
275 | goto out_acc; | ||
276 | } | ||
277 | |||
278 | for (i = 0; i < vol->reserved_pebs; i++) | ||
279 | vol->eba_tbl[i] = UBI_LEB_UNMAPPED; | ||
280 | |||
281 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { | ||
282 | vol->used_ebs = vol->reserved_pebs; | ||
283 | vol->last_eb_bytes = vol->usable_leb_size; | ||
284 | vol->used_bytes = vol->used_ebs * vol->usable_leb_size; | ||
285 | } else { | ||
286 | bytes = vol->used_bytes; | ||
287 | vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size); | ||
288 | vol->used_ebs = bytes; | ||
289 | if (vol->last_eb_bytes) | ||
290 | vol->used_ebs += 1; | ||
291 | else | ||
292 | vol->last_eb_bytes = vol->usable_leb_size; | ||
293 | } | ||
294 | |||
295 | /* Register character device for the volume */ | ||
296 | cdev_init(&vol->cdev, &ubi_vol_cdev_operations); | ||
297 | vol->cdev.owner = THIS_MODULE; | ||
298 | err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1); | ||
299 | if (err) { | ||
300 | ubi_err("cannot add character device for volume %d", vol_id); | ||
301 | goto out_mapping; | ||
302 | } | ||
303 | |||
304 | err = ubi_create_gluebi(ubi, vol); | ||
305 | if (err) | ||
306 | goto out_cdev; | ||
307 | |||
308 | vol->dev.release = vol_release; | ||
309 | vol->dev.parent = &ubi->dev; | ||
310 | vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); | ||
311 | vol->dev.class = ubi_class; | ||
312 | sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); | ||
313 | err = device_register(&vol->dev); | ||
314 | if (err) | ||
315 | goto out_gluebi; | ||
316 | |||
317 | err = volume_sysfs_init(ubi, vol); | ||
318 | if (err) | ||
319 | goto out_sysfs; | ||
320 | |||
321 | /* Fill volume table record */ | ||
322 | memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record)); | ||
323 | vtbl_rec.reserved_pebs = cpu_to_ubi32(vol->reserved_pebs); | ||
324 | vtbl_rec.alignment = cpu_to_ubi32(vol->alignment); | ||
325 | vtbl_rec.data_pad = cpu_to_ubi32(vol->data_pad); | ||
326 | vtbl_rec.name_len = cpu_to_ubi16(vol->name_len); | ||
327 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) | ||
328 | vtbl_rec.vol_type = UBI_VID_DYNAMIC; | ||
329 | else | ||
330 | vtbl_rec.vol_type = UBI_VID_STATIC; | ||
331 | memcpy(vtbl_rec.name, vol->name, vol->name_len + 1); | ||
332 | |||
333 | err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); | ||
334 | if (err) | ||
335 | goto out_sysfs; | ||
336 | |||
337 | spin_lock(&ubi->volumes_lock); | ||
338 | ubi->vol_count += 1; | ||
339 | vol->exclusive = 0; | ||
340 | spin_unlock(&ubi->volumes_lock); | ||
341 | |||
342 | paranoid_check_volumes(ubi); | ||
343 | return 0; | ||
344 | |||
345 | out_gluebi: | ||
346 | err = ubi_destroy_gluebi(vol); | ||
347 | out_cdev: | ||
348 | cdev_del(&vol->cdev); | ||
349 | out_mapping: | ||
350 | kfree(vol->eba_tbl); | ||
351 | out_acc: | ||
352 | spin_lock(&ubi->volumes_lock); | ||
353 | ubi->rsvd_pebs -= vol->reserved_pebs; | ||
354 | ubi->avail_pebs += vol->reserved_pebs; | ||
355 | out_unlock: | ||
356 | spin_unlock(&ubi->volumes_lock); | ||
357 | kfree(vol); | ||
358 | return err; | ||
359 | |||
360 | /* | ||
361 | * We are registered, so @vol is destroyed in the release function and | ||
362 | * we have to de-initialize differently. | ||
363 | */ | ||
364 | out_sysfs: | ||
365 | err = ubi_destroy_gluebi(vol); | ||
366 | cdev_del(&vol->cdev); | ||
367 | kfree(vol->eba_tbl); | ||
368 | spin_lock(&ubi->volumes_lock); | ||
369 | ubi->rsvd_pebs -= vol->reserved_pebs; | ||
370 | ubi->avail_pebs += vol->reserved_pebs; | ||
371 | spin_unlock(&ubi->volumes_lock); | ||
372 | volume_sysfs_close(vol); | ||
373 | return err; | ||
374 | } | ||
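A worked example of the reservation math near the top of this function, assuming leb_size = 16384, a requested alignment of 2048 (so usable_leb_size = 16384 - 16384 % 2048 = 16384) and req->bytes = 100000:

	/* do_div(bytes, 16384) leaves bytes = 6 and returns 1696, so:
	 *   reserved_pebs = 1 (non-zero remainder) + 6 = 7 PEBs
	 * A request of exactly 6 * 16384 = 98304 bytes would reserve 6. */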
375 | |||
376 | /** | ||
377 | * ubi_remove_volume - remove volume. | ||
378 | * @desc: volume descriptor | ||
379 | * | ||
380 | * This function removes volume described by @desc. The volume has to be opened | ||
381 | * in "exclusive" mode. Returns zero in case of success and a negative error | ||
382 | * code in case of failure. | ||
383 | */ | ||
384 | int ubi_remove_volume(struct ubi_volume_desc *desc) | ||
385 | { | ||
386 | struct ubi_volume *vol = desc->vol; | ||
387 | struct ubi_device *ubi = vol->ubi; | ||
388 | int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; | ||
389 | |||
390 | dbg_msg("remove UBI volume %d", vol_id); | ||
391 | ubi_assert(desc->mode == UBI_EXCLUSIVE); | ||
392 | ubi_assert(vol == ubi->volumes[vol_id]); | ||
393 | |||
394 | if (ubi->ro_mode) | ||
395 | return -EROFS; | ||
396 | |||
397 | err = ubi_destroy_gluebi(vol); | ||
398 | if (err) | ||
399 | return err; | ||
400 | |||
401 | err = ubi_change_vtbl_record(ubi, vol_id, NULL); | ||
402 | if (err) | ||
403 | return err; | ||
404 | |||
405 | for (i = 0; i < vol->reserved_pebs; i++) { | ||
406 | err = ubi_eba_unmap_leb(ubi, vol_id, i); | ||
407 | if (err) | ||
408 | return err; | ||
409 | } | ||
410 | |||
411 | spin_lock(&ubi->volumes_lock); | ||
412 | vol->removed = 1; | ||
413 | ubi->volumes[vol_id] = NULL; | ||
414 | spin_unlock(&ubi->volumes_lock); | ||
415 | |||
416 | kfree(vol->eba_tbl); | ||
417 | vol->eba_tbl = NULL; | ||
418 | cdev_del(&vol->cdev); | ||
419 | volume_sysfs_close(vol); | ||
420 | kfree(desc); | ||
421 | |||
422 | spin_lock(&ubi->volumes_lock); | ||
423 | ubi->rsvd_pebs -= reserved_pebs; | ||
424 | ubi->avail_pebs += reserved_pebs; | ||
425 | i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs; | ||
426 | if (i > 0) { | ||
427 | i = ubi->avail_pebs >= i ? i : ubi->avail_pebs; | ||
428 | ubi->avail_pebs -= i; | ||
429 | ubi->rsvd_pebs += i; | ||
430 | ubi->beb_rsvd_pebs += i; | ||
431 | if (i > 0) | ||
432 | ubi_msg("reserve more %d PEBs", i); | ||
433 | } | ||
434 | ubi->vol_count -= 1; | ||
435 | spin_unlock(&ubi->volumes_lock); | ||
436 | |||
437 | paranoid_check_volumes(ubi); | ||
438 | module_put(THIS_MODULE); | ||
439 | return 0; | ||
440 | } | ||
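The bad-eraseblock reserve top-up in the block above (and the identical block in 'ubi_resize_volume()' below) is easy to misread; the following standalone sketch restates it. The helper name and its calling convention are illustrative, not part of the source.

```c
/*
 * Illustrative restatement of the reserve top-up above: try to bring
 * beb_rsvd_pebs back up to beb_rsvd_level, taking at most what is
 * available. Returns how many PEBs were actually moved to the reserve.
 */
static int beb_reserve_topup(int beb_rsvd_level, int *beb_rsvd_pebs,
			     int *avail_pebs, int *rsvd_pebs)
{
	int need = beb_rsvd_level - *beb_rsvd_pebs;

	if (need <= 0)
		return 0;
	if (need > *avail_pebs)
		need = *avail_pebs;	/* can only take what is available */
	*avail_pebs -= need;
	*rsvd_pebs += need;
	*beb_rsvd_pebs += need;
	return need;			/* caller prints "reserve more %d PEBs" */
}
```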
441 | |||
442 | /** | ||
443 | * ubi_resize_volume - re-size volume. | ||
444 | * @desc: volume descriptor | ||
445 | * @reserved_pebs: new size in physical eraseblocks | ||
446 | * | ||
447 | * This function returns zero in case of success, and a negative error code in | ||
448 | * case of failure. | ||
449 | */ | ||
450 | int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) | ||
451 | { | ||
452 | int i, err, pebs, *new_mapping; | ||
453 | struct ubi_volume *vol = desc->vol; | ||
454 | struct ubi_device *ubi = vol->ubi; | ||
455 | struct ubi_vtbl_record vtbl_rec; | ||
456 | int vol_id = vol->vol_id; | ||
457 | |||
458 | if (ubi->ro_mode) | ||
459 | return -EROFS; | ||
460 | |||
461 | dbg_msg("re-size volume %d from %d to %d PEBs", | ||
462 | vol_id, vol->reserved_pebs, reserved_pebs); | ||
463 | ubi_assert(desc->mode == UBI_EXCLUSIVE); | ||
464 | ubi_assert(vol == ubi->volumes[vol_id]); | ||
465 | |||
466 | if (vol->vol_type == UBI_STATIC_VOLUME && | ||
467 | reserved_pebs < vol->used_ebs) { | ||
468 | dbg_err("too small size %d, %d LEBs contain data", | ||
469 | reserved_pebs, vol->used_ebs); | ||
470 | return -EINVAL; | ||
471 | } | ||
472 | |||
473 | /* If the size is the same, we have nothing to do */ | ||
474 | if (reserved_pebs == vol->reserved_pebs) | ||
475 | return 0; | ||
476 | |||
477 | new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL); | ||
478 | if (!new_mapping) | ||
479 | return -ENOMEM; | ||
480 | |||
481 | for (i = 0; i < reserved_pebs; i++) | ||
482 | new_mapping[i] = UBI_LEB_UNMAPPED; | ||
483 | |||
484 | /* Reserve physical eraseblocks */ | ||
485 | pebs = reserved_pebs - vol->reserved_pebs; | ||
486 | if (pebs > 0) { | ||
487 | spin_lock(&ubi->volumes_lock); | ||
488 | if (pebs > ubi->avail_pebs) { | ||
489 | dbg_err("not enough PEBs: requested %d, available %d", | ||
490 | pebs, ubi->avail_pebs); | ||
491 | spin_unlock(&ubi->volumes_lock); | ||
492 | err = -ENOSPC; | ||
493 | goto out_free; | ||
494 | } | ||
495 | ubi->avail_pebs -= pebs; | ||
496 | ubi->rsvd_pebs += pebs; | ||
497 | for (i = 0; i < vol->reserved_pebs; i++) | ||
498 | new_mapping[i] = vol->eba_tbl[i]; | ||
499 | kfree(vol->eba_tbl); | ||
500 | vol->eba_tbl = new_mapping; | ||
501 | spin_unlock(&ubi->volumes_lock); | ||
502 | } | ||
503 | |||
504 | /* Change volume table record */ | ||
505 | memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); | ||
506 | vtbl_rec.reserved_pebs = cpu_to_ubi32(reserved_pebs); | ||
507 | err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); | ||
508 | if (err) | ||
509 | goto out_acc; | ||
510 | |||
511 | if (pebs < 0) { | ||
512 | for (i = 0; i < -pebs; i++) { | ||
513 | err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i); | ||
514 | if (err) | ||
515 | goto out_acc; | ||
516 | } | ||
517 | spin_lock(&ubi->volumes_lock); | ||
518 | ubi->rsvd_pebs += pebs; | ||
519 | ubi->avail_pebs -= pebs; | ||
520 | pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs; | ||
521 | if (pebs > 0) { | ||
522 | pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs; | ||
523 | ubi->avail_pebs -= pebs; | ||
524 | ubi->rsvd_pebs += pebs; | ||
525 | ubi->beb_rsvd_pebs += pebs; | ||
526 | if (pebs > 0) | ||
527 | ubi_msg("reserve more %d PEBs", pebs); | ||
528 | } | ||
529 | for (i = 0; i < reserved_pebs; i++) | ||
530 | new_mapping[i] = vol->eba_tbl[i]; | ||
531 | kfree(vol->eba_tbl); | ||
532 | vol->eba_tbl = new_mapping; | ||
533 | spin_unlock(&ubi->volumes_lock); | ||
534 | } | ||
535 | |||
536 | vol->reserved_pebs = reserved_pebs; | ||
537 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { | ||
538 | vol->used_ebs = reserved_pebs; | ||
539 | vol->last_eb_bytes = vol->usable_leb_size; | ||
540 | vol->used_bytes = vol->used_ebs * vol->usable_leb_size; | ||
541 | } | ||
542 | |||
543 | paranoid_check_volumes(ubi); | ||
544 | return 0; | ||
545 | |||
546 | out_acc: | ||
547 | if (pebs > 0) { | ||
548 | spin_lock(&ubi->volumes_lock); | ||
549 | ubi->rsvd_pebs -= pebs; | ||
550 | ubi->avail_pebs += pebs; | ||
551 | spin_unlock(&ubi->volumes_lock); | ||
552 | } | ||
553 | out_free: | ||
554 | kfree(new_mapping); | ||
555 | return err; | ||
556 | } | ||
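Both 'ubi_remove_volume()' and 'ubi_resize_volume()' require the caller to hold the volume exclusively. A minimal usage sketch, assuming the 'ubi_open_volume()'/'ubi_close_volume()' kernel API (defined in kapi.c, not shown in this excerpt); the device and volume numbers are illustrative:

```c
/* Sketch only: grow volume 3 on UBI device 0 to 100 reserved PEBs. */
static int example_resize(void)
{
	struct ubi_volume_desc *desc;
	int err;

	desc = ubi_open_volume(0, 3, UBI_EXCLUSIVE);
	if (IS_ERR(desc))
		return PTR_ERR(desc);
	err = ubi_resize_volume(desc, 100);
	ubi_close_volume(desc);
	return err;
}
```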
557 | |||
558 | /** | ||
559 | * ubi_add_volume - add volume. | ||
560 | * @ubi: UBI device description object | ||
561 | * @vol_id: volume ID | ||
562 | * | ||
563 | * This function adds an existing volume and initializes all its data | ||
564 | * structures. Returns zero in case of success and a negative error code in | ||
565 | * case of failure. | ||
566 | */ | ||
567 | int ubi_add_volume(struct ubi_device *ubi, int vol_id) | ||
568 | { | ||
569 | int err; | ||
570 | struct ubi_volume *vol = ubi->volumes[vol_id]; | ||
571 | |||
572 | dbg_msg("add volume %d", vol_id); | ||
573 | ubi_assert(vol); | ||
574 | ubi_dbg_dump_vol_info(vol); | ||
575 | |||
576 | /* Register character device for the volume */ | ||
577 | cdev_init(&vol->cdev, &ubi_vol_cdev_operations); | ||
578 | vol->cdev.owner = THIS_MODULE; | ||
579 | err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1); | ||
580 | if (err) { | ||
581 | ubi_err("cannot add character device for volume %d", vol_id); | ||
582 | return err; | ||
583 | } | ||
584 | |||
585 | err = ubi_create_gluebi(ubi, vol); | ||
586 | if (err) | ||
587 | goto out_cdev; | ||
588 | |||
589 | vol->dev.release = vol_release; | ||
590 | vol->dev.parent = &ubi->dev; | ||
591 | vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); | ||
592 | vol->dev.class = ubi_class; | ||
593 | sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); | ||
594 | err = device_register(&vol->dev); | ||
595 | if (err) | ||
596 | goto out_gluebi; | ||
597 | |||
598 | err = volume_sysfs_init(ubi, vol); | ||
599 | if (err) { | ||
600 | cdev_del(&vol->cdev); | ||
601 | err = ubi_destroy_gluebi(vol); | ||
602 | volume_sysfs_close(vol); | ||
603 | return err; | ||
604 | } | ||
605 | |||
606 | paranoid_check_volumes(ubi); | ||
607 | return 0; | ||
608 | |||
609 | out_gluebi: | ||
610 | err = ubi_destroy_gluebi(vol); | ||
611 | out_cdev: | ||
612 | cdev_del(&vol->cdev); | ||
613 | return err; | ||
614 | } | ||
615 | |||
616 | /** | ||
617 | * ubi_free_volume - free volume. | ||
618 | * @ubi: UBI device description object | ||
619 | * @vol_id: volume ID | ||
620 | * | ||
621 | * This function frees all resources for volume @vol_id but does not remove it. | ||
622 | * Used only when the UBI device is detached. | ||
623 | */ | ||
624 | void ubi_free_volume(struct ubi_device *ubi, int vol_id) | ||
625 | { | ||
626 | int err; | ||
627 | struct ubi_volume *vol = ubi->volumes[vol_id]; | ||
628 | |||
629 | dbg_msg("free volume %d", vol_id); | ||
630 | ubi_assert(vol); | ||
631 | |||
632 | vol->removed = 1; | ||
633 | err = ubi_destroy_gluebi(vol); | ||
634 | ubi->volumes[vol_id] = NULL; | ||
635 | cdev_del(&vol->cdev); | ||
636 | volume_sysfs_close(vol); | ||
637 | } | ||
638 | |||
639 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | ||
640 | |||
641 | /** | ||
642 | * paranoid_check_volume - check volume information. | ||
643 | * @ubi: UBI device description object | ||
644 | * @vol_id: volume ID | ||
645 | */ | ||
646 | static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id) | ||
647 | { | ||
648 | int idx = vol_id2idx(ubi, vol_id); | ||
649 | int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker; | ||
650 | const struct ubi_volume *vol = ubi->volumes[idx]; | ||
651 | long long n; | ||
652 | const char *name; | ||
653 | |||
654 | reserved_pebs = ubi32_to_cpu(ubi->vtbl[vol_id].reserved_pebs); | ||
655 | |||
656 | if (!vol) { | ||
657 | if (reserved_pebs) { | ||
658 | ubi_err("no volume info, but volume exists"); | ||
659 | goto fail; | ||
660 | } | ||
661 | return; | ||
662 | } | ||
663 | |||
664 | if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 || | ||
665 | vol->name_len < 0) { | ||
666 | ubi_err("negative values"); | ||
667 | goto fail; | ||
668 | } | ||
669 | if (vol->alignment > ubi->leb_size || vol->alignment == 0) { | ||
670 | ubi_err("bad alignment"); | ||
671 | goto fail; | ||
672 | } | ||
673 | |||
674 | n = vol->alignment % ubi->min_io_size; | ||
675 | if (vol->alignment != 1 && n) { | ||
676 | ubi_err("alignment is not multiple of min I/O unit"); | ||
677 | goto fail; | ||
678 | } | ||
679 | |||
680 | n = ubi->leb_size % vol->alignment; | ||
681 | if (vol->data_pad != n) { | ||
682 | ubi_err("bad data_pad, has to be %lld", n); | ||
683 | goto fail; | ||
684 | } | ||
685 | |||
686 | if (vol->vol_type != UBI_DYNAMIC_VOLUME && | ||
687 | vol->vol_type != UBI_STATIC_VOLUME) { | ||
688 | ubi_err("bad vol_type"); | ||
689 | goto fail; | ||
690 | } | ||
691 | |||
692 | if (vol->upd_marker != 0 && vol->upd_marker != 1) { | ||
693 | ubi_err("bad upd_marker"); | ||
694 | goto fail; | ||
695 | } | ||
696 | |||
697 | if (vol->upd_marker && vol->corrupted) { | ||
698 | dbg_err("update marker and corrupted simultaneously"); | ||
699 | goto fail; | ||
700 | } | ||
701 | |||
702 | if (vol->reserved_pebs > ubi->good_peb_count) { | ||
703 | ubi_err("too large reserved_pebs"); | ||
704 | goto fail; | ||
705 | } | ||
706 | |||
707 | n = ubi->leb_size - vol->data_pad; | ||
708 | if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) { | ||
709 | ubi_err("bad usable_leb_size, has to be %lld", n); | ||
710 | goto fail; | ||
711 | } | ||
712 | |||
713 | if (vol->name_len > UBI_VOL_NAME_MAX) { | ||
714 | ubi_err("too long volume name, max is %d", UBI_VOL_NAME_MAX); | ||
715 | goto fail; | ||
716 | } | ||
717 | |||
718 | if (!vol->name) { | ||
719 | ubi_err("NULL volume name"); | ||
720 | goto fail; | ||
721 | } | ||
722 | |||
723 | n = strnlen(vol->name, vol->name_len + 1); | ||
724 | if (n != vol->name_len) { | ||
725 | ubi_err("bad name_len %lld", n); | ||
726 | goto fail; | ||
727 | } | ||
728 | |||
729 | n = vol->used_ebs * vol->usable_leb_size; | ||
730 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { | ||
731 | if (vol->corrupted != 0) { | ||
732 | ubi_err("corrupted dynamic volume"); | ||
733 | goto fail; | ||
734 | } | ||
735 | if (vol->used_ebs != vol->reserved_pebs) { | ||
736 | ubi_err("bad used_ebs"); | ||
737 | goto fail; | ||
738 | } | ||
739 | if (vol->last_eb_bytes != vol->usable_leb_size) { | ||
740 | ubi_err("bad last_eb_bytes"); | ||
741 | goto fail; | ||
742 | } | ||
743 | if (vol->used_bytes != n) { | ||
744 | ubi_err("bad used_bytes"); | ||
745 | goto fail; | ||
746 | } | ||
747 | } else { | ||
748 | if (vol->corrupted != 0 && vol->corrupted != 1) { | ||
749 | ubi_err("bad corrupted"); | ||
750 | goto fail; | ||
751 | } | ||
752 | if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) { | ||
753 | ubi_err("bad used_ebs"); | ||
754 | goto fail; | ||
755 | } | ||
756 | if (vol->last_eb_bytes < 0 || | ||
757 | vol->last_eb_bytes > vol->usable_leb_size) { | ||
758 | ubi_err("bad last_eb_bytes"); | ||
759 | goto fail; | ||
760 | } | ||
761 | if (vol->used_bytes < 0 || vol->used_bytes > n || | ||
762 | vol->used_bytes < n - vol->usable_leb_size) { | ||
763 | ubi_err("bad used_bytes"); | ||
764 | goto fail; | ||
765 | } | ||
766 | } | ||
767 | |||
768 | alignment = ubi32_to_cpu(ubi->vtbl[vol_id].alignment); | ||
769 | data_pad = ubi32_to_cpu(ubi->vtbl[vol_id].data_pad); | ||
770 | name_len = ubi16_to_cpu(ubi->vtbl[vol_id].name_len); | ||
771 | upd_marker = ubi->vtbl[vol_id].upd_marker; | ||
772 | name = &ubi->vtbl[vol_id].name[0]; | ||
773 | if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC) | ||
774 | vol_type = UBI_DYNAMIC_VOLUME; | ||
775 | else | ||
776 | vol_type = UBI_STATIC_VOLUME; | ||
777 | |||
778 | if (alignment != vol->alignment || data_pad != vol->data_pad || | ||
779 | upd_marker != vol->upd_marker || vol_type != vol->vol_type || | ||
780 | name_len != vol->name_len || strncmp(name, vol->name, name_len)) { | ||
781 | ubi_err("volume info is different"); | ||
782 | goto fail; | ||
783 | } | ||
784 | |||
785 | return; | ||
786 | |||
787 | fail: | ||
788 | ubi_err("paranoid check failed"); | ||
789 | ubi_dbg_dump_vol_info(vol); | ||
790 | ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); | ||
791 | BUG(); | ||
792 | } | ||
793 | |||
794 | /** | ||
795 | * paranoid_check_volumes - check information about all volumes. | ||
796 | * @ubi: UBI device description object | ||
797 | */ | ||
798 | static void paranoid_check_volumes(struct ubi_device *ubi) | ||
799 | { | ||
800 | int i; | ||
801 | |||
802 | mutex_lock(&ubi->vtbl_mutex); | ||
803 | spin_lock(&ubi->volumes_lock); | ||
804 | for (i = 0; i < ubi->vtbl_slots; i++) | ||
805 | paranoid_check_volume(ubi, i); | ||
806 | spin_unlock(&ubi->volumes_lock); | ||
807 | mutex_unlock(&ubi->vtbl_mutex); | ||
808 | } | ||
809 | #endif | ||
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c new file mode 100644 index 000000000000..b6fd6bbd941e --- /dev/null +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -0,0 +1,809 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * Copyright (c) Nokia Corporation, 2006, 2007 | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
13 | * the GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | * | ||
19 | * Author: Artem Bityutskiy (Битюцкий Артём) | ||
20 | */ | ||
21 | |||
22 | /* | ||
23 | * This file includes volume table manipulation code. The volume table is an | ||
24 | * on-flash table containing volume meta-data like name, number of reserved | ||
25 | * physical eraseblocks, type, etc. The volume table is stored in the so-called | ||
26 | * "layout volume". | ||
27 | * | ||
28 | * The layout volume is an internal volume which is organized as follows. It | ||
29 | * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical | ||
30 | * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each | ||
31 | * other. This redundancy guarantees robustness to unclean reboots. The volume | ||
32 | * table is basically an array of volume table records. Each record contains | ||
33 | * full information about the volume and is protected by a CRC checksum. | ||
34 | * | ||
35 | * When the volume table is changed, it is first changed in RAM. Then LEB 0 is | ||
36 | * erased, and the updated volume table is written back to LEB 0. Then the same | ||
37 | * is done for LEB 1. This scheme guarantees recoverability from unclean reboots. | ||
38 | * | ||
39 | * In this UBI implementation the on-flash volume table does not contain any | ||
40 | * information about how much data static volumes contain. This information may | ||
41 | * be obtained from the scanning data. | ||
42 | * | ||
43 | * But it would still be beneficial to store this information in the volume | ||
44 | * table. For example, suppose we have a static volume X, and all its physical | ||
45 | * eraseblocks became bad for some reason. Suppose we are attaching the | ||
46 | * corresponding MTD device, and the scanning has found no logical eraseblocks | ||
47 | * corresponding to volume X. According to the volume table, volume X does | ||
48 | * exist. So we don't know whether it is just empty or whether all its physical | ||
49 | * eraseblocks went bad, and we cannot warn the user about this corruption. | ||
50 | * | ||
51 | * The volume table also stores so-called "update marker", which is used for | ||
52 | * volume updates. Before updating the volume, the update marker is set, and | ||
53 | * after the update operation is finished, the update marker is cleared. So if | ||
54 | * the update operation was interrupted (e.g. by an unclean reboot) - the | ||
55 | * update marker is still there and we know that the volume's contents is | ||
56 | * damaged. | ||
57 | */ | ||
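The update ordering described above is what makes the table recoverable: at any instant at least one LEB holds an intact copy. A condensed sketch of the sequence follows; the real implementation is 'ubi_change_vtbl_record()' below, and the helper name here is hypothetical.

```c
/* Condensed sketch of the LEB 0 / LEB 1 update ordering described above. */
static int rewrite_layout_volume(struct ubi_device *ubi)
{
	int i, err;

	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
		/* Erase copy i, then rewrite it; copy 1 - i is still intact */
		err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i);
		if (err)
			return err;
		err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl,
					0, ubi->vtbl_size, UBI_LONGTERM);
		if (err)
			return err;
	}
	return 0;
}
```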
58 | |||
59 | #include <linux/crc32.h> | ||
60 | #include <linux/err.h> | ||
61 | #include <asm/div64.h> | ||
62 | #include "ubi.h" | ||
63 | |||
64 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | ||
65 | static void paranoid_vtbl_check(const struct ubi_device *ubi); | ||
66 | #else | ||
67 | #define paranoid_vtbl_check(ubi) | ||
68 | #endif | ||
69 | |||
70 | /* Empty volume table record */ | ||
71 | static struct ubi_vtbl_record empty_vtbl_record; | ||
72 | |||
73 | /** | ||
74 | * ubi_change_vtbl_record - change volume table record. | ||
75 | * @ubi: UBI device description object | ||
76 | * @idx: table index to change | ||
77 | * @vtbl_rec: new volume table record | ||
78 | * | ||
79 | * This function changes volume table record @idx. If @vtbl_rec is %NULL, an | ||
80 | * empty volume table record is written. The caller does not have to calculate | ||
81 | * the CRC of the record, as this is done by this function. Returns zero in case of success | ||
82 | * and a negative error code in case of failure. | ||
83 | */ | ||
84 | int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, | ||
85 | struct ubi_vtbl_record *vtbl_rec) | ||
86 | { | ||
87 | int i, err; | ||
88 | uint32_t crc; | ||
89 | |||
90 | ubi_assert(idx >= 0 && idx < ubi->vtbl_slots); | ||
91 | |||
92 | if (!vtbl_rec) | ||
93 | vtbl_rec = &empty_vtbl_record; | ||
94 | else { | ||
95 | crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC); | ||
96 | vtbl_rec->crc = cpu_to_ubi32(crc); | ||
97 | } | ||
98 | |||
99 | dbg_msg("change record %d", idx); | ||
100 | ubi_dbg_dump_vtbl_record(vtbl_rec, idx); | ||
101 | |||
102 | mutex_lock(&ubi->vtbl_mutex); | ||
103 | memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); | ||
104 | for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { | ||
105 | err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i); | ||
106 | if (err) { | ||
107 | mutex_unlock(&ubi->vtbl_mutex); | ||
108 | return err; | ||
109 | } | ||
110 | err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0, | ||
111 | ubi->vtbl_size, UBI_LONGTERM); | ||
112 | if (err) { | ||
113 | mutex_unlock(&ubi->vtbl_mutex); | ||
114 | return err; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | paranoid_vtbl_check(ubi); | ||
119 | mutex_unlock(&ubi->vtbl_mutex); | ||
120 | return ubi_wl_flush(ubi); | ||
121 | } | ||
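The CRC sealing used above covers the first UBI_VTBL_RECORD_SIZE_CRC bytes of a record; the check side (used by 'vtbl_check()' below) is the mirror image. A hedged helper sketch, not part of the source:

```c
/* Sketch: verify the CRC that ubi_change_vtbl_record() stores in a record. */
static int vtbl_rec_crc_ok(const struct ubi_vtbl_record *rec)
{
	uint32_t crc = crc32(UBI_CRC32_INIT, rec, UBI_VTBL_RECORD_SIZE_CRC);

	return ubi32_to_cpu(rec->crc) == crc;
}
```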
122 | |||
123 | /** | ||
124 | * vtbl_check - check if the volume table is not corrupted and contains sensible | ||
125 | * data. | ||
126 | * | ||
127 | * @ubi: UBI device description object | ||
128 | * @vtbl: volume table | ||
129 | * | ||
130 | * This function returns zero if @vtbl is all right, %1 if CRC is incorrect, | ||
131 | * and %-EINVAL if it contains inconsistent data. | ||
132 | */ | ||
133 | static int vtbl_check(const struct ubi_device *ubi, | ||
134 | const struct ubi_vtbl_record *vtbl) | ||
135 | { | ||
136 | int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; | ||
137 | int upd_marker; | ||
138 | uint32_t crc; | ||
139 | const char *name; | ||
140 | |||
141 | for (i = 0; i < ubi->vtbl_slots; i++) { | ||
142 | cond_resched(); | ||
143 | |||
144 | reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs); | ||
145 | alignment = ubi32_to_cpu(vtbl[i].alignment); | ||
146 | data_pad = ubi32_to_cpu(vtbl[i].data_pad); | ||
147 | upd_marker = vtbl[i].upd_marker; | ||
148 | vol_type = vtbl[i].vol_type; | ||
149 | name_len = ubi16_to_cpu(vtbl[i].name_len); | ||
150 | name = &vtbl[i].name[0]; | ||
151 | |||
152 | crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC); | ||
153 | if (ubi32_to_cpu(vtbl[i].crc) != crc) { | ||
154 | ubi_err("bad CRC at record %u: %#08x, not %#08x", | ||
155 | i, crc, ubi32_to_cpu(vtbl[i].crc)); | ||
156 | ubi_dbg_dump_vtbl_record(&vtbl[i], i); | ||
157 | return 1; | ||
158 | } | ||
159 | |||
160 | if (reserved_pebs == 0) { | ||
161 | if (memcmp(&vtbl[i], &empty_vtbl_record, | ||
162 | UBI_VTBL_RECORD_SIZE)) { | ||
163 | dbg_err("bad empty record"); | ||
164 | goto bad; | ||
165 | } | ||
166 | continue; | ||
167 | } | ||
168 | |||
169 | if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 || | ||
170 | name_len < 0) { | ||
171 | dbg_err("negative values"); | ||
172 | goto bad; | ||
173 | } | ||
174 | |||
175 | if (alignment > ubi->leb_size || alignment == 0) { | ||
176 | dbg_err("bad alignment"); | ||
177 | goto bad; | ||
178 | } | ||
179 | |||
180 | n = alignment % ubi->min_io_size; | ||
181 | if (alignment != 1 && n) { | ||
182 | dbg_err("alignment is not multiple of min I/O unit"); | ||
183 | goto bad; | ||
184 | } | ||
185 | |||
186 | n = ubi->leb_size % alignment; | ||
187 | if (data_pad != n) { | ||
188 | dbg_err("bad data_pad, has to be %d", n); | ||
189 | goto bad; | ||
190 | } | ||
191 | |||
192 | if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { | ||
193 | dbg_err("bad vol_type"); | ||
194 | goto bad; | ||
195 | } | ||
196 | |||
197 | if (upd_marker != 0 && upd_marker != 1) { | ||
198 | dbg_err("bad upd_marker"); | ||
199 | goto bad; | ||
200 | } | ||
201 | |||
202 | if (reserved_pebs > ubi->good_peb_count) { | ||
203 | dbg_err("too large reserved_pebs, good PEBs %d", | ||
204 | ubi->good_peb_count); | ||
205 | goto bad; | ||
206 | } | ||
207 | |||
208 | if (name_len > UBI_VOL_NAME_MAX) { | ||
209 | dbg_err("too long volume name, max %d", | ||
210 | UBI_VOL_NAME_MAX); | ||
211 | goto bad; | ||
212 | } | ||
213 | |||
214 | if (name[0] == '\0') { | ||
215 | dbg_err("NULL volume name"); | ||
216 | goto bad; | ||
217 | } | ||
218 | |||
219 | if (name_len != strnlen(name, name_len + 1)) { | ||
220 | dbg_err("bad name_len"); | ||
221 | goto bad; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | /* Checks that all names are unique */ | ||
226 | for (i = 0; i < ubi->vtbl_slots - 1; i++) { | ||
227 | for (n = i + 1; n < ubi->vtbl_slots; n++) { | ||
228 | int len1 = ubi16_to_cpu(vtbl[i].name_len); | ||
229 | int len2 = ubi16_to_cpu(vtbl[n].name_len); | ||
230 | |||
231 | if (len1 > 0 && len1 == len2 && | ||
232 | !strncmp(vtbl[i].name, vtbl[n].name, len1)) { | ||
233 | ubi_err("volumes %d and %d have the same name" | ||
234 | " \"%s\"", i, n, vtbl[i].name); | ||
235 | ubi_dbg_dump_vtbl_record(&vtbl[i], i); | ||
236 | ubi_dbg_dump_vtbl_record(&vtbl[n], n); | ||
237 | return -EINVAL; | ||
238 | } | ||
239 | } | ||
240 | } | ||
241 | |||
242 | return 0; | ||
243 | |||
244 | bad: | ||
245 | ubi_err("volume table check failed, record %d", i); | ||
246 | ubi_dbg_dump_vtbl_record(&vtbl[i], i); | ||
247 | return -EINVAL; | ||
248 | } | ||
249 | |||
250 | /** | ||
251 | * create_vtbl - create a copy of volume table. | ||
252 | * @ubi: UBI device description object | ||
253 | * @si: scanning information | ||
254 | * @copy: number of the volume table copy | ||
255 | * @vtbl: contents of the volume table | ||
256 | * | ||
257 | * This function returns zero in case of success and a negative error code in | ||
258 | * case of failure. | ||
259 | */ | ||
260 | static int create_vtbl(const struct ubi_device *ubi, struct ubi_scan_info *si, | ||
261 | int copy, void *vtbl) | ||
262 | { | ||
263 | int err, tries = 0; | ||
264 | struct ubi_vid_hdr *vid_hdr; | ||
265 | struct ubi_scan_volume *sv; | ||
266 | struct ubi_scan_leb *new_seb, *old_seb = NULL; | ||
267 | |||
268 | ubi_msg("create volume table (copy #%d)", copy + 1); | ||
269 | |||
270 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | ||
271 | if (!vid_hdr) | ||
272 | return -ENOMEM; | ||
273 | |||
274 | /* | ||
275 | * Check if a logical eraseblock which would have to contain this | ||
276 | * volume table copy was found during scanning. If so, it has to be | ||
277 | * wiped out. | ||
278 | */ | ||
279 | sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID); | ||
280 | if (sv) | ||
281 | old_seb = ubi_scan_find_seb(sv, copy); | ||
282 | |||
283 | retry: | ||
284 | new_seb = ubi_scan_get_free_peb(ubi, si); | ||
285 | if (IS_ERR(new_seb)) { | ||
286 | err = PTR_ERR(new_seb); | ||
287 | goto out_free; | ||
288 | } | ||
289 | |||
290 | vid_hdr->vol_type = UBI_VID_DYNAMIC; | ||
291 | vid_hdr->vol_id = cpu_to_ubi32(UBI_LAYOUT_VOL_ID); | ||
292 | vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT; | ||
293 | vid_hdr->data_size = vid_hdr->used_ebs = | ||
294 | vid_hdr->data_pad = cpu_to_ubi32(0); | ||
295 | vid_hdr->lnum = cpu_to_ubi32(copy); | ||
296 | vid_hdr->sqnum = cpu_to_ubi64(++si->max_sqnum); | ||
297 | vid_hdr->leb_ver = cpu_to_ubi32(old_seb ? old_seb->leb_ver + 1 : 0); | ||
298 | |||
299 | /* The EC header is already there, write the VID header */ | ||
300 | err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr); | ||
301 | if (err) | ||
302 | goto write_error; | ||
303 | |||
304 | /* Write the layout volume contents */ | ||
305 | err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size); | ||
306 | if (err) | ||
307 | goto write_error; | ||
308 | |||
309 | /* | ||
310 | * And add it to the scanning information. Don't delete the old | ||
311 | * @old_seb as it will be deleted and freed in 'ubi_scan_add_used()'. | ||
312 | */ | ||
313 | err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec, | ||
314 | vid_hdr, 0); | ||
315 | kfree(new_seb); | ||
316 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
317 | return err; | ||
318 | |||
319 | write_error: | ||
320 | /* Maybe this physical eraseblock went bad, try to pick another one */ | ||
321 | if (++tries <= 5) | ||
322 | err = ubi_scan_add_to_list(si, new_seb->pnum, new_seb->ec, | ||
323 | &si->corr); | ||
324 | kfree(new_seb); | ||
325 | if (tries <= 5 && !err) | ||
326 | goto retry; | ||
327 | |||
328 | out_free: | ||
329 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
330 | return err; | ||
331 | |||
332 | } | ||
333 | |||
334 | /** | ||
335 | * process_lvol - process the layout volume. | ||
336 | * @ubi: UBI device description object | ||
337 | * @si: scanning information | ||
338 | * @sv: layout volume scanning information | ||
339 | * | ||
340 | * This function is responsible for reading the layout volume, ensuring it is | ||
341 | * not corrupted, and recovering from corruptions if needed. Returns volume | ||
342 | * table in case of success and a negative error code in case of failure. | ||
343 | */ | ||
344 | static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi, | ||
345 | struct ubi_scan_info *si, | ||
346 | struct ubi_scan_volume *sv) | ||
347 | { | ||
348 | int err; | ||
349 | struct rb_node *rb; | ||
350 | struct ubi_scan_leb *seb; | ||
351 | struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL }; | ||
352 | int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1}; | ||
353 | |||
354 | /* | ||
355 | * UBI goes through the following steps when it changes the layout | ||
356 | * volume: | ||
357 | * a. erase LEB 0; | ||
358 | * b. write new data to LEB 0; | ||
359 | * c. erase LEB 1; | ||
360 | * d. write new data to LEB 1. | ||
361 | * | ||
362 | * Before the change, both LEBs contain the same data. | ||
363 | * | ||
364 | * Due to unclean reboots, the contents of LEB 0 may be lost, but there | ||
365 | * should be LEB 1. So it is OK if LEB 0 is corrupted while LEB 1 is not. | ||
366 | * Similarly, LEB 1 may be lost, but there should be LEB 0. And | ||
367 | * finally, unclean reboots may result in a situation where neither LEB | ||
368 | * 0 nor LEB 1 is corrupted, but they are different. In this case, LEB | ||
369 | * 0 contains more recent information. | ||
370 | * | ||
371 | * So the plan is to first check LEB 0. Then | ||
372 | * a. if LEB 0 is OK, it must contain the most recent data; then | ||
373 | * we compare it with LEB 1, and if they are different, we copy LEB | ||
374 | * 0 to LEB 1; | ||
375 | * b. if LEB 0 is corrupted, LEB 1 has to be OK; in this case we copy LEB 1 | ||
376 | * to LEB 0. | ||
377 | */ | ||
378 | |||
379 | dbg_msg("check layout volume"); | ||
380 | |||
381 | /* Read both LEB 0 and LEB 1 into memory */ | ||
382 | ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) { | ||
383 | leb[seb->lnum] = kzalloc(ubi->vtbl_size, GFP_KERNEL); | ||
384 | if (!leb[seb->lnum]) { | ||
385 | err = -ENOMEM; | ||
386 | goto out_free; | ||
387 | } | ||
388 | |||
389 | err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, | ||
390 | ubi->vtbl_size); | ||
391 | if (err == UBI_IO_BITFLIPS || err == -EBADMSG) | ||
392 | /* Scrub the PEB later */ | ||
393 | seb->scrub = 1; | ||
394 | else if (err) | ||
395 | goto out_free; | ||
396 | } | ||
397 | |||
398 | err = -EINVAL; | ||
399 | if (leb[0]) { | ||
400 | leb_corrupted[0] = vtbl_check(ubi, leb[0]); | ||
401 | if (leb_corrupted[0] < 0) | ||
402 | goto out_free; | ||
403 | } | ||
404 | |||
405 | if (!leb_corrupted[0]) { | ||
406 | /* LEB 0 is OK */ | ||
407 | if (leb[1]) | ||
408 | leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size); | ||
409 | if (leb_corrupted[1]) { | ||
410 | ubi_warn("volume table copy #2 is corrupted"); | ||
411 | err = create_vtbl(ubi, si, 1, leb[0]); | ||
412 | if (err) | ||
413 | goto out_free; | ||
414 | ubi_msg("volume table was restored"); | ||
415 | } | ||
416 | |||
417 | /* Both LEB 0 and LEB 1 are OK and consistent */ | ||
418 | kfree(leb[1]); | ||
419 | return leb[0]; | ||
420 | } else { | ||
421 | /* LEB 0 is corrupted or does not exist */ | ||
422 | if (leb[1]) { | ||
423 | leb_corrupted[1] = vtbl_check(ubi, leb[1]); | ||
424 | if (leb_corrupted[1] < 0) | ||
425 | goto out_free; | ||
426 | } | ||
427 | if (leb_corrupted[1]) { | ||
428 | /* Both LEB 0 and LEB 1 are corrupted */ | ||
429 | ubi_err("both volume tables are corrupted"); | ||
430 | goto out_free; | ||
431 | } | ||
432 | |||
433 | ubi_warn("volume table copy #1 is corrupted"); | ||
434 | err = create_vtbl(ubi, si, 0, leb[1]); | ||
435 | if (err) | ||
436 | goto out_free; | ||
437 | ubi_msg("volume table was restored"); | ||
438 | |||
439 | kfree(leb[0]); | ||
440 | return leb[1]; | ||
441 | } | ||
442 | |||
443 | out_free: | ||
444 | kfree(leb[0]); | ||
445 | kfree(leb[1]); | ||
446 | return ERR_PTR(err); | ||
447 | } | ||
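The possible (LEB 0, LEB 1) states and the action taken by 'process_lvol()' can be summarized as follows:

```c
/*
 * Outcome matrix of process_lvol(), restated for clarity:
 *
 *   LEB 0        LEB 1          action
 *   OK           OK, identical  use LEB 0
 *   OK           bad/different  rewrite copy #2 from LEB 0, use LEB 0
 *   bad/missing  OK             rewrite copy #1 from LEB 1, use LEB 1
 *   bad/missing  bad/missing    fail: both volume tables corrupted
 */
```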
448 | |||
449 | /** | ||
450 | * create_empty_lvol - create empty layout volume. | ||
451 | * @ubi: UBI device description object | ||
452 | * @si: scanning information | ||
453 | * | ||
454 | * This function returns volume table contents in case of success and a | ||
455 | * negative error code in case of failure. | ||
456 | */ | ||
457 | static struct ubi_vtbl_record *create_empty_lvol(const struct ubi_device *ubi, | ||
458 | struct ubi_scan_info *si) | ||
459 | { | ||
460 | int i; | ||
461 | struct ubi_vtbl_record *vtbl; | ||
462 | |||
463 | vtbl = kzalloc(ubi->vtbl_size, GFP_KERNEL); | ||
464 | if (!vtbl) | ||
465 | return ERR_PTR(-ENOMEM); | ||
466 | |||
467 | for (i = 0; i < ubi->vtbl_slots; i++) | ||
468 | memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE); | ||
469 | |||
470 | for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { | ||
471 | int err; | ||
472 | |||
473 | err = create_vtbl(ubi, si, i, vtbl); | ||
474 | if (err) { | ||
475 | kfree(vtbl); | ||
476 | return ERR_PTR(err); | ||
477 | } | ||
478 | } | ||
479 | |||
480 | return vtbl; | ||
481 | } | ||
482 | |||
483 | /** | ||
484 | * init_volumes - initialize volume information for existing volumes. | ||
485 | * @ubi: UBI device description object | ||
486 | * @si: scanning information | ||
487 | * @vtbl: volume table | ||
488 | * | ||
489 | * This function allocates volume description objects for existing volumes. | ||
490 | * Returns zero in case of success and a negative error code in case of | ||
491 | * failure. | ||
492 | */ | ||
493 | static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, | ||
494 | const struct ubi_vtbl_record *vtbl) | ||
495 | { | ||
496 | int i, reserved_pebs = 0; | ||
497 | struct ubi_scan_volume *sv; | ||
498 | struct ubi_volume *vol; | ||
499 | |||
500 | for (i = 0; i < ubi->vtbl_slots; i++) { | ||
501 | cond_resched(); | ||
502 | |||
503 | if (ubi32_to_cpu(vtbl[i].reserved_pebs) == 0) | ||
504 | continue; /* Empty record */ | ||
505 | |||
506 | vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); | ||
507 | if (!vol) | ||
508 | return -ENOMEM; | ||
509 | |||
510 | vol->reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs); | ||
511 | vol->alignment = ubi32_to_cpu(vtbl[i].alignment); | ||
512 | vol->data_pad = ubi32_to_cpu(vtbl[i].data_pad); | ||
513 | vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? | ||
514 | UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; | ||
515 | vol->name_len = ubi16_to_cpu(vtbl[i].name_len); | ||
516 | vol->usable_leb_size = ubi->leb_size - vol->data_pad; | ||
517 | memcpy(vol->name, vtbl[i].name, vol->name_len); | ||
518 | vol->name[vol->name_len] = '\0'; | ||
519 | vol->vol_id = i; | ||
520 | |||
521 | ubi_assert(!ubi->volumes[i]); | ||
522 | ubi->volumes[i] = vol; | ||
523 | ubi->vol_count += 1; | ||
524 | vol->ubi = ubi; | ||
525 | reserved_pebs += vol->reserved_pebs; | ||
526 | |||
527 | /* | ||
528 | * In case of a dynamic volume UBI knows nothing about how much | ||
529 | * data is stored there. So assume the whole volume is used. | ||
530 | */ | ||
531 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { | ||
532 | vol->used_ebs = vol->reserved_pebs; | ||
533 | vol->last_eb_bytes = vol->usable_leb_size; | ||
534 | vol->used_bytes = vol->used_ebs * vol->usable_leb_size; | ||
535 | continue; | ||
536 | } | ||
537 | |||
538 | /* Static volumes only */ | ||
539 | sv = ubi_scan_find_sv(si, i); | ||
540 | if (!sv) { | ||
541 | /* | ||
542 | * No eraseblocks belonging to this volume were found. We | ||
543 | * don't actually know whether this static volume is | ||
544 | * completely corrupted or just contains no data. And | ||
545 | * we cannot know this as long as data size is not | ||
546 | * stored on flash. So we just assume the volume is | ||
547 | * empty. FIXME: this should be handled. | ||
548 | */ | ||
549 | continue; | ||
550 | } | ||
551 | |||
552 | if (sv->leb_count != sv->used_ebs) { | ||
553 | /* | ||
554 | * We found a static volume which misses several | ||
555 | * eraseblocks. Treat it as corrupted. | ||
556 | */ | ||
557 | ubi_warn("static volume %d misses %d LEBs - corrupted", | ||
558 | sv->vol_id, sv->used_ebs - sv->leb_count); | ||
559 | vol->corrupted = 1; | ||
560 | continue; | ||
561 | } | ||
562 | |||
563 | vol->used_ebs = sv->used_ebs; | ||
564 | vol->used_bytes = (vol->used_ebs - 1) * vol->usable_leb_size; | ||
565 | vol->used_bytes += sv->last_data_size; | ||
566 | vol->last_eb_bytes = sv->last_data_size; | ||
567 | } | ||
568 | |||
569 | vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); | ||
570 | if (!vol) | ||
571 | return -ENOMEM; | ||
572 | |||
573 | vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS; | ||
574 | vol->alignment = 1; | ||
575 | vol->vol_type = UBI_DYNAMIC_VOLUME; | ||
576 | vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1; | ||
577 | memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1); | ||
578 | vol->usable_leb_size = ubi->leb_size; | ||
579 | vol->used_ebs = vol->reserved_pebs; | ||
580 | vol->last_eb_bytes = vol->reserved_pebs; | ||
581 | vol->used_bytes = vol->used_ebs * (ubi->leb_size - vol->data_pad); | ||
582 | vol->vol_id = UBI_LAYOUT_VOL_ID; | ||
583 | |||
584 | ubi_assert(!ubi->volumes[i]); | ||
585 | ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol; | ||
586 | reserved_pebs += vol->reserved_pebs; | ||
587 | ubi->vol_count += 1; | ||
588 | vol->ubi = ubi; | ||
589 | |||
590 | if (reserved_pebs > ubi->avail_pebs) | ||
591 | ubi_err("not enough PEBs, required %d, available %d", | ||
592 | reserved_pebs, ubi->avail_pebs); | ||
593 | ubi->rsvd_pebs += reserved_pebs; | ||
594 | ubi->avail_pebs -= reserved_pebs; | ||
595 | |||
596 | return 0; | ||
597 | } | ||
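For static volumes, the used-bytes accounting above says: all LEBs but the last are full, and the last holds 'last_data_size' bytes. A worked example with illustrative numbers (not taken from the source):

```c
/*
 * Example: usable_leb_size = 129024, used_ebs = 10, last LEB holds
 * 2000 bytes of data:
 *
 *   used_bytes = (10 - 1) * 129024 + 2000 = 1163216
 */
```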
598 | |||
599 | /** | ||
600 | * check_sv - check volume scanning information. | ||
601 | * @vol: UBI volume description object | ||
602 | * @sv: volume scanning information | ||
603 | * | ||
604 | * This function returns zero if the volume scanning information is consistent | ||
605 | * with the data read from the volume table, and %-EINVAL if not. | ||
606 | */ | ||
607 | static int check_sv(const struct ubi_volume *vol, | ||
608 | const struct ubi_scan_volume *sv) | ||
609 | { | ||
610 | if (sv->highest_lnum >= vol->reserved_pebs) { | ||
611 | dbg_err("bad highest_lnum"); | ||
612 | goto bad; | ||
613 | } | ||
614 | if (sv->leb_count > vol->reserved_pebs) { | ||
615 | dbg_err("bad leb_count"); | ||
616 | goto bad; | ||
617 | } | ||
618 | if (sv->vol_type != vol->vol_type) { | ||
619 | dbg_err("bad vol_type"); | ||
620 | goto bad; | ||
621 | } | ||
622 | if (sv->used_ebs > vol->reserved_pebs) { | ||
623 | dbg_err("bad used_ebs"); | ||
624 | goto bad; | ||
625 | } | ||
626 | if (sv->data_pad != vol->data_pad) { | ||
627 | dbg_err("bad data_pad"); | ||
628 | goto bad; | ||
629 | } | ||
630 | return 0; | ||
631 | |||
632 | bad: | ||
633 | ubi_err("bad scanning information"); | ||
634 | ubi_dbg_dump_sv(sv); | ||
635 | ubi_dbg_dump_vol_info(vol); | ||
636 | return -EINVAL; | ||
637 | } | ||
638 | |||
639 | /** | ||
640 | * check_scanning_info - check the scanning information. | ||
641 | * @ubi: UBI device description object | ||
642 | * @si: scanning information | ||
643 | * | ||
644 | * Even though we protect on-flash data by CRC checksums, we still don't trust | ||
645 | * the media. This function ensures that the scanning information is consistent | ||
646 | * with the information read from the volume table. Returns zero if the scanning | ||
647 | * information is OK and %-EINVAL if it is not. | ||
648 | */ | ||
649 | static int check_scanning_info(const struct ubi_device *ubi, | ||
650 | struct ubi_scan_info *si) | ||
651 | { | ||
652 | int err, i; | ||
653 | struct ubi_scan_volume *sv; | ||
654 | struct ubi_volume *vol; | ||
655 | |||
656 | if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) { | ||
657 | ubi_err("scanning found %d volumes, maximum is %d + %d", | ||
658 | si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots); | ||
659 | return -EINVAL; | ||
660 | } | ||
661 | |||
662 | if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT && | ||
663 | si->highest_vol_id < UBI_INTERNAL_VOL_START) { | ||
664 | ubi_err("too large volume ID %d found by scanning", | ||
665 | si->highest_vol_id); | ||
666 | return -EINVAL; | ||
667 | } | ||
668 | |||
669 | |||
670 | for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { | ||
671 | cond_resched(); | ||
672 | |||
673 | sv = ubi_scan_find_sv(si, i); | ||
674 | vol = ubi->volumes[i]; | ||
675 | if (!vol) { | ||
676 | if (sv) | ||
677 | ubi_scan_rm_volume(si, sv); | ||
678 | continue; | ||
679 | } | ||
680 | |||
681 | if (vol->reserved_pebs == 0) { | ||
682 | ubi_assert(i < ubi->vtbl_slots); | ||
683 | |||
684 | if (!sv) | ||
685 | continue; | ||
686 | |||
687 | /* | ||
688 | * During scanning we found a volume which does not | ||
689 | * exist according to the information in the volume | ||
690 | * table. This must have happened due to an unclean | ||
691 | * reboot while the volume was being removed. Discard | ||
692 | * these eraseblocks. | ||
693 | */ | ||
694 | ubi_msg("finish volume %d removal", sv->vol_id); | ||
695 | ubi_scan_rm_volume(si, sv); | ||
696 | } else if (sv) { | ||
697 | err = check_sv(vol, sv); | ||
698 | if (err) | ||
699 | return err; | ||
700 | } | ||
701 | } | ||
702 | |||
703 | return 0; | ||
704 | } | ||
705 | |||
706 | /** | ||
707 | * ubi_read_volume_table - read the volume table. | ||
708 | * | ||
709 | * @ubi: UBI device description object | ||
710 | * @si: scanning information | ||
711 | * | ||
712 | * This function reads the volume table, checks it, recovers from errors if needed, | ||
713 | * or creates it if needed. Returns zero in case of success and a negative | ||
714 | * error code in case of failure. | ||
715 | */ | ||
716 | int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si) | ||
717 | { | ||
718 | int i, err; | ||
719 | struct ubi_scan_volume *sv; | ||
720 | |||
721 | empty_vtbl_record.crc = cpu_to_ubi32(0xf116c36b); | ||
722 | |||
723 | /* | ||
724 | * The number of supported volumes is limited by the eraseblock size | ||
725 | * and by the UBI_MAX_VOLUMES constant. | ||
726 | */ | ||
727 | ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE; | ||
728 | if (ubi->vtbl_slots > UBI_MAX_VOLUMES) | ||
729 | ubi->vtbl_slots = UBI_MAX_VOLUMES; | ||
730 | |||
731 | ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE; | ||
732 | ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size); | ||
733 | |||
734 | sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID); | ||
735 | if (!sv) { | ||
736 | /* | ||
737 | * No logical eraseblocks belonging to the layout volume were | ||
738 | * found. This could mean that the flash is just empty. In | ||
739 | * this case we create an empty layout volume. | ||
740 | * | ||
741 | * But if the flash is not empty, this must be corruption, or the | ||
742 | * MTD device just contains garbage. | ||
743 | */ | ||
744 | if (si->is_empty) { | ||
745 | ubi->vtbl = create_empty_lvol(ubi, si); | ||
746 | if (IS_ERR(ubi->vtbl)) | ||
747 | return PTR_ERR(ubi->vtbl); | ||
748 | } else { | ||
749 | ubi_err("the layout volume was not found"); | ||
750 | return -EINVAL; | ||
751 | } | ||
752 | } else { | ||
753 | if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) { | ||
754 | /* This must not happen with proper UBI images */ | ||
755 | dbg_err("too many LEBs (%d) in layout volume", | ||
756 | sv->leb_count); | ||
757 | return -EINVAL; | ||
758 | } | ||
759 | |||
760 | ubi->vtbl = process_lvol(ubi, si, sv); | ||
761 | if (IS_ERR(ubi->vtbl)) | ||
762 | return PTR_ERR(ubi->vtbl); | ||
763 | } | ||
764 | |||
765 | ubi->avail_pebs = ubi->good_peb_count; | ||
766 | |||
767 | /* | ||
768 | * The layout volume is OK, initialize the corresponding in-RAM data | ||
769 | * structures. | ||
770 | */ | ||
771 | err = init_volumes(ubi, si, ubi->vtbl); | ||
772 | if (err) | ||
773 | goto out_free; | ||
774 | |||
775 | /* | ||
776 | * Make sure that the scanning information is consistent with the | ||
777 | * information stored in the volume table. | ||
778 | */ | ||
779 | err = check_scanning_info(ubi, si); | ||
780 | if (err) | ||
781 | goto out_free; | ||
782 | |||
783 | return 0; | ||
784 | |||
785 | out_free: | ||
786 | kfree(ubi->vtbl); | ||
787 | for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) | ||
788 | if (ubi->volumes[i]) { | ||
789 | kfree(ubi->volumes[i]); | ||
790 | ubi->volumes[i] = NULL; | ||
791 | } | ||
792 | return err; | ||
793 | } | ||
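The sizing computed at the top of 'ubi_read_volume_table()' is worth a worked example. The concrete numbers below are illustrative assumptions, not values from this diff:

```c
/*
 * Assume leb_size = 129024, UBI_VTBL_RECORD_SIZE = 172 and
 * UBI_MAX_VOLUMES = 128, with min_io_size = 2048:
 *
 *   vtbl_slots = 129024 / 172 = 750, capped to UBI_MAX_VOLUMES = 128
 *   vtbl_size  = 128 * 172 = 22016, ALIGNed up to 2048 -> 22528 bytes
 */
```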
794 | |||
795 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | ||
796 | |||
797 | /** | ||
798 | * paranoid_vtbl_check - check volume table. | ||
799 | * @ubi: UBI device description object | ||
800 | */ | ||
801 | static void paranoid_vtbl_check(const struct ubi_device *ubi) | ||
802 | { | ||
803 | if (vtbl_check(ubi, ubi->vtbl)) { | ||
804 | ubi_err("paranoid check failed"); | ||
805 | BUG(); | ||
806 | } | ||
807 | } | ||
808 | |||
809 | #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ | ||
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c new file mode 100644 index 000000000000..9ecaf77eca9e --- /dev/null +++ b/drivers/mtd/ubi/wl.c | |||
@@ -0,0 +1,1671 @@ | |||
1 | /* | ||
2 | * Copyright (c) International Business Machines Corp., 2006 | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
12 | * the GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * UBI wear-leveling unit. | ||
23 | * | ||
24 | * This unit is responsible for wear-leveling. It works in terms of physical | ||
25 | * eraseblocks and erase counters and knows nothing about logical eraseblocks, | ||
26 | * volumes, etc. From this unit's perspective all physical eraseblocks are of | ||
27 | * two types - used and free. Used physical eraseblocks are those that were | ||
28 | * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are | ||
29 | * those that were put by the 'ubi_wl_put_peb()' function. | ||
30 | * | ||
31 | * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter | ||
32 | * header. The rest of the physical eraseblock contains only 0xFF bytes. | ||
33 | * | ||
34 | * When physical eraseblocks are returned to the WL unit by means of the | ||
35 | * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is | ||
36 | * done asynchronously in context of the per-UBI device background thread, | ||
37 | * which is also managed by the WL unit. | ||
38 | * | ||
39 | * The wear-leveling is ensured by means of moving the contents of used | ||
40 | * physical eraseblocks with low erase counter to free physical eraseblocks | ||
41 | * with high erase counter. | ||
42 | * | ||
43 | * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick | ||
44 | * an "optimal" physical eraseblock. For example, when it is known that the | ||
45 | * physical eraseblock will be "put" soon because it contains short-term data, | ||
46 | * the WL unit may pick a free physical eraseblock with low erase counter, and | ||
47 | * so forth. | ||
48 | * | ||
49 | * If the WL unit fails to erase a physical eraseblock, it marks it as bad. | ||
50 | * | ||
51 | * This unit is also responsible for scrubbing. If a bit-flip is detected in a | ||
52 | * physical eraseblock, it has to be moved. Technically this is the same as | ||
53 | * moving it for wear-leveling reasons. | ||
54 | * | ||
55 | * As was said, for the WL unit all physical eraseblocks are either "free" | ||
56 | * or "used". Free eraseblock are kept in the @wl->free RB-tree, while used | ||
57 | * eraseblocks are kept in a set of different RB-trees: @wl->used, | ||
58 | * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. | ||
59 | * | ||
60 | * Note, in this implementation, we keep a small in-RAM object for each physical | ||
61 | * eraseblock. This is surely not a scalable solution. But it appears to be good | ||
62 | * enough for moderately large flashes and it is simple. In the future, one may | ||
63 | * re-work this unit and make it more scalable. | ||
64 | * | ||
65 | * At the moment this unit does not utilize the sequence number, which was | ||
66 | * introduced relatively recently. But it would be wise to do this because the | ||
67 | * sequence number of a logical eraseblock characterizes how old it is. For | ||
68 | * example, when we move a PEB with low erase counter, and we need to pick the | ||
69 | * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we | ||
70 | * pick a target PEB with an average EC if our PEB is not very "old". This | ||
71 | * leaves room for future re-work of the WL unit. | ||
72 | * | ||
73 | * FIXME: looks too complex, should be simplified (later). | ||
74 | */ | ||
75 | |||
76 | #include <linux/slab.h> | ||
77 | #include <linux/crc32.h> | ||
78 | #include <linux/freezer.h> | ||
79 | #include <linux/kthread.h> | ||
80 | #include "ubi.h" | ||
81 | |||
82 | /* Number of physical eraseblocks reserved for wear-leveling purposes */ | ||
83 | #define WL_RESERVED_PEBS 1 | ||
84 | |||
85 | /* | ||
86 | * For how many erase cycles short term, unknown, and long term physical | ||
87 | * eraseblocks are protected. | ||
88 | */ | ||
89 | #define ST_PROTECTION 16 | ||
90 | #define U_PROTECTION 10 | ||
91 | #define LT_PROTECTION 4 | ||
92 | |||
93 | /* | ||
94 | * Maximum difference between two erase counters. If this threshold is | ||
95 | * exceeded, the WL unit starts moving data from used physical eraseblocks with | ||
96 | * low erase counter to free physical eraseblocks with high erase counter. | ||
97 | */ | ||
98 | #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD | ||
99 | |||
100 | /* | ||
101 | * When a physical eraseblock is moved, the WL unit has to pick the target | ||
102 | * physical eraseblock to move to. The simplest way would be just to pick the | ||
103 | * one with the highest erase counter. But in certain workloads this could lead | ||
104 | * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a | ||
105 | * situation where the picked physical eraseblock is constantly erased after the | ||
106 | * data is written to it. So, we have a constant which limits the highest erase | ||
107 | * counter of the free physical eraseblock to pick. Namely, the WL unit does | ||
108 | * not pick eraseblocks with erase counter greater than the lowest erase | ||
109 | * counter plus %WL_FREE_MAX_DIFF. | ||
110 | */ | ||
111 | #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) | ||
112 | |||
113 | /* | ||
114 | * Maximum number of consecutive background thread failures which is enough to | ||
115 | * switch to read-only mode. | ||
116 | */ | ||
117 | #define WL_MAX_FAILURES 32 | ||
118 | |||
119 | /** | ||
120 | * struct ubi_wl_entry - wear-leveling entry. | ||
121 | * @rb: link in the corresponding RB-tree | ||
122 | * @ec: erase counter | ||
123 | * @pnum: physical eraseblock number | ||
124 | * | ||
125 | * Each physical eraseblock has a corresponding &struct wl_entry object which | ||
126 | * may be kept in different RB-trees. | ||
127 | */ | ||
128 | struct ubi_wl_entry { | ||
129 | struct rb_node rb; | ||
130 | int ec; | ||
131 | int pnum; | ||
132 | }; | ||
133 | |||
134 | /** | ||
135 | * struct ubi_wl_prot_entry - PEB protection entry. | ||
136 | * @rb_pnum: link in the @wl->prot.pnum RB-tree | ||
137 | * @rb_aec: link in the @wl->prot.aec RB-tree | ||
138 | * @abs_ec: the absolute erase counter value when the protection ends | ||
139 | * @e: the wear-leveling entry of the physical eraseblock under protection | ||
140 | * | ||
141 | * When the WL unit returns a physical eraseblock, the physical eraseblock is | ||
142 | * protected from being moved for some "time". For this reason, the physical | ||
143 | * eraseblock is not directly moved from the @wl->free tree to the @wl->used | ||
144 | * tree. There is one more tree in between where this physical eraseblock is | ||
145 | * temporarily stored (@wl->prot). | ||
146 | * | ||
147 | * All this protection stuff is needed because: | ||
148 | * o we don't want to move physical eraseblocks just after we have given them | ||
149 | * to the user; instead, we first want to let users fill them up with data; | ||
150 | * | ||
151 | * o there is a chance that the user will put the physical eraseblock very | ||
152 | * soon, so it makes sense not to move it for some time, but wait; this is | ||
153 | * especially important in case of "short term" physical eraseblocks. | ||
154 | * | ||
155 | * Physical eraseblocks stay protected only for a limited time. But the "time" | ||
156 | * is measured in erase cycles in this case. This is implemented with the help of | ||
157 | * the absolute erase counter (@wl->abs_ec). When it reaches a certain value, the | ||
158 | * physical eraseblocks are moved from the protection trees (@wl->prot.*) to | ||
159 | * the @wl->used tree. | ||
160 | * | ||
161 | * Protected physical eraseblocks are searched by physical eraseblock number | ||
162 | * (when they are put) and by the absolute erase counter (to check if it is | ||
163 | * time to move them to the @wl->used tree). So there are actually 2 RB-trees | ||
164 | * storing the protected physical eraseblocks: @wl->prot.pnum and | ||
165 | * @wl->prot.aec. They are referred to as the "protection" trees. The | ||
166 | * first one is indexed by the physical eraseblock number. The second one is | ||
167 | * indexed by the absolute erase counter. Both trees store | ||
168 | * &struct ubi_wl_prot_entry objects. | ||
169 | * | ||
170 | * Each physical eraseblock has 2 main states: free and used. The former state | ||
171 | * corresponds to the @wl->free tree. The latter state is split up into several | ||
172 | * sub-states: | ||
173 | * o the WL movement is allowed (@wl->used tree); | ||
174 | * o the WL movement is temporarily prohibited (@wl->prot.pnum and | ||
175 | * @wl->prot.aec trees); | ||
176 | * o scrubbing is needed (@wl->scrub tree). | ||
177 | * | ||
178 | * Depending on the sub-state, wear-leveling entries of the used physical | ||
179 | * eraseblocks may be kept in one of those trees. | ||
180 | */ | ||
181 | struct ubi_wl_prot_entry { | ||
182 | struct rb_node rb_pnum; | ||
183 | struct rb_node rb_aec; | ||
184 | unsigned long long abs_ec; | ||
185 | struct ubi_wl_entry *e; | ||
186 | }; | ||
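The @wl->prot.aec tree described above makes expiry cheap: since it is keyed by the absolute erase counter, expired entries are always leftmost. A hedged sketch of the release step; the field names and the helper name are assumed from the description above, not copied from the source:

```c
/* Sketch: move no-longer-protected PEBs back to the @used tree. */
static void release_expired_protection(struct ubi_device *ubi)
{
	struct ubi_wl_prot_entry *pe;
	struct rb_node *p;

	while ((p = rb_first(&ubi->prot.aec)) != NULL) {
		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_aec);
		if (pe->abs_ec > ubi->abs_ec)
			break;			/* leftmost still protected */
		rb_erase(&pe->rb_aec, &ubi->prot.aec);
		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
		wl_tree_add(pe->e, &ubi->used);	/* WL movement allowed again */
		kfree(pe);
	}
}
```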
187 | |||
188 | /** | ||
189 | * struct ubi_work - UBI work description data structure. | ||
190 | * @list: a link in the list of pending works | ||
191 | * @func: worker function | ||
192 | * @priv: private data of the worker function | ||
193 | * | ||
194 | * @e: physical eraseblock to erase | ||
195 | * @torture: if the physical eraseblock has to be tortured | ||
196 | * | ||
197 | * The @func pointer points to the worker function. If the @cancel argument is | ||
198 | * not zero, the worker has to free the resources and exit immediately. The | ||
199 | * worker has to return zero in case of success and a negative error code in | ||
200 | * case of failure. | ||
201 | */ | ||
202 | struct ubi_work { | ||
203 | struct list_head list; | ||
204 | int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel); | ||
205 | /* The below fields are only relevant to erasure works */ | ||
206 | struct ubi_wl_entry *e; | ||
207 | int torture; | ||
208 | }; | ||
209 | |||
210 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | ||
211 | static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec); | ||
212 | static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, | ||
213 | struct rb_root *root); | ||
214 | #else | ||
215 | #define paranoid_check_ec(ubi, pnum, ec) 0 | ||
216 | #define paranoid_check_in_wl_tree(e, root) | ||
217 | #endif | ||
218 | |||
219 | /* Slab cache for wear-leveling entries */ | ||
220 | static struct kmem_cache *wl_entries_slab; | ||
221 | |||
222 | /** | ||
223 | * tree_empty - a helper function to check if an RB-tree is empty. | ||
224 | * @root: the root of the tree | ||
225 | * | ||
226 | * This function returns non-zero if the RB-tree is empty and zero if not. | ||
227 | */ | ||
228 | static inline int tree_empty(struct rb_root *root) | ||
229 | { | ||
230 | return root->rb_node == NULL; | ||
231 | } | ||
232 | |||
233 | /** | ||
234 | * wl_tree_add - add a wear-leveling entry to a WL RB-tree. | ||
235 | * @e: the wear-leveling entry to add | ||
236 | * @root: the root of the tree | ||
237 | * | ||
238 | * Note, we use (erase counter, physical eraseblock number) pairs as keys in | ||
239 | * the @ubi->used and @ubi->free RB-trees. | ||
240 | */ | ||
241 | static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) | ||
242 | { | ||
243 | struct rb_node **p, *parent = NULL; | ||
244 | |||
245 | p = &root->rb_node; | ||
246 | while (*p) { | ||
247 | struct ubi_wl_entry *e1; | ||
248 | |||
249 | parent = *p; | ||
250 | e1 = rb_entry(parent, struct ubi_wl_entry, rb); | ||
251 | |||
252 | if (e->ec < e1->ec) | ||
253 | p = &(*p)->rb_left; | ||
254 | else if (e->ec > e1->ec) | ||
255 | p = &(*p)->rb_right; | ||
256 | else { | ||
257 | ubi_assert(e->pnum != e1->pnum); | ||
258 | if (e->pnum < e1->pnum) | ||
259 | p = &(*p)->rb_left; | ||
260 | else | ||
261 | p = &(*p)->rb_right; | ||
262 | } | ||
263 | } | ||
264 | |||
265 | rb_link_node(&e->rb, parent, p); | ||
266 | rb_insert_color(&e->rb, root); | ||
267 | } | ||
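The key wl_tree_add() descends on is worth spelling out: the erase counter is the primary key and the PEB number breaks ties, which is what keeps entries with equal ECs distinct in the tree. A stand-alone user-space sketch of the same ordering rule (the struct and names here are invented for illustration, not UBI's types):

#include <stdio.h>

/* Illustrative stand-in for struct ubi_wl_entry; only the key fields. */
struct wl_key {
	int ec;		/* erase counter, primary key */
	int pnum;	/* physical eraseblock number, tie breaker */
};

/* Same ordering rule wl_tree_add() applies while descending the
 * RB-tree: compare erase counters first, break ties by PEB number. */
static int wl_key_cmp(const struct wl_key *a, const struct wl_key *b)
{
	if (a->ec != b->ec)
		return a->ec < b->ec ? -1 : 1;
	return a->pnum < b->pnum ? -1 : 1;
}

int main(void)
{
	struct wl_key x = { .ec = 10, .pnum = 3 };
	struct wl_key y = { .ec = 10, .pnum = 7 };

	/* Equal ECs, so the lower PEB number sorts first: prints -1. */
	printf("%d\n", wl_key_cmp(&x, &y));
	return 0;
}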
268 | |||
269 | |||
270 | /* | ||
271 | * Helper functions to add and delete wear-leveling entries from different | ||
272 | * trees. | ||
273 | */ | ||
274 | |||
275 | static void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e) | ||
276 | { | ||
277 | wl_tree_add(e, &ubi->free); | ||
278 | } | ||
279 | static inline void used_tree_add(struct ubi_device *ubi, | ||
280 | struct ubi_wl_entry *e) | ||
281 | { | ||
282 | wl_tree_add(e, &ubi->used); | ||
283 | } | ||
284 | static inline void scrub_tree_add(struct ubi_device *ubi, | ||
285 | struct ubi_wl_entry *e) | ||
286 | { | ||
287 | wl_tree_add(e, &ubi->scrub); | ||
288 | } | ||
289 | static inline void free_tree_del(struct ubi_device *ubi, | ||
290 | struct ubi_wl_entry *e) | ||
291 | { | ||
292 | paranoid_check_in_wl_tree(e, &ubi->free); | ||
293 | rb_erase(&e->rb, &ubi->free); | ||
294 | } | ||
295 | static inline void used_tree_del(struct ubi_device *ubi, | ||
296 | struct ubi_wl_entry *e) | ||
297 | { | ||
298 | paranoid_check_in_wl_tree(e, &ubi->used); | ||
299 | rb_erase(&e->rb, &ubi->used); | ||
300 | } | ||
301 | static inline void scrub_tree_del(struct ubi_device *ubi, | ||
302 | struct ubi_wl_entry *e) | ||
303 | { | ||
304 | paranoid_check_in_wl_tree(e, &ubi->scrub); | ||
305 | rb_erase(&e->rb, &ubi->scrub); | ||
306 | } | ||
307 | |||
308 | /** | ||
309 | * do_work - do one pending work. | ||
310 | * @ubi: UBI device description object | ||
311 | * | ||
312 | * This function returns zero in case of success and a negative error code in | ||
313 | * case of failure. | ||
314 | */ | ||
315 | static int do_work(struct ubi_device *ubi) | ||
316 | { | ||
317 | int err; | ||
318 | struct ubi_work *wrk; | ||
319 | |||
320 | spin_lock(&ubi->wl_lock); | ||
321 | |||
322 | if (list_empty(&ubi->works)) { | ||
323 | spin_unlock(&ubi->wl_lock); | ||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | wrk = list_entry(ubi->works.next, struct ubi_work, list); | ||
328 | list_del(&wrk->list); | ||
329 | spin_unlock(&ubi->wl_lock); | ||
330 | |||
331 | /* | ||
332 | * Call the worker function. Do not touch the work structure | ||
333 | * after this call, as the worker function will have freed or | ||
334 | * reused it by then. | ||
335 | */ | ||
336 | err = wrk->func(ubi, wrk, 0); | ||
337 | if (err) | ||
338 | ubi_err("work failed with error code %d", err); | ||
339 | |||
340 | spin_lock(&ubi->wl_lock); | ||
341 | ubi->works_count -= 1; | ||
342 | ubi_assert(ubi->works_count >= 0); | ||
343 | spin_unlock(&ubi->wl_lock); | ||
344 | return err; | ||
345 | } | ||
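do_work() follows the classic detach-then-run discipline: the item is unlinked from @ubi->works while @wl_lock is held, the lock is dropped, and only then does the handler run, because the handler may free or requeue the item. A minimal pthread sketch of the same pattern (a counter stands in for the list; none of this is UBI API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;			/* stand-in for ubi->works */

/* Mirror of do_work()'s locking: detach one item under the lock, drop
 * the lock, then run it. The item is never touched after its handler
 * runs, since the handler may free or requeue it. */
static void do_one(void)
{
	int have = 0;

	pthread_mutex_lock(&lock);
	if (pending > 0) {
		pending--;		/* "list_del" under the lock */
		have = 1;
	}
	pthread_mutex_unlock(&lock);

	if (have)
		printf("one work executed outside the lock\n");
}

int main(void)
{
	for (int i = 0; i < 4; i++)	/* fourth call finds the queue empty */
		do_one();
	return 0;
}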
346 | |||
347 | /** | ||
348 | * produce_free_peb - produce a free physical eraseblock. | ||
349 | * @ubi: UBI device description object | ||
350 | * | ||
351 | * This function tries to make a free PEB by means of synchronous execution of | ||
352 | * pending works. This may be needed if, for example, the background thread is | ||
353 | * disabled. Returns zero in case of success and a negative error code in case | ||
354 | * of failure. | ||
355 | */ | ||
356 | static int produce_free_peb(struct ubi_device *ubi) | ||
357 | { | ||
358 | int err; | ||
359 | |||
360 | spin_lock(&ubi->wl_lock); | ||
361 | while (tree_empty(&ubi->free)) { | ||
362 | spin_unlock(&ubi->wl_lock); | ||
363 | |||
364 | dbg_wl("do one work synchronously"); | ||
365 | err = do_work(ubi); | ||
366 | if (err) | ||
367 | return err; | ||
368 | |||
369 | spin_lock(&ubi->wl_lock); | ||
370 | } | ||
371 | spin_unlock(&ubi->wl_lock); | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | /** | ||
377 | * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree. | ||
378 | * @e: the wear-leveling entry to check | ||
379 | * @root: the root of the tree | ||
380 | * | ||
381 | * This function returns non-zero if @e is in the @root RB-tree and zero if it | ||
382 | * is not. | ||
383 | */ | ||
384 | static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) | ||
385 | { | ||
386 | struct rb_node *p; | ||
387 | |||
388 | p = root->rb_node; | ||
389 | while (p) { | ||
390 | struct ubi_wl_entry *e1; | ||
391 | |||
392 | e1 = rb_entry(p, struct ubi_wl_entry, rb); | ||
393 | |||
394 | if (e->pnum == e1->pnum) { | ||
395 | ubi_assert(e == e1); | ||
396 | return 1; | ||
397 | } | ||
398 | |||
399 | if (e->ec < e1->ec) | ||
400 | p = p->rb_left; | ||
401 | else if (e->ec > e1->ec) | ||
402 | p = p->rb_right; | ||
403 | else { | ||
404 | ubi_assert(e->pnum != e1->pnum); | ||
405 | if (e->pnum < e1->pnum) | ||
406 | p = p->rb_left; | ||
407 | else | ||
408 | p = p->rb_right; | ||
409 | } | ||
410 | } | ||
411 | |||
412 | return 0; | ||
413 | } | ||
414 | |||
415 | /** | ||
416 | * prot_tree_add - add physical eraseblock to protection trees. | ||
417 | * @ubi: UBI device description object | ||
418 | * @e: the physical eraseblock to add | ||
419 | * @pe: protection entry object to use | ||
420 | * @abs_ec: absolute erase counter value when this physical eraseblock has | ||
421 | * to be removed from the protection trees. | ||
422 | * | ||
423 | * @wl->lock has to be locked. | ||
424 | */ | ||
425 | static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e, | ||
426 | struct ubi_wl_prot_entry *pe, int abs_ec) | ||
427 | { | ||
428 | struct rb_node **p, *parent = NULL; | ||
429 | struct ubi_wl_prot_entry *pe1; | ||
430 | |||
431 | pe->e = e; | ||
432 | pe->abs_ec = ubi->abs_ec + abs_ec; | ||
433 | |||
434 | p = &ubi->prot.pnum.rb_node; | ||
435 | while (*p) { | ||
436 | parent = *p; | ||
437 | pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum); | ||
438 | |||
439 | if (e->pnum < pe1->e->pnum) | ||
440 | p = &(*p)->rb_left; | ||
441 | else | ||
442 | p = &(*p)->rb_right; | ||
443 | } | ||
444 | rb_link_node(&pe->rb_pnum, parent, p); | ||
445 | rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum); | ||
446 | |||
447 | p = &ubi->prot.aec.rb_node; | ||
448 | parent = NULL; | ||
449 | while (*p) { | ||
450 | parent = *p; | ||
451 | pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec); | ||
452 | |||
453 | if (pe->abs_ec < pe1->abs_ec) | ||
454 | p = &(*p)->rb_left; | ||
455 | else | ||
456 | p = &(*p)->rb_right; | ||
457 | } | ||
458 | rb_link_node(&pe->rb_aec, parent, p); | ||
459 | rb_insert_color(&pe->rb_aec, &ubi->prot.aec); | ||
460 | } | ||
461 | |||
462 | /** | ||
463 | * find_wl_entry - find wear-leveling entry closest to certain erase counter. | ||
464 | * @root: the RB-tree where to look for | ||
465 | * @max: highest possible erase counter | ||
466 | * | ||
467 | * This function looks for a wear-leveling entry with an erase counter closest | ||
468 | * to @max and less than @max. | ||
469 | */ | ||
470 | static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max) | ||
471 | { | ||
472 | struct rb_node *p; | ||
473 | struct ubi_wl_entry *e; | ||
474 | |||
475 | e = rb_entry(rb_first(root), struct ubi_wl_entry, rb); | ||
476 | max += e->ec; | ||
477 | |||
478 | p = root->rb_node; | ||
479 | while (p) { | ||
480 | struct ubi_wl_entry *e1; | ||
481 | |||
482 | e1 = rb_entry(p, struct ubi_wl_entry, rb); | ||
483 | if (e1->ec >= max) | ||
484 | p = p->rb_left; | ||
485 | else { | ||
486 | p = p->rb_right; | ||
487 | e = e1; | ||
488 | } | ||
489 | } | ||
490 | |||
491 | return e; | ||
492 | } | ||
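Put differently, find_wl_entry() returns the rightmost entry whose erase counter is still below the lowest EC in the tree plus @max. The same selection over a sorted array, with invented EC values:

#include <stdio.h>

/* Selection rule of find_wl_entry() over a sorted array instead of an
 * RB-tree: the largest EC strictly below (lowest EC + max). */
static int find_ec(const int *ecs, int n, int max)
{
	int limit = ecs[0] + max;	/* ecs[] is sorted ascending */
	int best = ecs[0];

	for (int i = 1; i < n; i++)
		if (ecs[i] < limit)
			best = ecs[i];
	return best;
}

int main(void)
{
	int ecs[] = { 100, 140, 180, 260 };

	/* With max = 100 the limit is 200, so 180 is picked: the most
	 * worn free PEB that still stays within the allowed spread. */
	printf("%d\n", find_ec(ecs, 4, 100));
	return 0;
}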
493 | |||
494 | /** | ||
495 | * ubi_wl_get_peb - get a physical eraseblock. | ||
496 | * @ubi: UBI device description object | ||
497 | * @dtype: type of data which will be stored in this physical eraseblock | ||
498 | * | ||
499 | * This function returns a physical eraseblock in case of success and a | ||
500 | * negative error code in case of failure. Might sleep. | ||
501 | */ | ||
502 | int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) | ||
503 | { | ||
504 | int err, protect, medium_ec; | ||
505 | struct ubi_wl_entry *e, *first, *last; | ||
506 | struct ubi_wl_prot_entry *pe; | ||
507 | |||
508 | ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || | ||
509 | dtype == UBI_UNKNOWN); | ||
510 | |||
511 | pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_KERNEL); | ||
512 | if (!pe) | ||
513 | return -ENOMEM; | ||
514 | |||
515 | retry: | ||
516 | spin_lock(&ubi->wl_lock); | ||
517 | if (tree_empty(&ubi->free)) { | ||
518 | if (ubi->works_count == 0) { | ||
519 | ubi_assert(list_empty(&ubi->works)); | ||
520 | ubi_err("no free eraseblocks"); | ||
521 | spin_unlock(&ubi->wl_lock); | ||
522 | kfree(pe); | ||
523 | return -ENOSPC; | ||
524 | } | ||
525 | spin_unlock(&ubi->wl_lock); | ||
526 | |||
527 | err = produce_free_peb(ubi); | ||
528 | if (err < 0) { | ||
529 | kfree(pe); | ||
530 | return err; | ||
531 | } | ||
532 | goto retry; | ||
533 | } | ||
534 | |||
535 | switch (dtype) { | ||
536 | case UBI_LONGTERM: | ||
537 | /* | ||
538 | * For long term data we pick a physical eraseblock | ||
539 | * with high erase counter. But the highest erase | ||
540 | * counter we can pick is bounded by the lowest | ||
541 | * erase counter plus %WL_FREE_MAX_DIFF. | ||
542 | */ | ||
543 | e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | ||
544 | protect = LT_PROTECTION; | ||
545 | break; | ||
546 | case UBI_UNKNOWN: | ||
547 | /* | ||
548 | * For unknown data we pick a physical eraseblock with | ||
549 | * medium erase counter. But we must not pick a | ||
550 | * physical eraseblock with an erase counter greater | ||
551 | * than or equal to the lowest erase counter plus | ||
552 | * %WL_FREE_MAX_DIFF. | ||
553 | */ | ||
554 | first = rb_entry(rb_first(&ubi->free), | ||
555 | struct ubi_wl_entry, rb); | ||
556 | last = rb_entry(rb_last(&ubi->free), | ||
557 | struct ubi_wl_entry, rb); | ||
558 | |||
559 | if (last->ec - first->ec < WL_FREE_MAX_DIFF) | ||
560 | e = rb_entry(ubi->free.rb_node, | ||
561 | struct ubi_wl_entry, rb); | ||
562 | else { | ||
563 | medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; | ||
564 | e = find_wl_entry(&ubi->free, medium_ec); | ||
565 | } | ||
566 | protect = U_PROTECTION; | ||
567 | break; | ||
568 | case UBI_SHORTTERM: | ||
569 | /* | ||
570 | * For short term data we pick a physical eraseblock | ||
571 | * with the lowest erase counter as we expect it will | ||
572 | * be erased soon. | ||
573 | */ | ||
574 | e = rb_entry(rb_first(&ubi->free), | ||
575 | struct ubi_wl_entry, rb); | ||
576 | protect = ST_PROTECTION; | ||
577 | break; | ||
578 | default: | ||
579 | protect = 0; | ||
580 | e = NULL; | ||
581 | BUG(); | ||
582 | } | ||
583 | |||
584 | /* | ||
585 | * Move the physical eraseblock to the protection trees where it will | ||
586 | * be protected from being moved for some time. | ||
587 | */ | ||
588 | free_tree_del(ubi, e); | ||
589 | prot_tree_add(ubi, e, pe, protect); | ||
590 | |||
591 | dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect); | ||
592 | spin_unlock(&ubi->wl_lock); | ||
593 | |||
594 | return e->pnum; | ||
595 | } | ||
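A compressed model of the three policies: short-term data gets the least worn free PEB, long-term data the most worn one within the allowed spread, and unknown data something in between. The sketch below simplifies the UBI_UNKNOWN case to half of the spread (the function above uses (first->ec + WL_FREE_MAX_DIFF)/2); the constant and helper are invented, not the kernel's:

#include <stdio.h>

enum dtype { LONGTERM, SHORTTERM, UNKNOWN };

#define FREE_MAX_DIFF 100	/* stand-in for WL_FREE_MAX_DIFF */

/* The selection policy over a sorted EC array instead of an RB-tree. */
static int pick_ec(const int *ecs, int n, enum dtype t)
{
	int limit, best = ecs[0];

	if (t == SHORTTERM)	/* least worn PEB, it will be erased soon */
		return ecs[0];

	/* LONGTERM: most worn PEB inside the allowed spread;
	 * UNKNOWN: aim at the middle of that range instead. */
	limit = ecs[0] + (t == LONGTERM ? FREE_MAX_DIFF : FREE_MAX_DIFF / 2);
	for (int i = 1; i < n; i++)
		if (ecs[i] < limit)
			best = ecs[i];
	return best;
}

int main(void)
{
	int ecs[] = { 100, 140, 180, 260 };

	printf("long-term:  %d\n", pick_ec(ecs, 4, LONGTERM));	/* 180 */
	printf("short-term: %d\n", pick_ec(ecs, 4, SHORTTERM));	/* 100 */
	printf("unknown:    %d\n", pick_ec(ecs, 4, UNKNOWN));	/* 140 */
	return 0;
}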
596 | |||
597 | /** | ||
598 | * prot_tree_del - remove a physical eraseblock from the protection trees | ||
599 | * @ubi: UBI device description object | ||
600 | * @pnum: the physical eraseblock to remove | ||
601 | */ | ||
602 | static void prot_tree_del(struct ubi_device *ubi, int pnum) | ||
603 | { | ||
604 | struct rb_node *p; | ||
605 | struct ubi_wl_prot_entry *pe = NULL; | ||
606 | |||
607 | p = ubi->prot.pnum.rb_node; | ||
608 | while (p) { | ||
609 | |||
610 | pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum); | ||
611 | |||
612 | if (pnum == pe->e->pnum) | ||
613 | break; | ||
614 | |||
615 | if (pnum < pe->e->pnum) | ||
616 | p = p->rb_left; | ||
617 | else | ||
618 | p = p->rb_right; | ||
619 | } | ||
620 | |||
621 | ubi_assert(pe->e->pnum == pnum); | ||
622 | rb_erase(&pe->rb_aec, &ubi->prot.aec); | ||
623 | rb_erase(&pe->rb_pnum, &ubi->prot.pnum); | ||
624 | kfree(pe); | ||
625 | } | ||
626 | |||
627 | /** | ||
628 | * sync_erase - synchronously erase a physical eraseblock. | ||
629 | * @ubi: UBI device description object | ||
630 | * @e: the physical eraseblock to erase | ||
631 | * @torture: if the physical eraseblock has to be tortured | ||
632 | * | ||
633 | * This function returns zero in case of success and a negative error code in | ||
634 | * case of failure. | ||
635 | */ | ||
636 | static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture) | ||
637 | { | ||
638 | int err; | ||
639 | struct ubi_ec_hdr *ec_hdr; | ||
640 | unsigned long long ec = e->ec; | ||
641 | |||
642 | dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec); | ||
643 | |||
644 | err = paranoid_check_ec(ubi, e->pnum, e->ec); | ||
645 | if (err > 0) | ||
646 | return -EINVAL; | ||
647 | |||
648 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
649 | if (!ec_hdr) | ||
650 | return -ENOMEM; | ||
651 | |||
652 | err = ubi_io_sync_erase(ubi, e->pnum, torture); | ||
653 | if (err < 0) | ||
654 | goto out_free; | ||
655 | |||
656 | ec += err; | ||
657 | if (ec > UBI_MAX_ERASECOUNTER) { | ||
658 | /* | ||
659 | * Erase counter overflow. Upgrade UBI and use 64-bit | ||
660 | * erase counters internally. | ||
661 | */ | ||
662 | ubi_err("erase counter overflow at PEB %d, EC %llu", | ||
663 | e->pnum, ec); | ||
664 | err = -EINVAL; | ||
665 | goto out_free; | ||
666 | } | ||
667 | |||
668 | dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec); | ||
669 | |||
670 | ec_hdr->ec = cpu_to_ubi64(ec); | ||
671 | |||
672 | err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); | ||
673 | if (err) | ||
674 | goto out_free; | ||
675 | |||
676 | e->ec = ec; | ||
677 | spin_lock(&ubi->wl_lock); | ||
678 | if (e->ec > ubi->max_ec) | ||
679 | ubi->max_ec = e->ec; | ||
680 | spin_unlock(&ubi->wl_lock); | ||
681 | |||
682 | out_free: | ||
683 | kfree(ec_hdr); | ||
684 | return err; | ||
685 | } | ||
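The overflow check matters because, in this era of the code, erase counters above UBI_MAX_ERASECOUNTER cannot be represented by the rest of UBI even though the on-flash field is 64 bits wide. A toy reproduction of the guard, assuming a 0x7FFFFFFF limit as a stand-in for the real constant:

#include <stdio.h>

#define MAX_EC 0x7FFFFFFFULL	/* stand-in for UBI_MAX_ERASECOUNTER */

int main(void)
{
	unsigned long long ec = MAX_EC;	/* EC before this erase */

	ec += 1;	/* ubi_io_sync_erase() reports erase cycles done */
	if (ec > MAX_EC)
		printf("EC overflow, refuse to write the EC header\n");
	else
		printf("new EC %llu\n", ec);
	return 0;
}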
686 | |||
687 | /** | ||
688 | * check_protection_over - check if it is time to stop protecting some | ||
689 | * physical eraseblocks. | ||
690 | * @ubi: UBI device description object | ||
691 | * | ||
692 | * This function is called after each erase operation, when the absolute erase | ||
693 | * counter is incremented, to check if some physical eraseblocks no longer | ||
694 | * have to be protected. These physical eraseblocks are moved from the | ||
695 | * protection trees to the used tree. | ||
696 | */ | ||
697 | static void check_protection_over(struct ubi_device *ubi) | ||
698 | { | ||
699 | struct ubi_wl_prot_entry *pe; | ||
700 | |||
701 | /* | ||
702 | * There may be several protected physical eraseblocks to remove; | ||
703 | * process them all. | ||
704 | */ | ||
705 | while (1) { | ||
706 | spin_lock(&ubi->wl_lock); | ||
707 | if (tree_empty(&ubi->prot.aec)) { | ||
708 | spin_unlock(&ubi->wl_lock); | ||
709 | break; | ||
710 | } | ||
711 | |||
712 | pe = rb_entry(rb_first(&ubi->prot.aec), | ||
713 | struct ubi_wl_prot_entry, rb_aec); | ||
714 | |||
715 | if (pe->abs_ec > ubi->abs_ec) { | ||
716 | spin_unlock(&ubi->wl_lock); | ||
717 | break; | ||
718 | } | ||
719 | |||
720 | dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu", | ||
721 | pe->e->pnum, ubi->abs_ec, pe->abs_ec); | ||
722 | rb_erase(&pe->rb_aec, &ubi->prot.aec); | ||
723 | rb_erase(&pe->rb_pnum, &ubi->prot.pnum); | ||
724 | used_tree_add(ubi, pe->e); | ||
725 | spin_unlock(&ubi->wl_lock); | ||
726 | |||
727 | kfree(pe); | ||
728 | cond_resched(); | ||
729 | } | ||
730 | } | ||
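Because @ubi->prot.aec is ordered by expiry, the sweep stops at the first entry whose expiry still lies in the future: every entry behind it expires even later. The same early-exit sweep over a sorted array (all numbers invented):

#include <stdio.h>

int main(void)
{
	/* Expiry values of protected PEBs, sorted as the aec tree is. */
	unsigned long long expiry[] = { 990, 1005, 1040 };
	unsigned long long abs_ec = 1010;	/* current global erase count */

	for (int i = 0; i < 3; i++) {
		if (expiry[i] > abs_ec)
			break;		/* the rest expire even later */
		printf("entry %d: protection over, move to used\n", i);
	}
	return 0;
}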
731 | |||
732 | /** | ||
733 | * schedule_ubi_work - schedule a work. | ||
734 | * @ubi: UBI device description object | ||
735 | * @wrk: the work to schedule | ||
736 | * | ||
737 | * This function enqueues a work defined by @wrk to the tail of the pending | ||
738 | * works list. | ||
739 | */ | ||
740 | static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) | ||
741 | { | ||
742 | spin_lock(&ubi->wl_lock); | ||
743 | list_add_tail(&wrk->list, &ubi->works); | ||
744 | ubi_assert(ubi->works_count >= 0); | ||
745 | ubi->works_count += 1; | ||
746 | if (ubi->thread_enabled) | ||
747 | wake_up_process(ubi->bgt_thread); | ||
748 | spin_unlock(&ubi->wl_lock); | ||
749 | } | ||
750 | |||
751 | static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | ||
752 | int cancel); | ||
753 | |||
754 | /** | ||
755 | * schedule_erase - schedule an erase work. | ||
756 | * @ubi: UBI device description object | ||
757 | * @e: the WL entry of the physical eraseblock to erase | ||
758 | * @torture: if the physical eraseblock has to be tortured | ||
759 | * | ||
760 | * This function returns zero in case of success and %-ENOMEM in case of | ||
761 | * failure. | ||
762 | */ | ||
763 | static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | ||
764 | int torture) | ||
765 | { | ||
766 | struct ubi_work *wl_wrk; | ||
767 | |||
768 | dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", | ||
769 | e->pnum, e->ec, torture); | ||
770 | |||
771 | wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL); | ||
772 | if (!wl_wrk) | ||
773 | return -ENOMEM; | ||
774 | |||
775 | wl_wrk->func = &erase_worker; | ||
776 | wl_wrk->e = e; | ||
777 | wl_wrk->torture = torture; | ||
778 | |||
779 | schedule_ubi_work(ubi, wl_wrk); | ||
780 | return 0; | ||
781 | } | ||
782 | |||
783 | /** | ||
784 | * wear_leveling_worker - wear-leveling worker function. | ||
785 | * @ubi: UBI device description object | ||
786 | * @wrk: the work object | ||
787 | * @cancel: non-zero if the worker has to free memory and exit | ||
788 | * | ||
789 | * This function copies a more worn out physical eraseblock to a less worn out | ||
790 | * one. Returns zero in case of success and a negative error code in case of | ||
791 | * failure. | ||
792 | */ | ||
793 | static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | ||
794 | int cancel) | ||
795 | { | ||
796 | int err, put = 0; | ||
797 | struct ubi_wl_entry *e1, *e2; | ||
798 | struct ubi_vid_hdr *vid_hdr; | ||
799 | |||
800 | kfree(wrk); | ||
801 | |||
802 | if (cancel) | ||
803 | return 0; | ||
804 | |||
805 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | ||
806 | if (!vid_hdr) | ||
807 | return -ENOMEM; | ||
808 | |||
809 | spin_lock(&ubi->wl_lock); | ||
810 | |||
811 | /* | ||
812 | * Only one WL worker at a time is supported in this implementation, so | ||
813 | * make sure a PEB is not being moved already. | ||
814 | */ | ||
815 | if (ubi->move_to || tree_empty(&ubi->free) || | ||
816 | (tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) { | ||
817 | /* | ||
818 | * Only one WL worker at a time is supported in this | ||
819 | * implementation, so if a LEB is already being moved, cancel. | ||
820 | * | ||
821 | * No free physical eraseblocks? Well, we cancel wear-leveling | ||
822 | * then. It will be triggered again when a free physical | ||
823 | * eraseblock appears. | ||
824 | * | ||
825 | * No used physical eraseblocks? They must be temporarily | ||
826 | * protected from being moved. They will be moved to the | ||
827 | * @ubi->used tree later and the wear-leveling will be | ||
828 | * triggered again. | ||
829 | */ | ||
830 | dbg_wl("cancel WL, a list is empty: free %d, used %d", | ||
831 | tree_empty(&ubi->free), tree_empty(&ubi->used)); | ||
832 | ubi->wl_scheduled = 0; | ||
833 | spin_unlock(&ubi->wl_lock); | ||
834 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
835 | return 0; | ||
836 | } | ||
837 | |||
838 | if (tree_empty(&ubi->scrub)) { | ||
839 | /* | ||
840 | * Now pick the least worn-out used physical eraseblock and a | ||
841 | * highly worn-out free physical eraseblock. If the erase | ||
842 | * counters differ enough, start wear-leveling. | ||
843 | */ | ||
844 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); | ||
845 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | ||
846 | |||
847 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { | ||
848 | dbg_wl("no WL needed: min used EC %d, max free EC %d", | ||
849 | e1->ec, e2->ec); | ||
850 | ubi->wl_scheduled = 0; | ||
851 | spin_unlock(&ubi->wl_lock); | ||
852 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
853 | return 0; | ||
854 | } | ||
855 | used_tree_del(ubi, e1); | ||
856 | dbg_wl("move PEB %d EC %d to PEB %d EC %d", | ||
857 | e1->pnum, e1->ec, e2->pnum, e2->ec); | ||
858 | } else { | ||
859 | e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); | ||
860 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | ||
861 | scrub_tree_del(ubi, e1); | ||
862 | dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); | ||
863 | } | ||
864 | |||
865 | free_tree_del(ubi, e2); | ||
866 | ubi_assert(!ubi->move_from && !ubi->move_to); | ||
867 | ubi_assert(!ubi->move_to_put && !ubi->move_from_put); | ||
868 | ubi->move_from = e1; | ||
869 | ubi->move_to = e2; | ||
870 | spin_unlock(&ubi->wl_lock); | ||
871 | |||
872 | /* | ||
873 | * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum. | ||
874 | * We do not know yet which logical eraseblock our physical | ||
875 | * eraseblock (@e1) belongs to. We have to read the volume identifier | ||
876 | * header first. | ||
877 | */ | ||
878 | |||
879 | err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); | ||
880 | if (err && err != UBI_IO_BITFLIPS) { | ||
881 | if (err == UBI_IO_PEB_FREE) { | ||
882 | /* | ||
883 | * We are trying to move a PEB without a VID header. UBI | ||
884 | * always writes VID headers shortly after a PEB is | ||
885 | * given out, so here the writer did not yet have a | ||
886 | * chance to write the header because it was preempted. | ||
887 | * Just re-schedule the work, so that next time it will | ||
888 | * likely have the VID header in place. | ||
889 | */ | ||
890 | dbg_wl("PEB %d has no VID header", e1->pnum); | ||
891 | err = 0; | ||
892 | } else { | ||
893 | ubi_err("error %d while reading VID header from PEB %d", | ||
894 | err, e1->pnum); | ||
895 | if (err > 0) | ||
896 | err = -EIO; | ||
897 | } | ||
898 | goto error; | ||
899 | } | ||
900 | |||
901 | err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); | ||
902 | if (err) { | ||
903 | if (err == UBI_IO_BITFLIPS) | ||
904 | err = 0; | ||
905 | goto error; | ||
906 | } | ||
907 | |||
908 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
909 | spin_lock(&ubi->wl_lock); | ||
910 | if (!ubi->move_to_put) | ||
911 | used_tree_add(ubi, e2); | ||
912 | else | ||
913 | put = 1; | ||
914 | ubi->move_from = ubi->move_to = NULL; | ||
915 | ubi->move_from_put = ubi->move_to_put = 0; | ||
916 | ubi->wl_scheduled = 0; | ||
917 | spin_unlock(&ubi->wl_lock); | ||
918 | |||
919 | if (put) { | ||
920 | /* | ||
921 | * Well, the target PEB was put meanwhile, schedule it for | ||
922 | * erasure. | ||
923 | */ | ||
924 | dbg_wl("PEB %d was put meanwhile, erase", e2->pnum); | ||
925 | err = schedule_erase(ubi, e2, 0); | ||
926 | if (err) { | ||
927 | kmem_cache_free(wl_entries_slab, e2); | ||
928 | ubi_ro_mode(ubi); | ||
929 | } | ||
930 | } | ||
931 | |||
932 | err = schedule_erase(ubi, e1, 0); | ||
933 | if (err) { | ||
934 | kmem_cache_free(wl_entries_slab, e1); | ||
935 | ubi_ro_mode(ubi); | ||
936 | } | ||
937 | |||
938 | dbg_wl("done"); | ||
939 | return err; | ||
940 | |||
941 | /* | ||
942 | * Some error occurred. @e1 was not changed, so put it back. @e2 | ||
943 | * might have been changed, so schedule it for erasure. | ||
944 | */ | ||
945 | error: | ||
946 | if (err) | ||
947 | dbg_wl("error %d occurred, cancel operation", err); | ||
948 | ubi_assert(err <= 0); | ||
949 | |||
950 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
951 | spin_lock(&ubi->wl_lock); | ||
952 | ubi->wl_scheduled = 0; | ||
953 | if (ubi->move_from_put) | ||
954 | put = 1; | ||
955 | else | ||
956 | used_tree_add(ubi, e1); | ||
957 | ubi->move_from = ubi->move_to = NULL; | ||
958 | ubi->move_from_put = ubi->move_to_put = 0; | ||
959 | spin_unlock(&ubi->wl_lock); | ||
960 | |||
961 | if (put) { | ||
962 | /* | ||
963 | * Well, the source PEB was put meanwhile, schedule it for | ||
964 | * erasure. | ||
965 | */ | ||
966 | dbg_wl("PEB %d was put meanwhile, erase", e1->pnum); | ||
967 | err = schedule_erase(ubi, e1, 0); | ||
968 | if (err) { | ||
969 | kmem_cache_free(wl_entries_slab, e1); | ||
970 | ubi_ro_mode(ubi); | ||
971 | } | ||
972 | } | ||
973 | |||
974 | err = schedule_erase(ubi, e2, 0); | ||
975 | if (err) { | ||
976 | kmem_cache_free(wl_entries_slab, e2); | ||
977 | ubi_ro_mode(ubi); | ||
978 | } | ||
979 | |||
980 | yield(); | ||
981 | return err; | ||
982 | } | ||
983 | |||
984 | /** | ||
985 | * ensure_wear_leveling - schedule wear-leveling if it is needed. | ||
986 | * @ubi: UBI device description object | ||
987 | * | ||
988 | * This function checks if it is time to start wear-leveling and schedules it | ||
989 | * if yes. This function returns zero in case of success and a negative error | ||
990 | * code in case of failure. | ||
991 | */ | ||
992 | static int ensure_wear_leveling(struct ubi_device *ubi) | ||
993 | { | ||
994 | int err = 0; | ||
995 | struct ubi_wl_entry *e1; | ||
996 | struct ubi_wl_entry *e2; | ||
997 | struct ubi_work *wrk; | ||
998 | |||
999 | spin_lock(&ubi->wl_lock); | ||
1000 | if (ubi->wl_scheduled) | ||
1001 | /* Wear-leveling is already in the work queue */ | ||
1002 | goto out_unlock; | ||
1003 | |||
1004 | /* | ||
1005 | * If the ubi->scrub tree is not empty, scrubbing is needed, and the | ||
1006 | * WL worker has to be scheduled anyway. | ||
1007 | */ | ||
1008 | if (tree_empty(&ubi->scrub)) { | ||
1009 | if (tree_empty(&ubi->used) || tree_empty(&ubi->free)) | ||
1010 | /* No physical eraseblocks - no deal */ | ||
1011 | goto out_unlock; | ||
1012 | |||
1013 | /* | ||
1014 | * We schedule wear-leveling only if the difference between the | ||
1015 | * lowest erase counter of used physical eraseblocks and a high | ||
1016 | * erase counter of free physical eraseblocks is greater than | ||
1017 | * %UBI_WL_THRESHOLD. | ||
1018 | */ | ||
1019 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); | ||
1020 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | ||
1021 | |||
1022 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) | ||
1023 | goto out_unlock; | ||
1024 | dbg_wl("schedule wear-leveling"); | ||
1025 | } else | ||
1026 | dbg_wl("schedule scrubbing"); | ||
1027 | |||
1028 | ubi->wl_scheduled = 1; | ||
1029 | spin_unlock(&ubi->wl_lock); | ||
1030 | |||
1031 | wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL); | ||
1032 | if (!wrk) { | ||
1033 | err = -ENOMEM; | ||
1034 | goto out_cancel; | ||
1035 | } | ||
1036 | |||
1037 | wrk->func = &wear_leveling_worker; | ||
1038 | schedule_ubi_work(ubi, wrk); | ||
1039 | return err; | ||
1040 | |||
1041 | out_cancel: | ||
1042 | spin_lock(&ubi->wl_lock); | ||
1043 | ubi->wl_scheduled = 0; | ||
1044 | out_unlock: | ||
1045 | spin_unlock(&ubi->wl_lock); | ||
1046 | return err; | ||
1047 | } | ||
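The trigger reduces to one comparison between the least worn used PEB and the most worn free PEB within the spread. Isolated below, with a made-up threshold value standing in for UBI_WL_THRESHOLD:

#include <stdio.h>

#define WL_THRESHOLD 4096	/* illustrative; not UBI's actual value */

int main(void)
{
	int min_used_ec = 1000;	/* least worn used PEB */
	int max_free_ec = 5600;	/* most worn free PEB within the spread */

	if (max_free_ec - min_used_ec >= WL_THRESHOLD)
		printf("schedule wear-leveling\n");
	else
		printf("EC spread acceptable, nothing to do\n");
	return 0;
}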
1048 | |||
1049 | /** | ||
1050 | * erase_worker - physical eraseblock erase worker function. | ||
1051 | * @ubi: UBI device description object | ||
1052 | * @wl_wrk: the work object | ||
1053 | * @cancel: non-zero if the worker has to free memory and exit | ||
1054 | * | ||
1055 | * This function erases a physical eraseblock and performs torture testing if | ||
1056 | * needed. It also takes care of marking the physical eraseblock bad if | ||
1057 | * needed. Returns zero in case of success and a negative error code in case of | ||
1058 | * failure. | ||
1059 | */ | ||
1060 | static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | ||
1061 | int cancel) | ||
1062 | { | ||
1063 | int err; | ||
1064 | struct ubi_wl_entry *e = wl_wrk->e; | ||
1065 | int pnum = e->pnum; | ||
1066 | |||
1067 | if (cancel) { | ||
1068 | dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); | ||
1069 | kfree(wl_wrk); | ||
1070 | kmem_cache_free(wl_entries_slab, e); | ||
1071 | return 0; | ||
1072 | } | ||
1073 | |||
1074 | dbg_wl("erase PEB %d EC %d", pnum, e->ec); | ||
1075 | |||
1076 | err = sync_erase(ubi, e, wl_wrk->torture); | ||
1077 | if (!err) { | ||
1078 | /* Fine, we've erased it successfully */ | ||
1079 | kfree(wl_wrk); | ||
1080 | |||
1081 | spin_lock(&ubi->wl_lock); | ||
1082 | ubi->abs_ec += 1; | ||
1083 | free_tree_add(ubi, e); | ||
1084 | spin_unlock(&ubi->wl_lock); | ||
1085 | |||
1086 | /* | ||
1087 | * One more erase operation has happened, take care of protected | ||
1088 | * physical eraseblocks. | ||
1089 | */ | ||
1090 | check_protection_over(ubi); | ||
1091 | |||
1092 | /* And take care about wear-leveling */ | ||
1093 | err = ensure_wear_leveling(ubi); | ||
1094 | return err; | ||
1095 | } | ||
1096 | |||
1097 | kfree(wl_wrk); | ||
1098 | kmem_cache_free(wl_entries_slab, e); | ||
1099 | |||
1100 | if (err != -EIO) { | ||
1101 | /* | ||
1102 | * If this is not %-EIO, we have no idea what to do. Scheduling | ||
1103 | * this physical eraseblock for erasure again would cause | ||
1104 | * errors again and again. Well, let's switch to RO mode. | ||
1105 | */ | ||
1106 | ubi_ro_mode(ubi); | ||
1107 | return err; | ||
1108 | } | ||
1109 | |||
1110 | /* It is %-EIO, the PEB went bad */ | ||
1111 | |||
1112 | if (!ubi->bad_allowed) { | ||
1113 | ubi_err("bad physical eraseblock %d detected", pnum); | ||
1114 | ubi_ro_mode(ubi); | ||
1115 | err = -EIO; | ||
1116 | } else { | ||
1117 | int need; | ||
1118 | |||
1119 | spin_lock(&ubi->volumes_lock); | ||
1120 | need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1; | ||
1121 | if (need > 0) { | ||
1122 | need = ubi->avail_pebs >= need ? need : ubi->avail_pebs; | ||
1123 | ubi->avail_pebs -= need; | ||
1124 | ubi->rsvd_pebs += need; | ||
1125 | ubi->beb_rsvd_pebs += need; | ||
1126 | if (need > 0) | ||
1127 | ubi_msg("reserved more %d PEBs", need); | ||
1128 | } | ||
1129 | |||
1130 | if (ubi->beb_rsvd_pebs == 0) { | ||
1131 | spin_unlock(&ubi->volumes_lock); | ||
1132 | ubi_err("no reserved physical eraseblocks"); | ||
1133 | ubi_ro_mode(ubi); | ||
1134 | return -EIO; | ||
1135 | } | ||
1136 | |||
1137 | spin_unlock(&ubi->volumes_lock); | ||
1138 | ubi_msg("mark PEB %d as bad", pnum); | ||
1139 | |||
1140 | err = ubi_io_mark_bad(ubi, pnum); | ||
1141 | if (err) { | ||
1142 | ubi_ro_mode(ubi); | ||
1143 | return err; | ||
1144 | } | ||
1145 | |||
1146 | spin_lock(&ubi->volumes_lock); | ||
1147 | ubi->beb_rsvd_pebs -= 1; | ||
1148 | ubi->bad_peb_count += 1; | ||
1149 | ubi->good_peb_count -= 1; | ||
1150 | ubi_calculate_reserved(ubi); | ||
1151 | if (ubi->beb_rsvd_pebs == 0) | ||
1152 | ubi_warn("last PEB from the reserved pool was used"); | ||
1153 | spin_unlock(&ubi->volumes_lock); | ||
1154 | } | ||
1155 | |||
1156 | return err; | ||
1157 | } | ||
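The reserve top-up in the bad-PEB branch is easy to miss: before one reserved PEB is consumed, the reserve is refilled from the available pool as far as it can be. A stand-alone sketch of that arithmetic with invented numbers:

#include <stdio.h>

int main(void)
{
	int beb_rsvd_level = 20;	/* target size of the bad-PEB reserve */
	int beb_rsvd_pebs = 0;		/* current reserve */
	int avail_pebs = 5;		/* pool the refill may draw from */

	int need = beb_rsvd_level - beb_rsvd_pebs + 1;
	if (need > 0) {
		need = avail_pebs >= need ? need : avail_pebs;
		avail_pebs -= need;
		beb_rsvd_pebs += need;
	}
	/* Prints "reserve now 5, available 0": the pool runs out
	 * before the target level is reached. */
	printf("reserve now %d, available %d\n", beb_rsvd_pebs, avail_pebs);
	return 0;
}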
1158 | |||
1159 | /** | ||
1160 | * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling | ||
1161 | * unit. | ||
1162 | * @ubi: UBI device description object | ||
1163 | * @pnum: physical eraseblock to return | ||
1164 | * @torture: if this physical eraseblock has to be tortured | ||
1165 | * | ||
1166 | * This function is called to return physical eraseblock @pnum to the pool of | ||
1167 | * free physical eraseblocks. The @torture flag has to be set if an I/O error | ||
1168 | * occurred on this PEB and it has to be tested. This function returns zero | ||
1169 | * in case of success and a negative error code in case of failure. | ||
1170 | */ | ||
1171 | int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) | ||
1172 | { | ||
1173 | int err; | ||
1174 | struct ubi_wl_entry *e; | ||
1175 | |||
1176 | dbg_wl("PEB %d", pnum); | ||
1177 | ubi_assert(pnum >= 0); | ||
1178 | ubi_assert(pnum < ubi->peb_count); | ||
1179 | |||
1180 | spin_lock(&ubi->wl_lock); | ||
1181 | |||
1182 | e = ubi->lookuptbl[pnum]; | ||
1183 | if (e == ubi->move_from) { | ||
1184 | /* | ||
1185 | * User is putting the physical eraseblock which was selected to | ||
1186 | * be moved. It will be scheduled for erasure in the | ||
1187 | * wear-leveling worker. | ||
1188 | */ | ||
1189 | dbg_wl("PEB %d is being moved", pnum); | ||
1190 | ubi_assert(!ubi->move_from_put); | ||
1191 | ubi->move_from_put = 1; | ||
1192 | spin_unlock(&ubi->wl_lock); | ||
1193 | return 0; | ||
1194 | } else if (e == ubi->move_to) { | ||
1195 | /* | ||
1196 | * User is putting the physical eraseblock which was selected | ||
1197 | * as the target of data moving. It may happen if the EBA | ||
1198 | * unit has already re-mapped the LEB but the WL unit has | ||
1199 | * not yet put the PEB to the "used" tree. | ||
1200 | */ | ||
1201 | dbg_wl("PEB %d is the target of data moving", pnum); | ||
1202 | ubi_assert(!ubi->move_to_put); | ||
1203 | ubi->move_to_put = 1; | ||
1204 | spin_unlock(&ubi->wl_lock); | ||
1205 | return 0; | ||
1206 | } else { | ||
1207 | if (in_wl_tree(e, &ubi->used)) | ||
1208 | used_tree_del(ubi, e); | ||
1209 | else if (in_wl_tree(e, &ubi->scrub)) | ||
1210 | scrub_tree_del(ubi, e); | ||
1211 | else | ||
1212 | prot_tree_del(ubi, e->pnum); | ||
1213 | } | ||
1214 | spin_unlock(&ubi->wl_lock); | ||
1215 | |||
1216 | err = schedule_erase(ubi, e, torture); | ||
1217 | if (err) { | ||
1218 | spin_lock(&ubi->wl_lock); | ||
1219 | used_tree_add(ubi, e); | ||
1220 | spin_unlock(&ubi->wl_lock); | ||
1221 | } | ||
1222 | |||
1223 | return err; | ||
1224 | } | ||
1225 | |||
1226 | /** | ||
1227 | * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing. | ||
1228 | * @ubi: UBI device description object | ||
1229 | * @pnum: the physical eraseblock to schedule | ||
1230 | * | ||
1231 | * If a bit-flip in a physical eraseblock is detected, this physical eraseblock | ||
1232 | * needs scrubbing. This function schedules a physical eraseblock for | ||
1233 | * scrubbing, which is done in the background. This function returns zero in case | ||
1234 | * success and a negative error code in case of failure. | ||
1235 | */ | ||
1236 | int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) | ||
1237 | { | ||
1238 | struct ubi_wl_entry *e; | ||
1239 | |||
1240 | ubi_msg("schedule PEB %d for scrubbing", pnum); | ||
1241 | |||
1242 | retry: | ||
1243 | spin_lock(&ubi->wl_lock); | ||
1244 | e = ubi->lookuptbl[pnum]; | ||
1245 | if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) { | ||
1246 | spin_unlock(&ubi->wl_lock); | ||
1247 | return 0; | ||
1248 | } | ||
1249 | |||
1250 | if (e == ubi->move_to) { | ||
1251 | /* | ||
1252 | * This physical eraseblock was used to move data to. The data | ||
1253 | * was moved but the PEB was not yet inserted into the proper | ||
1254 | * tree. We should just wait a little and let the WL worker | ||
1255 | * proceed. | ||
1256 | */ | ||
1257 | spin_unlock(&ubi->wl_lock); | ||
1258 | dbg_wl("the PEB %d is not in proper tree, retry", pnum); | ||
1259 | yield(); | ||
1260 | goto retry; | ||
1261 | } | ||
1262 | |||
1263 | if (in_wl_tree(e, &ubi->used)) | ||
1264 | used_tree_del(ubi, e); | ||
1265 | else | ||
1266 | prot_tree_del(ubi, pnum); | ||
1267 | |||
1268 | scrub_tree_add(ubi, e); | ||
1269 | spin_unlock(&ubi->wl_lock); | ||
1270 | |||
1271 | /* | ||
1272 | * Technically scrubbing is the same as wear-leveling, so it is done | ||
1273 | * by the WL worker. | ||
1274 | */ | ||
1275 | return ensure_wear_leveling(ubi); | ||
1276 | } | ||
1277 | |||
1278 | /** | ||
1279 | * ubi_wl_flush - flush all pending works. | ||
1280 | * @ubi: UBI device description object | ||
1281 | * | ||
1282 | * This function returns zero in case of success and a negative error code in | ||
1283 | * case of failure. | ||
1284 | */ | ||
1285 | int ubi_wl_flush(struct ubi_device *ubi) | ||
1286 | { | ||
1287 | int err, pending_count; | ||
1288 | |||
1289 | pending_count = ubi->works_count; | ||
1290 | |||
1291 | dbg_wl("flush (%d pending works)", pending_count); | ||
1292 | |||
1293 | /* | ||
1294 | * Erase while the pending works queue is not empty, but not more than | ||
1295 | * the number of currently pending works. | ||
1296 | */ | ||
1297 | while (pending_count-- > 0) { | ||
1298 | err = do_work(ubi); | ||
1299 | if (err) | ||
1300 | return err; | ||
1301 | } | ||
1302 | |||
1303 | return 0; | ||
1304 | } | ||
1305 | |||
1306 | /** | ||
1307 | * tree_destroy - destroy an RB-tree. | ||
1308 | * @root: the root of the tree to destroy | ||
1309 | */ | ||
1310 | static void tree_destroy(struct rb_root *root) | ||
1311 | { | ||
1312 | struct rb_node *rb; | ||
1313 | struct ubi_wl_entry *e; | ||
1314 | |||
1315 | rb = root->rb_node; | ||
1316 | while (rb) { | ||
1317 | if (rb->rb_left) | ||
1318 | rb = rb->rb_left; | ||
1319 | else if (rb->rb_right) | ||
1320 | rb = rb->rb_right; | ||
1321 | else { | ||
1322 | e = rb_entry(rb, struct ubi_wl_entry, rb); | ||
1323 | |||
1324 | rb = rb_parent(rb); | ||
1325 | if (rb) { | ||
1326 | if (rb->rb_left == &e->rb) | ||
1327 | rb->rb_left = NULL; | ||
1328 | else | ||
1329 | rb->rb_right = NULL; | ||
1330 | } | ||
1331 | |||
1332 | kmem_cache_free(wl_entries_slab, e); | ||
1333 | } | ||
1334 | } | ||
1335 | } | ||
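tree_destroy() frees the whole tree iteratively: walk down to a leaf, free it, and null the parent's child pointer so the freed subtree is never revisited; no recursion and no auxiliary stack are needed. A user-space sketch of the same traversal on a plain binary tree (types invented for illustration):

#include <stdlib.h>

struct node {
	struct node *left, *right, *parent;
};

/* Same idea as tree_destroy(): descend to a leaf, free it, clear the
 * parent's link to it, then climb back up and repeat. */
static void destroy(struct node *n)
{
	while (n) {
		if (n->left)
			n = n->left;
		else if (n->right)
			n = n->right;
		else {
			struct node *parent = n->parent;

			if (parent) {
				if (parent->left == n)
					parent->left = NULL;
				else
					parent->right = NULL;
			}
			free(n);
			n = parent;
		}
	}
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));
	struct node *leaf = calloc(1, sizeof(*leaf));

	root->left = leaf;
	leaf->parent = root;
	destroy(root);		/* frees the leaf first, then the root */
	return 0;
}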
1336 | |||
1337 | /** | ||
1338 | * ubi_thread - UBI background thread. | ||
1339 | * @u: the UBI device description object pointer | ||
1340 | */ | ||
1341 | static int ubi_thread(void *u) | ||
1342 | { | ||
1343 | int failures = 0; | ||
1344 | struct ubi_device *ubi = u; | ||
1345 | |||
1346 | ubi_msg("background thread \"%s\" started, PID %d", | ||
1347 | ubi->bgt_name, current->pid); | ||
1348 | |||
1349 | for (;;) { | ||
1350 | int err; | ||
1351 | |||
1352 | if (kthread_should_stop()) | ||
1353 | goto out; | ||
1354 | |||
1355 | if (try_to_freeze()) | ||
1356 | continue; | ||
1357 | |||
1358 | spin_lock(&ubi->wl_lock); | ||
1359 | if (list_empty(&ubi->works) || ubi->ro_mode || | ||
1360 | !ubi->thread_enabled) { | ||
1361 | set_current_state(TASK_INTERRUPTIBLE); | ||
1362 | spin_unlock(&ubi->wl_lock); | ||
1363 | schedule(); | ||
1364 | continue; | ||
1365 | } | ||
1366 | spin_unlock(&ubi->wl_lock); | ||
1367 | |||
1368 | err = do_work(ubi); | ||
1369 | if (err) { | ||
1370 | ubi_err("%s: work failed with error code %d", | ||
1371 | ubi->bgt_name, err); | ||
1372 | if (failures++ > WL_MAX_FAILURES) { | ||
1373 | /* | ||
1374 | * Too many failures, disable the thread and | ||
1375 | * switch to read-only mode. | ||
1376 | */ | ||
1377 | ubi_msg("%s: %d consecutive failures", | ||
1378 | ubi->bgt_name, WL_MAX_FAILURES); | ||
1379 | ubi_ro_mode(ubi); | ||
1380 | break; | ||
1381 | } | ||
1382 | } else | ||
1383 | failures = 0; | ||
1384 | |||
1385 | cond_resched(); | ||
1386 | } | ||
1387 | |||
1388 | out: | ||
1389 | dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); | ||
1390 | return 0; | ||
1391 | } | ||
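The failure accounting is deliberately about consecutive failures: one successful work resets the counter, so only a sustained error streak disables the thread and flips the device read-only. The guard in isolation, with a stand-in for WL_MAX_FAILURES:

#include <stdio.h>

#define MAX_FAILURES 32		/* stand-in for WL_MAX_FAILURES */

int main(void)
{
	int failures = 0;

	for (int i = 0; i < 100; i++) {
		int err = (i >= 50);	/* pretend every work fails from i = 50 on */

		if (err) {
			if (failures++ > MAX_FAILURES) {
				printf("too many consecutive failures at i = %d\n", i);
				break;
			}
		} else
			failures = 0;	/* one success resets the streak */
	}
	return 0;
}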
1392 | |||
1393 | /** | ||
1394 | * cancel_pending - cancel all pending works. | ||
1395 | * @ubi: UBI device description object | ||
1396 | */ | ||
1397 | static void cancel_pending(struct ubi_device *ubi) | ||
1398 | { | ||
1399 | while (!list_empty(&ubi->works)) { | ||
1400 | struct ubi_work *wrk; | ||
1401 | |||
1402 | wrk = list_entry(ubi->works.next, struct ubi_work, list); | ||
1403 | list_del(&wrk->list); | ||
1404 | wrk->func(ubi, wrk, 1); | ||
1405 | ubi->works_count -= 1; | ||
1406 | ubi_assert(ubi->works_count >= 0); | ||
1407 | } | ||
1408 | } | ||
1409 | |||
1410 | /** | ||
1411 | * ubi_wl_init_scan - initialize the wear-leveling unit using scanning | ||
1412 | * information. | ||
1413 | * @ubi: UBI device description object | ||
1414 | * @si: scanning information | ||
1415 | * | ||
1416 | * This function returns zero in case of success, and a negative error code in | ||
1417 | * case of failure. | ||
1418 | */ | ||
1419 | int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) | ||
1420 | { | ||
1421 | int err; | ||
1422 | struct rb_node *rb1, *rb2; | ||
1423 | struct ubi_scan_volume *sv; | ||
1424 | struct ubi_scan_leb *seb, *tmp; | ||
1425 | struct ubi_wl_entry *e; | ||
1426 | |||
1427 | |||
1428 | ubi->used = ubi->free = ubi->scrub = RB_ROOT; | ||
1429 | ubi->prot.pnum = ubi->prot.aec = RB_ROOT; | ||
1430 | spin_lock_init(&ubi->wl_lock); | ||
1431 | ubi->max_ec = si->max_ec; | ||
1432 | INIT_LIST_HEAD(&ubi->works); | ||
1433 | |||
1434 | sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); | ||
1435 | |||
1436 | ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); | ||
1437 | if (IS_ERR(ubi->bgt_thread)) { | ||
1438 | err = PTR_ERR(ubi->bgt_thread); | ||
1439 | ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, | ||
1440 | err); | ||
1441 | return err; | ||
1442 | } | ||
1443 | |||
1444 | if (ubi_devices_cnt == 0) { | ||
1445 | wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab", | ||
1446 | sizeof(struct ubi_wl_entry), | ||
1447 | 0, 0, NULL, NULL); | ||
1448 | if (!wl_entries_slab) | ||
1449 | return -ENOMEM; | ||
1450 | } | ||
1451 | |||
1452 | err = -ENOMEM; | ||
1453 | ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); | ||
1454 | if (!ubi->lookuptbl) | ||
1455 | goto out_free; | ||
1456 | |||
1457 | list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { | ||
1458 | cond_resched(); | ||
1459 | |||
1460 | e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); | ||
1461 | if (!e) | ||
1462 | goto out_free; | ||
1463 | |||
1464 | e->pnum = seb->pnum; | ||
1465 | e->ec = seb->ec; | ||
1466 | ubi->lookuptbl[e->pnum] = e; | ||
1467 | if (schedule_erase(ubi, e, 0)) { | ||
1468 | kmem_cache_free(wl_entries_slab, e); | ||
1469 | goto out_free; | ||
1470 | } | ||
1471 | } | ||
1472 | |||
1473 | list_for_each_entry(seb, &si->free, u.list) { | ||
1474 | cond_resched(); | ||
1475 | |||
1476 | e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); | ||
1477 | if (!e) | ||
1478 | goto out_free; | ||
1479 | |||
1480 | e->pnum = seb->pnum; | ||
1481 | e->ec = seb->ec; | ||
1482 | ubi_assert(e->ec >= 0); | ||
1483 | free_tree_add(ubi, e); | ||
1484 | ubi->lookuptbl[e->pnum] = e; | ||
1485 | } | ||
1486 | |||
1487 | list_for_each_entry(seb, &si->corr, u.list) { | ||
1488 | cond_resched(); | ||
1489 | |||
1490 | e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); | ||
1491 | if (!e) | ||
1492 | goto out_free; | ||
1493 | |||
1494 | e->pnum = seb->pnum; | ||
1495 | e->ec = seb->ec; | ||
1496 | ubi->lookuptbl[e->pnum] = e; | ||
1497 | if (schedule_erase(ubi, e, 0)) { | ||
1498 | kmem_cache_free(wl_entries_slab, e); | ||
1499 | goto out_free; | ||
1500 | } | ||
1501 | } | ||
1502 | |||
1503 | ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { | ||
1504 | ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { | ||
1505 | cond_resched(); | ||
1506 | |||
1507 | e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); | ||
1508 | if (!e) | ||
1509 | goto out_free; | ||
1510 | |||
1511 | e->pnum = seb->pnum; | ||
1512 | e->ec = seb->ec; | ||
1513 | ubi->lookuptbl[e->pnum] = e; | ||
1514 | if (!seb->scrub) { | ||
1515 | dbg_wl("add PEB %d EC %d to the used tree", | ||
1516 | e->pnum, e->ec); | ||
1517 | used_tree_add(ubi, e); | ||
1518 | } else { | ||
1519 | dbg_wl("add PEB %d EC %d to the scrub tree", | ||
1520 | e->pnum, e->ec); | ||
1521 | scrub_tree_add(ubi, e); | ||
1522 | } | ||
1523 | } | ||
1524 | } | ||
1525 | |||
1526 | if (WL_RESERVED_PEBS > ubi->avail_pebs) { | ||
1527 | ubi_err("not enough physical eraseblocks (%d, need %d)", | ||
1528 | ubi->avail_pebs, WL_RESERVED_PEBS); | ||
1529 | goto out_free; | ||
1530 | } | ||
1531 | ubi->avail_pebs -= WL_RESERVED_PEBS; | ||
1532 | ubi->rsvd_pebs += WL_RESERVED_PEBS; | ||
1533 | |||
1534 | /* Schedule wear-leveling if needed */ | ||
1535 | err = ensure_wear_leveling(ubi); | ||
1536 | if (err) | ||
1537 | goto out_free; | ||
1538 | |||
1539 | return 0; | ||
1540 | |||
1541 | out_free: | ||
1542 | cancel_pending(ubi); | ||
1543 | tree_destroy(&ubi->used); | ||
1544 | tree_destroy(&ubi->free); | ||
1545 | tree_destroy(&ubi->scrub); | ||
1546 | kfree(ubi->lookuptbl); | ||
1547 | if (ubi_devices_cnt == 0) | ||
1548 | kmem_cache_destroy(wl_entries_slab); | ||
1549 | return err; | ||
1550 | } | ||
1551 | |||
1552 | /** | ||
1553 | * protection_trees_destroy - destroy the protection RB-trees. | ||
1554 | * @ubi: UBI device description object | ||
1555 | */ | ||
1556 | static void protection_trees_destroy(struct ubi_device *ubi) | ||
1557 | { | ||
1558 | struct rb_node *rb; | ||
1559 | struct ubi_wl_prot_entry *pe; | ||
1560 | |||
1561 | rb = ubi->prot.aec.rb_node; | ||
1562 | while (rb) { | ||
1563 | if (rb->rb_left) | ||
1564 | rb = rb->rb_left; | ||
1565 | else if (rb->rb_right) | ||
1566 | rb = rb->rb_right; | ||
1567 | else { | ||
1568 | pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec); | ||
1569 | |||
1570 | rb = rb_parent(rb); | ||
1571 | if (rb) { | ||
1572 | if (rb->rb_left == &pe->rb_aec) | ||
1573 | rb->rb_left = NULL; | ||
1574 | else | ||
1575 | rb->rb_right = NULL; | ||
1576 | } | ||
1577 | |||
1578 | kmem_cache_free(wl_entries_slab, pe->e); | ||
1579 | kfree(pe); | ||
1580 | } | ||
1581 | } | ||
1582 | } | ||
1583 | |||
1584 | /** | ||
1585 | * ubi_wl_close - close the wear-leveling unit. | ||
1586 | * @ubi: UBI device description object | ||
1587 | */ | ||
1588 | void ubi_wl_close(struct ubi_device *ubi) | ||
1589 | { | ||
1590 | dbg_wl("disable \"%s\"", ubi->bgt_name); | ||
1591 | if (ubi->bgt_thread) | ||
1592 | kthread_stop(ubi->bgt_thread); | ||
1593 | |||
1594 | dbg_wl("close the UBI wear-leveling unit"); | ||
1595 | |||
1596 | cancel_pending(ubi); | ||
1597 | protection_trees_destroy(ubi); | ||
1598 | tree_destroy(&ubi->used); | ||
1599 | tree_destroy(&ubi->free); | ||
1600 | tree_destroy(&ubi->scrub); | ||
1601 | kfree(ubi->lookuptbl); | ||
1602 | if (ubi_devices_cnt == 1) | ||
1603 | kmem_cache_destroy(wl_entries_slab); | ||
1604 | } | ||
1605 | |||
1606 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | ||
1607 | |||
1608 | /** | ||
1609 | * paranoid_check_ec - make sure that the erase counter of a physical eraseblock | ||
1610 | * is correct. | ||
1611 | * @ubi: UBI device description object | ||
1612 | * @pnum: the physical eraseblock number to check | ||
1613 | * @ec: the erase counter to check | ||
1614 | * | ||
1615 | * This function returns zero if the erase counter of physical eraseblock @pnum | ||
1616 | * is equal to @ec, %1 if not, and a negative error code if an error | ||
1617 | * occurred. | ||
1618 | */ | ||
1619 | static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec) | ||
1620 | { | ||
1621 | int err; | ||
1622 | long long read_ec; | ||
1623 | struct ubi_ec_hdr *ec_hdr; | ||
1624 | |||
1625 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
1626 | if (!ec_hdr) | ||
1627 | return -ENOMEM; | ||
1628 | |||
1629 | err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0); | ||
1630 | if (err && err != UBI_IO_BITFLIPS) { | ||
1631 | /* The header does not have to exist */ | ||
1632 | err = 0; | ||
1633 | goto out_free; | ||
1634 | } | ||
1635 | |||
1636 | read_ec = ubi64_to_cpu(ec_hdr->ec); | ||
1637 | if (ec != read_ec) { | ||
1638 | ubi_err("paranoid check failed for PEB %d", pnum); | ||
1639 | ubi_err("read EC is %lld, should be %d", read_ec, ec); | ||
1640 | ubi_dbg_dump_stack(); | ||
1641 | err = 1; | ||
1642 | } else | ||
1643 | err = 0; | ||
1644 | |||
1645 | out_free: | ||
1646 | kfree(ec_hdr); | ||
1647 | return err; | ||
1648 | } | ||
1649 | |||
1650 | /** | ||
1651 | * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present | ||
1652 | * in a WL RB-tree. | ||
1653 | * @e: the wear-leveling entry to check | ||
1654 | * @root: the root of the tree | ||
1655 | * | ||
1656 | * This function returns zero if @e is in the @root RB-tree and %1 if it | ||
1657 | * is not. | ||
1658 | */ | ||
1659 | static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, | ||
1660 | struct rb_root *root) | ||
1661 | { | ||
1662 | if (in_wl_tree(e, root)) | ||
1663 | return 0; | ||
1664 | |||
1665 | ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p", | ||
1666 | e->pnum, e->ec, root); | ||
1667 | ubi_dbg_dump_stack(); | ||
1668 | return 1; | ||
1669 | } | ||
1670 | |||
1671 | #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ | ||
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c index 06e33786078d..4bee99ba7dbb 100644 --- a/drivers/net/3c501.c +++ b/drivers/net/3c501.c | |||
@@ -735,7 +735,6 @@ static void el_receive(struct net_device *dev) | |||
735 | else | 735 | else |
736 | { | 736 | { |
737 | skb_reserve(skb,2); /* Force 16 byte alignment */ | 737 | skb_reserve(skb,2); /* Force 16 byte alignment */ |
738 | skb->dev = dev; | ||
739 | /* | 738 | /* |
740 | * The read increments through the bytes. The interrupt | 739 | * The read increments through the bytes. The interrupt |
741 | * handler will fix the pointer when it returns to | 740 | * handler will fix the pointer when it returns to |
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c index 702bfb2a5e99..e985a85a5623 100644 --- a/drivers/net/3c505.c +++ b/drivers/net/3c505.c | |||
@@ -615,7 +615,6 @@ static void receive_packet(struct net_device *dev, int len) | |||
615 | if (test_and_set_bit(0, (void *) &adapter->dmaing)) | 615 | if (test_and_set_bit(0, (void *) &adapter->dmaing)) |
616 | printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction); | 616 | printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction); |
617 | 617 | ||
618 | skb->dev = dev; | ||
619 | adapter->current_dma.direction = 0; | 618 | adapter->current_dma.direction = 0; |
620 | adapter->current_dma.length = rlen; | 619 | adapter->current_dma.length = rlen; |
621 | adapter->current_dma.skb = skb; | 620 | adapter->current_dma.skb = skb; |
@@ -1026,7 +1025,7 @@ static int send_packet(struct net_device *dev, struct sk_buff *skb) | |||
1026 | adapter->current_dma.start_time = jiffies; | 1025 | adapter->current_dma.start_time = jiffies; |
1027 | 1026 | ||
1028 | if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) { | 1027 | if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) { |
1029 | memcpy(adapter->dma_buffer, skb->data, nlen); | 1028 | skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen); |
1030 | memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len); | 1029 | memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len); |
1031 | target = isa_virt_to_bus(adapter->dma_buffer); | 1030 | target = isa_virt_to_bus(adapter->dma_buffer); |
1032 | } | 1031 | } |
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c index 54e1d5aebed3..eed4299dc426 100644 --- a/drivers/net/3c507.c +++ b/drivers/net/3c507.c | |||
@@ -873,7 +873,6 @@ static void el16_rx(struct net_device *dev) | |||
873 | } | 873 | } |
874 | 874 | ||
875 | skb_reserve(skb,2); | 875 | skb_reserve(skb,2); |
876 | skb->dev = dev; | ||
877 | 876 | ||
878 | /* 'skb->data' points to the start of sk_buff data area. */ | 877 | /* 'skb->data' points to the start of sk_buff data area. */ |
879 | memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len); | 878 | memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len); |
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index f791bf026e51..c7511c4d3b68 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c | |||
@@ -1091,7 +1091,6 @@ el3_rx(struct net_device *dev) | |||
1091 | printk("Receiving packet size %d status %4.4x.\n", | 1091 | printk("Receiving packet size %d status %4.4x.\n", |
1092 | pkt_len, rx_status); | 1092 | pkt_len, rx_status); |
1093 | if (skb != NULL) { | 1093 | if (skb != NULL) { |
1094 | skb->dev = dev; | ||
1095 | skb_reserve(skb, 2); /* Align IP on 16 byte */ | 1094 | skb_reserve(skb, 2); /* Align IP on 16 byte */ |
1096 | 1095 | ||
1097 | /* 'skb->data' points to the start of sk_buff data area. */ | 1096 | /* 'skb->data' points to the start of sk_buff data area. */ |
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index c307ce66145c..290166d5e7d1 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c | |||
@@ -1292,7 +1292,6 @@ static int corkscrew_rx(struct net_device *dev) | |||
1292 | printk("Receiving packet size %d status %4.4x.\n", | 1292 | printk("Receiving packet size %d status %4.4x.\n", |
1293 | pkt_len, rx_status); | 1293 | pkt_len, rx_status); |
1294 | if (skb != NULL) { | 1294 | if (skb != NULL) { |
1295 | skb->dev = dev; | ||
1296 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 1295 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
1297 | /* 'skb_put()' points to the start of sk_buff data area. */ | 1296 | /* 'skb_put()' points to the start of sk_buff data area. */ |
1298 | insl(ioaddr + RX_FIFO, | 1297 | insl(ioaddr + RX_FIFO, |
@@ -1363,7 +1362,6 @@ static int boomerang_rx(struct net_device *dev) | |||
1363 | copying to a properly sized skbuff. */ | 1362 | copying to a properly sized skbuff. */ |
1364 | if (pkt_len < rx_copybreak | 1363 | if (pkt_len < rx_copybreak |
1365 | && (skb = dev_alloc_skb(pkt_len + 4)) != 0) { | 1364 | && (skb = dev_alloc_skb(pkt_len + 4)) != 0) { |
1366 | skb->dev = dev; | ||
1367 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 1365 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
1368 | /* 'skb_put()' points to the start of sk_buff data area. */ | 1366 | /* 'skb_put()' points to the start of sk_buff data area. */ |
1369 | memcpy(skb_put(skb, pkt_len), | 1367 | memcpy(skb_put(skb, pkt_len), |
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index 17d61eb0a7e5..da1a22c13865 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c | |||
@@ -988,7 +988,6 @@ static void elmc_rcv_int(struct net_device *dev) | |||
988 | rbd->status = 0; | 988 | rbd->status = 0; |
989 | skb = (struct sk_buff *) dev_alloc_skb(totlen + 2); | 989 | skb = (struct sk_buff *) dev_alloc_skb(totlen + 2); |
990 | if (skb != NULL) { | 990 | if (skb != NULL) { |
991 | skb->dev = dev; | ||
992 | skb_reserve(skb, 2); /* 16 byte alignment */ | 991 | skb_reserve(skb, 2); /* 16 byte alignment */ |
993 | skb_put(skb,totlen); | 992 | skb_put(skb,totlen); |
994 | eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0); | 993 | eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0); |
@@ -1146,7 +1145,7 @@ static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
1146 | 1145 | ||
1147 | if (len != skb->len) | 1146 | if (len != skb->len) |
1148 | memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); | 1147 | memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); |
1149 | memcpy((char *) p->xmit_cbuffs[p->xmit_count], (char *) (skb->data), skb->len); | 1148 | skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len); |
1150 | 1149 | ||
1151 | #if (NUM_XMIT_BUFFS == 1) | 1150 | #if (NUM_XMIT_BUFFS == 1) |
1152 | #ifdef NO_NOPCOMMANDS | 1151 | #ifdef NO_NOPCOMMANDS |
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index 6c7437e60bd2..c7b571be20e0 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c | |||
@@ -1189,7 +1189,6 @@ static void mc32_rx_ring(struct net_device *dev) | |||
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | skb->protocol=eth_type_trans(skb,dev); | 1191 | skb->protocol=eth_type_trans(skb,dev); |
1192 | skb->dev=dev; | ||
1193 | dev->last_rx = jiffies; | 1192 | dev->last_rx = jiffies; |
1194 | lp->net_stats.rx_packets++; | 1193 | lp->net_stats.rx_packets++; |
1195 | lp->net_stats.rx_bytes += length; | 1194 | lp->net_stats.rx_bytes += length; |
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index b406ecfa7268..80924f76dee8 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -2414,7 +2414,6 @@ static int vortex_rx(struct net_device *dev) | |||
2414 | printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n", | 2414 | printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n", |
2415 | pkt_len, rx_status); | 2415 | pkt_len, rx_status); |
2416 | if (skb != NULL) { | 2416 | if (skb != NULL) { |
2417 | skb->dev = dev; | ||
2418 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 2417 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
2419 | /* 'skb_put()' points to the start of sk_buff data area. */ | 2418 | /* 'skb_put()' points to the start of sk_buff data area. */ |
2420 | if (vp->bus_master && | 2419 | if (vp->bus_master && |
@@ -2491,7 +2490,6 @@ boomerang_rx(struct net_device *dev) | |||
2491 | /* Check if the packet is long enough to just accept without | 2490 | /* Check if the packet is long enough to just accept without |
2492 | copying to a properly sized skbuff. */ | 2491 | copying to a properly sized skbuff. */ |
2493 | if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { | 2492 | if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { |
2494 | skb->dev = dev; | ||
2495 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 2493 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
2496 | pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | 2494 | pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); |
2497 | /* 'skb_put()' points to the start of sk_buff data area. */ | 2495 | /* 'skb_put()' points to the start of sk_buff data area. */ |
diff --git a/drivers/net/7990.c b/drivers/net/7990.c index 1b3d11ed6cff..d396f996af57 100644 --- a/drivers/net/7990.c +++ b/drivers/net/7990.c | |||
@@ -331,7 +331,6 @@ static int lance_rx (struct net_device *dev) | |||
331 | return 0; | 331 | return 0; |
332 | } | 332 | } |
333 | 333 | ||
334 | skb->dev = dev; | ||
335 | skb_reserve (skb, 2); /* 16 byte align */ | 334 | skb_reserve (skb, 2); /* 16 byte align */ |
336 | skb_put (skb, len); /* make room */ | 335 | skb_put (skb, len); /* make room */ |
337 | eth_copy_and_sum(skb, | 336 | eth_copy_and_sum(skb, |
@@ -568,7 +567,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
568 | 567 | ||
569 | if (skb->len < ETH_ZLEN) | 568 | if (skb->len < ETH_ZLEN) |
570 | memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); | 569 | memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); |
571 | memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen); | 570 | skb_copy_from_linear_data(skb, &ib->tx_buf[entry][0], skblen); |
572 | 571 | ||
573 | /* Now, give the packet to the lance */ | 572 | /* Now, give the packet to the lance */ |
574 | ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); | 573 | ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); |
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 12c8453f44bc..e8c9f27817b0 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c | |||
@@ -573,7 +573,6 @@ rx_status_loop: | |||
573 | } | 573 | } |
574 | 574 | ||
575 | skb_reserve(new_skb, RX_OFFSET); | 575 | skb_reserve(new_skb, RX_OFFSET); |
576 | new_skb->dev = dev; | ||
577 | 576 | ||
578 | pci_unmap_single(cp->pdev, mapping, | 577 | pci_unmap_single(cp->pdev, mapping, |
579 | buflen, PCI_DMA_FROMDEVICE); | 578 | buflen, PCI_DMA_FROMDEVICE); |
@@ -807,7 +806,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
807 | if (mss) | 806 | if (mss) |
808 | flags |= LargeSend | ((mss & MSSMask) << MSSShift); | 807 | flags |= LargeSend | ((mss & MSSMask) << MSSShift); |
809 | else if (skb->ip_summed == CHECKSUM_PARTIAL) { | 808 | else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
810 | const struct iphdr *ip = skb->nh.iph; | 809 | const struct iphdr *ip = ip_hdr(skb); |
811 | if (ip->protocol == IPPROTO_TCP) | 810 | if (ip->protocol == IPPROTO_TCP) |
812 | flags |= IPCS | TCPCS; | 811 | flags |= IPCS | TCPCS; |
813 | else if (ip->protocol == IPPROTO_UDP) | 812 | else if (ip->protocol == IPPROTO_UDP) |
@@ -826,7 +825,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
826 | u32 first_len, first_eor; | 825 | u32 first_len, first_eor; |
827 | dma_addr_t first_mapping; | 826 | dma_addr_t first_mapping; |
828 | int frag, first_entry = entry; | 827 | int frag, first_entry = entry; |
829 | const struct iphdr *ip = skb->nh.iph; | 828 | const struct iphdr *ip = ip_hdr(skb); |
830 | 829 | ||
831 | /* We must give this initial chunk to the device last. | 830 | /* We must give this initial chunk to the device last. |
832 | * Otherwise we could race with the device. | 831 | * Otherwise we could race with the device. |
@@ -1082,7 +1081,6 @@ static int cp_refill_rx (struct cp_private *cp) | |||
1082 | if (!skb) | 1081 | if (!skb) |
1083 | goto err_out; | 1082 | goto err_out; |
1084 | 1083 | ||
1085 | skb->dev = cp->dev; | ||
1086 | skb_reserve(skb, RX_OFFSET); | 1084 | skb_reserve(skb, RX_OFFSET); |
1087 | 1085 | ||
1088 | mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz, | 1086 | mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz, |
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 99304b2aa86e..a844b1fe2dc4 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -1904,10 +1904,10 @@ static __inline__ void wrap_copy(struct sk_buff *skb, const unsigned char *ring, | |||
1904 | u32 left = RX_BUF_LEN - offset; | 1904 | u32 left = RX_BUF_LEN - offset; |
1905 | 1905 | ||
1906 | if (size > left) { | 1906 | if (size > left) { |
1907 | memcpy(skb->data, ring + offset, left); | 1907 | skb_copy_to_linear_data(skb, ring + offset, left); |
1908 | memcpy(skb->data+left, ring, size - left); | 1908 | skb_copy_to_linear_data_offset(skb, left, ring, size - left); |
1909 | } else | 1909 | } else |
1910 | memcpy(skb->data, ring + offset, size); | 1910 | skb_copy_to_linear_data(skb, ring + offset, size); |
1911 | } | 1911 | } |
1912 | #endif | 1912 | #endif |
1913 | 1913 | ||
@@ -2013,7 +2013,6 @@ no_early_rx: | |||
2013 | 2013 | ||
2014 | skb = dev_alloc_skb (pkt_size + 2); | 2014 | skb = dev_alloc_skb (pkt_size + 2); |
2015 | if (likely(skb)) { | 2015 | if (likely(skb)) { |
2016 | skb->dev = dev; | ||
2017 | skb_reserve (skb, 2); /* 16 byte align the IP fields. */ | 2016 | skb_reserve (skb, 2); /* 16 byte align the IP fields. */ |
2018 | #if RX_BUF_IDX == 3 | 2017 | #if RX_BUF_IDX == 3 |
2019 | wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); | 2018 | wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); |
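wrap_copy() above shows the mirror-image "to" helpers, including the _offset form used when the copy wraps past the end of the receive ring. Sketched expansions, under the same assumptions as the "from" variants shown earlier:

    static inline void skb_copy_to_linear_data(struct sk_buff *skb,
                                               const void *from,
                                               const unsigned int len)
    {
            /* copy into the linear buffer at skb->data */
            memcpy(skb->data, from, len);
    }

    static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
                                                      const int offset,
                                                      const void *from,
                                                      const unsigned int len)
    {
            /* copy into the linear buffer offset bytes past skb->data */
            memcpy(skb->data + offset, from, len);
    }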
diff --git a/drivers/net/82596.c b/drivers/net/82596.c index 640d7ca2ebcf..3ff1155459a3 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c | |||
@@ -830,7 +830,6 @@ memory_squeeze: | |||
830 | lp->stats.rx_dropped++; | 830 | lp->stats.rx_dropped++; |
831 | } | 831 | } |
832 | else { | 832 | else { |
833 | skb->dev = dev; | ||
834 | if (!rx_in_place) { | 833 | if (!rx_in_place) { |
835 | /* 16 byte align the data fields */ | 834 | /* 16 byte align the data fields */ |
836 | skb_reserve(skb, 2); | 835 | skb_reserve(skb, 2); |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 33af833667da..58527322a39d 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -206,7 +206,7 @@ obj-$(CONFIG_TR) += tokenring/ | |||
206 | obj-$(CONFIG_WAN) += wan/ | 206 | obj-$(CONFIG_WAN) += wan/ |
207 | obj-$(CONFIG_ARCNET) += arcnet/ | 207 | obj-$(CONFIG_ARCNET) += arcnet/ |
208 | obj-$(CONFIG_NET_PCMCIA) += pcmcia/ | 208 | obj-$(CONFIG_NET_PCMCIA) += pcmcia/ |
209 | obj-$(CONFIG_NET_RADIO) += wireless/ | 209 | obj-y += wireless/ |
210 | obj-$(CONFIG_NET_TULIP) += tulip/ | 210 | obj-$(CONFIG_NET_TULIP) += tulip/ |
211 | obj-$(CONFIG_HAMRADIO) += hamradio/ | 211 | obj-$(CONFIG_HAMRADIO) += hamradio/ |
212 | obj-$(CONFIG_IRDA) += irda/ | 212 | obj-$(CONFIG_IRDA) += irda/ |
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index d76548e75350..1226cbba0450 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c | |||
@@ -320,7 +320,6 @@ static int lance_rx (struct net_device *dev) | |||
320 | return 0; | 320 | return 0; |
321 | } | 321 | } |
322 | 322 | ||
323 | skb->dev = dev; | ||
324 | skb_reserve (skb, 2); /* 16 byte align */ | 323 | skb_reserve (skb, 2); /* 16 byte align */ |
325 | skb_put (skb, len); /* make room */ | 324 | skb_put (skb, len); /* make room */ |
326 | eth_copy_and_sum(skb, | 325 | eth_copy_and_sum(skb, |
@@ -599,7 +598,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
599 | ib->btx_ring [entry].length = (-len) | 0xf000; | 598 | ib->btx_ring [entry].length = (-len) | 0xf000; |
600 | ib->btx_ring [entry].misc = 0; | 599 | ib->btx_ring [entry].misc = 0; |
601 | 600 | ||
602 | memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen); | 601 | skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); |
603 | 602 | ||
604 | /* Clear the slack of the packet, do I need this? */ | 603 | /* Clear the slack of the packet, do I need this? */ |
605 | if (len != skblen) | 604 | if (len != skblen) |
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c index 7138e0e025bc..7122b7ba8d61 100644 --- a/drivers/net/acenic.c +++ b/drivers/net/acenic.c | |||
@@ -2027,7 +2027,6 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) | |||
2027 | */ | 2027 | */ |
2028 | csum = retdesc->tcp_udp_csum; | 2028 | csum = retdesc->tcp_udp_csum; |
2029 | 2029 | ||
2030 | skb->dev = dev; | ||
2031 | skb->protocol = eth_type_trans(skb, dev); | 2030 | skb->protocol = eth_type_trans(skb, dev); |
2032 | 2031 | ||
2033 | /* | 2032 | /* |
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index 962c954c2d56..675fe918421b 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c | |||
@@ -798,9 +798,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget) | |||
798 | pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], | 798 | pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], |
799 | lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); | 799 | lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); |
800 | skb_put(skb, pkt_len); | 800 | skb_put(skb, pkt_len); |
801 | skb->dev = dev; | ||
802 | lp->rx_skbuff[rx_index] = new_skb; | 801 | lp->rx_skbuff[rx_index] = new_skb; |
803 | new_skb->dev = dev; | ||
804 | lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, | 802 | lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, |
805 | new_skb->data, | 803 | new_skb->data, |
806 | lp->rx_buff_len-2, | 804 | lp->rx_buff_len-2, |
@@ -926,9 +924,7 @@ static int amd8111e_rx(struct net_device *dev) | |||
926 | pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], | 924 | pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], |
927 | lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); | 925 | lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); |
928 | skb_put(skb, pkt_len); | 926 | skb_put(skb, pkt_len); |
929 | skb->dev = dev; | ||
930 | lp->rx_skbuff[rx_index] = new_skb; | 927 | lp->rx_skbuff[rx_index] = new_skb; |
931 | new_skb->dev = dev; | ||
932 | lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, | 928 | lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, |
933 | new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE); | 929 | new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE); |
934 | 930 | ||
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index dba5e5165452..da6ffa8cd81e 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c | |||
@@ -853,9 +853,9 @@ static void cops_rx(struct net_device *dev) | |||
853 | return; | 853 | return; |
854 | } | 854 | } |
855 | 855 | ||
856 | skb->mac.raw = skb->data; /* Point to entire packet. */ | 856 | skb_reset_mac_header(skb); /* Point to entire packet. */ |
857 | skb_pull(skb,3); | 857 | skb_pull(skb,3); |
858 | skb->h.raw = skb->data; /* Point to data (Skip header). */ | 858 | skb_reset_transport_header(skb); /* Point to data (Skip header). */ |
859 | 859 | ||
860 | /* Update the counters. */ | 860 | /* Update the counters. */ |
861 | lp->stats.rx_packets++; | 861 | lp->stats.rx_packets++; |
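The appletalk hunks replace direct stores to skb->mac.raw and skb->h.raw with skb_reset_mac_header() and skb_reset_transport_header(). The indirection matters because the reworked sk_buff keeps its header marks as sk_buff_data_t, which is an offset rather than a pointer on 64-bit builds, so callers must not assign the fields directly. A sketch of the pointer flavor of the helpers (the offset flavor stores skb->data - skb->head instead):

    static inline void skb_reset_mac_header(struct sk_buff *skb)
    {
            skb->mac_header = skb->data;       /* MAC header starts here */
    }

    static inline void skb_reset_transport_header(struct sk_buff *skb)
    {
            skb->transport_header = skb->data; /* L4 header starts here */
    }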
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 2ea44ce49810..6a6cbd331a16 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c | |||
@@ -770,13 +770,13 @@ static int sendup_buffer (struct net_device *dev) | |||
770 | skb->data[0] = dnode; | 770 | skb->data[0] = dnode; |
771 | skb->data[1] = snode; | 771 | skb->data[1] = snode; |
772 | skb->data[2] = llaptype; | 772 | skb->data[2] = llaptype; |
773 | skb->mac.raw = skb->data; /* save pointer to llap header */ | 773 | skb_reset_mac_header(skb); /* save pointer to llap header */ |
774 | skb_pull(skb,3); | 774 | skb_pull(skb,3); |
775 | 775 | ||
776 | /* copy ddp(s,e)hdr + contents */ | 776 | /* copy ddp(s,e)hdr + contents */ |
777 | memcpy(skb->data,(void*)ltdmabuf,len); | 777 | skb_copy_to_linear_data(skb, ltdmabuf, len); |
778 | 778 | ||
779 | skb->h.raw = skb->data; | 779 | skb_reset_transport_header(skb); |
780 | 780 | ||
781 | stats->rx_packets++; | 781 | stats->rx_packets++; |
782 | stats->rx_bytes+=skb->len; | 782 | stats->rx_bytes+=skb->len; |
@@ -917,13 +917,14 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev) | |||
917 | 917 | ||
918 | int i; | 918 | int i; |
919 | struct lt_sendlap cbuf; | 919 | struct lt_sendlap cbuf; |
920 | unsigned char *hdr; | ||
920 | 921 | ||
921 | cbuf.command = LT_SENDLAP; | 922 | cbuf.command = LT_SENDLAP; |
922 | cbuf.dnode = skb->data[0]; | 923 | cbuf.dnode = skb->data[0]; |
923 | cbuf.laptype = skb->data[2]; | 924 | cbuf.laptype = skb->data[2]; |
924 | skb_pull(skb,3); /* skip past LLAP header */ | 925 | skb_pull(skb,3); /* skip past LLAP header */ |
925 | cbuf.length = skb->len; /* this is host order */ | 926 | cbuf.length = skb->len; /* this is host order */ |
926 | skb->h.raw=skb->data; | 927 | skb_reset_transport_header(skb); |
927 | 928 | ||
928 | if(debug & DEBUG_UPPER) { | 929 | if(debug & DEBUG_UPPER) { |
929 | printk("command "); | 930 | printk("command "); |
@@ -932,11 +933,13 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev) | |||
932 | printk("\n"); | 933 | printk("\n"); |
933 | } | 934 | } |
934 | 935 | ||
935 | do_write(dev,&cbuf,sizeof(cbuf),skb->h.raw,skb->len); | 936 | hdr = skb_transport_header(skb); |
937 | do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len); | ||
936 | 938 | ||
937 | if(debug & DEBUG_UPPER) { | 939 | if(debug & DEBUG_UPPER) { |
938 | printk("sent %d ddp bytes\n",skb->len); | 940 | printk("sent %d ddp bytes\n",skb->len); |
939 | for(i=0;i<skb->len;i++) printk("%02x ",skb->h.raw[i]); | 941 | for (i = 0; i < skb->len; i++) |
942 | printk("%02x ", hdr[i]); | ||
940 | printk("\n"); | 943 | printk("\n"); |
941 | } | 944 | } |
942 | 945 | ||
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c index 6318814a11a8..e0a18e7c73cb 100644 --- a/drivers/net/arcnet/arc-rawmode.c +++ b/drivers/net/arcnet/arc-rawmode.c | |||
@@ -110,7 +110,7 @@ static void rx(struct net_device *dev, int bufnum, | |||
110 | 110 | ||
111 | pkt = (struct archdr *) skb->data; | 111 | pkt = (struct archdr *) skb->data; |
112 | 112 | ||
113 | skb->mac.raw = skb->data; | 113 | skb_reset_mac_header(skb); |
114 | skb_pull(skb, ARC_HDR_SIZE); | 114 | skb_pull(skb, ARC_HDR_SIZE); |
115 | 115 | ||
116 | /* up to sizeof(pkt->soft) has already been copied from the card */ | 116 | /* up to sizeof(pkt->soft) has already been copied from the card */ |
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 83004fdab0a4..681e20b8466f 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c | |||
@@ -519,9 +519,12 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev, | |||
519 | * real header when we do rebuild_header. | 519 | * real header when we do rebuild_header. |
520 | */ | 520 | */ |
521 | *(uint16_t *) skb_push(skb, 2) = type; | 521 | *(uint16_t *) skb_push(skb, 2) = type; |
522 | if (skb->nh.raw - skb->mac.raw != 2) | 522 | /* |
523 | * XXX: Why not use skb->mac_len? | ||
524 | */ | ||
525 | if (skb->network_header - skb->mac_header != 2) | ||
523 | BUGMSG(D_NORMAL, "arcnet_header: Yikes! diff (%d) is not 2!\n", | 526 | BUGMSG(D_NORMAL, "arcnet_header: Yikes! diff (%d) is not 2!\n", |
524 | (int)(skb->nh.raw - skb->mac.raw)); | 527 | (int)(skb->network_header - skb->mac_header)); |
525 | return -2; /* return error -- can't transmit yet! */ | 528 | return -2; /* return error -- can't transmit yet! */ |
526 | } | 529 | } |
527 | else { | 530 | else { |
@@ -554,11 +557,13 @@ static int arcnet_rebuild_header(struct sk_buff *skb) | |||
554 | unsigned short type; | 557 | unsigned short type; |
555 | uint8_t daddr=0; | 558 | uint8_t daddr=0; |
556 | struct ArcProto *proto; | 559 | struct ArcProto *proto; |
557 | 560 | /* | |
558 | if (skb->nh.raw - skb->mac.raw != 2) { | 561 | * XXX: Why not use skb->mac_len? |
562 | */ | ||
563 | if (skb->network_header - skb->mac_header != 2) { | ||
559 | BUGMSG(D_NORMAL, | 564 | BUGMSG(D_NORMAL, |
560 | "rebuild_header: shouldn't be here! (hdrsize=%d)\n", | 565 | "rebuild_header: shouldn't be here! (hdrsize=%d)\n", |
561 | (int)(skb->nh.raw - skb->mac.raw)); | 566 | (int)(skb->network_header - skb->mac_header)); |
562 | return 0; | 567 | return 0; |
563 | } | 568 | } |
564 | type = *(uint16_t *) skb_pull(skb, 2); | 569 | type = *(uint16_t *) skb_pull(skb, 2); |
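arcnet computes the MAC-to-network gap by subtracting the two marks directly; the subtraction is representation-agnostic (valid for both the pointer and the offset form of sk_buff_data_t), which is why the expression survives with only a field rename. The in-tree XXX asks why skb->mac_len is not used; a hypothetical rewrite of the same check through the accessors rather than the raw fields would read:

    /* hypothetical accessor-based form of the arcnet sanity check */
    if (skb_network_header(skb) - skb_mac_header(skb) != 2) {
            BUGMSG(D_NORMAL, "arcnet_header: Yikes! diff (%d) is not 2!\n",
                   (int)(skb_network_header(skb) - skb_mac_header(skb)));
            return -2;      /* can't transmit yet */
    }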
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index 66485585ab39..cc4610db6395 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c | |||
@@ -122,10 +122,8 @@ static void rx(struct net_device *dev, int bufnum, | |||
122 | } | 122 | } |
123 | skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); | 123 | skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); |
124 | skb->dev = dev; | 124 | skb->dev = dev; |
125 | 125 | skb_reset_mac_header(skb); | |
126 | pkt = (struct archdr *) skb->data; | 126 | pkt = (struct archdr *)skb_mac_header(skb); |
127 | |||
128 | skb->mac.raw = skb->data; | ||
129 | skb_pull(skb, ARC_HDR_SIZE); | 127 | skb_pull(skb, ARC_HDR_SIZE); |
130 | 128 | ||
131 | /* up to sizeof(pkt->soft) has already been copied from the card */ | 129 | /* up to sizeof(pkt->soft) has already been copied from the card */ |
@@ -270,13 +268,13 @@ static int ack_tx(struct net_device *dev, int acked) | |||
270 | skb_put(ackskb, length + ARC_HDR_SIZE ); | 268 | skb_put(ackskb, length + ARC_HDR_SIZE ); |
271 | ackskb->dev = dev; | 269 | ackskb->dev = dev; |
272 | 270 | ||
273 | ackpkt = (struct archdr *) ackskb->data; | 271 | skb_reset_mac_header(ackskb); |
274 | 272 | ackpkt = (struct archdr *)skb_mac_header(ackskb); | |
275 | ackskb->mac.raw = ackskb->data; | ||
276 | /* skb_pull(ackskb, ARC_HDR_SIZE); */ | 273 | /* skb_pull(ackskb, ARC_HDR_SIZE); */ |
277 | 274 | ||
278 | 275 | ||
279 | memcpy(ackpkt, lp->outgoing.skb->data, ARC_HDR_SIZE+sizeof(struct arc_cap)); | 276 | skb_copy_from_linear_data(lp->outgoing.skb, ackpkt, |
277 | ARC_HDR_SIZE + sizeof(struct arc_cap)); | ||
280 | ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */ | 278 | ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */ |
281 | ackpkt->soft.cap.mes.ack=acked; | 279 | ackpkt->soft.cap.mes.ack=acked; |
282 | 280 | ||
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index 6d6c69f036ef..2de8877ece29 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c | |||
@@ -94,7 +94,7 @@ static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev) | |||
94 | int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; | 94 | int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; |
95 | 95 | ||
96 | /* Pull off the arcnet header. */ | 96 | /* Pull off the arcnet header. */ |
97 | skb->mac.raw = skb->data; | 97 | skb_reset_mac_header(skb); |
98 | skb_pull(skb, hdr_size); | 98 | skb_pull(skb, hdr_size); |
99 | 99 | ||
100 | if (pkt->hard.dest == 0) | 100 | if (pkt->hard.dest == 0) |
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index bee34226abfa..460a095000c2 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c | |||
@@ -96,7 +96,7 @@ static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev) | |||
96 | int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; | 96 | int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; |
97 | 97 | ||
98 | /* Pull off the arcnet header. */ | 98 | /* Pull off the arcnet header. */ |
99 | skb->mac.raw = skb->data; | 99 | skb_reset_mac_header(skb); |
100 | skb_pull(skb, hdr_size); | 100 | skb_pull(skb, hdr_size); |
101 | 101 | ||
102 | if (pkt->hard.dest == 0) | 102 | if (pkt->hard.dest == 0) |
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index 9dfc09b181c1..a0e68e718531 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c | |||
@@ -743,7 +743,6 @@ static int ariadne_rx(struct net_device *dev) | |||
743 | } | 743 | } |
744 | 744 | ||
745 | 745 | ||
746 | skb->dev = dev; | ||
747 | skb_reserve(skb,2); /* 16 byte align */ | 746 | skb_reserve(skb,2); /* 16 byte align */ |
748 | skb_put(skb,pkt_len); /* Make room */ | 747 | skb_put(skb,pkt_len); /* Make room */ |
749 | eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0); | 748 | eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0); |
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index ddd12d44ff22..8f0d7ce503c9 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c | |||
@@ -526,7 +526,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv) | |||
526 | skb = dev_alloc_skb(len + 2); | 526 | skb = dev_alloc_skb(len + 2); |
527 | 527 | ||
528 | if (skb) { | 528 | if (skb) { |
529 | skb->dev = dev; | ||
530 | skb_reserve(skb, 2); | 529 | skb_reserve(skb, 2); |
531 | 530 | ||
532 | am_readbuffer(dev, pktaddr, skb_put(skb, len), len); | 531 | am_readbuffer(dev, pktaddr, skb_put(skb, len), len); |
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 1621b8fe35cf..152fa7a042b8 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c | |||
@@ -858,7 +858,6 @@ static void at91ether_rx(struct net_device *dev) | |||
858 | skb_reserve(skb, 2); | 858 | skb_reserve(skb, 2); |
859 | memcpy(skb_put(skb, pktlen), p_recv, pktlen); | 859 | memcpy(skb_put(skb, pktlen), p_recv, pktlen); |
860 | 860 | ||
861 | skb->dev = dev; | ||
862 | skb->protocol = eth_type_trans(skb, dev); | 861 | skb->protocol = eth_type_trans(skb, dev); |
863 | dev->last_rx = jiffies; | 862 | dev->last_rx = jiffies; |
864 | lp->stats.rx_bytes += pktlen; | 863 | lp->stats.rx_bytes += pktlen; |
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index dd698b033a62..2438c5bff237 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c | |||
@@ -255,7 +255,6 @@ static int ep93xx_rx(struct net_device *dev, int *budget) | |||
255 | 255 | ||
256 | skb = dev_alloc_skb(length + 2); | 256 | skb = dev_alloc_skb(length + 2); |
257 | if (likely(skb != NULL)) { | 257 | if (likely(skb != NULL)) { |
258 | skb->dev = dev; | ||
259 | skb_reserve(skb, 2); | 258 | skb_reserve(skb, 2); |
260 | dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr, | 259 | dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr, |
261 | length, DMA_FROM_DEVICE); | 260 | length, DMA_FROM_DEVICE); |
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c index a2921882eba8..f075cebe84ad 100644 --- a/drivers/net/arm/ether1.c +++ b/drivers/net/arm/ether1.c | |||
@@ -875,7 +875,6 @@ ether1_recv_done (struct net_device *dev) | |||
875 | skb = dev_alloc_skb (length + 2); | 875 | skb = dev_alloc_skb (length + 2); |
876 | 876 | ||
877 | if (skb) { | 877 | if (skb) { |
878 | skb->dev = dev; | ||
879 | skb_reserve (skb, 2); | 878 | skb_reserve (skb, 2); |
880 | 879 | ||
881 | ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length); | 880 | ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length); |
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c index 841178343a07..32da2eb9bcee 100644 --- a/drivers/net/arm/ether3.c +++ b/drivers/net/arm/ether3.c | |||
@@ -661,7 +661,6 @@ if (next_ptr < RX_START || next_ptr >= RX_END) { | |||
661 | if (skb) { | 661 | if (skb) { |
662 | unsigned char *buf; | 662 | unsigned char *buf; |
663 | 663 | ||
664 | skb->dev = dev; | ||
665 | skb_reserve(skb, 2); | 664 | skb_reserve(skb, 2); |
666 | buf = skb_put(skb, length); | 665 | buf = skb_put(skb, length); |
667 | ether3_readbuffer(dev, buf + 12, length - 12); | 666 | ether3_readbuffer(dev, buf + 12, length - 12); |
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index 56ae8babd919..bed8e0ebaf19 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c | |||
@@ -768,7 +768,6 @@ net_rx(struct net_device *dev) | |||
768 | lp->stats.rx_dropped++; | 768 | lp->stats.rx_dropped++; |
769 | break; | 769 | break; |
770 | } | 770 | } |
771 | skb->dev = dev; | ||
772 | skb_reserve(skb,2); | 771 | skb_reserve(skb,2); |
773 | 772 | ||
774 | insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); | 773 | insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); |
diff --git a/drivers/net/atari_bionet.c b/drivers/net/atari_bionet.c index 4e3bf6a1f22c..3d87bd2b4194 100644 --- a/drivers/net/atari_bionet.c +++ b/drivers/net/atari_bionet.c | |||
@@ -453,7 +453,8 @@ bionet_send_packet(struct sk_buff *skb, struct net_device *dev) { | |||
453 | stdma_lock(bionet_intr, NULL); | 453 | stdma_lock(bionet_intr, NULL); |
454 | local_irq_restore(flags); | 454 | local_irq_restore(flags); |
455 | if( !STRAM_ADDR(buf+length-1) ) { | 455 | if( !STRAM_ADDR(buf+length-1) ) { |
456 | memcpy(nic_packet->buffer, skb->data, length); | 456 | skb_copy_from_linear_data(skb, nic_packet->buffer, |
457 | length); | ||
457 | buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer; | 458 | buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer; |
458 | } | 459 | } |
459 | 460 | ||
@@ -544,13 +545,13 @@ bionet_poll_rx(struct net_device *dev) { | |||
544 | break; | 545 | break; |
545 | } | 546 | } |
546 | 547 | ||
547 | skb->dev = dev; | ||
548 | skb_reserve( skb, 2 ); /* 16 Byte align */ | 548 | skb_reserve( skb, 2 ); /* 16 Byte align */ |
549 | skb_put( skb, pkt_len ); /* make room */ | 549 | skb_put( skb, pkt_len ); /* make room */ |
550 | 550 | ||
551 | /* 'skb->data' points to the start of sk_buff data area. | 551 | /* 'skb->data' points to the start of sk_buff data area. |
552 | */ | 552 | */ |
553 | memcpy(skb->data, nic_packet->buffer, pkt_len); | 553 | skb_copy_to_linear_data(skb, nic_packet->buffer, |
554 | pkt_len); | ||
554 | skb->protocol = eth_type_trans( skb, dev ); | 555 | skb->protocol = eth_type_trans( skb, dev ); |
555 | netif_rx(skb); | 556 | netif_rx(skb); |
556 | dev->last_rx = jiffies; | 557 | dev->last_rx = jiffies; |
diff --git a/drivers/net/atari_pamsnet.c b/drivers/net/atari_pamsnet.c index 3b5436149286..54714409a09b 100644 --- a/drivers/net/atari_pamsnet.c +++ b/drivers/net/atari_pamsnet.c | |||
@@ -717,7 +717,8 @@ pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev) { | |||
717 | 717 | ||
718 | local_irq_restore(flags); | 718 | local_irq_restore(flags); |
719 | if( !STRAM_ADDR(buf+length-1) ) { | 719 | if( !STRAM_ADDR(buf+length-1) ) { |
720 | memcpy(nic_packet->buffer, skb->data, length); | 720 | skb_copy_from_linear_data(skb, nic_packet->buffer, |
721 | length); | ||
721 | buf = (unsigned long)phys_nic_packet; | 722 | buf = (unsigned long)phys_nic_packet; |
722 | } | 723 | } |
723 | 724 | ||
@@ -792,7 +793,8 @@ pamsnet_poll_rx(struct net_device *dev) { | |||
792 | 793 | ||
793 | /* 'skb->data' points to the start of sk_buff data area. | 794 | /* 'skb->data' points to the start of sk_buff data area. |
794 | */ | 795 | */ |
795 | memcpy(skb->data, nic_packet->buffer, pkt_len); | 796 | skb_copy_to_linear_data(skb, nic_packet->buffer, |
797 | pkt_len); | ||
796 | netif_rx(skb); | 798 | netif_rx(skb); |
797 | dev->last_rx = jiffies; | 799 | dev->last_rx = jiffies; |
798 | lp->stats.rx_packets++; | 800 | lp->stats.rx_packets++; |
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index 7e37ac86a69a..dfa8b9ba4c80 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c | |||
@@ -1047,7 +1047,6 @@ static int lance_rx( struct net_device *dev ) | |||
1047 | pkt_len ); | 1047 | pkt_len ); |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | skb->dev = dev; | ||
1051 | skb_reserve( skb, 2 ); /* 16 byte align */ | 1050 | skb_reserve( skb, 2 ); /* 16 byte align */ |
1052 | skb_put( skb, pkt_len ); /* Make room */ | 1051 | skb_put( skb, pkt_len ); /* Make room */ |
1053 | lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len ); | 1052 | lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len ); |
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c index 8606eac5bec8..4b1d4d153ecf 100644 --- a/drivers/net/atl1/atl1_main.c +++ b/drivers/net/atl1/atl1_main.c | |||
@@ -408,7 +408,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter, | |||
408 | static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) | 408 | static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) |
409 | { | 409 | { |
410 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | 410 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; |
411 | struct net_device *netdev = adapter->netdev; | ||
412 | struct pci_dev *pdev = adapter->pdev; | 411 | struct pci_dev *pdev = adapter->pdev; |
413 | struct page *page; | 412 | struct page *page; |
414 | unsigned long offset; | 413 | unsigned long offset; |
@@ -444,7 +443,6 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) | |||
444 | * the 14 byte MAC header is removed | 443 | * the 14 byte MAC header is removed |
445 | */ | 444 | */ |
446 | skb_reserve(skb, NET_IP_ALIGN); | 445 | skb_reserve(skb, NET_IP_ALIGN); |
447 | skb->dev = netdev; | ||
448 | 446 | ||
449 | buffer_info->alloced = 1; | 447 | buffer_info->alloced = 1; |
450 | buffer_info->skb = skb; | 448 | buffer_info->skb = skb; |
@@ -1296,19 +1294,21 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1296 | } | 1294 | } |
1297 | 1295 | ||
1298 | if (skb->protocol == ntohs(ETH_P_IP)) { | 1296 | if (skb->protocol == ntohs(ETH_P_IP)) { |
1299 | skb->nh.iph->tot_len = 0; | 1297 | struct iphdr *iph = ip_hdr(skb); |
1300 | skb->nh.iph->check = 0; | 1298 | |
1301 | skb->h.th->check = | 1299 | iph->tot_len = 0; |
1302 | ~csum_tcpudp_magic(skb->nh.iph->saddr, | 1300 | iph->check = 0; |
1303 | skb->nh.iph->daddr, 0, | 1301 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
1304 | IPPROTO_TCP, 0); | 1302 | iph->daddr, 0, |
1305 | ipofst = skb->nh.raw - skb->data; | 1303 | IPPROTO_TCP, |
1304 | 0); | ||
1305 | ipofst = skb_network_offset(skb); | ||
1306 | if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ | 1306 | if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ |
1307 | tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; | 1307 | tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; |
1308 | 1308 | ||
1309 | tso->tsopl |= (skb->nh.iph->ihl & | 1309 | tso->tsopl |= (iph->ihl & |
1310 | CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; | 1310 | CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; |
1311 | tso->tsopl |= ((skb->h.th->doff << 2) & | 1311 | tso->tsopl |= (tcp_hdrlen(skb) & |
1312 | TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; | 1312 | TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; |
1313 | tso->tsopl |= (skb_shinfo(skb)->gso_size & | 1313 | tso->tsopl |= (skb_shinfo(skb)->gso_size & |
1314 | TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; | 1314 | TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; |
@@ -1327,8 +1327,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1327 | u8 css, cso; | 1327 | u8 css, cso; |
1328 | 1328 | ||
1329 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 1329 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
1330 | cso = skb->h.raw - skb->data; | 1330 | cso = skb_transport_offset(skb); |
1331 | css = (skb->h.raw + skb->csum_offset) - skb->data; | 1331 | css = cso + skb->csum_offset; |
1332 | if (unlikely(cso & 0x1)) { | 1332 | if (unlikely(cso & 0x1)) { |
1333 | printk(KERN_DEBUG "%s: payload offset != even number\n", | 1333 | printk(KERN_DEBUG "%s: payload offset != even number\n", |
1334 | atl1_driver_name); | 1334 | atl1_driver_name); |
@@ -1370,8 +1370,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, | |||
1370 | 1370 | ||
1371 | if (tcp_seg) { | 1371 | if (tcp_seg) { |
1372 | /* TSO/GSO */ | 1372 | /* TSO/GSO */ |
1373 | proto_hdr_len = | 1373 | proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
1374 | ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | ||
1375 | buffer_info->length = proto_hdr_len; | 1374 | buffer_info->length = proto_hdr_len; |
1376 | page = virt_to_page(skb->data); | 1375 | page = virt_to_page(skb->data); |
1377 | offset = (unsigned long)skb->data & ~PAGE_MASK; | 1376 | offset = (unsigned long)skb->data & ~PAGE_MASK; |
@@ -1563,8 +1562,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1563 | mss = skb_shinfo(skb)->gso_size; | 1562 | mss = skb_shinfo(skb)->gso_size; |
1564 | if (mss) { | 1563 | if (mss) { |
1565 | if (skb->protocol == htons(ETH_P_IP)) { | 1564 | if (skb->protocol == htons(ETH_P_IP)) { |
1566 | proto_hdr_len = ((skb->h.raw - skb->data) + | 1565 | proto_hdr_len = (skb_transport_offset(skb) + |
1567 | (skb->h.th->doff << 2)); | 1566 | tcp_hdrlen(skb)); |
1568 | if (unlikely(proto_hdr_len > len)) { | 1567 | if (unlikely(proto_hdr_len > len)) { |
1569 | dev_kfree_skb_any(skb); | 1568 | dev_kfree_skb_any(skb); |
1570 | return NETDEV_TX_OK; | 1569 | return NETDEV_TX_OK; |
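The atl1 TSO hunk is representative of the checksum-priming sequence repeated below in bnx2 and chelsio: fetch the headers through the typed accessors, zero iph->tot_len and iph->check, and pre-seed the TCP checksum with the pseudo-header so the NIC only adds the payload sum per segment. A condensed sketch of the shared sequence (tso_prime_headers is a name invented here; the real drivers inline this in their xmit paths):

    /* sketch only: error paths and device-specific flag layout omitted */
    static void tso_prime_headers(struct sk_buff *skb, unsigned int *hdr_len)
    {
            struct iphdr *iph = ip_hdr(skb);

            iph->tot_len = 0;    /* hardware fills in per-segment lengths */
            iph->check = 0;
            tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                     0, IPPROTO_TCP, 0);
            /* replaces (skb->h.raw - skb->data) + (skb->h.th->doff << 2) */
            *hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
    }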
diff --git a/drivers/net/atp.c b/drivers/net/atp.c index 2d306fcb7f36..18aba838c1ff 100644 --- a/drivers/net/atp.c +++ b/drivers/net/atp.c | |||
@@ -793,7 +793,6 @@ static void net_rx(struct net_device *dev) | |||
793 | lp->stats.rx_dropped++; | 793 | lp->stats.rx_dropped++; |
794 | goto done; | 794 | goto done; |
795 | } | 795 | } |
796 | skb->dev = dev; | ||
797 | 796 | ||
798 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 797 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
799 | read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); | 798 | read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); |
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 69ae229b680e..d10fb80e9a63 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c | |||
@@ -1125,7 +1125,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev) | |||
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | pDB = aup->tx_db_inuse[aup->tx_head]; | 1127 | pDB = aup->tx_db_inuse[aup->tx_head]; |
1128 | memcpy((void *)pDB->vaddr, skb->data, skb->len); | 1128 | skb_copy_from_linear_data(skb, pDB->vaddr, skb->len); |
1129 | if (skb->len < ETH_ZLEN) { | 1129 | if (skb->len < ETH_ZLEN) { |
1130 | for (i=skb->len; i<ETH_ZLEN; i++) { | 1130 | for (i=skb->len; i<ETH_ZLEN; i++) { |
1131 | ((char *)pDB->vaddr)[i] = 0; | 1131 | ((char *)pDB->vaddr)[i] = 0; |
@@ -1205,7 +1205,6 @@ static int au1000_rx(struct net_device *dev) | |||
1205 | aup->stats.rx_dropped++; | 1205 | aup->stats.rx_dropped++; |
1206 | continue; | 1206 | continue; |
1207 | } | 1207 | } |
1208 | skb->dev = dev; | ||
1209 | skb_reserve(skb, 2); /* 16 byte IP header align */ | 1208 | skb_reserve(skb, 2); /* 16 byte IP header align */ |
1210 | eth_copy_and_sum(skb, | 1209 | eth_copy_and_sum(skb, |
1211 | (unsigned char *)pDB->vaddr, frmlen, 0); | 1210 | (unsigned char *)pDB->vaddr, frmlen, 0); |
diff --git a/drivers/net/b44.c b/drivers/net/b44.c index d742bfe24471..879a2fff474e 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c | |||
@@ -825,12 +825,11 @@ static int b44_rx(struct b44 *bp, int budget) | |||
825 | if (copy_skb == NULL) | 825 | if (copy_skb == NULL) |
826 | goto drop_it_no_recycle; | 826 | goto drop_it_no_recycle; |
827 | 827 | ||
828 | copy_skb->dev = bp->dev; | ||
829 | skb_reserve(copy_skb, 2); | 828 | skb_reserve(copy_skb, 2); |
830 | skb_put(copy_skb, len); | 829 | skb_put(copy_skb, len); |
831 | /* DMA sync done above, copy just the actual packet */ | 830 | /* DMA sync done above, copy just the actual packet */ |
832 | memcpy(copy_skb->data, skb->data+bp->rx_offset, len); | 831 | skb_copy_from_linear_data_offset(skb, bp->rx_offset, |
833 | 832 | copy_skb->data, len); | |
834 | skb = copy_skb; | 833 | skb = copy_skb; |
835 | } | 834 | } |
836 | skb->ip_summed = CHECKSUM_NONE; | 835 | skb->ip_summed = CHECKSUM_NONE; |
@@ -1007,7 +1006,8 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1007 | goto err_out; | 1006 | goto err_out; |
1008 | } | 1007 | } |
1009 | 1008 | ||
1010 | memcpy(skb_put(bounce_skb, len), skb->data, skb->len); | 1009 | skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), |
1010 | skb->len); | ||
1011 | dev_kfree_skb_any(skb); | 1011 | dev_kfree_skb_any(skb); |
1012 | skb = bounce_skb; | 1012 | skb = bounce_skb; |
1013 | } | 1013 | } |
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index c143304dcff5..4612725965df 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c | |||
@@ -715,7 +715,6 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) | |||
715 | if (skb != NULL) { | 715 | if (skb != NULL) { |
716 | nb -= ETHERCRC; | 716 | nb -= ETHERCRC; |
717 | skb_put(skb, nb); | 717 | skb_put(skb, nb); |
718 | skb->dev = dev; | ||
719 | skb->protocol = eth_type_trans(skb, dev); | 718 | skb->protocol = eth_type_trans(skb, dev); |
720 | netif_rx(skb); | 719 | netif_rx(skb); |
721 | dev->last_rx = jiffies; | 720 | dev->last_rx = jiffies; |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index e85f5ec48f96..f98a2205a090 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -1884,10 +1884,8 @@ bnx2_rx_int(struct bnx2 *bp, int budget) | |||
1884 | goto reuse_rx; | 1884 | goto reuse_rx; |
1885 | 1885 | ||
1886 | /* aligned copy */ | 1886 | /* aligned copy */ |
1887 | memcpy(new_skb->data, | 1887 | skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2, |
1888 | skb->data + bp->rx_offset - 2, | 1888 | new_skb->data, len + 2); |
1889 | len + 2); | ||
1890 | |||
1891 | skb_reserve(new_skb, 2); | 1889 | skb_reserve(new_skb, 2); |
1892 | skb_put(new_skb, len); | 1890 | skb_put(new_skb, len); |
1893 | 1891 | ||
@@ -4513,6 +4511,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4513 | if ((mss = skb_shinfo(skb)->gso_size) && | 4511 | if ((mss = skb_shinfo(skb)->gso_size) && |
4514 | (skb->len > (bp->dev->mtu + ETH_HLEN))) { | 4512 | (skb->len > (bp->dev->mtu + ETH_HLEN))) { |
4515 | u32 tcp_opt_len, ip_tcp_len; | 4513 | u32 tcp_opt_len, ip_tcp_len; |
4514 | struct iphdr *iph; | ||
4516 | 4515 | ||
4517 | if (skb_header_cloned(skb) && | 4516 | if (skb_header_cloned(skb) && |
4518 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 4517 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { |
@@ -4520,25 +4519,23 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4520 | return NETDEV_TX_OK; | 4519 | return NETDEV_TX_OK; |
4521 | } | 4520 | } |
4522 | 4521 | ||
4523 | tcp_opt_len = ((skb->h.th->doff - 5) * 4); | ||
4524 | vlan_tag_flags |= TX_BD_FLAGS_SW_LSO; | 4522 | vlan_tag_flags |= TX_BD_FLAGS_SW_LSO; |
4525 | 4523 | ||
4526 | tcp_opt_len = 0; | 4524 | tcp_opt_len = 0; |
4527 | if (skb->h.th->doff > 5) { | 4525 | if (tcp_hdr(skb)->doff > 5) |
4528 | tcp_opt_len = (skb->h.th->doff - 5) << 2; | 4526 | tcp_opt_len = tcp_optlen(skb); |
4529 | } | 4527 | |
4530 | ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr); | 4528 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); |
4531 | 4529 | ||
4532 | skb->nh.iph->check = 0; | 4530 | iph = ip_hdr(skb); |
4533 | skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); | 4531 | iph->check = 0; |
4534 | skb->h.th->check = | 4532 | iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); |
4535 | ~csum_tcpudp_magic(skb->nh.iph->saddr, | 4533 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
4536 | skb->nh.iph->daddr, | 4534 | iph->daddr, 0, |
4537 | 0, IPPROTO_TCP, 0); | 4535 | IPPROTO_TCP, 0); |
4538 | 4536 | if (tcp_opt_len || (iph->ihl > 5)) { | |
4539 | if (tcp_opt_len || (skb->nh.iph->ihl > 5)) { | 4537 | vlan_tag_flags |= ((iph->ihl - 5) + |
4540 | vlan_tag_flags |= ((skb->nh.iph->ihl - 5) + | 4538 | (tcp_opt_len >> 2)) << 8; |
4541 | (tcp_opt_len >> 2)) << 8; | ||
4542 | } | 4539 | } |
4543 | } | 4540 | } |
4544 | else | 4541 | else |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 3fb354d9c515..7e03f41ae2c2 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -884,8 +884,8 @@ static int ad_lacpdu_send(struct port *port) | |||
884 | } | 884 | } |
885 | 885 | ||
886 | skb->dev = slave->dev; | 886 | skb->dev = slave->dev; |
887 | skb->mac.raw = skb->data; | 887 | skb_reset_mac_header(skb); |
888 | skb->nh.raw = skb->data + ETH_HLEN; | 888 | skb->network_header = skb->mac_header + ETH_HLEN; |
889 | skb->protocol = PKT_TYPE_LACPDU; | 889 | skb->protocol = PKT_TYPE_LACPDU; |
890 | skb->priority = TC_PRIO_CONTROL; | 890 | skb->priority = TC_PRIO_CONTROL; |
891 | 891 | ||
@@ -928,8 +928,8 @@ static int ad_marker_send(struct port *port, struct marker *marker) | |||
928 | skb_reserve(skb, 16); | 928 | skb_reserve(skb, 16); |
929 | 929 | ||
930 | skb->dev = slave->dev; | 930 | skb->dev = slave->dev; |
931 | skb->mac.raw = skb->data; | 931 | skb_reset_mac_header(skb); |
932 | skb->nh.raw = skb->data + ETH_HLEN; | 932 | skb->network_header = skb->mac_header + ETH_HLEN; |
933 | skb->protocol = PKT_TYPE_LACPDU; | 933 | skb->protocol = PKT_TYPE_LACPDU; |
934 | 934 | ||
935 | marker_header = (struct marker_header *)skb_put(skb, length); | 935 | marker_header = (struct marker_header *)skb_put(skb, length); |
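bond_3ad cannot simply reset the network header here because the LACPDU is built with skb->data still at the Ethernet header, so it places the network mark relative to the MAC mark; the addition, like the subtraction in arcnet above, works for both the pointer and the offset representation. Assuming skb->data points at the start of the frame and the offset-based setter of this series is available, the same effect could be sketched as:

    /* equivalent sketch using the offset-based setter */
    skb_reset_mac_header(skb);             /* mac mark = skb->data */
    skb_set_network_header(skb, ETH_HLEN); /* L3 mark = data + 14 */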
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 217a2eedee0a..92c3b6f6a8e7 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -104,10 +104,15 @@ struct arp_pkt { | |||
104 | }; | 104 | }; |
105 | #pragma pack() | 105 | #pragma pack() |
106 | 106 | ||
107 | static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) | ||
108 | { | ||
109 | return (struct arp_pkt *)skb_network_header(skb); | ||
110 | } | ||
111 | |||
107 | /* Forward declaration */ | 112 | /* Forward declaration */ |
108 | static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); | 113 | static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); |
109 | 114 | ||
110 | static inline u8 _simple_hash(u8 *hash_start, int hash_size) | 115 | static inline u8 _simple_hash(const u8 *hash_start, int hash_size) |
111 | { | 116 | { |
112 | int i; | 117 | int i; |
113 | u8 hash = 0; | 118 | u8 hash = 0; |
@@ -613,7 +618,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, u32 src_ip) | |||
613 | static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond) | 618 | static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond) |
614 | { | 619 | { |
615 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); | 620 | struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); |
616 | struct arp_pkt *arp = (struct arp_pkt *)skb->nh.raw; | 621 | struct arp_pkt *arp = arp_pkt(skb); |
617 | struct slave *assigned_slave; | 622 | struct slave *assigned_slave; |
618 | struct rlb_client_info *client_info; | 623 | struct rlb_client_info *client_info; |
619 | u32 hash_index = 0; | 624 | u32 hash_index = 0; |
@@ -701,7 +706,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon | |||
701 | */ | 706 | */ |
702 | static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) | 707 | static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) |
703 | { | 708 | { |
704 | struct arp_pkt *arp = (struct arp_pkt *)skb->nh.raw; | 709 | struct arp_pkt *arp = arp_pkt(skb); |
705 | struct slave *tx_slave = NULL; | 710 | struct slave *tx_slave = NULL; |
706 | 711 | ||
707 | if (arp->op_code == __constant_htons(ARPOP_REPLY)) { | 712 | if (arp->op_code == __constant_htons(ARPOP_REPLY)) { |
@@ -890,8 +895,8 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) | |||
890 | data = skb_put(skb, size); | 895 | data = skb_put(skb, size); |
891 | memcpy(data, &pkt, size); | 896 | memcpy(data, &pkt, size); |
892 | 897 | ||
893 | skb->mac.raw = data; | 898 | skb_reset_mac_header(skb); |
894 | skb->nh.raw = data + ETH_HLEN; | 899 | skb->network_header = skb->mac_header + ETH_HLEN; |
895 | skb->protocol = pkt.type; | 900 | skb->protocol = pkt.type; |
896 | skb->priority = TC_PRIO_CONTROL; | 901 | skb->priority = TC_PRIO_CONTROL; |
897 | skb->dev = slave->dev; | 902 | skb->dev = slave->dev; |
@@ -1263,10 +1268,10 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1263 | int hash_size = 0; | 1268 | int hash_size = 0; |
1264 | int do_tx_balance = 1; | 1269 | int do_tx_balance = 1; |
1265 | u32 hash_index = 0; | 1270 | u32 hash_index = 0; |
1266 | u8 *hash_start = NULL; | 1271 | const u8 *hash_start = NULL; |
1267 | int res = 1; | 1272 | int res = 1; |
1268 | 1273 | ||
1269 | skb->mac.raw = (unsigned char *)skb->data; | 1274 | skb_reset_mac_header(skb); |
1270 | eth_data = eth_hdr(skb); | 1275 | eth_data = eth_hdr(skb); |
1271 | 1276 | ||
1272 | /* make sure that the curr_active_slave and the slaves list do | 1277 | /* make sure that the curr_active_slave and the slaves list do |
@@ -1280,15 +1285,18 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1280 | } | 1285 | } |
1281 | 1286 | ||
1282 | switch (ntohs(skb->protocol)) { | 1287 | switch (ntohs(skb->protocol)) { |
1283 | case ETH_P_IP: | 1288 | case ETH_P_IP: { |
1289 | const struct iphdr *iph = ip_hdr(skb); | ||
1290 | |||
1284 | if ((memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) || | 1291 | if ((memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) || |
1285 | (skb->nh.iph->daddr == ip_bcast) || | 1292 | (iph->daddr == ip_bcast) || |
1286 | (skb->nh.iph->protocol == IPPROTO_IGMP)) { | 1293 | (iph->protocol == IPPROTO_IGMP)) { |
1287 | do_tx_balance = 0; | 1294 | do_tx_balance = 0; |
1288 | break; | 1295 | break; |
1289 | } | 1296 | } |
1290 | hash_start = (char*)&(skb->nh.iph->daddr); | 1297 | hash_start = (char *)&(iph->daddr); |
1291 | hash_size = sizeof(skb->nh.iph->daddr); | 1298 | hash_size = sizeof(iph->daddr); |
1299 | } | ||
1292 | break; | 1300 | break; |
1293 | case ETH_P_IPV6: | 1301 | case ETH_P_IPV6: |
1294 | if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) { | 1302 | if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) { |
@@ -1296,8 +1304,8 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1296 | break; | 1304 | break; |
1297 | } | 1305 | } |
1298 | 1306 | ||
1299 | hash_start = (char*)&(skb->nh.ipv6h->daddr); | 1307 | hash_start = (char *)&(ipv6_hdr(skb)->daddr); |
1300 | hash_size = sizeof(skb->nh.ipv6h->daddr); | 1308 | hash_size = sizeof(ipv6_hdr(skb)->daddr); |
1301 | break; | 1309 | break; |
1302 | case ETH_P_IPX: | 1310 | case ETH_P_IPX: |
1303 | if (ipx_hdr(skb)->ipx_checksum != | 1311 | if (ipx_hdr(skb)->ipx_checksum != |
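The new arp_pkt() inline and the switch to ip_hdr()/ipv6_hdr() complete bonding's move to typed header accessors: every former skb->nh.* cast now goes through skb_network_header() in exactly one place per type. Sketched definitions of the accessors used above (simplified from linux/ip.h, linux/ipv6.h and linux/if_arp.h):

    static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
    {
            return (struct iphdr *)skb_network_header(skb);
    }

    static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb)
    {
            return (struct ipv6hdr *)skb_network_header(skb);
    }

    static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
    {
            return (struct arphdr *)skb_network_header(skb);
    }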
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e4724d874e7c..cea3783c92c5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -2524,7 +2524,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack | |||
2524 | (2 * sizeof(u32))))) | 2524 | (2 * sizeof(u32))))) |
2525 | goto out_unlock; | 2525 | goto out_unlock; |
2526 | 2526 | ||
2527 | arp = skb->nh.arph; | 2527 | arp = arp_hdr(skb); |
2528 | if (arp->ar_hln != dev->addr_len || | 2528 | if (arp->ar_hln != dev->addr_len || |
2529 | skb->pkt_type == PACKET_OTHERHOST || | 2529 | skb->pkt_type == PACKET_OTHERHOST || |
2530 | skb->pkt_type == PACKET_LOOPBACK || | 2530 | skb->pkt_type == PACKET_LOOPBACK || |
@@ -3476,7 +3476,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, | |||
3476 | struct net_device *bond_dev, int count) | 3476 | struct net_device *bond_dev, int count) |
3477 | { | 3477 | { |
3478 | struct ethhdr *data = (struct ethhdr *)skb->data; | 3478 | struct ethhdr *data = (struct ethhdr *)skb->data; |
3479 | struct iphdr *iph = skb->nh.iph; | 3479 | struct iphdr *iph = ip_hdr(skb); |
3480 | u16 *layer4hdr = (u16 *)((u32 *)iph + iph->ihl); | 3480 | u16 *layer4hdr = (u16 *)((u32 *)iph + iph->ihl); |
3481 | int layer4_xor = 0; | 3481 | int layer4_xor = 0; |
3482 | 3482 | ||
@@ -3640,9 +3640,8 @@ static struct net_device_stats *bond_get_stats(struct net_device *bond_dev) | |||
3640 | read_lock_bh(&bond->lock); | 3640 | read_lock_bh(&bond->lock); |
3641 | 3641 | ||
3642 | bond_for_each_slave(bond, slave, i) { | 3642 | bond_for_each_slave(bond, slave, i) { |
3643 | if (slave->dev->get_stats) { | 3643 | sstats = slave->dev->get_stats(slave->dev); |
3644 | sstats = slave->dev->get_stats(slave->dev); | 3644 | if (sstats) { |
3645 | |||
3646 | stats->rx_packets += sstats->rx_packets; | 3645 | stats->rx_packets += sstats->rx_packets; |
3647 | stats->rx_bytes += sstats->rx_bytes; | 3646 | stats->rx_bytes += sstats->rx_bytes; |
3648 | stats->rx_errors += sstats->rx_errors; | 3647 | stats->rx_errors += sstats->rx_errors; |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index c8126484c2be..4aec747d9e43 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -1995,7 +1995,6 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, | |||
1995 | return -1; | 1995 | return -1; |
1996 | 1996 | ||
1997 | *skbref = skb; | 1997 | *skbref = skb; |
1998 | skb->dev = cp->dev; | ||
1999 | skb_reserve(skb, swivel); | 1998 | skb_reserve(skb, swivel); |
2000 | 1999 | ||
2001 | p = skb->data; | 2000 | p = skb->data; |
@@ -2822,10 +2821,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, | |||
2822 | 2821 | ||
2823 | ctrl = 0; | 2822 | ctrl = 0; |
2824 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2823 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2825 | u64 csum_start_off, csum_stuff_off; | 2824 | const u64 csum_start_off = skb_transport_offset(skb); |
2826 | 2825 | const u64 csum_stuff_off = csum_start_off + skb->csum_offset; | |
2827 | csum_start_off = (u64) (skb->h.raw - skb->data); | ||
2828 | csum_stuff_off = csum_start_off + skb->csum_offset; | ||
2829 | 2826 | ||
2830 | ctrl = TX_DESC_CSUM_EN | | 2827 | ctrl = TX_DESC_CSUM_EN | |
2831 | CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | | 2828 | CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | |
@@ -2849,8 +2846,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, | |||
2849 | ctrl | TX_DESC_SOF, 0); | 2846 | ctrl | TX_DESC_SOF, 0); |
2850 | entry = TX_DESC_NEXT(ring, entry); | 2847 | entry = TX_DESC_NEXT(ring, entry); |
2851 | 2848 | ||
2852 | memcpy(tx_tiny_buf(cp, ring, entry), skb->data + | 2849 | skb_copy_from_linear_data_offset(skb, len - tabort, |
2853 | len - tabort, tabort); | 2850 | tx_tiny_buf(cp, ring, entry), tabort); |
2854 | mapping = tx_tiny_map(cp, ring, entry, tentry); | 2851 | mapping = tx_tiny_map(cp, ring, entry, tentry); |
2855 | cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, | 2852 | cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, |
2856 | (nr_frags == 0)); | 2853 | (nr_frags == 0)); |
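cassini's CHECKSUM_PARTIAL setup shows the offset helpers that several drivers in this merge lean on: hardware checksumming starts at the transport header, and the result is stuffed skb->csum_offset bytes later, both measured from skb->data. A sketch of the two helpers as defined in this era:

    static inline int skb_network_offset(const struct sk_buff *skb)
    {
            return skb_network_header(skb) - skb->data;
    }

    static inline int skb_transport_offset(const struct sk_buff *skb)
    {
            return skb_transport_header(skb) - skb->data;
    }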
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 326d4a665123..e4f874a70fe5 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c | |||
@@ -1062,7 +1062,7 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev, | |||
1062 | pci_unmap_addr(ce, dma_addr), | 1062 | pci_unmap_addr(ce, dma_addr), |
1063 | pci_unmap_len(ce, dma_len), | 1063 | pci_unmap_len(ce, dma_len), |
1064 | PCI_DMA_FROMDEVICE); | 1064 | PCI_DMA_FROMDEVICE); |
1065 | memcpy(skb->data, ce->skb->data, len); | 1065 | skb_copy_from_linear_data(ce->skb, skb->data, len); |
1066 | pci_dma_sync_single_for_device(pdev, | 1066 | pci_dma_sync_single_for_device(pdev, |
1067 | pci_unmap_addr(ce, dma_addr), | 1067 | pci_unmap_addr(ce, dma_addr), |
1068 | pci_unmap_len(ce, dma_len), | 1068 | pci_unmap_len(ce, dma_len), |
@@ -1379,12 +1379,11 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) | |||
1379 | } | 1379 | } |
1380 | __skb_pull(skb, sizeof(*p)); | 1380 | __skb_pull(skb, sizeof(*p)); |
1381 | 1381 | ||
1382 | skb->dev = adapter->port[p->iff].dev; | ||
1383 | skb->dev->last_rx = jiffies; | 1382 | skb->dev->last_rx = jiffies; |
1384 | st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); | 1383 | st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); |
1385 | st->rx_packets++; | 1384 | st->rx_packets++; |
1386 | 1385 | ||
1387 | skb->protocol = eth_type_trans(skb, skb->dev); | 1386 | skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); |
1388 | if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && | 1387 | if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && |
1389 | skb->protocol == htons(ETH_P_IP) && | 1388 | skb->protocol == htons(ETH_P_IP) && |
1390 | (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { | 1389 | (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { |
@@ -1866,14 +1865,14 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1866 | 1865 | ||
1867 | ++st->tx_tso; | 1866 | ++st->tx_tso; |
1868 | 1867 | ||
1869 | eth_type = skb->nh.raw - skb->data == ETH_HLEN ? | 1868 | eth_type = skb_network_offset(skb) == ETH_HLEN ? |
1870 | CPL_ETH_II : CPL_ETH_II_VLAN; | 1869 | CPL_ETH_II : CPL_ETH_II_VLAN; |
1871 | 1870 | ||
1872 | hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); | 1871 | hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); |
1873 | hdr->opcode = CPL_TX_PKT_LSO; | 1872 | hdr->opcode = CPL_TX_PKT_LSO; |
1874 | hdr->ip_csum_dis = hdr->l4_csum_dis = 0; | 1873 | hdr->ip_csum_dis = hdr->l4_csum_dis = 0; |
1875 | hdr->ip_hdr_words = skb->nh.iph->ihl; | 1874 | hdr->ip_hdr_words = ip_hdr(skb)->ihl; |
1876 | hdr->tcp_hdr_words = skb->h.th->doff; | 1875 | hdr->tcp_hdr_words = tcp_hdr(skb)->doff; |
1877 | hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, | 1876 | hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, |
1878 | skb_shinfo(skb)->gso_size)); | 1877 | skb_shinfo(skb)->gso_size)); |
1879 | hdr->len = htonl(skb->len - sizeof(*hdr)); | 1878 | hdr->len = htonl(skb->len - sizeof(*hdr)); |
@@ -1913,7 +1912,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1913 | 1912 | ||
1914 | if (!(adapter->flags & UDP_CSUM_CAPABLE) && | 1913 | if (!(adapter->flags & UDP_CSUM_CAPABLE) && |
1915 | skb->ip_summed == CHECKSUM_PARTIAL && | 1914 | skb->ip_summed == CHECKSUM_PARTIAL && |
1916 | skb->nh.iph->protocol == IPPROTO_UDP) { | 1915 | ip_hdr(skb)->protocol == IPPROTO_UDP) { |
1917 | if (unlikely(skb_checksum_help(skb))) { | 1916 | if (unlikely(skb_checksum_help(skb))) { |
1918 | pr_debug("%s: unable to do udp checksum\n", dev->name); | 1917 | pr_debug("%s: unable to do udp checksum\n", dev->name); |
1919 | dev_kfree_skb_any(skb); | 1918 | dev_kfree_skb_any(skb); |
@@ -1926,7 +1925,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1926 | */ | 1925 | */ |
1927 | if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { | 1926 | if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { |
1928 | if (skb->protocol == htons(ETH_P_ARP) && | 1927 | if (skb->protocol == htons(ETH_P_ARP) && |
1929 | skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) { | 1928 | arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) { |
1930 | adapter->sge->espibug_skb[dev->if_port] = skb; | 1929 | adapter->sge->espibug_skb[dev->if_port] = skb; |
1931 | /* We want to re-use this skb later. We | 1930 | /* We want to re-use this skb later. We |
1932 | * simply bump the reference count and it | 1931 | * simply bump the reference count and it |
@@ -2096,10 +2095,14 @@ static void espibug_workaround_t204(unsigned long data) | |||
2096 | 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 | 2095 | 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 |
2097 | }; | 2096 | }; |
2098 | 2097 | ||
2099 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), | 2098 | skb_copy_to_linear_data_offset(skb, |
2100 | ch_mac_addr, ETH_ALEN); | 2099 | sizeof(struct cpl_tx_pkt), |
2101 | memcpy(skb->data + skb->len - 10, | 2100 | ch_mac_addr, |
2102 | ch_mac_addr, ETH_ALEN); | 2101 | ETH_ALEN); |
2102 | skb_copy_to_linear_data_offset(skb, | ||
2103 | skb->len - 10, | ||
2104 | ch_mac_addr, | ||
2105 | ETH_ALEN); | ||
2103 | skb->cb[0] = 0xff; | 2106 | skb->cb[0] = 0xff; |
2104 | } | 2107 | } |
2105 | 2108 | ||
@@ -2126,10 +2129,14 @@ static void espibug_workaround(unsigned long data) | |||
2126 | if (!skb->cb[0]) { | 2129 | if (!skb->cb[0]) { |
2127 | u8 ch_mac_addr[ETH_ALEN] = | 2130 | u8 ch_mac_addr[ETH_ALEN] = |
2128 | {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; | 2131 | {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; |
2129 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), | 2132 | skb_copy_to_linear_data_offset(skb, |
2130 | ch_mac_addr, ETH_ALEN); | 2133 | sizeof(struct cpl_tx_pkt), |
2131 | memcpy(skb->data + skb->len - 10, ch_mac_addr, | 2134 | ch_mac_addr, |
2132 | ETH_ALEN); | 2135 | ETH_ALEN); |
2136 | skb_copy_to_linear_data_offset(skb, | ||
2137 | skb->len - 10, | ||
2138 | ch_mac_addr, | ||
2139 | ETH_ALEN); | ||
2133 | skb->cb[0] = 0xff; | 2140 | skb->cb[0] = 0xff; |
2134 | } | 2141 | } |
2135 | 2142 | ||
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 8eb571276000..5bdf5ca85a65 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c | |||
@@ -1348,7 +1348,8 @@ e100_rx(struct net_device *dev) | |||
1348 | 1348 | ||
1349 | #ifdef ETHDEBUG | 1349 | #ifdef ETHDEBUG |
1350 | printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n", | 1350 | printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n", |
1351 | skb->head, skb->data, skb->tail, skb->end); | 1351 | skb->head, skb->data, skb_tail_pointer(skb), |
1352 | skb_end_pointer(skb)); | ||
1352 | printk("copying packet to 0x%x.\n", skb_data_ptr); | 1353 | printk("copying packet to 0x%x.\n", skb_data_ptr); |
1353 | #endif | 1354 | #endif |
1354 | 1355 | ||
@@ -1375,7 +1376,6 @@ e100_rx(struct net_device *dev) | |||
1375 | myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data)); | 1376 | myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data)); |
1376 | } | 1377 | } |
1377 | 1378 | ||
1378 | skb->dev = dev; | ||
1379 | skb->protocol = eth_type_trans(skb, dev); | 1379 | skb->protocol = eth_type_trans(skb, dev); |
1380 | 1380 | ||
1381 | /* Send the packet to the upper layers */ | 1381 | /* Send the packet to the upper layers */ |
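Note: the printk change above is needed because skb->tail and skb->end are no longer guaranteed to be plain pointers — with NET_SKBUFF_DATA_USES_OFFSET they are stored as offsets from skb->head. The accessors return real pointers either way; a sketch with an illustrative name:

/* skb_tail_pointer()/skb_end_pointer() return usable pointers whether the
 * underlying fields are stored as pointers or as offsets from skb->head. */
static void dump_skb_layout(const struct sk_buff *skb)
{
	printk(KERN_DEBUG "head=%p data=%p tail=%p end=%p\n",
	       skb->head, skb->data,
	       skb_tail_pointer(skb), skb_end_pointer(skb));
}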
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 4612f71a7106..9774bb1b3e80 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c | |||
@@ -1004,7 +1004,6 @@ skip_this_frame: | |||
1004 | return; | 1004 | return; |
1005 | } | 1005 | } |
1006 | skb_reserve(skb, 2); /* longword align L3 header */ | 1006 | skb_reserve(skb, 2); /* longword align L3 header */ |
1007 | skb->dev = dev; | ||
1008 | 1007 | ||
1009 | if (bp + length > lp->end_dma_buff) { | 1008 | if (bp + length > lp->end_dma_buff) { |
1010 | int semi_cnt = lp->end_dma_buff - bp; | 1009 | int semi_cnt = lp->end_dma_buff - bp; |
@@ -1702,7 +1701,6 @@ net_rx(struct net_device *dev) | |||
1702 | return; | 1701 | return; |
1703 | } | 1702 | } |
1704 | skb_reserve(skb, 2); /* longword align L3 header */ | 1703 | skb_reserve(skb, 2); /* longword align L3 header */ |
1705 | skb->dev = dev; | ||
1706 | 1704 | ||
1707 | readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1); | 1705 | readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1); |
1708 | if (length & 1) | 1706 | if (length & 1) |
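Note: the deleted "skb->dev = dev" assignments in this and the following drivers are redundant rather than relocated — eth_type_trans() (like the other *_type_trans() helpers) already stores the device in skb->dev. A sketch of the resulting receive tail (illustrative name):

/* eth_type_trans() sets skb->dev = dev internally, so callers no longer
 * need the explicit assignment before handing the skb to the stack. */
static void rx_deliver(struct sk_buff *skb, struct net_device *dev)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}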
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 199e5066acf3..ebcf35e4cf5b 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c | |||
@@ -783,7 +783,7 @@ static int do_trace(struct t3cdev *dev, struct sk_buff *skb) | |||
783 | skb->protocol = htons(0xffff); | 783 | skb->protocol = htons(0xffff); |
784 | skb->dev = dev->lldev; | 784 | skb->dev = dev->lldev; |
785 | skb_pull(skb, sizeof(*p)); | 785 | skb_pull(skb, sizeof(*p)); |
786 | skb->mac.raw = skb->data; | 786 | skb_reset_mac_header(skb); |
787 | netif_receive_skb(skb); | 787 | netif_receive_skb(skb); |
788 | return 0; | 788 | return 0; |
789 | } | 789 | } |
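Note: skb_reset_mac_header(skb) is the accessor form of the old "skb->mac.raw = skb->data" and works for both the pointer and the offset representation of the header fields. A sketch of the delivery path above (illustrative name):

static int trace_to_stack(struct sk_buff *skb, struct net_device *lldev)
{
	skb->dev = lldev;
	skb_reset_mac_header(skb);	/* was: skb->mac.raw = skb->data */
	netif_receive_skb(skb);
	return 0;
}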
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 027ab2c3825c..3666586a4831 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c | |||
@@ -661,7 +661,7 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp) | |||
661 | 661 | ||
662 | if (skb) { | 662 | if (skb) { |
663 | __skb_put(skb, IMMED_PKT_SIZE); | 663 | __skb_put(skb, IMMED_PKT_SIZE); |
664 | memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE); | 664 | skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE); |
665 | } | 665 | } |
666 | return skb; | 666 | return skb; |
667 | } | 667 | } |
@@ -897,11 +897,11 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
897 | d->flit[2] = 0; | 897 | d->flit[2] = 0; |
898 | cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); | 898 | cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); |
899 | hdr->cntrl = htonl(cntrl); | 899 | hdr->cntrl = htonl(cntrl); |
900 | eth_type = skb->nh.raw - skb->data == ETH_HLEN ? | 900 | eth_type = skb_network_offset(skb) == ETH_HLEN ? |
901 | CPL_ETH_II : CPL_ETH_II_VLAN; | 901 | CPL_ETH_II : CPL_ETH_II_VLAN; |
902 | tso_info |= V_LSO_ETH_TYPE(eth_type) | | 902 | tso_info |= V_LSO_ETH_TYPE(eth_type) | |
903 | V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) | | 903 | V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) | |
904 | V_LSO_TCPHDR_WORDS(skb->h.th->doff); | 904 | V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff); |
905 | hdr->lso_info = htonl(tso_info); | 905 | hdr->lso_info = htonl(tso_info); |
906 | flits = 3; | 906 | flits = 3; |
907 | } else { | 907 | } else { |
@@ -913,7 +913,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
913 | if (skb->len <= WR_LEN - sizeof(*cpl)) { | 913 | if (skb->len <= WR_LEN - sizeof(*cpl)) { |
914 | q->sdesc[pidx].skb = NULL; | 914 | q->sdesc[pidx].skb = NULL; |
915 | if (!skb->data_len) | 915 | if (!skb->data_len) |
916 | memcpy(&d->flit[2], skb->data, skb->len); | 916 | skb_copy_from_linear_data(skb, &d->flit[2], |
917 | skb->len); | ||
917 | else | 918 | else |
918 | skb_copy_bits(skb, 0, &d->flit[2], skb->len); | 919 | skb_copy_bits(skb, 0, &d->flit[2], skb->len); |
919 | 920 | ||
@@ -1319,16 +1320,19 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | |||
1319 | /* Only TX_DATA builds SGLs */ | 1320 | /* Only TX_DATA builds SGLs */ |
1320 | 1321 | ||
1321 | from = (struct work_request_hdr *)skb->data; | 1322 | from = (struct work_request_hdr *)skb->data; |
1322 | memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from)); | 1323 | memcpy(&d->flit[1], &from[1], |
1324 | skb_transport_offset(skb) - sizeof(*from)); | ||
1323 | 1325 | ||
1324 | flits = (skb->h.raw - skb->data) / 8; | 1326 | flits = skb_transport_offset(skb) / 8; |
1325 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1327 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
1326 | sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw, | 1328 | sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), |
1329 | skb->tail - skb->transport_header, | ||
1327 | adap->pdev); | 1330 | adap->pdev); |
1328 | if (need_skb_unmap()) { | 1331 | if (need_skb_unmap()) { |
1329 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); | 1332 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); |
1330 | skb->destructor = deferred_unmap_destructor; | 1333 | skb->destructor = deferred_unmap_destructor; |
1331 | ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw; | 1334 | ((struct unmap_info *)skb->cb)->len = (skb->tail - |
1335 | skb->transport_header); | ||
1332 | } | 1336 | } |
1333 | 1337 | ||
1334 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, | 1338 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, |
@@ -1349,8 +1353,8 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) | |||
1349 | if (skb->len <= WR_LEN && cnt == 0) | 1353 | if (skb->len <= WR_LEN && cnt == 0) |
1350 | return 1; /* packet fits as immediate data */ | 1354 | return 1; /* packet fits as immediate data */ |
1351 | 1355 | ||
1352 | flits = (skb->h.raw - skb->data) / 8; /* headers */ | 1356 | flits = skb_transport_offset(skb) / 8; /* headers */ |
1353 | if (skb->tail != skb->h.raw) | 1357 | if (skb->tail != skb->transport_header) |
1354 | cnt++; | 1358 | cnt++; |
1355 | return flits_to_desc(flits + sgl_len(cnt)); | 1359 | return flits_to_desc(flits + sgl_len(cnt)); |
1356 | } | 1360 | } |
@@ -1620,7 +1624,9 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, | |||
1620 | unsigned int gather_idx) | 1624 | unsigned int gather_idx) |
1621 | { | 1625 | { |
1622 | rq->offload_pkts++; | 1626 | rq->offload_pkts++; |
1623 | skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data; | 1627 | skb_reset_mac_header(skb); |
1628 | skb_reset_network_header(skb); | ||
1629 | skb_reset_transport_header(skb); | ||
1624 | 1630 | ||
1625 | if (rq->polling) { | 1631 | if (rq->polling) { |
1626 | rx_gather[gather_idx++] = skb; | 1632 | rx_gather[gather_idx++] = skb; |
@@ -1684,9 +1690,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, | |||
1684 | struct port_info *pi; | 1690 | struct port_info *pi; |
1685 | 1691 | ||
1686 | skb_pull(skb, sizeof(*p) + pad); | 1692 | skb_pull(skb, sizeof(*p) + pad); |
1687 | skb->dev = adap->port[p->iff]; | ||
1688 | skb->dev->last_rx = jiffies; | 1693 | skb->protocol = eth_type_trans(skb, adap->port[p->iff]); |
1689 | skb->protocol = eth_type_trans(skb, skb->dev); | 1694 | skb->dev->last_rx = jiffies; |
1690 | pi = netdev_priv(skb->dev); | 1695 | pi = netdev_priv(skb->dev); |
1691 | if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff && | 1696 | if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff && |
1692 | !p->fragment) { | 1697 | !p->fragment) { |
@@ -1717,11 +1722,11 @@ static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p, | |||
1717 | { | 1722 | { |
1718 | skb->len = len; | 1723 | skb->len = len; |
1719 | if (len <= SKB_DATA_SIZE) { | 1724 | if (len <= SKB_DATA_SIZE) { |
1720 | memcpy(skb->data, p->va, len); | 1725 | skb_copy_to_linear_data(skb, p->va, len); |
1721 | skb->tail += len; | 1726 | skb->tail += len; |
1722 | put_page(p->frag.page); | 1727 | put_page(p->frag.page); |
1723 | } else { | 1728 | } else { |
1724 | memcpy(skb->data, p->va, SKB_DATA_SIZE); | 1729 | skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE); |
1725 | skb_shinfo(skb)->frags[0].page = p->frag.page; | 1730 | skb_shinfo(skb)->frags[0].page = p->frag.page; |
1726 | skb_shinfo(skb)->frags[0].page_offset = | 1731 | skb_shinfo(skb)->frags[0].page_offset = |
1727 | p->frag.page_offset + SKB_DATA_SIZE; | 1732 | p->frag.page_offset + SKB_DATA_SIZE; |
@@ -1767,7 +1772,7 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, | |||
1767 | __skb_put(skb, len); | 1772 | __skb_put(skb, len); |
1768 | pci_dma_sync_single_for_cpu(adap->pdev, mapping, len, | 1773 | pci_dma_sync_single_for_cpu(adap->pdev, mapping, len, |
1769 | PCI_DMA_FROMDEVICE); | 1774 | PCI_DMA_FROMDEVICE); |
1770 | memcpy(skb->data, sd->t.skb->data, len); | 1775 | skb_copy_from_linear_data(sd->t.skb, skb->data, len); |
1771 | pci_dma_sync_single_for_device(adap->pdev, mapping, len, | 1776 | pci_dma_sync_single_for_device(adap->pdev, mapping, len, |
1772 | PCI_DMA_FROMDEVICE); | 1777 | PCI_DMA_FROMDEVICE); |
1773 | } else if (!drop_thres) | 1778 | } else if (!drop_thres) |
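Note: the sge.c conversions above rely on identities between the new accessors and the old pointer arithmetic. A reference sketch of the correspondences this series assumes (linear skb, illustrative function name):

/* Correspondences used in the conversions above:
 *   skb_network_offset(skb)   == skb->nh.raw - skb->data
 *   skb_transport_offset(skb) == skb->h.raw  - skb->data
 *   skb_transport_header(skb) == skb->h.raw
 *   ip_hdr(skb) / tcp_hdr(skb) replace skb->nh.iph / skb->h.th
 */
static int l4_offset(const struct sk_buff *skb)
{
	return skb_transport_offset(skb);
}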
diff --git a/drivers/net/de600.c b/drivers/net/de600.c index e547ce14eefe..dae97b860daa 100644 --- a/drivers/net/de600.c +++ b/drivers/net/de600.c | |||
@@ -359,7 +359,6 @@ static void de600_rx_intr(struct net_device *dev) | |||
359 | } | 359 | } |
360 | /* else */ | 360 | /* else */ |
361 | 361 | ||
362 | skb->dev = dev; | ||
363 | skb_reserve(skb,2); /* Align */ | 362 | skb_reserve(skb,2); /* Align */ |
364 | 363 | ||
365 | /* 'skb->data' points to the start of sk_buff data area. */ | 364 | /* 'skb->data' points to the start of sk_buff data area. */ |
diff --git a/drivers/net/de620.c b/drivers/net/de620.c index b6ad0cb50552..dc4892426174 100644 --- a/drivers/net/de620.c +++ b/drivers/net/de620.c | |||
@@ -697,7 +697,6 @@ static int de620_rx_intr(struct net_device *dev) | |||
697 | } | 697 | } |
698 | else { /* Yep! Go get it! */ | 698 | else { /* Yep! Go get it! */ |
699 | skb_reserve(skb,2); /* Align */ | 699 | skb_reserve(skb,2); /* Align */ |
700 | skb->dev = dev; | ||
701 | /* skb->data points to the start of sk_buff data area */ | 700 | /* skb->data points to the start of sk_buff data area */ |
702 | buffer = skb_put(skb,size); | 701 | buffer = skb_put(skb,size); |
703 | /* copy the packet into the buffer */ | 702 | /* copy the packet into the buffer */ |
diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 9f7e1db8ce62..95d854e2295c 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c | |||
@@ -616,7 +616,6 @@ static int lance_rx(struct net_device *dev) | |||
616 | } | 616 | } |
617 | lp->stats.rx_bytes += len; | 617 | lp->stats.rx_bytes += len; |
618 | 618 | ||
619 | skb->dev = dev; | ||
620 | skb_reserve(skb, 2); /* 16 byte align */ | 619 | skb_reserve(skb, 2); /* 16 byte align */ |
621 | skb_put(skb, len); /* make room */ | 620 | skb_put(skb, len); /* make room */ |
622 | 621 | ||
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c index 07d2731c1aa8..571d82f8008c 100644 --- a/drivers/net/defxx.c +++ b/drivers/net/defxx.c | |||
@@ -3091,13 +3091,13 @@ static void dfx_rcv_queue_process( | |||
3091 | { | 3091 | { |
3092 | /* Receive buffer allocated, pass receive packet up */ | 3092 | /* Receive buffer allocated, pass receive packet up */ |
3093 | 3093 | ||
3094 | memcpy(skb->data, p_buff + RCV_BUFF_K_PADDING, pkt_len+3); | 3094 | skb_copy_to_linear_data(skb, |
3095 | p_buff + RCV_BUFF_K_PADDING, | ||
3096 | pkt_len + 3); | ||
3095 | } | 3097 | } |
3096 | 3098 | ||
3097 | skb_reserve(skb,3); /* adjust data field so that it points to FC byte */ | 3099 | skb_reserve(skb,3); /* adjust data field so that it points to FC byte */ |
3098 | skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */ | 3100 | skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */ |
3099 | skb->dev = bp->dev; /* pass up device pointer */ | ||
3100 | |||
3101 | skb->protocol = fddi_type_trans(skb, bp->dev); | 3101 | skb->protocol = fddi_type_trans(skb, bp->dev); |
3102 | bp->rcv_total_bytes += skb->len; | 3102 | bp->rcv_total_bytes += skb->len; |
3103 | netif_rx(skb); | 3103 | netif_rx(skb); |
diff --git a/drivers/net/depca.c b/drivers/net/depca.c index f3807aaf10aa..183497020bfc 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c | |||
@@ -1044,7 +1044,6 @@ static int depca_rx(struct net_device *dev) | |||
1044 | unsigned char *buf; | 1044 | unsigned char *buf; |
1045 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1045 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1046 | buf = skb_put(skb, pkt_len); | 1046 | buf = skb_put(skb, pkt_len); |
1047 | skb->dev = dev; | ||
1048 | if (entry < lp->rx_old) { /* Wrapped buffer */ | 1047 | if (entry < lp->rx_old) { /* Wrapped buffer */ |
1049 | len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ; | 1048 | len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ; |
1050 | memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len); | 1049 | memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len); |
diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c index a79520295fd0..df62c0232f36 100644 --- a/drivers/net/dgrs.c +++ b/drivers/net/dgrs.c | |||
@@ -503,7 +503,6 @@ dgrs_rcv_frame( | |||
503 | /* discarding the frame */ | 503 | /* discarding the frame */ |
504 | goto out; | 504 | goto out; |
505 | } | 505 | } |
506 | skb->dev = devN; | ||
507 | skb_reserve(skb, 2); /* Align IP header */ | 506 | skb_reserve(skb, 2); /* Align IP header */ |
508 | 507 | ||
509 | again: | 508 | again: |
@@ -742,7 +741,7 @@ static int dgrs_start_xmit(struct sk_buff *skb, struct net_device *devN) | |||
742 | } | 741 | } |
743 | 742 | ||
744 | amt = min_t(unsigned int, len, rbdp->size - count); | 743 | amt = min_t(unsigned int, len, rbdp->size - count); |
745 | memcpy( (char *) S2H(rbdp->buf) + count, skb->data + i, amt); | 744 | skb_copy_from_linear_data_offset(skb, i, S2H(rbdp->buf) + count, amt); |
746 | i += amt; | 745 | i += amt; |
747 | count += amt; | 746 | count += amt; |
748 | len -= amt; | 747 | len -= amt; |
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index 9d446a0fe0bf..74ec64a1625d 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c | |||
@@ -504,7 +504,6 @@ rio_timer (unsigned long data) | |||
504 | break; | 504 | break; |
505 | } | 505 | } |
506 | np->rx_skbuff[entry] = skb; | 506 | np->rx_skbuff[entry] = skb; |
507 | skb->dev = dev; | ||
508 | /* 16 byte align the IP header */ | 507 | /* 16 byte align the IP header */ |
509 | skb_reserve (skb, 2); | 508 | skb_reserve (skb, 2); |
510 | np->rx_ring[entry].fraginfo = | 509 | np->rx_ring[entry].fraginfo = |
@@ -575,7 +574,6 @@ alloc_list (struct net_device *dev) | |||
575 | dev->name); | 574 | dev->name); |
576 | break; | 575 | break; |
577 | } | 576 | } |
578 | skb->dev = dev; /* Mark as being used by this device. */ | ||
579 | skb_reserve (skb, 2); /* 16 byte align the IP header. */ | 577 | skb_reserve (skb, 2); /* 16 byte align the IP header. */ |
580 | /* Rubicon now supports 40 bits of addressing space. */ | 578 | /* Rubicon now supports 40 bits of addressing space. */ |
581 | np->rx_ring[i].fraginfo = | 579 | np->rx_ring[i].fraginfo = |
@@ -866,7 +864,6 @@ receive_packet (struct net_device *dev) | |||
866 | DMA_48BIT_MASK, | 864 | DMA_48BIT_MASK, |
867 | np->rx_buf_sz, | 865 | np->rx_buf_sz, |
868 | PCI_DMA_FROMDEVICE); | 866 | PCI_DMA_FROMDEVICE); |
869 | skb->dev = dev; | ||
870 | /* 16 byte align the IP header */ | 867 | /* 16 byte align the IP header */ |
871 | skb_reserve (skb, 2); | 868 | skb_reserve (skb, 2); |
872 | eth_copy_and_sum (skb, | 869 | eth_copy_and_sum (skb, |
@@ -910,7 +907,6 @@ receive_packet (struct net_device *dev) | |||
910 | break; | 907 | break; |
911 | } | 908 | } |
912 | np->rx_skbuff[entry] = skb; | 909 | np->rx_skbuff[entry] = skb; |
913 | skb->dev = dev; | ||
914 | /* 16 byte align the IP header */ | 910 | /* 16 byte align the IP header */ |
915 | skb_reserve (skb, 2); | 911 | skb_reserve (skb, 2); |
916 | np->rx_ring[entry].fraginfo = | 912 | np->rx_ring[entry].fraginfo = |
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 615d2b14efa7..8cc1174e7f64 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -954,7 +954,6 @@ dm9000_rx(struct net_device *dev) | |||
954 | /* Move data from DM9000 */ | 954 | /* Move data from DM9000 */ |
955 | if (GoodPacket | 955 | if (GoodPacket |
956 | && ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) { | 956 | && ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) { |
957 | skb->dev = dev; | ||
958 | skb_reserve(skb, 2); | 957 | skb_reserve(skb, 2); |
959 | rdptr = (u8 *) skb_put(skb, RxLen - 4); | 958 | rdptr = (u8 *) skb_put(skb, RxLen - 4); |
960 | 959 | ||
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 0cefef5e3f06..4d0e0aea72bf 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -1769,7 +1769,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) | |||
1769 | 1769 | ||
1770 | /* Align, init, and map the RFD. */ | 1770 | /* Align, init, and map the RFD. */ |
1771 | skb_reserve(rx->skb, NET_IP_ALIGN); | 1771 | skb_reserve(rx->skb, NET_IP_ALIGN); |
1772 | memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd)); | 1772 | skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); |
1773 | rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, | 1773 | rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, |
1774 | RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); | 1774 | RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); |
1775 | 1775 | ||
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index b28a915bd980..9267f16b1b32 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -409,25 +409,21 @@ e1000_release_hw_control(struct e1000_adapter *adapter) | |||
409 | { | 409 | { |
410 | uint32_t ctrl_ext; | 410 | uint32_t ctrl_ext; |
411 | uint32_t swsm; | 411 | uint32_t swsm; |
412 | uint32_t extcnf; | ||
413 | 412 | ||
414 | /* Let firmware take over control of h/w */ | 413 | /* Let firmware take over control of h/w */ |
415 | switch (adapter->hw.mac_type) { | 414 | switch (adapter->hw.mac_type) { |
416 | case e1000_82571: | ||
417 | case e1000_82572: | ||
418 | case e1000_80003es2lan: | ||
419 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | ||
420 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | ||
421 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); | ||
422 | break; | ||
423 | case e1000_82573: | 415 | case e1000_82573: |
424 | swsm = E1000_READ_REG(&adapter->hw, SWSM); | 416 | swsm = E1000_READ_REG(&adapter->hw, SWSM); |
425 | E1000_WRITE_REG(&adapter->hw, SWSM, | 417 | E1000_WRITE_REG(&adapter->hw, SWSM, |
426 | swsm & ~E1000_SWSM_DRV_LOAD); | 418 | swsm & ~E1000_SWSM_DRV_LOAD); |
419 | break; | ||
420 | case e1000_82571: | ||
421 | case e1000_82572: | ||
422 | case e1000_80003es2lan: | ||
427 | case e1000_ich8lan: | 423 | case e1000_ich8lan: |
428 | extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 424 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); |
429 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | 425 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, |
430 | extcnf & ~E1000_CTRL_EXT_DRV_LOAD); | 426 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); |
431 | break; | 427 | break; |
432 | default: | 428 | default: |
433 | break; | 429 | break; |
@@ -450,26 +446,21 @@ e1000_get_hw_control(struct e1000_adapter *adapter) | |||
450 | { | 446 | { |
451 | uint32_t ctrl_ext; | 447 | uint32_t ctrl_ext; |
452 | uint32_t swsm; | 448 | uint32_t swsm; |
453 | uint32_t extcnf; | ||
454 | 449 | ||
455 | /* Let firmware know the driver has taken over */ | 450 | /* Let firmware know the driver has taken over */ |
456 | switch (adapter->hw.mac_type) { | 451 | switch (adapter->hw.mac_type) { |
457 | case e1000_82571: | ||
458 | case e1000_82572: | ||
459 | case e1000_80003es2lan: | ||
460 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | ||
461 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | ||
462 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); | ||
463 | break; | ||
464 | case e1000_82573: | 452 | case e1000_82573: |
465 | swsm = E1000_READ_REG(&adapter->hw, SWSM); | 453 | swsm = E1000_READ_REG(&adapter->hw, SWSM); |
466 | E1000_WRITE_REG(&adapter->hw, SWSM, | 454 | E1000_WRITE_REG(&adapter->hw, SWSM, |
467 | swsm | E1000_SWSM_DRV_LOAD); | 455 | swsm | E1000_SWSM_DRV_LOAD); |
468 | break; | 456 | break; |
457 | case e1000_82571: | ||
458 | case e1000_82572: | ||
459 | case e1000_80003es2lan: | ||
469 | case e1000_ich8lan: | 460 | case e1000_ich8lan: |
470 | extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL); | 461 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); |
471 | E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL, | 462 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, |
472 | extcnf | E1000_EXTCNF_CTRL_SWFLAG); | 463 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); |
473 | break; | 464 | break; |
474 | default: | 465 | default: |
475 | break; | 466 | break; |
@@ -522,14 +513,15 @@ e1000_release_manageability(struct e1000_adapter *adapter) | |||
522 | } | 513 | } |
523 | } | 514 | } |
524 | 515 | ||
525 | int | 516 | /** |
526 | e1000_up(struct e1000_adapter *adapter) | 517 | * e1000_configure - configure the hardware for RX and TX |
518 | * @adapter = private board structure | ||
519 | **/ | ||
520 | static void e1000_configure(struct e1000_adapter *adapter) | ||
527 | { | 521 | { |
528 | struct net_device *netdev = adapter->netdev; | 522 | struct net_device *netdev = adapter->netdev; |
529 | int i; | 523 | int i; |
530 | 524 | ||
531 | /* hardware has been reset, we need to reload some things */ | ||
532 | |||
533 | e1000_set_multi(netdev); | 525 | e1000_set_multi(netdev); |
534 | 526 | ||
535 | e1000_restore_vlan(adapter); | 527 | e1000_restore_vlan(adapter); |
@@ -548,14 +540,20 @@ e1000_up(struct e1000_adapter *adapter) | |||
548 | } | 540 | } |
549 | 541 | ||
550 | adapter->tx_queue_len = netdev->tx_queue_len; | 542 | adapter->tx_queue_len = netdev->tx_queue_len; |
543 | } | ||
544 | |||
545 | int e1000_up(struct e1000_adapter *adapter) | ||
546 | { | ||
547 | /* hardware has been reset, we need to reload some things */ | ||
548 | e1000_configure(adapter); | ||
549 | |||
550 | clear_bit(__E1000_DOWN, &adapter->flags); | ||
551 | 551 | ||
552 | #ifdef CONFIG_E1000_NAPI | 552 | #ifdef CONFIG_E1000_NAPI |
553 | netif_poll_enable(netdev); | 553 | netif_poll_enable(adapter->netdev); |
554 | #endif | 554 | #endif |
555 | e1000_irq_enable(adapter); | 555 | e1000_irq_enable(adapter); |
556 | 556 | ||
557 | clear_bit(__E1000_DOWN, &adapter->flags); | ||
558 | |||
559 | /* fire a link change interrupt to start the watchdog */ | 557 | /* fire a link change interrupt to start the watchdog */ |
560 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC); | 558 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC); |
561 | return 0; | 559 | return 0; |
@@ -640,15 +638,15 @@ e1000_down(struct e1000_adapter *adapter) | |||
640 | * reschedule our watchdog timer */ | 638 | * reschedule our watchdog timer */ |
641 | set_bit(__E1000_DOWN, &adapter->flags); | 639 | set_bit(__E1000_DOWN, &adapter->flags); |
642 | 640 | ||
641 | #ifdef CONFIG_E1000_NAPI | ||
642 | netif_poll_disable(netdev); | ||
643 | #endif | ||
643 | e1000_irq_disable(adapter); | 644 | e1000_irq_disable(adapter); |
644 | 645 | ||
645 | del_timer_sync(&adapter->tx_fifo_stall_timer); | 646 | del_timer_sync(&adapter->tx_fifo_stall_timer); |
646 | del_timer_sync(&adapter->watchdog_timer); | 647 | del_timer_sync(&adapter->watchdog_timer); |
647 | del_timer_sync(&adapter->phy_info_timer); | 648 | del_timer_sync(&adapter->phy_info_timer); |
648 | 649 | ||
649 | #ifdef CONFIG_E1000_NAPI | ||
650 | netif_poll_disable(netdev); | ||
651 | #endif | ||
652 | netdev->tx_queue_len = adapter->tx_queue_len; | 650 | netdev->tx_queue_len = adapter->tx_queue_len; |
653 | adapter->link_speed = 0; | 651 | adapter->link_speed = 0; |
654 | adapter->link_duplex = 0; | 652 | adapter->link_duplex = 0; |
@@ -1410,21 +1408,17 @@ e1000_open(struct net_device *netdev) | |||
1410 | return -EBUSY; | 1408 | return -EBUSY; |
1411 | 1409 | ||
1412 | /* allocate transmit descriptors */ | 1410 | /* allocate transmit descriptors */ |
1413 | if ((err = e1000_setup_all_tx_resources(adapter))) | 1411 | err = e1000_setup_all_tx_resources(adapter); |
1412 | if (err) | ||
1414 | goto err_setup_tx; | 1413 | goto err_setup_tx; |
1415 | 1414 | ||
1416 | /* allocate receive descriptors */ | 1415 | /* allocate receive descriptors */ |
1417 | if ((err = e1000_setup_all_rx_resources(adapter))) | 1416 | err = e1000_setup_all_rx_resources(adapter); |
1418 | goto err_setup_rx; | ||
1419 | |||
1420 | err = e1000_request_irq(adapter); | ||
1421 | if (err) | 1417 | if (err) |
1422 | goto err_req_irq; | 1418 | goto err_setup_rx; |
1423 | 1419 | ||
1424 | e1000_power_up_phy(adapter); | 1420 | e1000_power_up_phy(adapter); |
1425 | 1421 | ||
1426 | if ((err = e1000_up(adapter))) | ||
1427 | goto err_up; | ||
1428 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 1422 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
1429 | if ((adapter->hw.mng_cookie.status & | 1423 | if ((adapter->hw.mng_cookie.status & |
1430 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { | 1424 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { |
@@ -1437,12 +1431,33 @@ e1000_open(struct net_device *netdev) | |||
1437 | e1000_check_mng_mode(&adapter->hw)) | 1431 | e1000_check_mng_mode(&adapter->hw)) |
1438 | e1000_get_hw_control(adapter); | 1432 | e1000_get_hw_control(adapter); |
1439 | 1433 | ||
1434 | /* before we allocate an interrupt, we must be ready to handle it. | ||
1435 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | ||
1436 | * as soon as we call pci_request_irq, so we have to setup our | ||
1437 | * clean_rx handler before we do so. */ | ||
1438 | e1000_configure(adapter); | ||
1439 | |||
1440 | err = e1000_request_irq(adapter); | ||
1441 | if (err) | ||
1442 | goto err_req_irq; | ||
1443 | |||
1444 | /* From here on the code is the same as e1000_up() */ | ||
1445 | clear_bit(__E1000_DOWN, &adapter->flags); | ||
1446 | |||
1447 | #ifdef CONFIG_E1000_NAPI | ||
1448 | netif_poll_enable(netdev); | ||
1449 | #endif | ||
1450 | |||
1451 | e1000_irq_enable(adapter); | ||
1452 | |||
1453 | /* fire a link status change interrupt to start the watchdog */ | ||
1454 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC); | ||
1455 | |||
1440 | return E1000_SUCCESS; | 1456 | return E1000_SUCCESS; |
1441 | 1457 | ||
1442 | err_up: | ||
1443 | e1000_power_down_phy(adapter); | ||
1444 | e1000_free_irq(adapter); | ||
1445 | err_req_irq: | 1458 | err_req_irq: |
1459 | e1000_release_hw_control(adapter); | ||
1460 | e1000_power_down_phy(adapter); | ||
1446 | e1000_free_all_rx_resources(adapter); | 1461 | e1000_free_all_rx_resources(adapter); |
1447 | err_setup_rx: | 1462 | err_setup_rx: |
1448 | e1000_free_all_tx_resources(adapter); | 1463 | e1000_free_all_tx_resources(adapter); |
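Note: the e1000_open() restructuring above is about interrupt ordering, not just code motion — with DEBUG_SHIRQ the handler can fire inside request_irq(), so the rings must be configured first and the tail of e1000_up() is then inlined. A condensed sketch of the resulting sequence (error unwinding elided; names as in the driver):

static int open_sequence(struct e1000_adapter *adapter)
{
	e1000_configure(adapter);	/* rings ready before the IRQ exists */
	if (e1000_request_irq(adapter))
		return -EBUSY;		/* real code unwinds via err_req_irq */
	clear_bit(__E1000_DOWN, &adapter->flags);
	e1000_irq_enable(adapter);
	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC);
	return E1000_SUCCESS;
}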
@@ -2887,33 +2902,30 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2887 | return err; | 2902 | return err; |
2888 | } | 2903 | } |
2889 | 2904 | ||
2890 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 2905 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
2891 | mss = skb_shinfo(skb)->gso_size; | 2906 | mss = skb_shinfo(skb)->gso_size; |
2892 | if (skb->protocol == htons(ETH_P_IP)) { | 2907 | if (skb->protocol == htons(ETH_P_IP)) { |
2893 | skb->nh.iph->tot_len = 0; | 2908 | struct iphdr *iph = ip_hdr(skb); |
2894 | skb->nh.iph->check = 0; | 2909 | iph->tot_len = 0; |
2895 | skb->h.th->check = | 2910 | iph->check = 0; |
2896 | ~csum_tcpudp_magic(skb->nh.iph->saddr, | 2911 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
2897 | skb->nh.iph->daddr, | 2912 | iph->daddr, 0, |
2898 | 0, | 2913 | IPPROTO_TCP, |
2899 | IPPROTO_TCP, | 2914 | 0); |
2900 | 0); | ||
2901 | cmd_length = E1000_TXD_CMD_IP; | 2915 | cmd_length = E1000_TXD_CMD_IP; |
2902 | ipcse = skb->h.raw - skb->data - 1; | 2916 | ipcse = skb_transport_offset(skb) - 1; |
2903 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 2917 | } else if (skb->protocol == htons(ETH_P_IPV6)) { |
2904 | skb->nh.ipv6h->payload_len = 0; | 2918 | ipv6_hdr(skb)->payload_len = 0; |
2905 | skb->h.th->check = | 2919 | tcp_hdr(skb)->check = |
2906 | ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, | 2920 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
2907 | &skb->nh.ipv6h->daddr, | 2921 | &ipv6_hdr(skb)->daddr, |
2908 | 0, | 2922 | 0, IPPROTO_TCP, 0); |
2909 | IPPROTO_TCP, | ||
2910 | 0); | ||
2911 | ipcse = 0; | 2923 | ipcse = 0; |
2912 | } | 2924 | } |
2913 | ipcss = skb->nh.raw - skb->data; | 2925 | ipcss = skb_network_offset(skb); |
2914 | ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; | 2926 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; |
2915 | tucss = skb->h.raw - skb->data; | 2927 | tucss = skb_transport_offset(skb); |
2916 | tucso = (void *)&(skb->h.th->check) - (void *)skb->data; | 2928 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; |
2917 | tucse = 0; | 2929 | tucse = 0; |
2918 | 2930 | ||
2919 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | | 2931 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | |
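Note: the TSO conversion above preserves the arithmetic exactly — tcp_hdrlen() expands to tcp_hdr(skb)->doff * 4, so the new hdr_len is the old expression written through accessors. Sketch (illustrative name):

/* Old: (skb->h.raw - skb->data) + (skb->h.th->doff << 2)
 * New: the same quantity via accessors. */
static unsigned int tso_header_len(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}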
@@ -2954,7 +2966,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2954 | uint8_t css; | 2966 | uint8_t css; |
2955 | 2967 | ||
2956 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 2968 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
2957 | css = skb->h.raw - skb->data; | 2969 | css = skb_transport_offset(skb); |
2958 | 2970 | ||
2959 | i = tx_ring->next_to_use; | 2971 | i = tx_ring->next_to_use; |
2960 | buffer_info = &tx_ring->buffer_info[i]; | 2972 | buffer_info = &tx_ring->buffer_info[i]; |
@@ -2962,7 +2974,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2962 | 2974 | ||
2963 | context_desc->lower_setup.ip_config = 0; | 2975 | context_desc->lower_setup.ip_config = 0; |
2964 | context_desc->upper_setup.tcp_fields.tucss = css; | 2976 | context_desc->upper_setup.tcp_fields.tucss = css; |
2965 | context_desc->upper_setup.tcp_fields.tucso = css + skb->csum; | 2977 | context_desc->upper_setup.tcp_fields.tucso = |
2978 | css + skb->csum_offset; | ||
2966 | context_desc->upper_setup.tcp_fields.tucse = 0; | 2979 | context_desc->upper_setup.tcp_fields.tucse = 0; |
2967 | context_desc->tcp_seg_setup.data = 0; | 2980 | context_desc->tcp_seg_setup.data = 0; |
2968 | context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); | 2981 | context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); |
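Note: the tucso change above reflects the split of skb->csum into a union with csum_offset — for CHECKSUM_PARTIAL packets the position of the checksum field within the L4 header now lives in skb->csum_offset rather than in skb->csum. Sketch of the descriptor value (illustrative name):

/* Absolute offset (from skb->data) of the checksum field the hardware
 * must fill in, for a CHECKSUM_PARTIAL skb. */
static u8 checksum_store_offset(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + skb->csum_offset;
}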
@@ -3296,7 +3309,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3296 | /* TSO Workaround for 82571/2/3 Controllers -- if skb->data | 3309 | /* TSO Workaround for 82571/2/3 Controllers -- if skb->data |
3297 | * points to just header, pull a few bytes of payload from | 3310 | * points to just header, pull a few bytes of payload from |
3298 | * frags into skb->data */ | 3311 | * frags into skb->data */ |
3299 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 3312 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
3300 | if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { | 3313 | if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { |
3301 | switch (adapter->hw.mac_type) { | 3314 | switch (adapter->hw.mac_type) { |
3302 | unsigned int pull_size; | 3315 | unsigned int pull_size; |
@@ -3307,7 +3320,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3307 | * NOTE: this is a TSO only workaround | 3320 | * NOTE: this is a TSO only workaround |
3308 | * if end byte alignment not correct move us | 3321 | * if end byte alignment not correct move us |
3309 | * into the next dword */ | 3322 | * into the next dword */ |
3310 | if ((unsigned long)(skb->tail - 1) & 4) | 3323 | if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) |
3311 | break; | 3324 | break; |
3312 | /* fall through */ | 3325 | /* fall through */ |
3313 | case e1000_82571: | 3326 | case e1000_82571: |
@@ -3363,12 +3376,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3363 | (adapter->hw.mac_type == e1000_82573)) | 3376 | (adapter->hw.mac_type == e1000_82573)) |
3364 | e1000_transfer_dhcp_info(adapter, skb); | 3377 | e1000_transfer_dhcp_info(adapter, skb); |
3365 | 3378 | ||
3366 | local_irq_save(flags); | 3379 | if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) |
3367 | if (!spin_trylock(&tx_ring->tx_lock)) { | ||
3368 | /* Collision - tell upper layer to requeue */ | 3380 | /* Collision - tell upper layer to requeue */ |
3369 | local_irq_restore(flags); | ||
3370 | return NETDEV_TX_LOCKED; | 3381 | return NETDEV_TX_LOCKED; |
3371 | } | ||
3372 | 3382 | ||
3373 | /* need: count + 2 desc gap to keep tail from touching | 3383 | /* need: count + 2 desc gap to keep tail from touching |
3374 | * head, otherwise try next time */ | 3384 | * head, otherwise try next time */ |
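Note: spin_trylock_irqsave() above replaces the manual local_irq_save()/spin_trylock() pair; on failure it restores the interrupt flags itself, which is why the local_irq_restore() in the collision path disappears. A sketch of the pattern (illustrative name):

static int xmit_with_trylock(spinlock_t *lock)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(lock, flags))
		return NETDEV_TX_LOCKED;	/* flags already restored */
	/* ... queue the frame ... */
	spin_unlock_irqrestore(lock, flags);
	return NETDEV_TX_OK;
}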
@@ -4227,9 +4237,12 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4227 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); | 4237 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); |
4228 | if (new_skb) { | 4238 | if (new_skb) { |
4229 | skb_reserve(new_skb, NET_IP_ALIGN); | 4239 | skb_reserve(new_skb, NET_IP_ALIGN); |
4230 | memcpy(new_skb->data - NET_IP_ALIGN, | 4240 | skb_copy_to_linear_data_offset(new_skb, |
4231 | skb->data - NET_IP_ALIGN, | 4241 | -NET_IP_ALIGN, |
4232 | length + NET_IP_ALIGN); | 4242 | (skb->data - |
4243 | NET_IP_ALIGN), | ||
4244 | (length + | ||
4245 | NET_IP_ALIGN)); | ||
4233 | /* save the skb in buffer_info as good */ | 4246 | /* save the skb in buffer_info as good */ |
4234 | buffer_info->skb = skb; | 4247 | buffer_info->skb = skb; |
4235 | skb = new_skb; | 4248 | skb = new_skb; |
@@ -4391,7 +4404,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
4391 | PCI_DMA_FROMDEVICE); | 4404 | PCI_DMA_FROMDEVICE); |
4392 | vaddr = kmap_atomic(ps_page->ps_page[0], | 4405 | vaddr = kmap_atomic(ps_page->ps_page[0], |
4393 | KM_SKB_DATA_SOFTIRQ); | 4406 | KM_SKB_DATA_SOFTIRQ); |
4394 | memcpy(skb->tail, vaddr, l1); | 4407 | memcpy(skb_tail_pointer(skb), vaddr, l1); |
4395 | kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); | 4408 | kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); |
4396 | pci_dma_sync_single_for_device(pdev, | 4409 | pci_dma_sync_single_for_device(pdev, |
4397 | ps_page_dma->ps_page_dma[0], | 4410 | ps_page_dma->ps_page_dma[0], |
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index b4463094c93a..39654e1e2bed 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c | |||
@@ -1591,7 +1591,6 @@ eepro_rx(struct net_device *dev) | |||
1591 | 1591 | ||
1592 | break; | 1592 | break; |
1593 | } | 1593 | } |
1594 | skb->dev = dev; | ||
1595 | skb_reserve(skb,2); | 1594 | skb_reserve(skb,2); |
1596 | 1595 | ||
1597 | if (lp->version == LAN595) | 1596 | if (lp->version == LAN595) |
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c index e28bb1e38f8d..6c267c38df97 100644 --- a/drivers/net/eepro100.c +++ b/drivers/net/eepro100.c | |||
@@ -1793,7 +1793,6 @@ speedo_rx(struct net_device *dev) | |||
1793 | copying to a properly sized skbuff. */ | 1793 | copying to a properly sized skbuff. */ |
1794 | if (pkt_len < rx_copybreak | 1794 | if (pkt_len < rx_copybreak |
1795 | && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { | 1795 | && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { |
1796 | skb->dev = dev; | ||
1797 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 1796 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
1798 | /* 'skb_put()' points to the start of sk_buff data area. */ | 1797 | /* 'skb_put()' points to the start of sk_buff data area. */ |
1799 | pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry], | 1798 | pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry], |
@@ -1805,8 +1804,9 @@ speedo_rx(struct net_device *dev) | |||
1805 | eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0); | 1804 | eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0); |
1806 | skb_put(skb, pkt_len); | 1805 | skb_put(skb, pkt_len); |
1807 | #else | 1806 | #else |
1808 | memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data, | 1807 | skb_copy_from_linear_data(sp->rx_skbuff[entry], |
1809 | pkt_len); | 1808 | skb_put(skb, pkt_len), |
1809 | pkt_len); | ||
1810 | #endif | 1810 | #endif |
1811 | pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry], | 1811 | pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry], |
1812 | sizeof(struct RxFD) + pkt_len, | 1812 | sizeof(struct RxFD) + pkt_len, |
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 3868b8031266..8aaf5ec0c360 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c | |||
@@ -976,7 +976,6 @@ static void eexp_hw_rx_pio(struct net_device *dev) | |||
976 | lp->stats.rx_dropped++; | 976 | lp->stats.rx_dropped++; |
977 | break; | 977 | break; |
978 | } | 978 | } |
979 | skb->dev = dev; | ||
980 | skb_reserve(skb, 2); | 979 | skb_reserve(skb, 2); |
981 | outw(pbuf+10, ioaddr+READ_PTR); | 980 | outw(pbuf+10, ioaddr+READ_PTR); |
982 | insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1); | 981 | insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1); |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 0e4042bc0a48..58364a0ff378 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -391,8 +391,8 @@ static int ehea_poll(struct net_device *dev, int *budget) | |||
391 | if (!skb) | 391 | if (!skb) |
392 | break; | 392 | break; |
393 | } | 393 | } |
394 | memcpy(skb->data, ((char*)cqe) + 64, | 394 | skb_copy_to_linear_data(skb, ((char*)cqe) + 64, |
395 | cqe->num_bytes_transfered - 4); | 395 | cqe->num_bytes_transfered - 4); |
396 | ehea_fill_skb(dev, skb, cqe); | 396 | ehea_fill_skb(dev, skb, cqe); |
397 | } else if (rq == 2) { /* RQ2 */ | 397 | } else if (rq == 2) { /* RQ2 */ |
398 | skb = get_skb_by_index(skb_arr_rq2, | 398 | skb = get_skb_by_index(skb_arr_rq2, |
@@ -1262,8 +1262,8 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) | |||
1262 | static inline void write_ip_start_end(struct ehea_swqe *swqe, | 1262 | static inline void write_ip_start_end(struct ehea_swqe *swqe, |
1263 | const struct sk_buff *skb) | 1263 | const struct sk_buff *skb) |
1264 | { | 1264 | { |
1265 | swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data)); | 1265 | swqe->ip_start = skb_network_offset(skb); |
1266 | swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1); | 1266 | swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1); |
1267 | } | 1267 | } |
1268 | 1268 | ||
1269 | static inline void write_tcp_offset_end(struct ehea_swqe *swqe, | 1269 | static inline void write_tcp_offset_end(struct ehea_swqe *swqe, |
@@ -1300,13 +1300,13 @@ static void write_swqe2_TSO(struct sk_buff *skb, | |||
1300 | /* copy only eth/ip/tcp headers to immediate data and | 1300 | /* copy only eth/ip/tcp headers to immediate data and |
1301 | * the rest of skb->data to sg1entry | 1301 | * the rest of skb->data to sg1entry |
1302 | */ | 1302 | */ |
1303 | headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4); | 1303 | headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); |
1304 | 1304 | ||
1305 | skb_data_size = skb->len - skb->data_len; | 1305 | skb_data_size = skb->len - skb->data_len; |
1306 | 1306 | ||
1307 | if (skb_data_size >= headersize) { | 1307 | if (skb_data_size >= headersize) { |
1308 | /* copy immediate data */ | 1308 | /* copy immediate data */ |
1309 | memcpy(imm_data, skb->data, headersize); | 1309 | skb_copy_from_linear_data(skb, imm_data, headersize); |
1310 | swqe->immediate_data_length = headersize; | 1310 | swqe->immediate_data_length = headersize; |
1311 | 1311 | ||
1312 | if (skb_data_size > headersize) { | 1312 | if (skb_data_size > headersize) { |
@@ -1337,7 +1337,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, | |||
1337 | */ | 1337 | */ |
1338 | if (skb_data_size >= SWQE2_MAX_IMM) { | 1338 | if (skb_data_size >= SWQE2_MAX_IMM) { |
1339 | /* copy immediate data */ | 1339 | /* copy immediate data */ |
1340 | memcpy(imm_data, skb->data, SWQE2_MAX_IMM); | 1340 | skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM); |
1341 | 1341 | ||
1342 | swqe->immediate_data_length = SWQE2_MAX_IMM; | 1342 | swqe->immediate_data_length = SWQE2_MAX_IMM; |
1343 | 1343 | ||
@@ -1350,7 +1350,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, | |||
1350 | swqe->descriptors++; | 1350 | swqe->descriptors++; |
1351 | } | 1351 | } |
1352 | } else { | 1352 | } else { |
1353 | memcpy(imm_data, skb->data, skb_data_size); | 1353 | skb_copy_from_linear_data(skb, imm_data, skb_data_size); |
1354 | swqe->immediate_data_length = skb_data_size; | 1354 | swqe->immediate_data_length = skb_data_size; |
1355 | } | 1355 | } |
1356 | } | 1356 | } |
@@ -1688,6 +1688,7 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, | |||
1688 | struct ehea_swqe *swqe, u32 lkey) | 1688 | struct ehea_swqe *swqe, u32 lkey) |
1689 | { | 1689 | { |
1690 | if (skb->protocol == htons(ETH_P_IP)) { | 1690 | if (skb->protocol == htons(ETH_P_IP)) { |
1691 | const struct iphdr *iph = ip_hdr(skb); | ||
1691 | /* IPv4 */ | 1692 | /* IPv4 */ |
1692 | swqe->tx_control |= EHEA_SWQE_CRC | 1693 | swqe->tx_control |= EHEA_SWQE_CRC |
1693 | | EHEA_SWQE_IP_CHECKSUM | 1694 | | EHEA_SWQE_IP_CHECKSUM |
@@ -1697,15 +1698,15 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, | |||
1697 | 1698 | ||
1698 | write_ip_start_end(swqe, skb); | 1699 | write_ip_start_end(swqe, skb); |
1699 | 1700 | ||
1700 | if (skb->nh.iph->protocol == IPPROTO_UDP) { | 1701 | if (iph->protocol == IPPROTO_UDP) { |
1701 | if ((skb->nh.iph->frag_off & IP_MF) || | 1702 | if ((iph->frag_off & IP_MF) || |
1702 | (skb->nh.iph->frag_off & IP_OFFSET)) | 1703 | (iph->frag_off & IP_OFFSET)) |
1703 | /* IP fragment, so don't change cs */ | 1704 | /* IP fragment, so don't change cs */ |
1704 | swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; | 1705 | swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; |
1705 | else | 1706 | else |
1706 | write_udp_offset_end(swqe, skb); | 1707 | write_udp_offset_end(swqe, skb); |
1707 | 1708 | ||
1708 | } else if (skb->nh.iph->protocol == IPPROTO_TCP) { | 1709 | } else if (iph->protocol == IPPROTO_TCP) { |
1709 | write_tcp_offset_end(swqe, skb); | 1710 | write_tcp_offset_end(swqe, skb); |
1710 | } | 1711 | } |
1711 | 1712 | ||
@@ -1731,10 +1732,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, | |||
1731 | int i; | 1732 | int i; |
1732 | 1733 | ||
1733 | if (skb->protocol == htons(ETH_P_IP)) { | 1734 | if (skb->protocol == htons(ETH_P_IP)) { |
1735 | const struct iphdr *iph = ip_hdr(skb); | ||
1734 | /* IPv4 */ | 1736 | /* IPv4 */ |
1735 | write_ip_start_end(swqe, skb); | 1737 | write_ip_start_end(swqe, skb); |
1736 | 1738 | ||
1737 | if (skb->nh.iph->protocol == IPPROTO_TCP) { | 1739 | if (iph->protocol == IPPROTO_TCP) { |
1738 | swqe->tx_control |= EHEA_SWQE_CRC | 1740 | swqe->tx_control |= EHEA_SWQE_CRC |
1739 | | EHEA_SWQE_IP_CHECKSUM | 1741 | | EHEA_SWQE_IP_CHECKSUM |
1740 | | EHEA_SWQE_TCP_CHECKSUM | 1742 | | EHEA_SWQE_TCP_CHECKSUM |
@@ -1742,9 +1744,9 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, | |||
1742 | 1744 | ||
1743 | write_tcp_offset_end(swqe, skb); | 1745 | write_tcp_offset_end(swqe, skb); |
1744 | 1746 | ||
1745 | } else if (skb->nh.iph->protocol == IPPROTO_UDP) { | 1747 | } else if (iph->protocol == IPPROTO_UDP) { |
1746 | if ((skb->nh.iph->frag_off & IP_MF) || | 1748 | if ((iph->frag_off & IP_MF) || |
1747 | (skb->nh.iph->frag_off & IP_OFFSET)) | 1749 | (iph->frag_off & IP_OFFSET)) |
1748 | /* IP fragment, so don't change cs */ | 1750 | /* IP fragment, so don't change cs */ |
1749 | swqe->tx_control |= EHEA_SWQE_CRC | 1751 | swqe->tx_control |= EHEA_SWQE_CRC |
1750 | | EHEA_SWQE_IMM_DATA_PRESENT; | 1752 | | EHEA_SWQE_IMM_DATA_PRESENT; |
@@ -1770,10 +1772,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, | |||
1770 | /* copy (immediate) data */ | 1772 | /* copy (immediate) data */ |
1771 | if (nfrags == 0) { | 1773 | if (nfrags == 0) { |
1772 | /* data is in a single piece */ | 1774 | /* data is in a single piece */ |
1773 | memcpy(imm_data, skb->data, skb->len); | 1775 | skb_copy_from_linear_data(skb, imm_data, skb->len); |
1774 | } else { | 1776 | } else { |
1775 | /* first copy data from the skb->data buffer ... */ | 1777 | /* first copy data from the skb->data buffer ... */ |
1776 | memcpy(imm_data, skb->data, skb->len - skb->data_len); | 1778 | skb_copy_from_linear_data(skb, imm_data, |
1779 | skb->len - skb->data_len); | ||
1777 | imm_data += skb->len - skb->data_len; | 1780 | imm_data += skb->len - skb->data_len; |
1778 | 1781 | ||
1779 | /* ... then copy data from the fragments */ | 1782 | /* ... then copy data from the fragments */ |
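Note: the ehea header-size computation above uses the same identities as the rest of the series — ip_hdrlen(skb) == ip_hdr(skb)->ihl * 4 and tcp_hdrlen(skb) == tcp_hdr(skb)->doff * 4. Sketch (illustrative name):

/* Bytes occupied by the Ethernet, IPv4 and TCP headers of a linear skb. */
static unsigned int eth_ip_tcp_hdrsize(const struct sk_buff *skb)
{
	return ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
}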
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 3a6a83d3ee1c..4e3f14c9c717 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c | |||
@@ -934,7 +934,6 @@ static void epic_init_ring(struct net_device *dev) | |||
934 | ep->rx_skbuff[i] = skb; | 934 | ep->rx_skbuff[i] = skb; |
935 | if (skb == NULL) | 935 | if (skb == NULL) |
936 | break; | 936 | break; |
937 | skb->dev = dev; /* Mark as being used by this device. */ | ||
938 | skb_reserve(skb, 2); /* 16 byte align the IP header. */ | 937 | skb_reserve(skb, 2); /* 16 byte align the IP header. */ |
939 | ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, | 938 | ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, |
940 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); | 939 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); |
@@ -1199,7 +1198,6 @@ static int epic_rx(struct net_device *dev, int budget) | |||
1199 | to a minimally-sized skbuff. */ | 1198 | to a minimally-sized skbuff. */ |
1200 | if (pkt_len < rx_copybreak | 1199 | if (pkt_len < rx_copybreak |
1201 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1200 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1202 | skb->dev = dev; | ||
1203 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1201 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1204 | pci_dma_sync_single_for_cpu(ep->pci_dev, | 1202 | pci_dma_sync_single_for_cpu(ep->pci_dev, |
1205 | ep->rx_ring[entry].bufaddr, | 1203 | ep->rx_ring[entry].bufaddr, |
@@ -1236,7 +1234,6 @@ static int epic_rx(struct net_device *dev, int budget) | |||
1236 | skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz); | 1234 | skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz); |
1237 | if (skb == NULL) | 1235 | if (skb == NULL) |
1238 | break; | 1236 | break; |
1239 | skb->dev = dev; /* Mark as being used by this device. */ | ||
1240 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 1237 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
1241 | ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, | 1238 | ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, |
1242 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1239 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); |
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index 93283e386f3a..04abf59e5007 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c | |||
@@ -1175,7 +1175,6 @@ static void eth16i_rx(struct net_device *dev) | |||
1175 | break; | 1175 | break; |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | skb->dev = dev; | ||
1179 | skb_reserve(skb,2); | 1178 | skb_reserve(skb,2); |
1180 | 1179 | ||
1181 | /* | 1180 | /* |
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c index 714ea1176ec7..cb0792c187ba 100644 --- a/drivers/net/ewrk3.c +++ b/drivers/net/ewrk3.c | |||
@@ -993,7 +993,6 @@ static int ewrk3_rx(struct net_device *dev) | |||
993 | 993 | ||
994 | if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 994 | if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
995 | unsigned char *p; | 995 | unsigned char *p; |
996 | skb->dev = dev; | ||
997 | skb_reserve(skb, 2); /* Align to 16 bytes */ | 996 | skb_reserve(skb, 2); /* Align to 16 bytes */ |
998 | p = skb_put(skb, pkt_len); | 997 | p = skb_put(skb, pkt_len); |
999 | 998 | ||
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 38a13f440530..abe9b089c610 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c | |||
@@ -1719,7 +1719,6 @@ static int netdev_rx(struct net_device *dev) | |||
1719 | to a minimally-sized skbuff. */ | 1719 | to a minimally-sized skbuff. */ |
1720 | if (pkt_len < rx_copybreak && | 1720 | if (pkt_len < rx_copybreak && |
1721 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1721 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1722 | skb->dev = dev; | ||
1723 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1722 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1724 | pci_dma_sync_single_for_cpu(np->pci_dev, | 1723 | pci_dma_sync_single_for_cpu(np->pci_dev, |
1725 | np->cur_rx->buffer, | 1724 | np->cur_rx->buffer, |
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 6764281b4531..255b09124e11 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -647,7 +647,6 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { | |||
647 | printk("%s: Memory squeeze, dropping packet.\n", dev->name); | 647 | printk("%s: Memory squeeze, dropping packet.\n", dev->name); |
648 | fep->stats.rx_dropped++; | 648 | fep->stats.rx_dropped++; |
649 | } else { | 649 | } else { |
650 | skb->dev = dev; | ||
651 | skb_put(skb,pkt_len-4); /* Make room */ | 650 | skb_put(skb,pkt_len-4); /* Make room */ |
652 | eth_copy_and_sum(skb, data, pkt_len-4, 0); | 651 | eth_copy_and_sum(skb, data, pkt_len-4, 0); |
653 | skb->protocol=eth_type_trans(skb,dev); | 652 | skb->protocol=eth_type_trans(skb,dev); |
diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c index 77f747a5afa7..e824d5d231af 100644 --- a/drivers/net/fec_8xx/fec_main.c +++ b/drivers/net/fec_8xx/fec_main.c | |||
@@ -551,7 +551,9 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) | |||
551 | skbn = dev_alloc_skb(pkt_len + 2); | 551 | skbn = dev_alloc_skb(pkt_len + 2); |
552 | if (skbn != NULL) { | 552 | if (skbn != NULL) { |
553 | skb_reserve(skbn, 2); /* align IP header */ | 553 | skb_reserve(skbn, 2); /* align IP header */ |
554 | memcpy(skbn->data, skb->data, pkt_len); | 554 | skb_copy_from_linear_data(skb, |
555 | skbn->data, | ||
556 | pkt_len); | ||
555 | /* swap */ | 557 | /* swap */ |
556 | skbt = skb; | 558 | skbt = skb; |
557 | skb = skbn; | 559 | skb = skbn; |
@@ -561,7 +563,6 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) | |||
561 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); | 563 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); |
562 | 564 | ||
563 | if (skbn != NULL) { | 565 | if (skbn != NULL) { |
564 | skb->dev = dev; | ||
565 | skb_put(skb, pkt_len); /* Make room */ | 566 | skb_put(skb, pkt_len); /* Make room */ |
566 | skb->protocol = eth_type_trans(skb, dev); | 567 | skb->protocol = eth_type_trans(skb, dev); |
567 | received++; | 568 | received++; |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index d04214e4e581..7a018027fcc0 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -1385,11 +1385,12 @@ static int nv_alloc_rx(struct net_device *dev) | |||
1385 | while (np->put_rx.orig != less_rx) { | 1385 | while (np->put_rx.orig != less_rx) { |
1386 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); | 1386 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); |
1387 | if (skb) { | 1387 | if (skb) { |
1388 | skb->dev = dev; | ||
1389 | np->put_rx_ctx->skb = skb; | 1388 | np->put_rx_ctx->skb = skb; |
1390 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, | 1389 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, |
1391 | skb->end-skb->data, PCI_DMA_FROMDEVICE); | 1390 | skb->data, |
1392 | np->put_rx_ctx->dma_len = skb->end-skb->data; | 1391 | skb_tailroom(skb), |
1392 | PCI_DMA_FROMDEVICE); | ||
1393 | np->put_rx_ctx->dma_len = skb_tailroom(skb); | ||
1393 | np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); | 1394 | np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); |
1394 | wmb(); | 1395 | wmb(); |
1395 | np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); | 1396 | np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); |
@@ -1416,11 +1417,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev) | |||
1416 | while (np->put_rx.ex != less_rx) { | 1417 | while (np->put_rx.ex != less_rx) { |
1417 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); | 1418 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); |
1418 | if (skb) { | 1419 | if (skb) { |
1419 | skb->dev = dev; | ||
1420 | np->put_rx_ctx->skb = skb; | 1420 | np->put_rx_ctx->skb = skb; |
1421 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, | 1421 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, |
1422 | skb->end-skb->data, PCI_DMA_FROMDEVICE); | 1422 | skb->data, |
1423 | np->put_rx_ctx->dma_len = skb->end-skb->data; | 1423 | skb_tailroom(skb), |
1424 | PCI_DMA_FROMDEVICE); | ||
1425 | np->put_rx_ctx->dma_len = skb_tailroom(skb); | ||
1424 | np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32; | 1426 | np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32; |
1425 | np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF; | 1427 | np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF; |
1426 | wmb(); | 1428 | wmb(); |
@@ -1604,8 +1606,9 @@ static void nv_drain_rx(struct net_device *dev) | |||
1604 | wmb(); | 1606 | wmb(); |
1605 | if (np->rx_skb[i].skb) { | 1607 | if (np->rx_skb[i].skb) { |
1606 | pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, | 1608 | pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, |
1607 | np->rx_skb[i].skb->end-np->rx_skb[i].skb->data, | 1609 | (skb_end_pointer(np->rx_skb[i].skb) - |
1608 | PCI_DMA_FROMDEVICE); | 1610 | np->rx_skb[i].skb->data), |
1611 | PCI_DMA_FROMDEVICE); | ||
1609 | dev_kfree_skb(np->rx_skb[i].skb); | 1612 | dev_kfree_skb(np->rx_skb[i].skb); |
1610 | np->rx_skb[i].skb = NULL; | 1613 | np->rx_skb[i].skb = NULL; |
1611 | } | 1614 | } |
@@ -4376,11 +4379,12 @@ static int nv_loopback_test(struct net_device *dev) | |||
4376 | ret = 0; | 4379 | ret = 0; |
4377 | goto out; | 4380 | goto out; |
4378 | } | 4381 | } |
4382 | test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, | ||
4383 | skb_tailroom(tx_skb), | ||
4384 | PCI_DMA_FROMDEVICE); | ||
4379 | pkt_data = skb_put(tx_skb, pkt_len); | 4385 | pkt_data = skb_put(tx_skb, pkt_len); |
4380 | for (i = 0; i < pkt_len; i++) | 4386 | for (i = 0; i < pkt_len; i++) |
4381 | pkt_data[i] = (u8)(i & 0xff); | 4387 | pkt_data[i] = (u8)(i & 0xff); |
4382 | test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, | ||
4383 | tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE); | ||
4384 | 4388 | ||
4385 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 4389 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
4386 | np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); | 4390 | np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); |
@@ -4437,7 +4441,7 @@ static int nv_loopback_test(struct net_device *dev) | |||
4437 | } | 4441 | } |
4438 | 4442 | ||
4439 | pci_unmap_page(np->pci_dev, test_dma_addr, | 4443 | pci_unmap_page(np->pci_dev, test_dma_addr, |
4440 | tx_skb->end-tx_skb->data, | 4444 | (skb_end_pointer(tx_skb) - tx_skb->data), |
4441 | PCI_DMA_TODEVICE); | 4445 | PCI_DMA_TODEVICE); |
4442 | dev_kfree_skb_any(tx_skb); | 4446 | dev_kfree_skb_any(tx_skb); |
4443 | out: | 4447 | out: |
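Note: the forcedeth mapping changes above depend on a subtle identity — skb_tailroom() is end - tail, which equals the old end - data only while the skb is empty. That is why nv_loopback_test now takes the DMA mapping before skb_put() fills the buffer. Sketch of the invariant (illustrative name):

/* For a freshly allocated skb (before any skb_put()), data == tail, so:
 *   skb_tailroom(skb) == skb_end_pointer(skb) - skb->data
 * After skb_put() the two quantities differ by skb->len. */
static int fresh_buf_len(const struct sk_buff *skb)
{
	return skb_tailroom(skb);
}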
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 4a05c14bf7ec..e2ddd617493a 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -160,7 +160,8 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) | |||
160 | skbn = dev_alloc_skb(pkt_len + 2); | 160 | skbn = dev_alloc_skb(pkt_len + 2); |
161 | if (skbn != NULL) { | 161 | if (skbn != NULL) { |
162 | skb_reserve(skbn, 2); /* align IP header */ | 162 | skb_reserve(skbn, 2); /* align IP header */ |
163 | memcpy(skbn->data, skb->data, pkt_len); | 163 | skb_copy_from_linear_data(skb, |
164 | skbn->data, pkt_len); | ||
164 | /* swap */ | 165 | /* swap */ |
165 | skbt = skb; | 166 | skbt = skb; |
166 | skb = skbn; | 167 | skb = skbn; |
@@ -170,7 +171,6 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) | |||
170 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); | 171 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); |
171 | 172 | ||
172 | if (skbn != NULL) { | 173 | if (skbn != NULL) { |
173 | skb->dev = dev; | ||
174 | skb_put(skb, pkt_len); /* Make room */ | 174 | skb_put(skb, pkt_len); /* Make room */ |
175 | skb->protocol = eth_type_trans(skb, dev); | 175 | skb->protocol = eth_type_trans(skb, dev); |
176 | received++; | 176 | received++; |
@@ -294,7 +294,8 @@ static int fs_enet_rx_non_napi(struct net_device *dev) | |||
294 | skbn = dev_alloc_skb(pkt_len + 2); | 294 | skbn = dev_alloc_skb(pkt_len + 2); |
295 | if (skbn != NULL) { | 295 | if (skbn != NULL) { |
296 | skb_reserve(skbn, 2); /* align IP header */ | 296 | skb_reserve(skbn, 2); /* align IP header */ |
297 | memcpy(skbn->data, skb->data, pkt_len); | 297 | skb_copy_from_linear_data(skb, |
298 | skbn->data, pkt_len); | ||
298 | /* swap */ | 299 | /* swap */ |
299 | skbt = skb; | 300 | skbt = skb; |
300 | skb = skbn; | 301 | skb = skbn; |
@@ -304,7 +305,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev) | |||
304 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); | 305 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); |
305 | 306 | ||
306 | if (skbn != NULL) { | 307 | if (skbn != NULL) { |
307 | skb->dev = dev; | ||
308 | skb_put(skb, pkt_len); /* Make room */ | 308 | skb_put(skb, pkt_len); /* Make room */ |
309 | skb->protocol = eth_type_trans(skb, dev); | 309 | skb->protocol = eth_type_trans(skb, dev); |
310 | received++; | 310 | received++; |
@@ -516,7 +516,6 @@ void fs_init_bds(struct net_device *dev) | |||
516 | break; | 516 | break; |
517 | } | 517 | } |
518 | fep->rx_skbuff[i] = skb; | 518 | fep->rx_skbuff[i] = skb; |
519 | skb->dev = dev; | ||
520 | CBDW_BUFADDR(bdp, | 519 | CBDW_BUFADDR(bdp, |
521 | dma_map_single(fep->dev, skb->data, | 520 | dma_map_single(fep->dev, skb->data, |
522 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | 521 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), |
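
Editor's note: fs_enet's copybreak path shows the new copy helper. skb_copy_from_linear_data(skb, to, len) is exactly memcpy(to, skb->data, len), but keeps linear-buffer access behind one audited interface. A sketch of the pattern in isolation (helper name illustrative):

#include <linux/skbuff.h>

/* Sketch, assuming 2.6.22 helpers: clone a short packet into a
 * tightly sized skb, as rx-copybreak paths do. */
static struct sk_buff *copybreak(struct sk_buff *skb, int pkt_len)
{
        struct sk_buff *copy = dev_alloc_skb(pkt_len + 2);

        if (copy != NULL) {
                skb_reserve(copy, 2);   /* align the IP header */
                /* was: memcpy(copy->data, skb->data, pkt_len) */
                skb_copy_from_linear_data(skb, skb_put(copy, pkt_len),
                                          pkt_len);
        }
        return copy;
}
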
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index d981d4c41dd3..b666a0cc0642 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -942,18 +942,18 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) | |||
942 | 942 | ||
943 | /* Tell the controller what the protocol is */ | 943 | /* Tell the controller what the protocol is */ |
944 | /* And provide the already calculated phcs */ | 944 | /* And provide the already calculated phcs */ |
945 | if (skb->nh.iph->protocol == IPPROTO_UDP) { | 945 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
946 | flags |= TXFCB_UDP; | 946 | flags |= TXFCB_UDP; |
947 | fcb->phcs = skb->h.uh->check; | 947 | fcb->phcs = udp_hdr(skb)->check; |
948 | } else | 948 | } else |
949 | fcb->phcs = skb->h.th->check; | 949 | fcb->phcs = tcp_hdr(skb)->check; |
950 | 950 | ||
951 | /* l3os is the distance between the start of the | 951 | /* l3os is the distance between the start of the |
952 | * frame (skb->data) and the start of the IP hdr. | 952 | * frame (skb->data) and the start of the IP hdr. |
953 | * l4os is the distance between the start of the | 953 | * l4os is the distance between the start of the |
954 | * l3 hdr and the l4 hdr */ | 954 | * l3 hdr and the l4 hdr */ |
955 | fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN); | 955 | fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); |
956 | fcb->l4os = (u16)(skb->h.raw - skb->nh.raw); | 956 | fcb->l4os = skb_network_header_len(skb); |
957 | 957 | ||
958 | fcb->flags = flags; | 958 | fcb->flags = flags; |
959 | } | 959 | } |
@@ -1295,8 +1295,6 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) | |||
1295 | */ | 1295 | */ |
1296 | skb_reserve(skb, alignamount); | 1296 | skb_reserve(skb, alignamount); |
1297 | 1297 | ||
1298 | skb->dev = dev; | ||
1299 | |||
1300 | bdp->bufPtr = dma_map_single(NULL, skb->data, | 1298 | bdp->bufPtr = dma_map_single(NULL, skb->data, |
1301 | priv->rx_buffer_size, DMA_FROM_DEVICE); | 1299 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
1302 | 1300 | ||
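
Editor's note: the gianfar hunk summarizes the header-accessor conversion well: skb->nh.iph becomes ip_hdr(skb), skb->h.uh/skb->h.th become udp_hdr()/tcp_hdr(), and the offset arithmetic becomes skb_network_offset() and skb_network_header_len(). Note the non-UDP branch must read tcp_hdr(skb)->check, not udp_hdr() (corrected above). A sketch under those assumptions:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>

/* Sketch: pick up the L4 checksum the way the FCB setup does. */
static __sum16 l4_check(const struct sk_buff *skb)
{
        if (ip_hdr(skb)->protocol == IPPROTO_UDP)
                return udp_hdr(skb)->check;
        return tcp_hdr(skb)->check;
}

/* Sketch: the two offsets the controller wants, via the helpers.
 * skb_network_offset() is network header minus skb->data;
 * skb_network_header_len() is transport minus network header. */
static void l3_l4_offsets(const struct sk_buff *skb, u16 *l3os, u16 *l4os)
{
        *l3os = skb_network_offset(skb);
        *l4os = skb_network_header_len(skb);
}
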
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index c3c0d67fc383..2521b111b3a5 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c | |||
@@ -1568,7 +1568,6 @@ static int hamachi_rx(struct net_device *dev) | |||
1568 | printk(KERN_ERR "%s: rx_copybreak non-zero " | 1568 | printk(KERN_ERR "%s: rx_copybreak non-zero " |
1569 | "not good with RX_CHECKSUM\n", dev->name); | 1569 | "not good with RX_CHECKSUM\n", dev->name); |
1570 | #endif | 1570 | #endif |
1571 | skb->dev = dev; | ||
1572 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1571 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1573 | pci_dma_sync_single_for_cpu(hmp->pci_dev, | 1572 | pci_dma_sync_single_for_cpu(hmp->pci_dev, |
1574 | hmp->rx_ring[entry].addr, | 1573 | hmp->rx_ring[entry].addr, |
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index d2542697e298..656f2789c9ba 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c | |||
@@ -282,7 +282,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) | |||
282 | } | 282 | } |
283 | 283 | ||
284 | skb->protocol = ax25_type_trans(skb, dev); | 284 | skb->protocol = ax25_type_trans(skb, dev); |
285 | skb->nh.raw = skb->data; | 285 | skb_reset_network_header(skb); |
286 | dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); | 286 | dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); |
287 | bpq->stats.tx_packets++; | 287 | bpq->stats.tx_packets++; |
288 | bpq->stats.tx_bytes+=skb->len; | 288 | bpq->stats.tx_bytes+=skb->len; |
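
Editor's note: bpqether's skb->nh.raw = skb->data becomes skb_reset_network_header(skb). With NET_SKBUFF_DATA_USES_OFFSET the header fields hold offsets rather than pointers, so the reset/set helpers are the only portable spelling. A two-line sketch:

#include <linux/skbuff.h>

/* Sketch: mark both headers as starting at skb->data. */
static void headers_at_data(struct sk_buff *skb)
{
        skb_reset_mac_header(skb);      /* was: skb->mac.raw = skb->data */
        skb_reset_network_header(skb);  /* was: skb->nh.raw  = skb->data */
}
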
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 0fbb414b5a4d..3be8c5047599 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c | |||
@@ -930,7 +930,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
930 | 930 | ||
931 | /* Transfer data to DMA buffer */ | 931 | /* Transfer data to DMA buffer */ |
932 | i = priv->tx_head; | 932 | i = priv->tx_head; |
933 | memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1); | 933 | skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1); |
934 | priv->tx_len[i] = skb->len - 1; | 934 | priv->tx_len[i] = skb->len - 1; |
935 | 935 | ||
936 | /* Clear interrupts while we touch our circular buffers */ | 936 | /* Clear interrupts while we touch our circular buffers */ |
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index f5a17ad9d3d6..b33adc6a340b 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c | |||
@@ -317,7 +317,9 @@ void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s) | |||
317 | dev_kfree_skb_irq(skb); | 317 | dev_kfree_skb_irq(skb); |
318 | break; | 318 | break; |
319 | } | 319 | } |
320 | memcpy(s->hdlctx.buffer, skb->data+1, pkt_len); | 320 | skb_copy_from_linear_data_offset(skb, 1, |
321 | s->hdlctx.buffer, | ||
322 | pkt_len); | ||
321 | dev_kfree_skb_irq(skb); | 323 | dev_kfree_skb_irq(skb); |
322 | s->hdlctx.bp = s->hdlctx.buffer; | 324 | s->hdlctx.bp = s->hdlctx.buffer; |
323 | append_crc_ccitt(s->hdlctx.buffer, pkt_len); | 325 | append_crc_ccitt(s->hdlctx.buffer, pkt_len); |
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index ee3ea4fa729f..467559debfd6 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c | |||
@@ -638,7 +638,9 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp) | |||
638 | dev_kfree_skb_any(skb); | 638 | dev_kfree_skb_any(skb); |
639 | break; | 639 | break; |
640 | } | 640 | } |
641 | memcpy(yp->tx_buf, skb->data + 1, yp->tx_len); | 641 | skb_copy_from_linear_data_offset(skb, 1, |
642 | yp->tx_buf, | ||
643 | yp->tx_len); | ||
642 | dev_kfree_skb_any(skb); | 644 | dev_kfree_skb_any(skb); |
643 | yp->tx_count = 0; | 645 | yp->tx_count = 0; |
644 | yp->tx_crcl = 0x21; | 646 | yp->tx_crcl = 0x21; |
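
Editor's note: dmascc, hdlcdrv and yam all skip the leading KISS command byte, so they take the offset variant: skb_copy_from_linear_data_offset(skb, off, to, len) is memcpy(to, skb->data + off, len). Sketch:

#include <linux/skbuff.h>
#include <linux/types.h>

/* Sketch: copy an AX.25 frame minus its first (control) byte.
 * Caller guarantees buf has room for skb->len - 1 bytes. */
static void copy_tx_payload(const struct sk_buff *skb, u8 *buf)
{
        skb_copy_from_linear_data_offset(skb, 1, buf, skb->len - 1);
}
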
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index 7dc5185aa2c0..8118a6750b61 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c | |||
@@ -1816,7 +1816,6 @@ static void hp100_rx(struct net_device *dev) | |||
1816 | u_char *ptr; | 1816 | u_char *ptr; |
1817 | 1817 | ||
1818 | skb_reserve(skb,2); | 1818 | skb_reserve(skb,2); |
1819 | skb->dev = dev; | ||
1820 | 1819 | ||
1821 | /* ptr to start of the sk_buff data area */ | 1820 | /* ptr to start of the sk_buff data area */ |
1822 | skb_put(skb, pkt_len); | 1821 | skb_put(skb, pkt_len); |
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c index dd8ad8746825..3d82d46f4998 100644 --- a/drivers/net/ibm_emac/ibm_emac_core.c +++ b/drivers/net/ibm_emac/ibm_emac_core.c | |||
@@ -1338,7 +1338,7 @@ static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot) | |||
1338 | dev_kfree_skb(dev->rx_sg_skb); | 1338 | dev_kfree_skb(dev->rx_sg_skb); |
1339 | dev->rx_sg_skb = NULL; | 1339 | dev->rx_sg_skb = NULL; |
1340 | } else { | 1340 | } else { |
1341 | cacheable_memcpy(dev->rx_sg_skb->tail, | 1341 | cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb), |
1342 | dev->rx_skb[slot]->data, len); | 1342 | dev->rx_skb[slot]->data, len); |
1343 | skb_put(dev->rx_sg_skb, len); | 1343 | skb_put(dev->rx_sg_skb, len); |
1344 | emac_recycle_rx_skb(dev, slot, len); | 1344 | emac_recycle_rx_skb(dev, slot, len); |
@@ -1398,7 +1398,6 @@ static int emac_poll_rx(void *param, int budget) | |||
1398 | 1398 | ||
1399 | skb_put(skb, len); | 1399 | skb_put(skb, len); |
1400 | push_packet: | 1400 | push_packet: |
1401 | skb->dev = dev->ndev; | ||
1402 | skb->protocol = eth_type_trans(skb, dev->ndev); | 1401 | skb->protocol = eth_type_trans(skb, dev->ndev); |
1403 | emac_rx_csum(dev, skb, ctrl); | 1402 | emac_rx_csum(dev, skb, ctrl); |
1404 | 1403 | ||
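
Editor's note: ibm_emac's scatter-gather append now reads the tail through skb_tail_pointer() instead of dereferencing skb->tail. The append idiom, sketched with plain memcpy:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Sketch: write at the current tail, then account for the bytes.
 * Caller must guarantee skb_tailroom(skb) >= len. */
static void append_at_tail(struct sk_buff *skb, const void *src, int len)
{
        memcpy(skb_tail_pointer(skb), src, len);
        skb_put(skb, len);
}
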
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c index 3f946c811511..fe85d6fcba33 100644 --- a/drivers/net/ibmlana.c +++ b/drivers/net/ibmlana.c | |||
@@ -601,7 +601,6 @@ static void irqrx_handler(struct net_device *dev) | |||
601 | 601 | ||
602 | /* set up skb fields */ | 602 | /* set up skb fields */ |
603 | 603 | ||
604 | skb->dev = dev; | ||
605 | skb->protocol = eth_type_trans(skb, dev); | 604 | skb->protocol = eth_type_trans(skb, dev); |
606 | skb->ip_summed = CHECKSUM_NONE; | 605 | skb->ip_summed = CHECKSUM_NONE; |
607 | 606 | ||
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 458db0538a9a..0573fcfcb2c4 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -798,7 +798,6 @@ static int ibmveth_poll(struct net_device *netdev, int *budget) | |||
798 | 798 | ||
799 | skb_reserve(skb, offset); | 799 | skb_reserve(skb, offset); |
800 | skb_put(skb, length); | 800 | skb_put(skb, length); |
801 | skb->dev = netdev; | ||
802 | skb->protocol = eth_type_trans(skb, netdev); | 801 | skb->protocol = eth_type_trans(skb, netdev); |
803 | 802 | ||
804 | netif_receive_skb(skb); /* send it up */ | 803 | netif_receive_skb(skb); /* send it up */ |
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index 4ad780719a84..f749e07c6425 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c | |||
@@ -633,8 +633,6 @@ static inline void ioc3_rx(struct ioc3_private *ip) | |||
633 | 633 | ||
634 | ip->rx_skbs[rx_entry] = NULL; /* Poison */ | 634 | ip->rx_skbs[rx_entry] = NULL; /* Poison */ |
635 | 635 | ||
636 | new_skb->dev = priv_netdev(ip); | ||
637 | |||
638 | /* Because we reserve afterwards. */ | 636 | /* Because we reserve afterwards. */ |
639 | skb_put(new_skb, (1664 + RX_OFFSET)); | 637 | skb_put(new_skb, (1664 + RX_OFFSET)); |
640 | rxb = (struct ioc3_erxbuf *) new_skb->data; | 638 | rxb = (struct ioc3_erxbuf *) new_skb->data; |
@@ -940,7 +938,6 @@ static void ioc3_alloc_rings(struct net_device *dev) | |||
940 | } | 938 | } |
941 | 939 | ||
942 | ip->rx_skbs[i] = skb; | 940 | ip->rx_skbs[i] = skb; |
943 | skb->dev = dev; | ||
944 | 941 | ||
945 | /* Because we reserve afterwards. */ | 942 | /* Because we reserve afterwards. */ |
946 | skb_put(skb, (1664 + RX_OFFSET)); | 943 | skb_put(skb, (1664 + RX_OFFSET)); |
@@ -1396,9 +1393,9 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1396 | * manually. | 1393 | * manually. |
1397 | */ | 1394 | */ |
1398 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1395 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1399 | int proto = ntohs(skb->nh.iph->protocol); | 1396 | const struct iphdr *ih = ip_hdr(skb); |
1397 | const int proto = ntohs(ih->protocol); | ||
1400 | unsigned int csoff; | 1398 | unsigned int csoff; |
1401 | struct iphdr *ih = skb->nh.iph; | ||
1402 | uint32_t csum, ehsum; | 1399 | uint32_t csum, ehsum; |
1403 | uint16_t *eh; | 1400 | uint16_t *eh; |
1404 | 1401 | ||
@@ -1425,11 +1422,11 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1425 | csoff = ETH_HLEN + (ih->ihl << 2); | 1422 | csoff = ETH_HLEN + (ih->ihl << 2); |
1426 | if (proto == IPPROTO_UDP) { | 1423 | if (proto == IPPROTO_UDP) { |
1427 | csoff += offsetof(struct udphdr, check); | 1424 | csoff += offsetof(struct udphdr, check); |
1428 | skb->h.uh->check = csum; | 1425 | udp_hdr(skb)->check = csum; |
1429 | } | 1426 | } |
1430 | if (proto == IPPROTO_TCP) { | 1427 | if (proto == IPPROTO_TCP) { |
1431 | csoff += offsetof(struct tcphdr, check); | 1428 | csoff += offsetof(struct tcphdr, check); |
1432 | skb->h.th->check = csum; | 1429 | tcp_hdr(skb)->check = csum; |
1433 | } | 1430 | } |
1434 | 1431 | ||
1435 | w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); | 1432 | w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); |
@@ -1446,7 +1443,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1446 | 1443 | ||
1447 | if (len <= 104) { | 1444 | if (len <= 104) { |
1448 | /* Short packet, let's copy it directly into the ring. */ | 1445 | /* Short packet, let's copy it directly into the ring. */ |
1449 | memcpy(desc->data, skb->data, skb->len); | 1446 | skb_copy_from_linear_data(skb, desc->data, skb->len); |
1450 | if (len < ETH_ZLEN) { | 1447 | if (len < ETH_ZLEN) { |
1451 | /* Very short packet, pad with zeros at the end. */ | 1448 | /* Very short packet, pad with zeros at the end. */ |
1452 | memset(desc->data + len, 0, ETH_ZLEN - len); | 1449 | memset(desc->data + len, 0, ETH_ZLEN - len); |
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index cebf8c374bc5..f9c889c0dd07 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c | |||
@@ -1472,9 +1472,8 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1472 | 1472 | ||
1473 | self->stats.tx_bytes += skb->len; | 1473 | self->stats.tx_bytes += skb->len; |
1474 | 1474 | ||
1475 | memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, | 1475 | skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, |
1476 | skb->len); | 1476 | skb->len); |
1477 | |||
1478 | self->tx_fifo.len++; | 1477 | self->tx_fifo.len++; |
1479 | self->tx_fifo.free++; | 1478 | self->tx_fifo.free++; |
1480 | 1479 | ||
@@ -1924,7 +1923,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) | |||
1924 | 1923 | ||
1925 | /* Copy frame without CRC, CRC is removed by hardware*/ | 1924 | /* Copy frame without CRC, CRC is removed by hardware*/ |
1926 | skb_put(skb, len); | 1925 | skb_put(skb, len); |
1927 | memcpy(skb->data, self->rx_buff.data, len); | 1926 | skb_copy_to_linear_data(skb, self->rx_buff.data, len); |
1928 | 1927 | ||
1929 | /* Move to next frame */ | 1928 | /* Move to next frame */ |
1930 | self->rx_buff.data += len; | 1929 | self->rx_buff.data += len; |
@@ -1932,7 +1931,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) | |||
1932 | self->stats.rx_packets++; | 1931 | self->stats.rx_packets++; |
1933 | 1932 | ||
1934 | skb->dev = self->netdev; | 1933 | skb->dev = self->netdev; |
1935 | skb->mac.raw = skb->data; | 1934 | skb_reset_mac_header(skb); |
1936 | skb->protocol = htons(ETH_P_IRDA); | 1935 | skb->protocol = htons(ETH_P_IRDA); |
1937 | netif_rx(skb); | 1936 | netif_rx(skb); |
1938 | self->netdev->last_rx = jiffies; | 1937 | self->netdev->last_rx = jiffies; |
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index 37914dc5b90e..4dbdfaaf37bf 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c | |||
@@ -526,7 +526,7 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) | |||
526 | 526 | ||
527 | if (aup->speed == 4000000) { | 527 | if (aup->speed == 4000000) { |
528 | /* FIR */ | 528 | /* FIR */ |
529 | memcpy((void *)pDB->vaddr, skb->data, skb->len); | 529 | skb_copy_from_linear_data(skb, pDB->vaddr, skb->len); |
530 | ptxd->count_0 = skb->len & 0xff; | 530 | ptxd->count_0 = skb->len & 0xff; |
531 | ptxd->count_1 = (skb->len >> 8) & 0xff; | 531 | ptxd->count_1 = (skb->len >> 8) & 0xff; |
532 | 532 | ||
@@ -604,9 +604,9 @@ static int au1k_irda_rx(struct net_device *dev) | |||
604 | skb_put(skb, count); | 604 | skb_put(skb, count); |
605 | else | 605 | else |
606 | skb_put(skb, count-2); | 606 | skb_put(skb, count-2); |
607 | memcpy(skb->data, (void *)pDB->vaddr, count-2); | 607 | skb_copy_to_linear_data(skb, pDB->vaddr, count - 2); |
608 | skb->dev = dev; | 608 | skb->dev = dev; |
609 | skb->mac.raw = skb->data; | 609 | skb_reset_mac_header(skb); |
610 | skb->protocol = htons(ETH_P_IRDA); | 610 | skb->protocol = htons(ETH_P_IRDA); |
611 | netif_rx(skb); | 611 | netif_rx(skb); |
612 | prxd->count_0 = 0; | 612 | prxd->count_0 = 0; |
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 11af0ae7510e..3ca47bf6dfec 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c | |||
@@ -1119,7 +1119,7 @@ dumpbufs(skb->data,skb->len,'>'); | |||
1119 | else | 1119 | else |
1120 | { | 1120 | { |
1121 | len = skb->len; | 1121 | len = skb->len; |
1122 | memcpy (self->tx_bufs[self->txs], skb->data, len); | 1122 | skb_copy_from_linear_data(skb, self->tx_bufs[self->txs], len); |
1123 | } | 1123 | } |
1124 | self->ring->tx[self->txs].len = len & 0x0fff; | 1124 | self->ring->tx[self->txs].len = len & 0x0fff; |
1125 | 1125 | ||
@@ -1282,11 +1282,11 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); | |||
1282 | skb_reserve (skb, 1); | 1282 | skb_reserve (skb, 1); |
1283 | 1283 | ||
1284 | skb_put (skb, len); | 1284 | skb_put (skb, len); |
1285 | memcpy (skb->data, self->rx_bufs[self->rxs], len); | 1285 | skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs], |
1286 | 1286 | len); | |
1287 | self->stats.rx_packets++; | 1287 | self->stats.rx_packets++; |
1288 | skb->dev = self->netdev; | 1288 | skb->dev = self->netdev; |
1289 | skb->mac.raw = skb->data; | 1289 | skb_reset_mac_header(skb); |
1290 | skb->protocol = htons (ETH_P_IRDA); | 1290 | skb->protocol = htons (ETH_P_IRDA); |
1291 | } | 1291 | } |
1292 | else | 1292 | else |
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 1d510bdc9b84..0ac240ca905b 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c | |||
@@ -441,7 +441,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
441 | goto drop; | 441 | goto drop; |
442 | } | 442 | } |
443 | 443 | ||
444 | memcpy(self->tx_buff + self->header_length, skb->data, skb->len); | 444 | skb_copy_from_linear_data(skb, self->tx_buff + self->header_length, skb->len); |
445 | 445 | ||
446 | /* Change setting for next frame */ | 446 | /* Change setting for next frame */ |
447 | if (self->capability & IUC_STIR421X) { | 447 | if (self->capability & IUC_STIR421X) { |
@@ -902,7 +902,7 @@ static void irda_usb_receive(struct urb *urb) | |||
902 | 902 | ||
903 | if(docopy) { | 903 | if(docopy) { |
904 | /* Copy packet, so we can recycle the original */ | 904 | /* Copy packet, so we can recycle the original */ |
905 | memcpy(newskb->data, skb->data, urb->actual_length); | 905 | skb_copy_from_linear_data(skb, newskb->data, urb->actual_length); |
906 | /* Deliver this new skb */ | 906 | /* Deliver this new skb */ |
907 | dataskb = newskb; | 907 | dataskb = newskb; |
908 | /* And hook the old skb to the URB | 908 | /* And hook the old skb to the URB |
@@ -921,7 +921,7 @@ static void irda_usb_receive(struct urb *urb) | |||
921 | 921 | ||
922 | /* Ask the networking layer to queue the packet for the IrDA stack */ | 922 | /* Ask the networking layer to queue the packet for the IrDA stack */ |
923 | dataskb->dev = self->netdev; | 923 | dataskb->dev = self->netdev; |
924 | dataskb->mac.raw = dataskb->data; | 924 | skb_reset_mac_header(dataskb); |
925 | dataskb->protocol = htons(ETH_P_IRDA); | 925 | dataskb->protocol = htons(ETH_P_IRDA); |
926 | len = dataskb->len; | 926 | len = dataskb->len; |
927 | netif_rx(dataskb); | 927 | netif_rx(dataskb); |
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index f0c61f3b2a82..0de867288a47 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c | |||
@@ -200,14 +200,14 @@ static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs) | |||
200 | /* Setup a communication between mcs7780 and agilent chip. */ | 200 | /* Setup a communication between mcs7780 and agilent chip. */ |
201 | static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs) | 201 | static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs) |
202 | { | 202 | { |
203 | IRDA_WARNING("This transceiver type is not supported yet."); | 203 | IRDA_WARNING("This transceiver type is not supported yet.\n"); |
204 | return 1; | 204 | return 1; |
205 | } | 205 | } |
206 | 206 | ||
207 | /* Setup a communication between mcs7780 and sharp chip. */ | 207 | /* Setup a communication between mcs7780 and sharp chip. */ |
208 | static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs) | 208 | static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs) |
209 | { | 209 | { |
210 | IRDA_WARNING("This transceiver type is not supported yet."); | 210 | IRDA_WARNING("This transceiver type is not supported yet.\n"); |
211 | return 1; | 211 | return 1; |
212 | } | 212 | } |
213 | 213 | ||
@@ -279,7 +279,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) | |||
279 | break; | 279 | break; |
280 | 280 | ||
281 | default: | 281 | default: |
282 | IRDA_WARNING("Unknown transceiver type: %d", | 282 | IRDA_WARNING("Unknown transceiver type: %d\n", |
283 | mcs->transceiver_type); | 283 | mcs->transceiver_type); |
284 | ret = 1; | 284 | ret = 1; |
285 | } | 285 | } |
@@ -318,7 +318,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs) | |||
318 | return ret; | 318 | return ret; |
319 | 319 | ||
320 | error: | 320 | error: |
321 | IRDA_ERROR("%s", msg); | 321 | IRDA_ERROR("%s\n", msg); |
322 | return ret; | 322 | return ret; |
323 | } | 323 | } |
324 | 324 | ||
@@ -353,7 +353,7 @@ static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf) | |||
353 | buf[0] = len & 0xff; | 353 | buf[0] = len & 0xff; |
354 | buf[1] = (len >> 8) & 0xff; | 354 | buf[1] = (len >> 8) & 0xff; |
355 | /* copy the data into the tx buffer. */ | 355 | /* copy the data into the tx buffer. */ |
356 | memcpy(buf+2, skb->data, skb->len); | 356 | skb_copy_from_linear_data(skb, buf + 2, skb->len); |
357 | /* put the fcs in the last four bytes in little endian order. */ | 357 | /* put the fcs in the last four bytes in little endian order. */ |
358 | buf[len - 4] = fcs & 0xff; | 358 | buf[len - 4] = fcs & 0xff; |
359 | buf[len - 3] = (fcs >> 8) & 0xff; | 359 | buf[len - 3] = (fcs >> 8) & 0xff; |
@@ -377,7 +377,7 @@ static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf) | |||
377 | buf[0] = len & 0xff; | 377 | buf[0] = len & 0xff; |
378 | buf[1] = (len >> 8) & 0xff; | 378 | buf[1] = (len >> 8) & 0xff; |
379 | /* copy the data */ | 379 | /* copy the data */ |
380 | memcpy(buf+2, skb->data, skb->len); | 380 | skb_copy_from_linear_data(skb, buf + 2, skb->len); |
381 | /* put the fcs in last two bytes in little endian order. */ | 381 | /* put the fcs in last two bytes in little endian order. */ |
382 | buf[len - 2] = fcs & 0xff; | 382 | buf[len - 2] = fcs & 0xff; |
383 | buf[len - 1] = (fcs >> 8) & 0xff; | 383 | buf[len - 1] = (fcs >> 8) & 0xff; |
@@ -426,9 +426,9 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len) | |||
426 | } | 426 | } |
427 | 427 | ||
428 | skb_reserve(skb, 1); | 428 | skb_reserve(skb, 1); |
429 | memcpy(skb->data, buf, new_len); | 429 | skb_copy_to_linear_data(skb, buf, new_len); |
430 | skb_put(skb, new_len); | 430 | skb_put(skb, new_len); |
431 | skb->mac.raw = skb->data; | 431 | skb_reset_mac_header(skb); |
432 | skb->protocol = htons(ETH_P_IRDA); | 432 | skb->protocol = htons(ETH_P_IRDA); |
433 | skb->dev = mcs->netdev; | 433 | skb->dev = mcs->netdev; |
434 | 434 | ||
@@ -479,9 +479,9 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) | |||
479 | } | 479 | } |
480 | 480 | ||
481 | skb_reserve(skb, 1); | 481 | skb_reserve(skb, 1); |
482 | memcpy(skb->data, buf, new_len); | 482 | skb_copy_to_linear_data(skb, buf, new_len); |
483 | skb_put(skb, new_len); | 483 | skb_put(skb, new_len); |
484 | skb->mac.raw = skb->data; | 484 | skb_reset_mac_header(skb); |
485 | skb->protocol = htons(ETH_P_IRDA); | 485 | skb->protocol = htons(ETH_P_IRDA); |
486 | skb->dev = mcs->netdev; | 486 | skb->dev = mcs->netdev; |
487 | 487 | ||
@@ -587,7 +587,7 @@ static int mcs_speed_change(struct mcs_cb *mcs) | |||
587 | } while(cnt++ < 100 && (rval & MCS_IRINTX)); | 587 | } while(cnt++ < 100 && (rval & MCS_IRINTX)); |
588 | 588 | ||
589 | if(cnt >= 100) { | 589 | if(cnt >= 100) { |
590 | IRDA_ERROR("unable to change speed"); | 590 | IRDA_ERROR("unable to change speed\n"); |
591 | ret = -EIO; | 591 | ret = -EIO; |
592 | goto error; | 592 | goto error; |
593 | } | 593 | } |
@@ -638,7 +638,7 @@ static int mcs_speed_change(struct mcs_cb *mcs) | |||
638 | 638 | ||
639 | default: | 639 | default: |
640 | ret = 1; | 640 | ret = 1; |
641 | IRDA_WARNING("Unknown transceiver type: %d", | 641 | IRDA_WARNING("Unknown transceiver type: %d\n", |
642 | mcs->transceiver_type); | 642 | mcs->transceiver_type); |
643 | } | 643 | } |
644 | if (unlikely(ret)) | 644 | if (unlikely(ret)) |
@@ -733,7 +733,7 @@ static int mcs_net_open(struct net_device *netdev) | |||
733 | sprintf(hwname, "usb#%d", mcs->usbdev->devnum); | 733 | sprintf(hwname, "usb#%d", mcs->usbdev->devnum); |
734 | mcs->irlap = irlap_open(netdev, &mcs->qos, hwname); | 734 | mcs->irlap = irlap_open(netdev, &mcs->qos, hwname); |
735 | if (!mcs->irlap) { | 735 | if (!mcs->irlap) { |
736 | IRDA_ERROR("mcs7780: irlap_open failed"); | 736 | IRDA_ERROR("mcs7780: irlap_open failed\n"); |
737 | goto error2; | 737 | goto error2; |
738 | } | 738 | } |
739 | 739 | ||
@@ -862,7 +862,7 @@ static int mcs_hard_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
862 | mcs->out_buf, wraplen, mcs_send_irq, mcs); | 862 | mcs->out_buf, wraplen, mcs_send_irq, mcs); |
863 | 863 | ||
864 | if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) { | 864 | if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) { |
865 | IRDA_ERROR("failed tx_urb: %d", ret); | 865 | IRDA_ERROR("failed tx_urb: %d\n", ret); |
866 | switch (ret) { | 866 | switch (ret) { |
867 | case -ENODEV: | 867 | case -ENODEV: |
868 | case -EPIPE: | 868 | case -EPIPE: |
@@ -897,7 +897,7 @@ static int mcs_probe(struct usb_interface *intf, | |||
897 | if (!ndev) | 897 | if (!ndev) |
898 | goto error1; | 898 | goto error1; |
899 | 899 | ||
900 | IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.", udev->devnum); | 900 | IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum); |
901 | 901 | ||
902 | /* what is it realy for? */ | 902 | /* what is it realy for? */ |
903 | SET_MODULE_OWNER(ndev); | 903 | SET_MODULE_OWNER(ndev); |
@@ -905,7 +905,7 @@ static int mcs_probe(struct usb_interface *intf, | |||
905 | 905 | ||
906 | ret = usb_reset_configuration(udev); | 906 | ret = usb_reset_configuration(udev); |
907 | if (ret != 0) { | 907 | if (ret != 0) { |
908 | IRDA_ERROR("mcs7780: usb reset configuration failed"); | 908 | IRDA_ERROR("mcs7780: usb reset configuration failed\n"); |
909 | goto error2; | 909 | goto error2; |
910 | } | 910 | } |
911 | 911 | ||
@@ -950,7 +950,7 @@ static int mcs_probe(struct usb_interface *intf, | |||
950 | if (ret != 0) | 950 | if (ret != 0) |
951 | goto error2; | 951 | goto error2; |
952 | 952 | ||
953 | IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s", | 953 | IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s\n", |
954 | ndev->name); | 954 | ndev->name); |
955 | 955 | ||
956 | mcs->transceiver_type = transceiver_type; | 956 | mcs->transceiver_type = transceiver_type; |
@@ -981,7 +981,7 @@ static void mcs_disconnect(struct usb_interface *intf) | |||
981 | free_netdev(mcs->netdev); | 981 | free_netdev(mcs->netdev); |
982 | 982 | ||
983 | usb_set_intfdata(intf, NULL); | 983 | usb_set_intfdata(intf, NULL); |
984 | IRDA_DEBUG(0, "MCS7780 now disconnected."); | 984 | IRDA_DEBUG(0, "MCS7780 now disconnected.\n"); |
985 | } | 985 | } |
986 | 986 | ||
987 | /* Module insertion */ | 987 | /* Module insertion */ |
@@ -992,7 +992,7 @@ static int __init mcs_init(void) | |||
992 | /* register this driver with the USB subsystem */ | 992 | /* register this driver with the USB subsystem */ |
993 | result = usb_register(&mcs_driver); | 993 | result = usb_register(&mcs_driver); |
994 | if (result) | 994 | if (result) |
995 | IRDA_ERROR("usb_register failed. Error number %d", result); | 995 | IRDA_ERROR("usb_register failed. Error number %d\n", result); |
996 | 996 | ||
997 | return result; | 997 | return result; |
998 | } | 998 | } |
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index 29b5ccd29d0b..d96c89751a71 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c | |||
@@ -1466,9 +1466,8 @@ static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev) | |||
1466 | 1466 | ||
1467 | self->stats.tx_bytes += skb->len; | 1467 | self->stats.tx_bytes += skb->len; |
1468 | 1468 | ||
1469 | memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, | 1469 | skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, |
1470 | skb->len); | 1470 | skb->len); |
1471 | |||
1472 | self->tx_fifo.len++; | 1471 | self->tx_fifo.len++; |
1473 | self->tx_fifo.free++; | 1472 | self->tx_fifo.free++; |
1474 | 1473 | ||
@@ -1869,10 +1868,14 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) | |||
1869 | /* Copy frame without CRC */ | 1868 | /* Copy frame without CRC */ |
1870 | if (self->io.speed < 4000000) { | 1869 | if (self->io.speed < 4000000) { |
1871 | skb_put(skb, len-2); | 1870 | skb_put(skb, len-2); |
1872 | memcpy(skb->data, self->rx_buff.data, len-2); | 1871 | skb_copy_to_linear_data(skb, |
1872 | self->rx_buff.data, | ||
1873 | len - 2); | ||
1873 | } else { | 1874 | } else { |
1874 | skb_put(skb, len-4); | 1875 | skb_put(skb, len-4); |
1875 | memcpy(skb->data, self->rx_buff.data, len-4); | 1876 | skb_copy_to_linear_data(skb, |
1877 | self->rx_buff.data, | ||
1878 | len - 4); | ||
1876 | } | 1879 | } |
1877 | 1880 | ||
1878 | /* Move to next frame */ | 1881 | /* Move to next frame */ |
@@ -1881,7 +1884,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) | |||
1881 | self->stats.rx_packets++; | 1884 | self->stats.rx_packets++; |
1882 | 1885 | ||
1883 | skb->dev = self->netdev; | 1886 | skb->dev = self->netdev; |
1884 | skb->mac.raw = skb->data; | 1887 | skb_reset_mac_header(skb); |
1885 | skb->protocol = htons(ETH_P_IRDA); | 1888 | skb->protocol = htons(ETH_P_IRDA); |
1886 | netif_rx(skb); | 1889 | netif_rx(skb); |
1887 | self->netdev->last_rx = jiffies; | 1890 | self->netdev->last_rx = jiffies; |
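
Editor's note: the IrDA drivers repeat one receive-complete shape: copy out of the DMA buffer, reset the mac header, stamp ETH_P_IRDA, push through netif_rx(). Collected into one sketch (names illustrative, not from the patch):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/types.h>

/* Sketch of the shared IrDA RX-complete pattern. */
static void irda_rx_deliver(struct net_device *ndev, const u8 *buf, int len)
{
        struct sk_buff *skb = dev_alloc_skb(len + 1);

        if (skb == NULL)
                return;
        skb_reserve(skb, 1);                    /* IrDA alignment byte */
        skb_put(skb, len);
        /* was: memcpy(skb->data, buf, len) */
        skb_copy_to_linear_data(skb, buf, len);
        skb->dev = ndev;
        skb_reset_mac_header(skb);              /* was: skb->mac.raw = skb->data */
        skb->protocol = htons(ETH_P_IRDA);
        netif_rx(skb);
}
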
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 2272156af31e..fb196fd91855 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c | |||
@@ -386,12 +386,12 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in | |||
386 | 386 | ||
387 | /* Align IP header to 20 bytes */ | 387 | /* Align IP header to 20 bytes */ |
388 | skb_reserve(skb, 1); | 388 | skb_reserve(skb, 1); |
389 | memcpy(skb->data, si->dma_rx_buff, len); | 389 | skb_copy_to_linear_data(skb, si->dma_rx_buff, len); |
390 | skb_put(skb, len); | 390 | skb_put(skb, len); |
391 | 391 | ||
392 | /* Feed it to IrLAP */ | 392 | /* Feed it to IrLAP */ |
393 | skb->dev = dev; | 393 | skb->dev = dev; |
394 | skb->mac.raw = skb->data; | 394 | skb_reset_mac_header(skb); |
395 | skb->protocol = htons(ETH_P_IRDA); | 395 | skb->protocol = htons(ETH_P_IRDA); |
396 | netif_rx(skb); | 396 | netif_rx(skb); |
397 | 397 | ||
@@ -484,7 +484,7 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) | |||
484 | unsigned long mtt = irda_get_mtt(skb); | 484 | unsigned long mtt = irda_get_mtt(skb); |
485 | 485 | ||
486 | si->dma_tx_buff_len = skb->len; | 486 | si->dma_tx_buff_len = skb->len; |
487 | memcpy(si->dma_tx_buff, skb->data, skb->len); | 487 | skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len); |
488 | 488 | ||
489 | if (mtt) | 489 | if (mtt) |
490 | while ((unsigned)(OSCR - si->last_oscr)/4 < mtt) | 490 | while ((unsigned)(OSCR - si->last_oscr)/4 < mtt) |
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 937372d00398..056639f72bec 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c | |||
@@ -504,7 +504,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev | |||
504 | 504 | ||
505 | skb_put(skb, len); | 505 | skb_put(skb, len); |
506 | skb->dev = dev; | 506 | skb->dev = dev; |
507 | skb->mac.raw = skb->data; | 507 | skb_reset_mac_header(skb); |
508 | skb->protocol = htons(ETH_P_IRDA); | 508 | skb->protocol = htons(ETH_P_IRDA); |
509 | si->stats.rx_packets++; | 509 | si->stats.rx_packets++; |
510 | si->stats.rx_bytes += len; | 510 | si->stats.rx_bytes += len; |
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 31c623381ea8..198bf3bfa70f 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c | |||
@@ -315,6 +315,7 @@ static struct smsc_chip __initdata lpc_chips_flat[] = | |||
315 | { | 315 | { |
316 | /* Base address 0x2E or 0x4E */ | 316 | /* Base address 0x2E or 0x4E */ |
317 | { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 }, | 317 | { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 }, |
318 | { "47N227", KEY55_1|FIR|SERx4, 0x7a, 0x00 }, | ||
318 | { "47N267", KEY55_1|FIR|SERx4, 0x5e, 0x00 }, | 319 | { "47N267", KEY55_1|FIR|SERx4, 0x5e, 0x00 }, |
319 | { NULL } | 320 | { NULL } |
320 | }; | 321 | }; |
@@ -1161,7 +1162,7 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev) | |||
1161 | self->new_speed = speed; | 1162 | self->new_speed = speed; |
1162 | } | 1163 | } |
1163 | 1164 | ||
1164 | memcpy(self->tx_buff.head, skb->data, skb->len); | 1165 | skb_copy_from_linear_data(skb, self->tx_buff.head, skb->len); |
1165 | 1166 | ||
1166 | self->tx_buff.len = skb->len; | 1167 | self->tx_buff.len = skb->len; |
1167 | self->tx_buff.data = self->tx_buff.head; | 1168 | self->tx_buff.data = self->tx_buff.head; |
@@ -1412,7 +1413,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self) | |||
1412 | self->stats.rx_bytes += len; | 1413 | self->stats.rx_bytes += len; |
1413 | 1414 | ||
1414 | skb->dev = self->netdev; | 1415 | skb->dev = self->netdev; |
1415 | skb->mac.raw = skb->data; | 1416 | skb_reset_mac_header(skb); |
1416 | skb->protocol = htons(ETH_P_IRDA); | 1417 | skb->protocol = htons(ETH_P_IRDA); |
1417 | netif_rx(skb); | 1418 | netif_rx(skb); |
1418 | } | 1419 | } |
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 20d306fea4cb..755aa444a4dd 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c | |||
@@ -52,7 +52,6 @@ | |||
52 | #include <linux/kthread.h> | 52 | #include <linux/kthread.h> |
53 | #include <linux/freezer.h> | 53 | #include <linux/freezer.h> |
54 | #include <net/irda/irda.h> | 54 | #include <net/irda/irda.h> |
55 | #include <net/irda/irlap.h> | ||
56 | #include <net/irda/irda_device.h> | 55 | #include <net/irda/irda_device.h> |
57 | #include <net/irda/wrapper.h> | 56 | #include <net/irda/wrapper.h> |
58 | #include <net/irda/crc.h> | 57 | #include <net/irda/crc.h> |
@@ -349,7 +348,7 @@ static void fir_eof(struct stir_cb *stir) | |||
349 | } | 348 | } |
350 | skb_reserve(nskb, 1); | 349 | skb_reserve(nskb, 1); |
351 | skb = nskb; | 350 | skb = nskb; |
352 | memcpy(nskb->data, rx_buff->data, len); | 351 | skb_copy_to_linear_data(nskb, rx_buff->data, len); |
353 | } else { | 352 | } else { |
354 | nskb = dev_alloc_skb(rx_buff->truesize); | 353 | nskb = dev_alloc_skb(rx_buff->truesize); |
355 | if (unlikely(!nskb)) { | 354 | if (unlikely(!nskb)) { |
@@ -364,7 +363,7 @@ static void fir_eof(struct stir_cb *stir) | |||
364 | 363 | ||
365 | skb_put(skb, len); | 364 | skb_put(skb, len); |
366 | 365 | ||
367 | skb->mac.raw = skb->data; | 366 | skb_reset_mac_header(skb); |
368 | skb->protocol = htons(ETH_P_IRDA); | 367 | skb->protocol = htons(ETH_P_IRDA); |
369 | skb->dev = stir->netdev; | 368 | skb->dev = stir->netdev; |
370 | 369 | ||
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index c3ed9b3067e5..ff5358574d0a 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c | |||
@@ -925,8 +925,8 @@ static int via_ircc_hard_xmit_fir(struct sk_buff *skb, | |||
925 | 925 | ||
926 | self->tx_fifo.tail += skb->len; | 926 | self->tx_fifo.tail += skb->len; |
927 | self->stats.tx_bytes += skb->len; | 927 | self->stats.tx_bytes += skb->len; |
928 | memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, | 928 | skb_copy_from_linear_data(skb, |
929 | skb->len); | 929 | self->tx_fifo.queue[self->tx_fifo.free].start, skb->len); |
930 | self->tx_fifo.len++; | 930 | self->tx_fifo.len++; |
931 | self->tx_fifo.free++; | 931 | self->tx_fifo.free++; |
932 | //F01 if (self->tx_fifo.len == 1) { | 932 | //F01 if (self->tx_fifo.len == 1) { |
@@ -1125,7 +1125,7 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self, | |||
1125 | self->stats.rx_bytes += len; | 1125 | self->stats.rx_bytes += len; |
1126 | self->stats.rx_packets++; | 1126 | self->stats.rx_packets++; |
1127 | skb->dev = self->netdev; | 1127 | skb->dev = self->netdev; |
1128 | skb->mac.raw = skb->data; | 1128 | skb_reset_mac_header(skb); |
1129 | skb->protocol = htons(ETH_P_IRDA); | 1129 | skb->protocol = htons(ETH_P_IRDA); |
1130 | netif_rx(skb); | 1130 | netif_rx(skb); |
1131 | return TRUE; | 1131 | return TRUE; |
@@ -1189,7 +1189,7 @@ F01_E */ | |||
1189 | skb_reserve(skb, 1); | 1189 | skb_reserve(skb, 1); |
1190 | skb_put(skb, len - 4); | 1190 | skb_put(skb, len - 4); |
1191 | 1191 | ||
1192 | memcpy(skb->data, self->rx_buff.data, len - 4); | 1192 | skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); |
1193 | IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__, | 1193 | IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__, |
1194 | len - 4, self->rx_buff.data); | 1194 | len - 4, self->rx_buff.data); |
1195 | 1195 | ||
@@ -1198,7 +1198,7 @@ F01_E */ | |||
1198 | self->stats.rx_bytes += len; | 1198 | self->stats.rx_bytes += len; |
1199 | self->stats.rx_packets++; | 1199 | self->stats.rx_packets++; |
1200 | skb->dev = self->netdev; | 1200 | skb->dev = self->netdev; |
1201 | skb->mac.raw = skb->data; | 1201 | skb_reset_mac_header(skb); |
1202 | skb->protocol = htons(ETH_P_IRDA); | 1202 | skb->protocol = htons(ETH_P_IRDA); |
1203 | netif_rx(skb); | 1203 | netif_rx(skb); |
1204 | 1204 | ||
@@ -1234,7 +1234,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase) | |||
1234 | } | 1234 | } |
1235 | skb_reserve(skb, 1); | 1235 | skb_reserve(skb, 1); |
1236 | skb_put(skb, len - 4 + 1); | 1236 | skb_put(skb, len - 4 + 1); |
1237 | memcpy(skb->data, self->rx_buff.data, len - 4 + 1); | 1237 | skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1); |
1238 | st_fifo->tail++; | 1238 | st_fifo->tail++; |
1239 | st_fifo->len++; | 1239 | st_fifo->len++; |
1240 | if (st_fifo->tail > MAX_RX_WINDOW) | 1240 | if (st_fifo->tail > MAX_RX_WINDOW) |
@@ -1244,7 +1244,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase) | |||
1244 | self->stats.rx_bytes += len; | 1244 | self->stats.rx_bytes += len; |
1245 | self->stats.rx_packets++; | 1245 | self->stats.rx_packets++; |
1246 | skb->dev = self->netdev; | 1246 | skb->dev = self->netdev; |
1247 | skb->mac.raw = skb->data; | 1247 | skb_reset_mac_header(skb); |
1248 | skb->protocol = htons(ETH_P_IRDA); | 1248 | skb->protocol = htons(ETH_P_IRDA); |
1249 | netif_rx(skb); | 1249 | netif_rx(skb); |
1250 | if (st_fifo->len < (MAX_RX_WINDOW + 2)) { | 1250 | if (st_fifo->len < (MAX_RX_WINDOW + 2)) { |
@@ -1303,7 +1303,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase) | |||
1303 | } | 1303 | } |
1304 | skb_reserve(skb, 1); | 1304 | skb_reserve(skb, 1); |
1305 | skb_put(skb, len - 4); | 1305 | skb_put(skb, len - 4); |
1306 | memcpy(skb->data, self->rx_buff.data, len - 4); | 1306 | skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); |
1307 | 1307 | ||
1308 | IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__, | 1308 | IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__, |
1309 | len - 4, st_fifo->head); | 1309 | len - 4, st_fifo->head); |
@@ -1313,7 +1313,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase) | |||
1313 | self->stats.rx_bytes += len; | 1313 | self->stats.rx_bytes += len; |
1314 | self->stats.rx_packets++; | 1314 | self->stats.rx_packets++; |
1315 | skb->dev = self->netdev; | 1315 | skb->dev = self->netdev; |
1316 | skb->mac.raw = skb->data; | 1316 | skb_reset_mac_header(skb); |
1317 | skb->protocol = htons(ETH_P_IRDA); | 1317 | skb->protocol = htons(ETH_P_IRDA); |
1318 | netif_rx(skb); | 1318 | netif_rx(skb); |
1319 | } //while | 1319 | } //while |
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index 3457e9d8b667..c4be973867a6 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c | |||
@@ -595,7 +595,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd) | |||
595 | rd->skb = NULL; | 595 | rd->skb = NULL; |
596 | skb->dev = ndev; | 596 | skb->dev = ndev; |
597 | memcpy(skb_put(skb,len), rd->buf, len); | 597 | memcpy(skb_put(skb,len), rd->buf, len); |
598 | skb->mac.raw = skb->data; | 598 | skb_reset_mac_header(skb); |
599 | if (in_interrupt()) | 599 | if (in_interrupt()) |
600 | netif_rx(skb); | 600 | netif_rx(skb); |
601 | else | 601 | else |
@@ -993,7 +993,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
993 | goto drop; | 993 | goto drop; |
994 | } | 994 | } |
995 | else | 995 | else |
996 | memcpy(rd->buf, skb->data, len); | 996 | skb_copy_from_linear_data(skb, rd->buf, len); |
997 | } | 997 | } |
998 | 998 | ||
999 | rd->skb = skb; /* remember skb for tx-complete stats */ | 999 | rd->skb = skb; /* remember skb for tx-complete stats */ |
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index 4212657fa4f9..5182e800cc18 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c | |||
@@ -529,7 +529,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev) | |||
529 | /* Decide if we should use PIO or DMA transfer */ | 529 | /* Decide if we should use PIO or DMA transfer */ |
530 | if (self->io.speed > PIO_MAX_SPEED) { | 530 | if (self->io.speed > PIO_MAX_SPEED) { |
531 | self->tx_buff.data = self->tx_buff.head; | 531 | self->tx_buff.data = self->tx_buff.head; |
532 | memcpy(self->tx_buff.data, skb->data, skb->len); | 532 | skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len); |
533 | self->tx_buff.len = skb->len; | 533 | self->tx_buff.len = skb->len; |
534 | 534 | ||
535 | mtt = irda_get_mtt(skb); | 535 | mtt = irda_get_mtt(skb); |
@@ -908,10 +908,14 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self) | |||
908 | /* Copy frame without CRC */ | 908 | /* Copy frame without CRC */ |
909 | if (self->io.speed < 4000000) { | 909 | if (self->io.speed < 4000000) { |
910 | skb_put(skb, len-2); | 910 | skb_put(skb, len-2); |
911 | memcpy(skb->data, self->rx_buff.data, len-2); | 911 | skb_copy_to_linear_data(skb, |
912 | self->rx_buff.data, | ||
913 | len - 2); | ||
912 | } else { | 914 | } else { |
913 | skb_put(skb, len-4); | 915 | skb_put(skb, len-4); |
914 | memcpy(skb->data, self->rx_buff.data, len-4); | 916 | skb_copy_to_linear_data(skb, |
917 | self->rx_buff.data, | ||
918 | len - 4); | ||
915 | } | 919 | } |
916 | 920 | ||
917 | /* Move to next frame */ | 921 | /* Move to next frame */ |
@@ -919,7 +923,7 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self) | |||
919 | self->stats.rx_packets++; | 923 | self->stats.rx_packets++; |
920 | 924 | ||
921 | skb->dev = self->netdev; | 925 | skb->dev = self->netdev; |
922 | skb->mac.raw = skb->data; | 926 | skb_reset_mac_header(skb); |
923 | skb->protocol = htons(ETH_P_IRDA); | 927 | skb->protocol = htons(ETH_P_IRDA); |
924 | netif_rx(skb); | 928 | netif_rx(skb); |
925 | self->netdev->last_rx = jiffies; | 929 | self->netdev->last_rx = jiffies; |
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index 0e9ba3c3faf7..347d50cd77d4 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c | |||
@@ -1540,7 +1540,6 @@ static void veth_receive(struct veth_lpar_connection *cnx, | |||
1540 | } | 1540 | } |
1541 | 1541 | ||
1542 | skb_put(skb, length); | 1542 | skb_put(skb, length); |
1543 | skb->dev = dev; | ||
1544 | skb->protocol = eth_type_trans(skb, dev); | 1543 | skb->protocol = eth_type_trans(skb, dev); |
1545 | skb->ip_summed = CHECKSUM_NONE; | 1544 | skb->ip_summed = CHECKSUM_NONE; |
1546 | netif_rx(skb); /* send it up */ | 1545 | netif_rx(skb); /* send it up */ |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index afc2ec72529e..dfde80e54aef 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -1182,24 +1182,27 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1182 | 1182 | ||
1183 | if (likely(skb_is_gso(skb))) { | 1183 | if (likely(skb_is_gso(skb))) { |
1184 | struct ixgb_buffer *buffer_info; | 1184 | struct ixgb_buffer *buffer_info; |
1185 | struct iphdr *iph; | ||
1186 | |||
1185 | if (skb_header_cloned(skb)) { | 1187 | if (skb_header_cloned(skb)) { |
1186 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 1188 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
1187 | if (err) | 1189 | if (err) |
1188 | return err; | 1190 | return err; |
1189 | } | 1191 | } |
1190 | 1192 | ||
1191 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 1193 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
1192 | mss = skb_shinfo(skb)->gso_size; | 1194 | mss = skb_shinfo(skb)->gso_size; |
1193 | skb->nh.iph->tot_len = 0; | 1195 | iph = ip_hdr(skb); |
1194 | skb->nh.iph->check = 0; | 1196 | iph->tot_len = 0; |
1195 | skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr, | 1197 | iph->check = 0; |
1196 | skb->nh.iph->daddr, | 1198 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
1197 | 0, IPPROTO_TCP, 0); | 1199 | iph->daddr, 0, |
1198 | ipcss = skb->nh.raw - skb->data; | 1200 | IPPROTO_TCP, 0); |
1199 | ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; | 1201 | ipcss = skb_network_offset(skb); |
1200 | ipcse = skb->h.raw - skb->data - 1; | 1202 | ipcso = (void *)&(iph->check) - (void *)skb->data; |
1201 | tucss = skb->h.raw - skb->data; | 1203 | ipcse = skb_transport_offset(skb) - 1; |
1202 | tucso = (void *)&(skb->h.th->check) - (void *)skb->data; | 1204 | tucss = skb_transport_offset(skb); |
1205 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | ||
1203 | tucse = 0; | 1206 | tucse = 0; |
1204 | 1207 | ||
1205 | i = adapter->tx_ring.next_to_use; | 1208 | i = adapter->tx_ring.next_to_use; |
@@ -1243,7 +1246,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1243 | 1246 | ||
1244 | if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 1247 | if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
1245 | struct ixgb_buffer *buffer_info; | 1248 | struct ixgb_buffer *buffer_info; |
1246 | css = skb->h.raw - skb->data; | 1249 | css = skb_transport_offset(skb); |
1247 | cso = css + skb->csum_offset; | 1250 | cso = css + skb->csum_offset; |
1248 | 1251 | ||
1249 | i = adapter->tx_ring.next_to_use; | 1252 | i = adapter->tx_ring.next_to_use; |
@@ -2014,9 +2017,12 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) | |||
2014 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); | 2017 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); |
2015 | if (new_skb) { | 2018 | if (new_skb) { |
2016 | skb_reserve(new_skb, NET_IP_ALIGN); | 2019 | skb_reserve(new_skb, NET_IP_ALIGN); |
2017 | memcpy(new_skb->data - NET_IP_ALIGN, | 2020 | skb_copy_to_linear_data_offset(new_skb, |
2018 | skb->data - NET_IP_ALIGN, | 2021 | -NET_IP_ALIGN, |
2019 | length + NET_IP_ALIGN); | 2022 | (skb->data - |
2023 | NET_IP_ALIGN), | ||
2024 | (length + | ||
2025 | NET_IP_ALIGN)); | ||
2020 | /* save the skb in buffer_info as good */ | 2026 | /* save the skb in buffer_info as good */ |
2021 | buffer_info->skb = skb; | 2027 | buffer_info->skb = skb; |
2022 | skb = new_skb; | 2028 | skb = new_skb; |
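
Editor's note: ixgb's TSO setup shows the transport-offset helpers: hdr_len was (skb->h.raw - skb->data) + (skb->h.th->doff << 2), and is now skb_transport_offset(skb) + tcp_hdrlen(skb). A sketch of the header preparation, assuming IPv4 TSO as in the driver:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>

/* Sketch: zero the fields hardware recomputes and seed the TCP
 * pseudo-header checksum, returning the full header length. */
static unsigned int tso_header_prep(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        iph->tot_len = 0;
        iph->check = 0;
        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);
        return skb_transport_offset(skb) + tcp_hdrlen(skb);
}
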
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index a4eccb11d677..6683afc02aaa 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c | |||
@@ -110,11 +110,10 @@ static int ixpdev_rx(struct net_device *dev, int *budget) | |||
110 | 110 | ||
111 | skb = dev_alloc_skb(desc->pkt_length + 2); | 111 | skb = dev_alloc_skb(desc->pkt_length + 2); |
112 | if (likely(skb != NULL)) { | 112 | if (likely(skb != NULL)) { |
113 | skb->dev = nds[desc->channel]; | ||
114 | skb_reserve(skb, 2); | 113 | skb_reserve(skb, 2); |
115 | eth_copy_and_sum(skb, buf, desc->pkt_length, 0); | 114 | eth_copy_and_sum(skb, buf, desc->pkt_length, 0); |
116 | skb_put(skb, desc->pkt_length); | 115 | skb_put(skb, desc->pkt_length); |
117 | skb->protocol = eth_type_trans(skb, skb->dev); | 116 | skb->protocol = eth_type_trans(skb, nds[desc->channel]); |
118 | 117 | ||
119 | skb->dev->last_rx = jiffies; | 118 | skb->dev->last_rx = jiffies; |
120 | 119 | ||
diff --git a/drivers/net/lance.c b/drivers/net/lance.c index a3843320dbe1..0fe96c85828b 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c | |||
@@ -988,7 +988,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
988 | if (lance_debug > 5) | 988 | if (lance_debug > 5) |
989 | printk("%s: bouncing a high-memory packet (%#x).\n", | 989 | printk("%s: bouncing a high-memory packet (%#x).\n", |
990 | dev->name, (u32)isa_virt_to_bus(skb->data)); | 990 | dev->name, (u32)isa_virt_to_bus(skb->data)); |
991 | memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len); | 991 | skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len); |
992 | lp->tx_ring[entry].base = | 992 | lp->tx_ring[entry].base = |
993 | ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; | 993 | ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; |
994 | dev_kfree_skb(skb); | 994 | dev_kfree_skb(skb); |
@@ -1184,7 +1184,6 @@ lance_rx(struct net_device *dev) | |||
1184 | } | 1184 | } |
1185 | break; | 1185 | break; |
1186 | } | 1186 | } |
1187 | skb->dev = dev; | ||
1188 | skb_reserve(skb,2); /* 16 byte align */ | 1187 | skb_reserve(skb,2); /* 16 byte align */ |
1189 | skb_put(skb,pkt_len); /* Make room */ | 1188 | skb_put(skb,pkt_len); /* Make room */ |
1190 | eth_copy_and_sum(skb, | 1189 | eth_copy_and_sum(skb, |
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c index 452863d5d498..0edcd125fd61 100644 --- a/drivers/net/lasi_82596.c +++ b/drivers/net/lasi_82596.c | |||
@@ -801,7 +801,6 @@ memory_squeeze: | |||
801 | lp->stats.rx_dropped++; | 801 | lp->stats.rx_dropped++; |
802 | } | 802 | } |
803 | else { | 803 | else { |
804 | skb->dev = dev; | ||
805 | if (!rx_in_place) { | 804 | if (!rx_in_place) { |
806 | /* 16 byte align the data fields */ | 805 | /* 16 byte align the data fields */ |
807 | dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE); | 806 | dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE); |
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c index e726c06b8dc6..5c86e737f954 100644 --- a/drivers/net/lib8390.c +++ b/drivers/net/lib8390.c | |||
@@ -722,7 +722,6 @@ static void ei_receive(struct net_device *dev) | |||
722 | else | 722 | else |
723 | { | 723 | { |
724 | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ | 724 | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ |
725 | skb->dev = dev; | ||
726 | skb_put(skb, pkt_len); /* Make room */ | 725 | skb_put(skb, pkt_len); /* Make room */ |
727 | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); | 726 | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); |
728 | skb->protocol=eth_type_trans(skb,dev); | 727 | skb->protocol=eth_type_trans(skb,dev); |
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 2b739fd584f1..6ba6ed2b480a 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
@@ -75,8 +75,9 @@ static DEFINE_PER_CPU(struct pcpu_lstats, pcpu_lstats); | |||
75 | #ifdef LOOPBACK_TSO | 75 | #ifdef LOOPBACK_TSO |
76 | static void emulate_large_send_offload(struct sk_buff *skb) | 76 | static void emulate_large_send_offload(struct sk_buff *skb) |
77 | { | 77 | { |
78 | struct iphdr *iph = skb->nh.iph; | 78 | struct iphdr *iph = ip_hdr(skb); |
79 | struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4)); | 79 | struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) + |
80 | (iph->ihl * 4)); | ||
80 | unsigned int doffset = (iph->ihl + th->doff) * 4; | 81 | unsigned int doffset = (iph->ihl + th->doff) * 4; |
81 | unsigned int mtu = skb_shinfo(skb)->gso_size + doffset; | 82 | unsigned int mtu = skb_shinfo(skb)->gso_size + doffset; |
82 | unsigned int offset = 0; | 83 | unsigned int offset = 0; |
@@ -90,10 +91,11 @@ static void emulate_large_send_offload(struct sk_buff *skb) | |||
90 | if (!nskb) | 91 | if (!nskb) |
91 | break; | 92 | break; |
92 | skb_reserve(nskb, 32); | 93 | skb_reserve(nskb, 32); |
93 | nskb->mac.raw = nskb->data - 14; | 94 | skb_set_mac_header(nskb, -ETH_HLEN); |
94 | nskb->nh.raw = nskb->data; | 95 | skb_reset_network_header(nskb); |
95 | iph = nskb->nh.iph; | 96 | iph = ip_hdr(nskb); |
96 | memcpy(nskb->data, skb->nh.raw, doffset); | 97 | skb_copy_to_linear_data(nskb, skb_network_header(skb), |
98 | doffset); | ||
97 | if (skb_copy_bits(skb, | 99 | if (skb_copy_bits(skb, |
98 | doffset + offset, | 100 | doffset + offset, |
99 | nskb->data + doffset, | 101 | nskb->data + doffset, |
@@ -108,7 +110,7 @@ static void emulate_large_send_offload(struct sk_buff *skb) | |||
108 | memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); | 110 | memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); |
109 | nskb->pkt_type = skb->pkt_type; | 111 | nskb->pkt_type = skb->pkt_type; |
110 | 112 | ||
111 | th = (struct tcphdr*)(nskb->nh.raw + iph->ihl*4); | 113 | th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4); |
112 | iph->tot_len = htons(frag_size + doffset); | 114 | iph->tot_len = htons(frag_size + doffset); |
113 | iph->id = htons(id); | 115 | iph->id = htons(id); |
114 | iph->check = 0; | 116 | iph->check = 0; |
@@ -137,7 +139,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) | |||
137 | skb_orphan(skb); | 139 | skb_orphan(skb); |
138 | 140 | ||
139 | skb->protocol = eth_type_trans(skb,dev); | 141 | skb->protocol = eth_type_trans(skb,dev); |
140 | skb->dev = dev; | ||
141 | #ifndef LOOPBACK_MUST_CHECKSUM | 142 | #ifndef LOOPBACK_MUST_CHECKSUM |
142 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 143 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
143 | #endif | 144 | #endif |
@@ -145,7 +146,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) | |||
145 | #ifdef LOOPBACK_TSO | 146 | #ifdef LOOPBACK_TSO |
146 | if (skb_is_gso(skb)) { | 147 | if (skb_is_gso(skb)) { |
147 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | 148 | BUG_ON(skb->protocol != htons(ETH_P_IP)); |
148 | BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); | 149 | BUG_ON(ip_hdr(skb)->protocol != IPPROTO_TCP); |
149 | 150 | ||
150 | emulate_large_send_offload(skb); | 151 | emulate_large_send_offload(skb); |
151 | return 0; | 152 | return 0; |
@@ -163,11 +164,9 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) | |||
163 | return 0; | 164 | return 0; |
164 | } | 165 | } |
165 | 166 | ||
166 | static struct net_device_stats loopback_stats; | ||
167 | |||
168 | static struct net_device_stats *get_stats(struct net_device *dev) | 167 | static struct net_device_stats *get_stats(struct net_device *dev) |
169 | { | 168 | { |
170 | struct net_device_stats *stats = &loopback_stats; | 169 | struct net_device_stats *stats = &dev->stats; |
171 | unsigned long bytes = 0; | 170 | unsigned long bytes = 0; |
172 | unsigned long packets = 0; | 171 | unsigned long packets = 0; |
173 | int i; | 172 | int i; |
@@ -207,7 +206,6 @@ static const struct ethtool_ops loopback_ethtool_ops = { | |||
207 | struct net_device loopback_dev = { | 206 | struct net_device loopback_dev = { |
208 | .name = "lo", | 207 | .name = "lo", |
209 | .get_stats = &get_stats, | 208 | .get_stats = &get_stats, |
210 | .priv = &loopback_stats, | ||
211 | .mtu = (16 * 1024) + 20 + 20 + 12, | 209 | .mtu = (16 * 1024) + 20 + 20 + 12, |
212 | .hard_start_xmit = loopback_xmit, | 210 | .hard_start_xmit = loopback_xmit, |
213 | .hard_header = eth_header, | 211 | .hard_header = eth_header, |
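The loopback conversion collects most of the patterns repeated throughout this merge: the typed header accessors (ip_hdr(), skb_network_header(), skb_set_mac_header()) replace direct use of the old skb->nh/skb->mac unions, and the explicit skb->dev assignment is dropped because eth_type_trans() already sets it. A minimal sketch of the accessor pattern on a receive path, assuming a plain Ethernet/IPv4 frame (the function name is hypothetical, not from this diff):

    #include <linux/skbuff.h>
    #include <linux/etherdevice.h>
    #include <linux/if_ether.h>
    #include <linux/ip.h>

    static void example_rx(struct sk_buff *skb, struct net_device *dev)
    {
            /* eth_type_trans() records the mac header position, pulls the
             * Ethernet header and sets skb->dev -- hence all the deleted
             * "skb->dev = dev;" lines in this merge. */
            skb->protocol = eth_type_trans(skb, dev);

            /* skb->data now points at the IP header */
            skb_reset_network_header(skb);

            if (skb->protocol == htons(ETH_P_IP)) {
                    const struct iphdr *iph = ip_hdr(skb);

                    /* transport header sits ihl 32-bit words further on */
                    skb_set_transport_header(skb, iph->ihl * 4);
            }
    }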
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c index 177c502f7385..5fc18da1873d 100644 --- a/drivers/net/lp486e.c +++ b/drivers/net/lp486e.c | |||
@@ -676,7 +676,6 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp, | |||
676 | return 1; | 676 | return 1; |
677 | } | 677 | } |
678 | 678 | ||
679 | skb->dev = dev; | ||
680 | memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len); | 679 | memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len); |
681 | 680 | ||
682 | skb->protocol = eth_type_trans(skb,dev); | 681 | skb->protocol = eth_type_trans(skb,dev); |
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index e960138011c0..90e695d53266 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c | |||
@@ -530,7 +530,6 @@ net_rx(struct net_device *dev) | |||
530 | return; | 530 | return; |
531 | } | 531 | } |
532 | skb_put(skb, length); | 532 | skb_put(skb, length); |
533 | skb->dev = dev; | ||
534 | 533 | ||
535 | memcpy_fromio(skb->data, dev->mem_start + PP_RxFrame, length); | 534 | memcpy_fromio(skb->data, dev->mem_start + PP_RxFrame, length); |
536 | 535 | ||
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 2e9571bf0736..0e04f7ac3f2e 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -357,7 +357,6 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, | |||
357 | } | 357 | } |
358 | 358 | ||
359 | skb_reserve(skb, RX_OFFSET); | 359 | skb_reserve(skb, RX_OFFSET); |
360 | skb->dev = bp->dev; | ||
361 | skb->ip_summed = CHECKSUM_NONE; | 360 | skb->ip_summed = CHECKSUM_NONE; |
362 | skb_put(skb, len); | 361 | skb_put(skb, len); |
363 | 362 | ||
@@ -368,9 +367,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, | |||
368 | BUG_ON(frag != last_frag); | 367 | BUG_ON(frag != last_frag); |
369 | frag_len = len - offset; | 368 | frag_len = len - offset; |
370 | } | 369 | } |
371 | memcpy(skb->data + offset, | 370 | skb_copy_to_linear_data_offset(skb, offset, |
372 | bp->rx_buffers + (RX_BUFFER_SIZE * frag), | 371 | (bp->rx_buffers + |
373 | frag_len); | 372 | (RX_BUFFER_SIZE * frag)), |
373 | frag_len); | ||
374 | offset += RX_BUFFER_SIZE; | 374 | offset += RX_BUFFER_SIZE; |
375 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); | 375 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); |
376 | wmb(); | 376 | wmb(); |
@@ -576,7 +576,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
576 | int i; | 576 | int i; |
577 | dev_dbg(&bp->pdev->dev, | 577 | dev_dbg(&bp->pdev->dev, |
578 | "start_xmit: len %u head %p data %p tail %p end %p\n", | 578 | "start_xmit: len %u head %p data %p tail %p end %p\n", |
579 | skb->len, skb->head, skb->data, skb->tail, skb->end); | 579 | skb->len, skb->head, skb->data, |
580 | skb_tail_pointer(skb), skb_end_pointer(skb)); | ||
580 | dev_dbg(&bp->pdev->dev, | 581 | dev_dbg(&bp->pdev->dev, |
581 | "data:"); | 582 | "data:"); |
582 | for (i = 0; i < 16; i++) | 583 | for (i = 0; i < 16; i++) |
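skb_copy_to_linear_data_offset() above is a typed wrapper for the memcpy it replaces; on a linear skb the two are byte-for-byte equivalent, but the wrapper names the skb destination explicitly and keeps callers correct if the linear-data layout changes. A minimal sketch, assuming a driver-private receive buffer:

    #include <linux/skbuff.h>

    static void copy_rx_fragment(struct sk_buff *skb, unsigned int offset,
                                 const void *buf, unsigned int frag_len)
    {
            /* was: memcpy(skb->data + offset, buf, frag_len); */
            skb_copy_to_linear_data_offset(skb, offset, buf, frag_len);
    }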
diff --git a/drivers/net/mace.c b/drivers/net/mace.c index 9ec24f0d5d68..b3bd62394958 100644 --- a/drivers/net/mace.c +++ b/drivers/net/mace.c | |||
@@ -939,7 +939,6 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) | |||
939 | else /* Ethernet header; mace includes FCS */ | 939 | else /* Ethernet header; mace includes FCS */ |
940 | nb -= 8; | 940 | nb -= 8; |
941 | skb_put(skb, nb); | 941 | skb_put(skb, nb); |
942 | skb->dev = dev; | ||
943 | skb->protocol = eth_type_trans(skb, dev); | 942 | skb->protocol = eth_type_trans(skb, dev); |
944 | mp->stats.rx_bytes += skb->len; | 943 | mp->stats.rx_bytes += skb->len; |
945 | netif_rx(skb); | 944 | netif_rx(skb); |
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c index 5d541e873041..27911c07558d 100644 --- a/drivers/net/macmace.c +++ b/drivers/net/macmace.c | |||
@@ -420,8 +420,7 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | |||
420 | mp->stats.tx_bytes += skb->len; | 420 | mp->stats.tx_bytes += skb->len; |
421 | 421 | ||
422 | /* We need to copy into our xmit buffer to take care of alignment and caching issues */ | 422 | /* We need to copy into our xmit buffer to take care of alignment and caching issues */ |
423 | 423 | skb_copy_from_linear_data(skb, mp->tx_ring, skb->len); | |
424 | memcpy((void *) mp->tx_ring, skb->data, skb->len); | ||
425 | 424 | ||
426 | /* load the Tx DMA and fire it off */ | 425 | /* load the Tx DMA and fire it off */ |
427 | 426 | ||
@@ -621,7 +620,6 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) | |||
621 | skb_reserve(skb,2); | 620 | skb_reserve(skb,2); |
622 | memcpy(skb_put(skb, mf->len), mf->data, mf->len); | 621 | memcpy(skb_put(skb, mf->len), mf->data, mf->len); |
623 | 622 | ||
624 | skb->dev = dev; | ||
625 | skb->protocol = eth_type_trans(skb, dev); | 623 | skb->protocol = eth_type_trans(skb, dev); |
626 | netif_rx(skb); | 624 | netif_rx(skb); |
627 | dev->last_rx = jiffies; | 625 | dev->last_rx = jiffies; |
diff --git a/drivers/net/meth.c b/drivers/net/meth.c index 7e69ca6edd91..0343ea12b299 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c | |||
@@ -421,7 +421,6 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) | |||
421 | /* Write metadata, and then pass to the receive level */ | 421 | /* Write metadata, and then pass to the receive level */ |
422 | skb_put(skb_c, len); | 422 | skb_put(skb_c, len); |
423 | priv->rx_skbs[priv->rx_write] = skb; | 423 | priv->rx_skbs[priv->rx_write] = skb; |
424 | skb_c->dev = dev; | ||
425 | skb_c->protocol = eth_type_trans(skb_c, dev); | 424 | skb_c->protocol = eth_type_trans(skb_c, dev); |
426 | dev->last_rx = jiffies; | 425 | dev->last_rx = jiffies; |
427 | priv->stats.rx_packets++; | 426 | priv->stats.rx_packets++; |
@@ -609,7 +608,7 @@ static void meth_tx_short_prepare(struct meth_private *priv, | |||
609 | 608 | ||
610 | desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16); | 609 | desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16); |
611 | /* maybe I should set whole thing to 0 first... */ | 610 | /* maybe I should set whole thing to 0 first... */ |
612 | memcpy(desc->data.dt + (120 - len), skb->data, skb->len); | 611 | skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len); |
613 | if (skb->len < len) | 612 | if (skb->len < len) |
614 | memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len); | 613 | memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len); |
615 | } | 614 | } |
@@ -627,8 +626,8 @@ static void meth_tx_1page_prepare(struct meth_private *priv, | |||
627 | 626 | ||
628 | /* unaligned part */ | 627 | /* unaligned part */ |
629 | if (unaligned_len) { | 628 | if (unaligned_len) { |
630 | memcpy(desc->data.dt + (120 - unaligned_len), | 629 | skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), |
631 | skb->data, unaligned_len); | 630 | unaligned_len); |
632 | desc->header.raw |= (128 - unaligned_len) << 16; | 631 | desc->header.raw |= (128 - unaligned_len) << 16; |
633 | } | 632 | } |
634 | 633 | ||
@@ -653,8 +652,8 @@ static void meth_tx_2page_prepare(struct meth_private *priv, | |||
653 | desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1); | 652 | desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1); |
654 | /* unaligned part */ | 653 | /* unaligned part */ |
655 | if (unaligned_len){ | 654 | if (unaligned_len){ |
656 | memcpy(desc->data.dt + (120 - unaligned_len), | 655 | skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), |
657 | skb->data, unaligned_len); | 656 | unaligned_len); |
658 | desc->header.raw |= (128 - unaligned_len) << 16; | 657 | desc->header.raw |= (128 - unaligned_len) << 16; |
659 | } | 658 | } |
660 | 659 | ||
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c index f42b9e201937..403f63afd201 100644 --- a/drivers/net/mipsnet.c +++ b/drivers/net/mipsnet.c | |||
@@ -101,7 +101,6 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) | |||
101 | if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len)) | 101 | if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len)) |
102 | return -EFAULT; | 102 | return -EFAULT; |
103 | 103 | ||
104 | skb->dev = dev; | ||
105 | skb->protocol = eth_type_trans(skb, dev); | 104 | skb->protocol = eth_type_trans(skb, dev); |
106 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 105 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
107 | 106 | ||
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 8015a7c5b0c9..ab15ecd4b3d6 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -434,7 +434,6 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | |||
434 | * received packet | 434 | * received packet |
435 | */ | 435 | */ |
436 | skb_put(skb, pkt_info.byte_cnt - 4); | 436 | skb_put(skb, pkt_info.byte_cnt - 4); |
437 | skb->dev = dev; | ||
438 | 437 | ||
439 | if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) { | 438 | if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) { |
440 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 439 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -1162,15 +1161,15 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, | |||
1162 | 1161 | ||
1163 | cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | | 1162 | cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | |
1164 | ETH_GEN_IP_V_4_CHECKSUM | | 1163 | ETH_GEN_IP_V_4_CHECKSUM | |
1165 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; | 1164 | ip_hdr(skb)->ihl << ETH_TX_IHL_SHIFT; |
1166 | 1165 | ||
1167 | switch (skb->nh.iph->protocol) { | 1166 | switch (ip_hdr(skb)->protocol) { |
1168 | case IPPROTO_UDP: | 1167 | case IPPROTO_UDP: |
1169 | cmd_sts |= ETH_UDP_FRAME; | 1168 | cmd_sts |= ETH_UDP_FRAME; |
1170 | desc->l4i_chk = skb->h.uh->check; | 1169 | desc->l4i_chk = udp_hdr(skb)->check; |
1171 | break; | 1170 | break; |
1172 | case IPPROTO_TCP: | 1171 | case IPPROTO_TCP: |
1173 | desc->l4i_chk = skb->h.th->check; | 1172 | desc->l4i_chk = tcp_hdr(skb)->check; |
1174 | break; | 1173 | break; |
1175 | default: | 1174 | default: |
1176 | BUG(); | 1175 | BUG(); |
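The transmit-checksum branch shows the layer-4 accessors: the protocol field is read through ip_hdr() and the stored checksum through tcp_hdr()/udp_hdr(), replacing skb->nh.iph and skb->h.{th,uh}. A hedged sketch of the same dispatch, detached from any descriptor format:

    #include <linux/skbuff.h>
    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/tcp.h>
    #include <linux/udp.h>

    /* Return the L4 checksum field the hardware should patch,
     * or 0 for protocols it cannot offload (illustrative only). */
    static __sum16 l4_checksum(const struct sk_buff *skb)
    {
            switch (ip_hdr(skb)->protocol) {
            case IPPROTO_TCP:
                    return tcp_hdr(skb)->check;
            case IPPROTO_UDP:
                    return udp_hdr(skb)->check;
            default:
                    return 0;
            }
    }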
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index f8efe0e70a6b..16e3c4315e82 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -879,7 +879,7 @@ myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va, | |||
879 | * skb_pull() (for ether_pad and eth_type_trans()) requires | 879 | * skb_pull() (for ether_pad and eth_type_trans()) requires |
880 | * the beginning of the packet in skb_headlen(), move it | 880 | * the beginning of the packet in skb_headlen(), move it |
881 | * manually */ | 881 | * manually */ |
882 | memcpy(skb->data, va, hlen); | 882 | skb_copy_to_linear_data(skb, va, hlen); |
883 | skb_shinfo(skb)->frags[0].page_offset += hlen; | 883 | skb_shinfo(skb)->frags[0].page_offset += hlen; |
884 | skb_shinfo(skb)->frags[0].size -= hlen; | 884 | skb_shinfo(skb)->frags[0].size -= hlen; |
885 | skb->data_len -= hlen; | 885 | skb->data_len -= hlen; |
@@ -1020,7 +1020,6 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, | |||
1020 | skb_shinfo(skb)->nr_frags = 0; | 1020 | skb_shinfo(skb)->nr_frags = 0; |
1021 | } | 1021 | } |
1022 | skb->protocol = eth_type_trans(skb, dev); | 1022 | skb->protocol = eth_type_trans(skb, dev); |
1023 | skb->dev = dev; | ||
1024 | 1023 | ||
1025 | if (mgp->csum_flag) { | 1024 | if (mgp->csum_flag) { |
1026 | if ((skb->protocol == htons(ETH_P_IP)) || | 1025 | if ((skb->protocol == htons(ETH_P_IP)) || |
@@ -2030,7 +2029,7 @@ again: | |||
2030 | odd_flag = 0; | 2029 | odd_flag = 0; |
2031 | flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); | 2030 | flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); |
2032 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 2031 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
2033 | cksum_offset = (skb->h.raw - skb->data); | 2032 | cksum_offset = skb_transport_offset(skb); |
2034 | pseudo_hdr_offset = cksum_offset + skb->csum_offset; | 2033 | pseudo_hdr_offset = cksum_offset + skb->csum_offset; |
2035 | /* If the headers are excessively large, then we must | 2034 | /* If the headers are excessively large, then we must |
2036 | * fall back to a software checksum */ | 2035 | * fall back to a software checksum */ |
@@ -2055,7 +2054,7 @@ again: | |||
2055 | * send loop that we are still in the | 2054 | * send loop that we are still in the |
2056 | * header portion of the TSO packet. | 2055 | * header portion of the TSO packet. |
2057 | * TSO header must be at most 134 bytes long */ | 2056 | * TSO header must be at most 134 bytes long */ |
2058 | cum_len = -((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 2057 | cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb)); |
2059 | 2058 | ||
2060 | /* for TSO, pseudo_hdr_offset holds mss. | 2059 | /* for TSO, pseudo_hdr_offset holds mss. |
2061 | * The firmware figures out where to put | 2060 | * The firmware figures out where to put |
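skb_transport_offset() and tcp_hdrlen() are direct replacements for the old pointer arithmetic: skb->h.raw - skb->data and skb->h.th->doff << 2 respectively. The TSO header length used above therefore reduces to the following sketch:

    #include <linux/skbuff.h>
    #include <linux/tcp.h>

    /* Bytes of header preceding the TSO payload: everything before the
     * transport header (Ethernet + IP) plus the TCP header itself. */
    static int tso_header_bytes(const struct sk_buff *skb)
    {
            return skb_transport_offset(skb) + tcp_hdrlen(skb);
    }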
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index ee26ef52289f..13444da93273 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c | |||
@@ -368,7 +368,7 @@ static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
368 | struct ethhdr *eth; | 368 | struct ethhdr *eth; |
369 | unsigned char *rawp; | 369 | unsigned char *rawp; |
370 | 370 | ||
371 | skb->mac.raw = (((unsigned char *)skb->data) + MYRI_PAD_LEN); | 371 | skb_set_mac_header(skb, MYRI_PAD_LEN); |
372 | skb_pull(skb, dev->hard_header_len); | 372 | skb_pull(skb, dev->hard_header_len); |
373 | eth = eth_hdr(skb); | 373 | eth = eth_hdr(skb); |
374 | 374 | ||
@@ -502,7 +502,7 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) | |||
502 | copy_skb->dev = dev; | 502 | copy_skb->dev = dev; |
503 | DRX(("resv_and_put ")); | 503 | DRX(("resv_and_put ")); |
504 | skb_put(copy_skb, len); | 504 | skb_put(copy_skb, len); |
505 | memcpy(copy_skb->data, skb->data, len); | 505 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
506 | 506 | ||
507 | /* Reuse original ring buffer. */ | 507 | /* Reuse original ring buffer. */ |
508 | DRX(("reuse ")); | 508 | DRX(("reuse ")); |
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 349b96a3ec4c..a8d7ff2c96ac 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -2289,7 +2289,6 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) | |||
2289 | * without copying to a minimally-sized skbuff. */ | 2289 | * without copying to a minimally-sized skbuff. */ |
2290 | if (pkt_len < rx_copybreak | 2290 | if (pkt_len < rx_copybreak |
2291 | && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) { | 2291 | && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) { |
2292 | skb->dev = dev; | ||
2293 | /* 16 byte align the IP header */ | 2292 | /* 16 byte align the IP header */ |
2294 | skb_reserve(skb, RX_OFFSET); | 2293 | skb_reserve(skb, RX_OFFSET); |
2295 | pci_dma_sync_single_for_cpu(np->pci_dev, | 2294 | pci_dma_sync_single_for_cpu(np->pci_dev, |
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c index a53644f6a29b..2b8da0a54998 100644 --- a/drivers/net/netx-eth.c +++ b/drivers/net/netx-eth.c | |||
@@ -168,7 +168,6 @@ static void netx_eth_receive(struct net_device *ndev) | |||
168 | FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno)); | 168 | FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno)); |
169 | 169 | ||
170 | ndev->last_rx = jiffies; | 170 | ndev->last_rx = jiffies; |
171 | skb->dev = ndev; | ||
172 | skb->protocol = eth_type_trans(skb, ndev); | 171 | skb->protocol = eth_type_trans(skb, ndev); |
173 | netif_rx(skb); | 172 | netif_rx(skb); |
174 | priv->stats.rx_packets++; | 173 | priv->stats.rx_packets++; |
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 6537574a9cda..0fba8f190762 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c | |||
@@ -35,6 +35,8 @@ | |||
35 | #include "netxen_nic_hw.h" | 35 | #include "netxen_nic_hw.h" |
36 | #include "netxen_nic_phan_reg.h" | 36 | #include "netxen_nic_phan_reg.h" |
37 | 37 | ||
38 | #include <net/ip.h> | ||
39 | |||
38 | /* PCI Windowing for DDR regions. */ | 40 | /* PCI Windowing for DDR regions. */ |
39 | 41 | ||
40 | #define ADDR_IN_RANGE(addr, low, high) \ | 42 | #define ADDR_IN_RANGE(addr, low, high) \ |
@@ -371,22 +373,21 @@ void netxen_tso_check(struct netxen_adapter *adapter, | |||
371 | struct cmd_desc_type0 *desc, struct sk_buff *skb) | 373 | struct cmd_desc_type0 *desc, struct sk_buff *skb) |
372 | { | 374 | { |
373 | if (desc->mss) { | 375 | if (desc->mss) { |
374 | desc->total_hdr_length = sizeof(struct ethhdr) + | 376 | desc->total_hdr_length = (sizeof(struct ethhdr) + |
375 | ((skb->nh.iph)->ihl * sizeof(u32)) + | 377 | ip_hdrlen(skb) + tcp_hdrlen(skb)); |
376 | ((skb->h.th)->doff * sizeof(u32)); | ||
377 | netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); | 378 | netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); |
378 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | 379 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
379 | if (skb->nh.iph->protocol == IPPROTO_TCP) { | 380 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) { |
380 | netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT); | 381 | netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT); |
381 | } else if (skb->nh.iph->protocol == IPPROTO_UDP) { | 382 | } else if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
382 | netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT); | 383 | netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT); |
383 | } else { | 384 | } else { |
384 | return; | 385 | return; |
385 | } | 386 | } |
386 | } | 387 | } |
387 | adapter->stats.xmitcsummed++; | 388 | adapter->stats.xmitcsummed++; |
388 | desc->tcp_hdr_offset = skb->h.raw - skb->data; | 389 | desc->tcp_hdr_offset = skb_transport_offset(skb); |
389 | desc->ip_hdr_offset = skb->nh.raw - skb->data; | 390 | desc->ip_hdr_offset = skb_network_offset(skb); |
390 | } | 391 | } |
391 | 392 | ||
392 | int netxen_is_flash_supported(struct netxen_adapter *adapter) | 393 | int netxen_is_flash_supported(struct netxen_adapter *adapter) |
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index eff965dc5fff..5cd40562da7c 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -1129,7 +1129,6 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, | |||
1129 | port->stats.csummed++; | 1129 | port->stats.csummed++; |
1130 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1130 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1131 | } | 1131 | } |
1132 | skb->dev = netdev; | ||
1133 | if (desc_ctx == RCV_DESC_LRO_CTXID) { | 1132 | if (desc_ctx == RCV_DESC_LRO_CTXID) { |
1134 | /* True length was only available on the last pkt */ | 1133 | /* True length was only available on the last pkt */ |
1135 | skb_put(skb, buffer->lro_length); | 1134 | skb_put(skb, buffer->lro_length); |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 7d2525e76abb..ab25c225a07e 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -41,6 +41,7 @@ | |||
41 | 41 | ||
42 | #include <linux/dma-mapping.h> | 42 | #include <linux/dma-mapping.h> |
43 | #include <linux/vmalloc.h> | 43 | #include <linux/vmalloc.h> |
44 | #include <net/ip.h> | ||
44 | 45 | ||
45 | MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); | 46 | MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); |
46 | MODULE_LICENSE("GPL"); | 47 | MODULE_LICENSE("GPL"); |
@@ -778,9 +779,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
778 | if (skb_shinfo(skb)->gso_size > 0) { | 779 | if (skb_shinfo(skb)->gso_size > 0) { |
779 | 780 | ||
780 | no_of_desc++; | 781 | no_of_desc++; |
781 | if (((skb->nh.iph)->ihl * sizeof(u32)) + | 782 | if ((ip_hdrlen(skb) + tcp_hdrlen(skb) + |
782 | ((skb->h.th)->doff * sizeof(u32)) + | 783 | sizeof(struct ethhdr)) > |
783 | sizeof(struct ethhdr) > | ||
784 | (sizeof(struct cmd_desc_type0) - 2)) { | 784 | (sizeof(struct cmd_desc_type0) - 2)) { |
785 | no_of_desc++; | 785 | no_of_desc++; |
786 | } | 786 | } |
@@ -920,8 +920,10 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
920 | /* copy the next 64 bytes - should be enough except | 920 | /* copy the next 64 bytes - should be enough except |
921 | * for pathological case | 921 | * for pathological case |
922 | */ | 922 | */ |
923 | memcpy((void *)hwdesc, (void *)(skb->data) + | 923 | skb_copy_from_linear_data_offset(skb, first_hdr_len, |
924 | first_hdr_len, hdr_len - first_hdr_len); | 924 | hwdesc, |
925 | (hdr_len - | ||
926 | first_hdr_len)); | ||
925 | producer = get_next_index(producer, max_tx_desc_count); | 927 | producer = get_next_index(producer, max_tx_desc_count); |
926 | } | 928 | } |
927 | } | 929 | } |
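The from-linear-data wrappers put the skb first, so the argument order differs from memcpy(): skb_copy_from_linear_data_offset(skb, offset, to, len) copies len bytes from skb->data + offset into to. That reordering is the one mechanical hazard in these conversions; the hunk above is equivalent to this sketch:

    #include <linux/skbuff.h>

    static void copy_header_tail(const struct sk_buff *skb, void *hwdesc,
                                 int first_hdr_len, int hdr_len)
    {
            /* was: memcpy(hwdesc, skb->data + first_hdr_len,
             *             hdr_len - first_hdr_len);
             * note the order: (skb, offset, destination, length) */
            skb_copy_from_linear_data_offset(skb, first_hdr_len, hwdesc,
                                             hdr_len - first_hdr_len);
    }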
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c index 8be0d030d6f4..3d5b4232f65f 100644 --- a/drivers/net/ni5010.c +++ b/drivers/net/ni5010.c | |||
@@ -562,7 +562,6 @@ static void ni5010_rx(struct net_device *dev) | |||
562 | return; | 562 | return; |
563 | } | 563 | } |
564 | 564 | ||
565 | skb->dev = dev; | ||
566 | skb_reserve(skb, 2); | 565 | skb_reserve(skb, 2); |
567 | 566 | ||
568 | /* Read packet into buffer */ | 567 | /* Read packet into buffer */ |
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index a6f4b24b0176..8dbd6d1900b5 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c | |||
@@ -934,7 +934,6 @@ static void ni52_rcv_int(struct net_device *dev) | |||
934 | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); | 934 | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); |
935 | if(skb != NULL) | 935 | if(skb != NULL) |
936 | { | 936 | { |
937 | skb->dev = dev; | ||
938 | skb_reserve(skb,2); | 937 | skb_reserve(skb,2); |
939 | skb_put(skb,totlen); | 938 | skb_put(skb,totlen); |
940 | eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0); | 939 | eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0); |
@@ -1183,7 +1182,7 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
1183 | else | 1182 | else |
1184 | #endif | 1183 | #endif |
1185 | { | 1184 | { |
1186 | memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); | 1185 | skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len); |
1187 | len = skb->len; | 1186 | len = skb->len; |
1188 | if (len < ETH_ZLEN) { | 1187 | if (len < ETH_ZLEN) { |
1189 | len = ETH_ZLEN; | 1188 | len = ETH_ZLEN; |
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c index 1578f4d98498..3818edf0ac18 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c | |||
@@ -610,7 +610,6 @@ static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type) | |||
610 | printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); | 610 | printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); |
611 | return NULL; | 611 | return NULL; |
612 | } | 612 | } |
613 | skb->dev = dev; | ||
614 | skb_reserve(skb,2+16); | 613 | skb_reserve(skb,2+16); |
615 | skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */ | 614 | skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */ |
616 | ptr = skb->data; | 615 | ptr = skb->data; |
@@ -1094,7 +1093,6 @@ static void ni65_recv_intr(struct net_device *dev,int csr0) | |||
1094 | if(skb) | 1093 | if(skb) |
1095 | { | 1094 | { |
1096 | skb_reserve(skb,2); | 1095 | skb_reserve(skb,2); |
1097 | skb->dev = dev; | ||
1098 | #ifdef RCV_VIA_SKB | 1096 | #ifdef RCV_VIA_SKB |
1099 | if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { | 1097 | if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { |
1100 | skb_put(skb,len); | 1098 | skb_put(skb,len); |
@@ -1178,8 +1176,9 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
1178 | if( (unsigned long) (skb->data + skb->len) > 0x1000000) { | 1176 | if( (unsigned long) (skb->data + skb->len) > 0x1000000) { |
1179 | #endif | 1177 | #endif |
1180 | 1178 | ||
1181 | memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data, | 1179 | skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum], |
1182 | (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len); | 1180 | skb->len > T_BUF_SIZE ? T_BUF_SIZE : |
1181 | skb->len); | ||
1183 | if (len > skb->len) | 1182 | if (len > skb->len) |
1184 | memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); | 1183 | memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); |
1185 | dev_kfree_skb (skb); | 1184 | dev_kfree_skb (skb); |
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index 9ec6e9e54f47..6a32338623f1 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c | |||
@@ -607,7 +607,6 @@ static inline int rx_refill(struct net_device *ndev, gfp_t gfp) | |||
607 | res &= 0xf; | 607 | res &= 0xf; |
608 | skb_reserve(skb, res); | 608 | skb_reserve(skb, res); |
609 | 609 | ||
610 | skb->dev = ndev; | ||
611 | if (gfp != GFP_ATOMIC) | 610 | if (gfp != GFP_ATOMIC) |
612 | spin_lock_irqsave(&dev->rx_info.lock, flags); | 611 | spin_lock_irqsave(&dev->rx_info.lock, flags); |
613 | res = ns83820_add_rx_skb(dev, skb); | 612 | res = ns83820_add_rx_skb(dev, skb); |
@@ -1157,9 +1156,9 @@ again: | |||
1157 | extsts = 0; | 1156 | extsts = 0; |
1158 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1157 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1159 | extsts |= EXTSTS_IPPKT; | 1158 | extsts |= EXTSTS_IPPKT; |
1160 | if (IPPROTO_TCP == skb->nh.iph->protocol) | 1159 | if (IPPROTO_TCP == ip_hdr(skb)->protocol) |
1161 | extsts |= EXTSTS_TCPPKT; | 1160 | extsts |= EXTSTS_TCPPKT; |
1162 | else if (IPPROTO_UDP == skb->nh.iph->protocol) | 1161 | else if (IPPROTO_UDP == ip_hdr(skb)->protocol) |
1163 | extsts |= EXTSTS_UDPPKT; | 1162 | extsts |= EXTSTS_UDPPKT; |
1164 | } | 1163 | } |
1165 | 1164 | ||
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index d670ac74824f..76fe9dd8e841 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c | |||
@@ -334,8 +334,6 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev) | |||
334 | break; | 334 | break; |
335 | } | 335 | } |
336 | 336 | ||
337 | skb->dev = dev; | ||
338 | |||
339 | dma = pci_map_single(mac->dma_pdev, skb->data, skb->len, | 337 | dma = pci_map_single(mac->dma_pdev, skb->data, skb->len, |
340 | PCI_DMA_FROMDEVICE); | 338 | PCI_DMA_FROMDEVICE); |
341 | 339 | ||
@@ -731,16 +729,18 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
731 | dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD; | 729 | dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD; |
732 | 730 | ||
733 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 731 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
734 | switch (skb->nh.iph->protocol) { | 732 | const unsigned char *nh = skb_network_header(skb); |
733 | |||
734 | switch (ip_hdr(skb)->protocol) { | ||
735 | case IPPROTO_TCP: | 735 | case IPPROTO_TCP: |
736 | dflags |= XCT_MACTX_CSUM_TCP; | 736 | dflags |= XCT_MACTX_CSUM_TCP; |
737 | dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); | 737 | dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); |
738 | dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); | 738 | dflags |= XCT_MACTX_IPO(nh - skb->data); |
739 | break; | 739 | break; |
740 | case IPPROTO_UDP: | 740 | case IPPROTO_UDP: |
741 | dflags |= XCT_MACTX_CSUM_UDP; | 741 | dflags |= XCT_MACTX_CSUM_UDP; |
742 | dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); | 742 | dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); |
743 | dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); | 743 | dflags |= XCT_MACTX_IPO(nh - skb->data); |
744 | break; | 744 | break; |
745 | } | 745 | } |
746 | } | 746 | } |
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index 6ca4e4fa6b88..df8998b4f37e 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c | |||
@@ -1344,7 +1344,7 @@ static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
1344 | 1344 | ||
1345 | tp->tx_info[entry].skb = skb; | 1345 | tp->tx_info[entry].skb = skb; |
1346 | /* tp->tx_info[entry].mapping = 0; */ | 1346 | /* tp->tx_info[entry].mapping = 0; */ |
1347 | memcpy (tp->tx_buf[entry], skb->data, skb->len); | 1347 | skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len); |
1348 | 1348 | ||
1349 | /* Note: the chip doesn't have auto-pad! */ | 1349 | /* Note: the chip doesn't have auto-pad! */ |
1350 | NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)), | 1350 | NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)), |
@@ -1565,7 +1565,6 @@ static void netdrv_rx_interrupt (struct net_device *dev, | |||
1565 | 1565 | ||
1566 | skb = dev_alloc_skb (pkt_size + 2); | 1566 | skb = dev_alloc_skb (pkt_size + 2); |
1567 | if (skb) { | 1567 | if (skb) { |
1568 | skb->dev = dev; | ||
1569 | skb_reserve (skb, 2); /* 16 byte align the IP fields. */ | 1568 | skb_reserve (skb, 2); /* 16 byte align the IP fields. */ |
1570 | 1569 | ||
1571 | eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); | 1570 | eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); |
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index c7bd9c1c7f31..2b395ee21f75 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c | |||
@@ -1056,7 +1056,6 @@ static int el3_rx(struct net_device *dev, int worklimit) | |||
1056 | DEBUG(3, " Receiving packet size %d status %4.4x.\n", | 1056 | DEBUG(3, " Receiving packet size %d status %4.4x.\n", |
1057 | pkt_len, rx_status); | 1057 | pkt_len, rx_status); |
1058 | if (skb != NULL) { | 1058 | if (skb != NULL) { |
1059 | skb->dev = dev; | ||
1060 | skb_reserve(skb, 2); | 1059 | skb_reserve(skb, 2); |
1061 | insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), | 1060 | insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), |
1062 | ((pkt_len+3)>>2)); | 1061 | ((pkt_len+3)>>2)); |
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index 461e8274ef69..143ae2ff309e 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c | |||
@@ -883,7 +883,6 @@ static int el3_rx(struct net_device *dev) | |||
883 | DEBUG(3, " Receiving packet size %d status %4.4x.\n", | 883 | DEBUG(3, " Receiving packet size %d status %4.4x.\n", |
884 | pkt_len, rx_status); | 884 | pkt_len, rx_status); |
885 | if (skb != NULL) { | 885 | if (skb != NULL) { |
886 | skb->dev = dev; | ||
887 | skb_reserve(skb, 2); | 886 | skb_reserve(skb, 2); |
888 | insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), | 887 | insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), |
889 | (pkt_len+3)>>2); | 888 | (pkt_len+3)>>2); |
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 6139048f8117..808fae1577e0 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c | |||
@@ -1136,7 +1136,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1136 | ei_block_output(dev, length, skb->data, output_page); | 1136 | ei_block_output(dev, length, skb->data, output_page); |
1137 | else { | 1137 | else { |
1138 | memset(packet, 0, ETH_ZLEN); | 1138 | memset(packet, 0, ETH_ZLEN); |
1139 | memcpy(packet, skb->data, skb->len); | 1139 | skb_copy_from_linear_data(skb, packet, skb->len); |
1140 | ei_block_output(dev, length, packet, output_page); | 1140 | ei_block_output(dev, length, packet, output_page); |
1141 | } | 1141 | } |
1142 | 1142 | ||
@@ -1496,7 +1496,6 @@ static void ei_receive(struct net_device *dev) | |||
1496 | else | 1496 | else |
1497 | { | 1497 | { |
1498 | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ | 1498 | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ |
1499 | skb->dev = dev; | ||
1500 | skb_put(skb, pkt_len); /* Make room */ | 1499 | skb_put(skb, pkt_len); /* Make room */ |
1501 | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); | 1500 | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); |
1502 | skb->protocol=eth_type_trans(skb,dev); | 1501 | skb->protocol=eth_type_trans(skb,dev); |
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 0d7de617e535..3f93d4933235 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -999,7 +999,6 @@ static void fjn_rx(struct net_device *dev) | |||
999 | lp->stats.rx_dropped++; | 999 | lp->stats.rx_dropped++; |
1000 | break; | 1000 | break; |
1001 | } | 1001 | } |
1002 | skb->dev = dev; | ||
1003 | 1002 | ||
1004 | skb_reserve(skb, 2); | 1003 | skb_reserve(skb, 2); |
1005 | insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), | 1004 | insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), |
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index 3b707747a811..73da611fd536 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c | |||
@@ -1182,12 +1182,10 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt) | |||
1182 | skb = dev_alloc_skb(pkt_len+2); | 1182 | skb = dev_alloc_skb(pkt_len+2); |
1183 | 1183 | ||
1184 | if (skb != NULL) { | 1184 | if (skb != NULL) { |
1185 | skb->dev = dev; | ||
1186 | |||
1187 | skb_reserve(skb, 2); | 1185 | skb_reserve(skb, 2); |
1188 | insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); | 1186 | insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); |
1189 | if (pkt_len & 1) | 1187 | if (pkt_len & 1) |
1190 | *(skb->tail-1) = inb(ioaddr + AM2150_RCV); | 1188 | *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV); |
1191 | skb->protocol = eth_type_trans(skb, dev); | 1189 | skb->protocol = eth_type_trans(skb, dev); |
1192 | 1190 | ||
1193 | netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ | 1191 | netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ |
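skb->tail is no longer dereferenced directly because, with CONFIG_NET_SKBUFF_DATA_USES_OFFSET (introduced alongside these conversions for 64-bit builds), tail is stored as an offset rather than a pointer; skb_tail_pointer() yields a usable pointer in either layout. The odd-byte fixup above becomes, as a sketch:

    #include <linux/skbuff.h>

    /* Patch the final byte after an odd-length PIO transfer. */
    static void store_last_byte(struct sk_buff *skb, u8 value)
    {
            /* was: *(skb->tail - 1) = value; -- breaks when skb->tail
             * is an offset instead of a pointer */
            *(skb_tail_pointer(skb) - 1) = value;
    }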
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 2561f76033ea..7912dbd14251 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -1669,7 +1669,6 @@ static void smc_rx(struct net_device *dev) | |||
1669 | (packet_length+1)>>1); | 1669 | (packet_length+1)>>1); |
1670 | skb->protocol = eth_type_trans(skb, dev); | 1670 | skb->protocol = eth_type_trans(skb, dev); |
1671 | 1671 | ||
1672 | skb->dev = dev; | ||
1673 | netif_rx(skb); | 1672 | netif_rx(skb); |
1674 | dev->last_rx = jiffies; | 1673 | dev->last_rx = jiffies; |
1675 | smc->stats.rx_packets++; | 1674 | smc->stats.rx_packets++; |
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 5879e7c36988..809ec440b8eb 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -1226,7 +1226,6 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1226 | (pktlen+1)>>1); | 1226 | (pktlen+1)>>1); |
1227 | } | 1227 | } |
1228 | skb->protocol = eth_type_trans(skb, dev); | 1228 | skb->protocol = eth_type_trans(skb, dev); |
1229 | skb->dev = dev; | ||
1230 | netif_rx(skb); | 1229 | netif_rx(skb); |
1231 | dev->last_rx = jiffies; | 1230 | dev->last_rx = jiffies; |
1232 | lp->stats.rx_packets++; | 1231 | lp->stats.rx_packets++; |
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 4d94ba7899bf..0791360a6a66 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -1206,7 +1206,6 @@ static void pcnet32_rx_entry(struct net_device *dev, | |||
1206 | PCI_DMA_FROMDEVICE); | 1206 | PCI_DMA_FROMDEVICE); |
1207 | skb_put(skb, pkt_len); | 1207 | skb_put(skb, pkt_len); |
1208 | lp->rx_skbuff[entry] = newskb; | 1208 | lp->rx_skbuff[entry] = newskb; |
1209 | newskb->dev = dev; | ||
1210 | lp->rx_dma_addr[entry] = | 1209 | lp->rx_dma_addr[entry] = |
1211 | pci_map_single(lp->pci_dev, | 1210 | pci_map_single(lp->pci_dev, |
1212 | newskb->data, | 1211 | newskb->data, |
diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 6bb085f54437..8754cf3356b0 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c | |||
@@ -546,7 +546,7 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
546 | struct ethhdr *eth; | 546 | struct ethhdr *eth; |
547 | unsigned char *rawp; | 547 | unsigned char *rawp; |
548 | 548 | ||
549 | skb->mac.raw=skb->data; | 549 | skb_reset_mac_header(skb); |
550 | skb_pull(skb,dev->hard_header_len); | 550 | skb_pull(skb,dev->hard_header_len); |
551 | eth = eth_hdr(skb); | 551 | eth = eth_hdr(skb); |
552 | 552 | ||
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index ef58e4128782..6d596ca50cfd 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -88,8 +88,6 @@ struct ppp_file { | |||
88 | #define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp) | 88 | #define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp) |
89 | #define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) | 89 | #define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) |
90 | 90 | ||
91 | #define ROUNDUP(n, x) (((n) + (x) - 1) / (x)) | ||
92 | |||
93 | /* | 91 | /* |
94 | * Data structure describing one ppp unit. | 92 | * Data structure describing one ppp unit. |
95 | * A ppp unit corresponds to a ppp network interface device | 93 | * A ppp unit corresponds to a ppp network interface device |
@@ -1297,7 +1295,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1297 | */ | 1295 | */ |
1298 | fragsize = len; | 1296 | fragsize = len; |
1299 | if (nfree > 1) | 1297 | if (nfree > 1) |
1300 | fragsize = ROUNDUP(fragsize, nfree); | 1298 | fragsize = DIV_ROUND_UP(fragsize, nfree); |
1301 | /* nbigger channels get fragsize bytes, the rest get fragsize-1, | 1299 | /* nbigger channels get fragsize bytes, the rest get fragsize-1, |
1302 | except if nbigger==0, then they all get fragsize. */ | 1300 | except if nbigger==0, then they all get fragsize. */ |
1303 | nbigger = len % nfree; | 1301 | nbigger = len % nfree; |
@@ -1685,7 +1683,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) | |||
1685 | skb_pull_rcsum(skb, 2); | 1683 | skb_pull_rcsum(skb, 2); |
1686 | skb->dev = ppp->dev; | 1684 | skb->dev = ppp->dev; |
1687 | skb->protocol = htons(npindex_to_ethertype[npi]); | 1685 | skb->protocol = htons(npindex_to_ethertype[npi]); |
1688 | skb->mac.raw = skb->data; | 1686 | skb_reset_mac_header(skb); |
1689 | netif_rx(skb); | 1687 | netif_rx(skb); |
1690 | ppp->dev->last_rx = jiffies; | 1688 | ppp->dev->last_rx = jiffies; |
1691 | } | 1689 | } |
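The deleted ROUNDUP macro expanded to ((n + x - 1) / x), which is exactly DIV_ROUND_UP from <linux/kernel.h>, so ppp_mp_explode()'s fragment sizing is unchanged: a ceiling division of the payload over the free channels. For instance (sketch):

    #include <linux/kernel.h>

    /* Ceiling division: 1500 bytes over 4 free channels -> 375 each;
     * 1501 bytes over 4 channels -> 376. */
    static unsigned int base_fragsize(unsigned int len, unsigned int nfree)
    {
            return DIV_ROUND_UP(len, nfree);
    }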
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index b6f0e9a25e26..5918fab38349 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c | |||
@@ -594,7 +594,8 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) | |||
594 | return NULL; | 594 | return NULL; |
595 | } | 595 | } |
596 | skb_reserve(npkt,2); | 596 | skb_reserve(npkt,2); |
597 | memcpy(skb_put(npkt,skb->len), skb->data, skb->len); | 597 | skb_copy_from_linear_data(skb, |
598 | skb_put(npkt, skb->len), skb->len); | ||
598 | kfree_skb(skb); | 599 | kfree_skb(skb); |
599 | skb = npkt; | 600 | skb = npkt; |
600 | } | 601 | } |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index ebfa2967cd68..6f98834e6ace 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -207,7 +207,7 @@ static inline struct pppox_sock *get_item(unsigned long sid, | |||
207 | 207 | ||
208 | static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) | 208 | static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) |
209 | { | 209 | { |
210 | struct net_device *dev = NULL; | 210 | struct net_device *dev; |
211 | int ifindex; | 211 | int ifindex; |
212 | 212 | ||
213 | dev = dev_get_by_name(sp->sa_addr.pppoe.dev); | 213 | dev = dev_get_by_name(sp->sa_addr.pppoe.dev); |
@@ -218,20 +218,6 @@ static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) | |||
218 | return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); | 218 | return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); |
219 | } | 219 | } |
220 | 220 | ||
221 | static inline int set_item(struct pppox_sock *po) | ||
222 | { | ||
223 | int i; | ||
224 | |||
225 | if (!po) | ||
226 | return -EINVAL; | ||
227 | |||
228 | write_lock_bh(&pppoe_hash_lock); | ||
229 | i = __set_item(po); | ||
230 | write_unlock_bh(&pppoe_hash_lock); | ||
231 | |||
232 | return i; | ||
233 | } | ||
234 | |||
235 | static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex) | 221 | static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex) |
236 | { | 222 | { |
237 | struct pppox_sock *ret; | 223 | struct pppox_sock *ret; |
@@ -255,54 +241,53 @@ static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int | |||
255 | static void pppoe_flush_dev(struct net_device *dev) | 241 | static void pppoe_flush_dev(struct net_device *dev) |
256 | { | 242 | { |
257 | int hash; | 243 | int hash; |
258 | |||
259 | BUG_ON(dev == NULL); | 244 | BUG_ON(dev == NULL); |
260 | 245 | ||
261 | read_lock_bh(&pppoe_hash_lock); | 246 | write_lock_bh(&pppoe_hash_lock); |
262 | for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) { | 247 | for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) { |
263 | struct pppox_sock *po = item_hash_table[hash]; | 248 | struct pppox_sock *po = item_hash_table[hash]; |
264 | 249 | ||
265 | while (po != NULL) { | 250 | while (po != NULL) { |
266 | if (po->pppoe_dev == dev) { | 251 | struct sock *sk = sk_pppox(po); |
267 | struct sock *sk = sk_pppox(po); | 252 | if (po->pppoe_dev != dev) { |
268 | 253 | po = po->next; | |
269 | sock_hold(sk); | 254 | continue; |
270 | po->pppoe_dev = NULL; | 255 | } |
256 | po->pppoe_dev = NULL; | ||
257 | dev_put(dev); | ||
271 | 258 | ||
272 | /* We hold a reference to SK, now drop the | ||
273 | * hash table lock so that we may attempt | ||
274 | * to lock the socket (which can sleep). | ||
275 | */ | ||
276 | read_unlock_bh(&pppoe_hash_lock); | ||
277 | 259 | ||
278 | lock_sock(sk); | 260 | /* We always grab the socket lock, followed by the |
261 | * pppoe_hash_lock, in that order. Since we should | ||
262 | * hold the sock lock while doing any unbinding, | ||
263 | * we need to release the lock we're holding. | ||
264 | * Hold a reference to the sock so it doesn't disappear | ||
265 | * as we're jumping between locks. | ||
266 | */ | ||
279 | 267 | ||
280 | if (sk->sk_state & | 268 | sock_hold(sk); |
281 | (PPPOX_CONNECTED | PPPOX_BOUND)) { | ||
282 | pppox_unbind_sock(sk); | ||
283 | dev_put(dev); | ||
284 | sk->sk_state = PPPOX_ZOMBIE; | ||
285 | sk->sk_state_change(sk); | ||
286 | } | ||
287 | 269 | ||
288 | release_sock(sk); | 270 | write_unlock_bh(&pppoe_hash_lock); |
271 | lock_sock(sk); | ||
289 | 272 | ||
290 | sock_put(sk); | 273 | if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { |
274 | pppox_unbind_sock(sk); | ||
275 | sk->sk_state = PPPOX_ZOMBIE; | ||
276 | sk->sk_state_change(sk); | ||
277 | } | ||
291 | 278 | ||
292 | read_lock_bh(&pppoe_hash_lock); | 279 | release_sock(sk); |
280 | sock_put(sk); | ||
293 | 281 | ||
294 | /* Now restart from the beginning of this | 282 | /* Restart scan at the beginning of this hash chain. |
295 | * hash chain. We always NULL out pppoe_dev | 283 | * While the lock was dropped the chain contents may |
296 | * so we are guaranteed to make forward | 284 | * have changed. |
297 | * progress. | 285 | */ |
298 | */ | 286 | write_lock_bh(&pppoe_hash_lock); |
299 | po = item_hash_table[hash]; | 287 | po = item_hash_table[hash]; |
300 | continue; | ||
301 | } | ||
302 | po = po->next; | ||
303 | } | 288 | } |
304 | } | 289 | } |
305 | read_unlock_bh(&pppoe_hash_lock); | 290 | write_unlock_bh(&pppoe_hash_lock); |
306 | } | 291 | } |
307 | 292 | ||
308 | static int pppoe_device_event(struct notifier_block *this, | 293 | static int pppoe_device_event(struct notifier_block *this, |
@@ -344,10 +329,10 @@ static struct notifier_block pppoe_notifier = { | |||
344 | static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) | 329 | static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) |
345 | { | 330 | { |
346 | struct pppox_sock *po = pppox_sk(sk); | 331 | struct pppox_sock *po = pppox_sk(sk); |
347 | struct pppox_sock *relay_po = NULL; | 332 | struct pppox_sock *relay_po; |
348 | 333 | ||
349 | if (sk->sk_state & PPPOX_BOUND) { | 334 | if (sk->sk_state & PPPOX_BOUND) { |
350 | struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw; | 335 | struct pppoe_hdr *ph = pppoe_hdr(skb); |
351 | int len = ntohs(ph->length); | 336 | int len = ntohs(ph->length); |
352 | skb_pull_rcsum(skb, sizeof(struct pppoe_hdr)); | 337 | skb_pull_rcsum(skb, sizeof(struct pppoe_hdr)); |
353 | if (pskb_trim_rcsum(skb, len)) | 338 | if (pskb_trim_rcsum(skb, len)) |
@@ -401,7 +386,7 @@ static int pppoe_rcv(struct sk_buff *skb, | |||
401 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) | 386 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) |
402 | goto out; | 387 | goto out; |
403 | 388 | ||
404 | ph = (struct pppoe_hdr *) skb->nh.raw; | 389 | ph = pppoe_hdr(skb); |
405 | 390 | ||
406 | po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); | 391 | po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); |
407 | if (po != NULL) | 392 | if (po != NULL) |
@@ -433,7 +418,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb, | |||
433 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) | 418 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) |
434 | goto out; | 419 | goto out; |
435 | 420 | ||
436 | ph = (struct pppoe_hdr *) skb->nh.raw; | 421 | ph = pppoe_hdr(skb); |
437 | if (ph->code != PADT_CODE) | 422 | if (ph->code != PADT_CODE) |
438 | goto abort; | 423 | goto abort; |
439 | 424 | ||
@@ -514,36 +499,49 @@ static int pppoe_release(struct socket *sock) | |||
514 | { | 499 | { |
515 | struct sock *sk = sock->sk; | 500 | struct sock *sk = sock->sk; |
516 | struct pppox_sock *po; | 501 | struct pppox_sock *po; |
517 | int error = 0; | ||
518 | 502 | ||
519 | if (!sk) | 503 | if (!sk) |
520 | return 0; | 504 | return 0; |
521 | 505 | ||
522 | if (sock_flag(sk, SOCK_DEAD)) | 506 | lock_sock(sk); |
507 | if (sock_flag(sk, SOCK_DEAD)){ | ||
508 | release_sock(sk); | ||
523 | return -EBADF; | 509 | return -EBADF; |
510 | } | ||
524 | 511 | ||
525 | pppox_unbind_sock(sk); | 512 | pppox_unbind_sock(sk); |
526 | 513 | ||
527 | /* Signal the death of the socket. */ | 514 | /* Signal the death of the socket. */ |
528 | sk->sk_state = PPPOX_DEAD; | 515 | sk->sk_state = PPPOX_DEAD; |
529 | 516 | ||
517 | |||
518 | /* Write lock on hash lock protects the entire "po" struct from | ||
519 | * concurrent updates via pppoe_flush_dev. The "po" struct should | ||
520 | * be considered part of the hash table contents, thus protected | ||
521 | * by the hash table lock */ | ||
522 | write_lock_bh(&pppoe_hash_lock); | ||
523 | |||
530 | po = pppox_sk(sk); | 524 | po = pppox_sk(sk); |
531 | if (po->pppoe_pa.sid) { | 525 | if (po->pppoe_pa.sid) { |
532 | delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); | 526 | __delete_item(po->pppoe_pa.sid, |
527 | po->pppoe_pa.remote, po->pppoe_ifindex); | ||
533 | } | 528 | } |
534 | 529 | ||
535 | if (po->pppoe_dev) | 530 | if (po->pppoe_dev) { |
536 | dev_put(po->pppoe_dev); | 531 | dev_put(po->pppoe_dev); |
532 | po->pppoe_dev = NULL; | ||
533 | } | ||
537 | 534 | ||
538 | po->pppoe_dev = NULL; | 535 | write_unlock_bh(&pppoe_hash_lock); |
539 | 536 | ||
540 | sock_orphan(sk); | 537 | sock_orphan(sk); |
541 | sock->sk = NULL; | 538 | sock->sk = NULL; |
542 | 539 | ||
543 | skb_queue_purge(&sk->sk_receive_queue); | 540 | skb_queue_purge(&sk->sk_receive_queue); |
541 | release_sock(sk); | ||
544 | sock_put(sk); | 542 | sock_put(sk); |
545 | 543 | ||
546 | return error; | 544 | return 0; |
547 | } | 545 | } |
548 | 546 | ||
549 | 547 | ||
@@ -599,14 +597,18 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
599 | po->pppoe_dev = dev; | 597 | po->pppoe_dev = dev; |
600 | po->pppoe_ifindex = dev->ifindex; | 598 | po->pppoe_ifindex = dev->ifindex; |
601 | 599 | ||
602 | if (!(dev->flags & IFF_UP)) | 600 | write_lock_bh(&pppoe_hash_lock); |
601 | if (!(dev->flags & IFF_UP)){ | ||
602 | write_unlock_bh(&pppoe_hash_lock); | ||
603 | goto err_put; | 603 | goto err_put; |
604 | } | ||
604 | 605 | ||
605 | memcpy(&po->pppoe_pa, | 606 | memcpy(&po->pppoe_pa, |
606 | &sp->sa_addr.pppoe, | 607 | &sp->sa_addr.pppoe, |
607 | sizeof(struct pppoe_addr)); | 608 | sizeof(struct pppoe_addr)); |
608 | 609 | ||
609 | error = set_item(po); | 610 | error = __set_item(po); |
611 | write_unlock_bh(&pppoe_hash_lock); | ||
610 | if (error < 0) | 612 | if (error < 0) |
611 | goto err_put; | 613 | goto err_put; |
612 | 614 | ||
@@ -762,10 +764,10 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, | |||
762 | static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, | 764 | static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, |
763 | struct msghdr *m, size_t total_len) | 765 | struct msghdr *m, size_t total_len) |
764 | { | 766 | { |
765 | struct sk_buff *skb = NULL; | 767 | struct sk_buff *skb; |
766 | struct sock *sk = sock->sk; | 768 | struct sock *sk = sock->sk; |
767 | struct pppox_sock *po = pppox_sk(sk); | 769 | struct pppox_sock *po = pppox_sk(sk); |
768 | int error = 0; | 770 | int error; |
769 | struct pppoe_hdr hdr; | 771 | struct pppoe_hdr hdr; |
770 | struct pppoe_hdr *ph; | 772 | struct pppoe_hdr *ph; |
771 | struct net_device *dev; | 773 | struct net_device *dev; |
@@ -799,7 +801,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
799 | 801 | ||
800 | /* Reserve space for headers. */ | 802 | /* Reserve space for headers. */ |
801 | skb_reserve(skb, dev->hard_header_len); | 803 | skb_reserve(skb, dev->hard_header_len); |
802 | skb->nh.raw = skb->data; | 804 | skb_reset_network_header(skb); |
803 | 805 | ||
804 | skb->dev = dev; | 806 | skb->dev = dev; |
805 | 807 | ||
@@ -869,7 +871,8 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) | |||
869 | goto abort; | 871 | goto abort; |
870 | 872 | ||
871 | skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr)); | 873 | skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr)); |
872 | memcpy(skb_put(skb2, skb->len), skb->data, skb->len); | 874 | skb_copy_from_linear_data(skb, skb_put(skb2, skb->len), |
875 | skb->len); | ||
873 | } else { | 876 | } else { |
874 | /* Make a clone so as to not disturb the original skb, | 877 | /* Make a clone so as to not disturb the original skb, |
875 | * give dev_queue_xmit something it can free. | 878 | * give dev_queue_xmit something it can free. |
@@ -884,7 +887,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) | |||
884 | memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); | 887 | memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); |
885 | skb2->protocol = __constant_htons(ETH_P_PPP_SES); | 888 | skb2->protocol = __constant_htons(ETH_P_PPP_SES); |
886 | 889 | ||
887 | skb2->nh.raw = skb2->data; | 890 | skb_reset_network_header(skb2); |
888 | 891 | ||
889 | skb2->dev = dev; | 892 | skb2->dev = dev; |
890 | 893 | ||
@@ -929,10 +932,8 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
929 | struct msghdr *m, size_t total_len, int flags) | 932 | struct msghdr *m, size_t total_len, int flags) |
930 | { | 933 | { |
931 | struct sock *sk = sock->sk; | 934 | struct sock *sk = sock->sk; |
932 | struct sk_buff *skb = NULL; | 935 | struct sk_buff *skb; |
933 | int error = 0; | 936 | int error = 0; |
934 | int len; | ||
935 | struct pppoe_hdr *ph = NULL; | ||
936 | 937 | ||
937 | if (sk->sk_state & PPPOX_BOUND) { | 938 | if (sk->sk_state & PPPOX_BOUND) { |
938 | error = -EIO; | 939 | error = -EIO; |
@@ -942,26 +943,21 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
942 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 943 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
943 | flags & MSG_DONTWAIT, &error); | 944 | flags & MSG_DONTWAIT, &error); |
944 | 945 | ||
945 | if (error < 0) { | 946 | if (error < 0) |
946 | goto end; | 947 | goto end; |
947 | } | ||
948 | 948 | ||
949 | m->msg_namelen = 0; | 949 | m->msg_namelen = 0; |
950 | 950 | ||
951 | if (skb) { | 951 | if (skb) { |
952 | error = 0; | 952 | struct pppoe_hdr *ph = pppoe_hdr(skb); |
953 | ph = (struct pppoe_hdr *) skb->nh.raw; | 953 | const int len = ntohs(ph->length); |
954 | len = ntohs(ph->length); | ||
955 | 954 | ||
956 | error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len); | 955 | error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len); |
957 | if (error < 0) | 956 | if (error == 0) |
958 | goto do_skb_free; | 957 | error = len; |
959 | error = len; | ||
960 | } | 958 | } |
961 | 959 | ||
962 | do_skb_free: | 960 | kfree_skb(skb); |
963 | if (skb) | ||
964 | kfree_skb(skb); | ||
965 | end: | 961 | end: |
966 | return error; | 962 | return error; |
967 | } | 963 | } |
@@ -991,7 +987,7 @@ out: | |||
991 | 987 | ||
992 | static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos) | 988 | static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos) |
993 | { | 989 | { |
994 | struct pppox_sock *po = NULL; | 990 | struct pppox_sock *po; |
995 | int i = 0; | 991 | int i = 0; |
996 | 992 | ||
997 | for (; i < PPPOE_HASH_SIZE; i++) { | 993 | for (; i < PPPOE_HASH_SIZE; i++) { |
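The pppoe_flush_dev() rewrite fixes a lock-order inversion: the rule is now socket lock first, then pppoe_hash_lock, so the flush path must drop the hash lock before lock_sock() and, because the chain may have mutated in the gap, restart the scan of that chain afterwards. The locking skeleton, with the unbind details elided:

    write_lock_bh(&pppoe_hash_lock);
    for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) {
            struct pppox_sock *po = item_hash_table[hash];

            while (po != NULL) {
                    struct sock *sk = sk_pppox(po);

                    if (po->pppoe_dev != dev) {
                            po = po->next;
                            continue;
                    }
                    po->pppoe_dev = NULL;       /* unbind under hash lock */
                    dev_put(dev);

                    sock_hold(sk);              /* keep sk alive across the gap */
                    write_unlock_bh(&pppoe_hash_lock);
                    lock_sock(sk);
                    /* ... pppox_unbind_sock(sk) and state change here ... */
                    release_sock(sk);
                    sock_put(sk);

                    write_lock_bh(&pppoe_hash_lock);
                    po = item_hash_table[hash]; /* rescan: chain may have changed */
            }
    }
    write_unlock_bh(&pppoe_hash_lock);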
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c index 9315046b3f55..3f8115db4d54 100644 --- a/drivers/net/pppox.c +++ b/drivers/net/pppox.c | |||
@@ -58,7 +58,7 @@ void pppox_unbind_sock(struct sock *sk) | |||
58 | { | 58 | { |
59 | /* Clear connection to ppp device, if attached. */ | 59 | /* Clear connection to ppp device, if attached. */ |
60 | 60 | ||
61 | if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE)) { | 61 | if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED | PPPOX_ZOMBIE)) { |
62 | ppp_unregister_channel(&pppox_sk(sk)->chan); | 62 | ppp_unregister_channel(&pppox_sk(sk)->chan); |
63 | sk->sk_state = PPPOX_DEAD; | 63 | sk->sk_state = PPPOX_DEAD; |
64 | } | 64 | } |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index a8246eb2f8d9..7b80fb7a9d9b 100755 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -1873,7 +1873,6 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, | |||
1873 | pci_unmap_len(lrg_buf_cb2, maplen), | 1873 | pci_unmap_len(lrg_buf_cb2, maplen), |
1874 | PCI_DMA_FROMDEVICE); | 1874 | PCI_DMA_FROMDEVICE); |
1875 | prefetch(skb->data); | 1875 | prefetch(skb->data); |
1876 | skb->dev = qdev->ndev; | ||
1877 | skb->ip_summed = CHECKSUM_NONE; | 1876 | skb->ip_summed = CHECKSUM_NONE; |
1878 | skb->protocol = eth_type_trans(skb, qdev->ndev); | 1877 | skb->protocol = eth_type_trans(skb, qdev->ndev); |
1879 | 1878 | ||
@@ -1928,7 +1927,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, | |||
1928 | * Copy the ethhdr from first buffer to second. This | 1927 | * Copy the ethhdr from first buffer to second. This |
1929 | * is necessary for 3022 IP completions. | 1928 | * is necessary for 3022 IP completions. |
1930 | */ | 1929 | */ |
1931 | memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size); | 1930 | skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, |
1931 | skb_push(skb2, size), size); | ||
1932 | } else { | 1932 | } else { |
1933 | u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); | 1933 | u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); |
1934 | if (checksum & | 1934 | if (checksum & |
@@ -1946,7 +1946,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, | |||
1946 | skb2->ip_summed = CHECKSUM_UNNECESSARY; | 1946 | skb2->ip_summed = CHECKSUM_UNNECESSARY; |
1947 | } | 1947 | } |
1948 | } | 1948 | } |
1949 | skb2->dev = qdev->ndev; | ||
1950 | skb2->protocol = eth_type_trans(skb2, qdev->ndev); | 1949 | skb2->protocol = eth_type_trans(skb2, qdev->ndev); |
1951 | 1950 | ||
1952 | netif_receive_skb(skb2); | 1951 | netif_receive_skb(skb2); |
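Annotation: the skb->dev assignments removed in this driver (and repeatedly below) were redundant on receive, since eth_type_trans() and the other *_type_trans() helpers already have the device in hand and record it on the skb. A simplified excerpt of the start of eth_type_trans() from net/ethernet/eth.c as of this series (hedged paraphrase, not the full function):

__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        struct ethhdr *eth;

        skb->dev = dev;              /* owning device recorded here */
        skb_reset_mac_header(skb);   /* ... together with the MAC header */
        skb_pull(skb, ETH_HLEN);
        eth = eth_hdr(skb);
        /* ... classify broadcast/multicast/other-host, return protocol ... */
}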
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 6a77b8a92245..45876a854f00 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -2284,7 +2284,7 @@ static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev) | |||
2284 | return LargeSend | ((mss & MSSMask) << MSSShift); | 2284 | return LargeSend | ((mss & MSSMask) << MSSShift); |
2285 | } | 2285 | } |
2286 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2286 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2287 | const struct iphdr *ip = skb->nh.iph; | 2287 | const struct iphdr *ip = ip_hdr(skb); |
2288 | 2288 | ||
2289 | if (ip->protocol == IPPROTO_TCP) | 2289 | if (ip->protocol == IPPROTO_TCP) |
2290 | return IPCS | TCPCS; | 2290 | return IPCS | TCPCS; |
@@ -2586,7 +2586,6 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp, | |||
2586 | pci_action(tp->pci_dev, le64_to_cpu(desc->addr), | 2586 | pci_action(tp->pci_dev, le64_to_cpu(desc->addr), |
2587 | tp->rx_buf_sz, PCI_DMA_FROMDEVICE); | 2587 | tp->rx_buf_sz, PCI_DMA_FROMDEVICE); |
2588 | 2588 | ||
2589 | skb->dev = dev; | ||
2590 | skb_put(skb, pkt_size); | 2589 | skb_put(skb, pkt_size); |
2591 | skb->protocol = eth_type_trans(skb, dev); | 2590 | skb->protocol = eth_type_trans(skb, dev); |
2592 | 2591 | ||
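Annotation: r8169's checksum path shows the header-accessor half of the conversion. The skb->nh union is gone in 2.6.22, replaced by a single network_header field plus typed helpers. A sketch of the pointer-flavored definitions (64-bit builds store offsets instead, behind the same API):

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
        return skb->network_header;  /* offset builds: skb->head + offset */
}

static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
{
        return (struct iphdr *)skb_network_header(skb);
}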
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index b7ff484af3e1..df6b73872fdb 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c | |||
@@ -115,7 +115,6 @@ static int rionet_rx_clean(struct net_device *ndev) | |||
115 | 115 | ||
116 | rnet->rx_skb[i]->data = data; | 116 | rnet->rx_skb[i]->data = data; |
117 | skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE); | 117 | skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE); |
118 | rnet->rx_skb[i]->dev = ndev; | ||
119 | rnet->rx_skb[i]->protocol = | 118 | rnet->rx_skb[i]->protocol = |
120 | eth_type_trans(rnet->rx_skb[i], ndev); | 119 | eth_type_trans(rnet->rx_skb[i], ndev); |
121 | error = netif_rx(rnet->rx_skb[i]); | 120 | error = netif_rx(rnet->rx_skb[i]); |
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c index d81536f90df6..25c73d47daad 100644 --- a/drivers/net/rrunner.c +++ b/drivers/net/rrunner.c | |||
@@ -1029,7 +1029,6 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) | |||
1029 | goto defer; | 1029 | goto defer; |
1030 | } | 1030 | } |
1031 | } | 1031 | } |
1032 | skb->dev = dev; | ||
1033 | skb->protocol = hippi_type_trans(skb, dev); | 1032 | skb->protocol = hippi_type_trans(skb, dev); |
1034 | 1033 | ||
1035 | netif_rx(skb); /* send it up */ | 1034 | netif_rx(skb); /* send it up */ |
@@ -1452,7 +1451,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1452 | } | 1451 | } |
1453 | skb_reserve(new_skb, 8); | 1452 | skb_reserve(new_skb, 8); |
1454 | skb_put(new_skb, len); | 1453 | skb_put(new_skb, len); |
1455 | memcpy(new_skb->data, skb->data, len); | 1454 | skb_copy_from_linear_data(skb, new_skb->data, len); |
1456 | dev_kfree_skb(skb); | 1455 | dev_kfree_skb(skb); |
1457 | skb = new_skb; | 1456 | skb = new_skb; |
1458 | } | 1457 | } |
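Annotation: rrunner's copy-on-transmit is one of many sites converted from raw memcpy() on skb->data to the named linear-data helpers. These are thin wrappers (2.6.22 definitions from include/linux/skbuff.h), but they make the copy direction explicit and keep skb->data arithmetic out of drivers:

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
                                             void *to, const unsigned int len)
{
        memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
                                                    const int offset, void *to,
                                                    const unsigned int len)
{
        memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
                                           const void *from,
                                           const unsigned int len)
{
        memcpy(skb->data, from, len);
}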
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 46ebf141ee5a..600d3ff347fc 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -2195,7 +2195,7 @@ static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \ | |||
2195 | frag_list->next = NULL; | 2195 | frag_list->next = NULL; |
2196 | tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); | 2196 | tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); |
2197 | frag_list->data = tmp; | 2197 | frag_list->data = tmp; |
2198 | frag_list->tail = tmp; | 2198 | skb_reset_tail_pointer(frag_list); |
2199 | 2199 | ||
2200 | /* Buffer-2 receives L4 data payload */ | 2200 | /* Buffer-2 receives L4 data payload */ |
2201 | ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, | 2201 | ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, |
@@ -2349,7 +2349,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2349 | tmp += ALIGN_SIZE; | 2349 | tmp += ALIGN_SIZE; |
2350 | tmp &= ~ALIGN_SIZE; | 2350 | tmp &= ~ALIGN_SIZE; |
2351 | skb->data = (void *) (unsigned long)tmp; | 2351 | skb->data = (void *) (unsigned long)tmp; |
2352 | skb->tail = (void *) (unsigned long)tmp; | 2352 | skb_reset_tail_pointer(skb); |
2353 | 2353 | ||
2354 | if (!(((struct RxD3*)rxdp)->Buffer0_ptr)) | 2354 | if (!(((struct RxD3*)rxdp)->Buffer0_ptr)) |
2355 | ((struct RxD3*)rxdp)->Buffer0_ptr = | 2355 | ((struct RxD3*)rxdp)->Buffer0_ptr = |
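Annotation: s2io realigns skb->data by hand for its multi-buffer receive mode, so it must resynchronize the tail marker afterwards; assigning skb->tail directly stopped being portable once 64-bit builds switched tail to an offset from skb->head. skb_reset_tail_pointer() hides the two layouts, roughly:

#ifdef NET_SKBUFF_DATA_USES_OFFSET   /* 64-bit: tail stored as an offset */
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
        skb->tail = skb->data - skb->head;
}
#else                                /* 32-bit: tail stored as a pointer */
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
        skb->tail = skb->data;
}
#endif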
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c index 143958f1ef0a..ad94358ece89 100644 --- a/drivers/net/saa9730.c +++ b/drivers/net/saa9730.c | |||
@@ -688,7 +688,6 @@ static int lan_saa9730_rx(struct net_device *dev) | |||
688 | } else { | 688 | } else { |
689 | lp->stats.rx_bytes += len; | 689 | lp->stats.rx_bytes += len; |
690 | lp->stats.rx_packets++; | 690 | lp->stats.rx_packets++; |
691 | skb->dev = dev; | ||
692 | skb_reserve(skb, 2); /* 16 byte align */ | 691 | skb_reserve(skb, 2); /* 16 byte align */ |
693 | skb_put(skb, len); /* make room */ | 692 | skb_put(skb, len); /* make room */ |
694 | eth_copy_and_sum(skb, | 693 | eth_copy_and_sum(skb, |
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index b9fa4fbb1398..1de3eec1a792 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c | |||
@@ -834,7 +834,7 @@ printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[3 | |||
834 | goto dropped_frame; | 834 | goto dropped_frame; |
835 | } | 835 | } |
836 | skb->dev = dev; | 836 | skb->dev = dev; |
837 | skb->mac.raw = skb->data; | 837 | skb_reset_mac_header(skb); |
838 | skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16]; | 838 | skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16]; |
839 | insw(ioaddr, skb_put(skb, NewDatagramDataSize), | 839 | insw(ioaddr, skb_put(skb, NewDatagramDataSize), |
840 | NewDatagramDataSize / 2); | 840 | NewDatagramDataSize / 2); |
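Annotation: same idea for the MAC header in sb1000 (and in slip below): skb->mac.raw = skb->data becomes skb_reset_mac_header(skb), meaning "the MAC header starts at the current data pointer". Roughly:

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
        skb->mac_header = skb->data;
        /* offset builds: skb->mac_header = skb->data - skb->head; */
}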
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 103c3174ab54..0a3a379b634c 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -933,9 +933,6 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) | |||
933 | } | 933 | } |
934 | 934 | ||
935 | sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); | 935 | sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); |
936 | |||
937 | /* mark skbuff owned by our device */ | ||
938 | sb_new->dev = d->sbdma_eth->sbm_dev; | ||
939 | } | 936 | } |
940 | else { | 937 | else { |
941 | sb_new = sb; | 938 | sb_new = sb; |
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index c32c21af3fdd..5b7284c955dc 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c | |||
@@ -814,7 +814,6 @@ static void _sc92031_rx_tasklet(struct net_device *dev) | |||
814 | memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size); | 814 | memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size); |
815 | } | 815 | } |
816 | 816 | ||
817 | skb->dev = dev; | ||
818 | skb->protocol = eth_type_trans(skb, dev); | 817 | skb->protocol = eth_type_trans(skb, dev); |
819 | dev->last_rx = jiffies; | 818 | dev->last_rx = jiffies; |
820 | netif_rx(skb); | 819 | netif_rx(skb); |
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index 0d6c95c7aedf..4bce7c4f373c 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c | |||
@@ -550,7 +550,6 @@ static void seeq8005_rx(struct net_device *dev) | |||
550 | lp->stats.rx_dropped++; | 550 | lp->stats.rx_dropped++; |
551 | break; | 551 | break; |
552 | } | 552 | } |
553 | skb->dev = dev; | ||
554 | skb_reserve(skb, 2); /* align data on 16 byte */ | 553 | skb_reserve(skb, 2); /* align data on 16 byte */ |
555 | buf = skb_put(skb,pkt_len); | 554 | buf = skb_put(skb,pkt_len); |
556 | 555 | ||
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index 52ed522a234c..d8c9c5d66d4f 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c | |||
@@ -318,7 +318,6 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp | |||
318 | skb = dev_alloc_skb(len + 2); | 318 | skb = dev_alloc_skb(len + 2); |
319 | 319 | ||
320 | if (skb) { | 320 | if (skb) { |
321 | skb->dev = dev; | ||
322 | skb_reserve(skb, 2); | 321 | skb_reserve(skb, 2); |
323 | skb_put(skb, len); | 322 | skb_put(skb, len); |
324 | 323 | ||
@@ -535,7 +534,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
535 | * entry and the HPC got to the end of the chain before we | 534 | * entry and the HPC got to the end of the chain before we |
536 | * added this new entry and restarted it. | 535 | * added this new entry and restarted it. |
537 | */ | 536 | */ |
538 | memcpy((char *)(long)td->buf_vaddr, skb->data, skblen); | 537 | skb_copy_from_linear_data(skb, (char *)(long)td->buf_vaddr, skblen); |
539 | if (len != skblen) | 538 | if (len != skblen) |
540 | memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen); | 539 | memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen); |
541 | td->tdma.cntinfo = (len & HPCDMA_BCNT) | | 540 | td->tdma.cntinfo = (len & HPCDMA_BCNT) | |
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index 34463ce6f132..bc8de48da313 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
@@ -632,7 +632,6 @@ static int sis190_rx_interrupt(struct net_device *dev, | |||
632 | pci_action(tp->pci_dev, le32_to_cpu(desc->addr), | 632 | pci_action(tp->pci_dev, le32_to_cpu(desc->addr), |
633 | tp->rx_buf_sz, PCI_DMA_FROMDEVICE); | 633 | tp->rx_buf_sz, PCI_DMA_FROMDEVICE); |
634 | 634 | ||
635 | skb->dev = dev; | ||
636 | skb_put(skb, pkt_size); | 635 | skb_put(skb, pkt_size); |
637 | skb->protocol = eth_type_trans(skb, dev); | 636 | skb->protocol = eth_type_trans(skb, dev); |
638 | 637 | ||
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index b2a3b19d773a..dea0126723da 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -1160,7 +1160,6 @@ sis900_init_rx_ring(struct net_device *net_dev) | |||
1160 | buffer */ | 1160 | buffer */ |
1161 | break; | 1161 | break; |
1162 | } | 1162 | } |
1163 | skb->dev = net_dev; | ||
1164 | sis_priv->rx_skbuff[i] = skb; | 1163 | sis_priv->rx_skbuff[i] = skb; |
1165 | sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE; | 1164 | sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE; |
1166 | sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev, | 1165 | sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev, |
@@ -1800,7 +1799,6 @@ static int sis900_rx(struct net_device *net_dev) | |||
1800 | sis_priv->stats.rx_packets++; | 1799 | sis_priv->stats.rx_packets++; |
1801 | sis_priv->dirty_rx++; | 1800 | sis_priv->dirty_rx++; |
1802 | refill_rx_ring: | 1801 | refill_rx_ring: |
1803 | skb->dev = net_dev; | ||
1804 | sis_priv->rx_skbuff[entry] = skb; | 1802 | sis_priv->rx_skbuff[entry] = skb; |
1805 | sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; | 1803 | sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; |
1806 | sis_priv->rx_ring[entry].bufptr = | 1804 | sis_priv->rx_ring[entry].bufptr = |
@@ -1832,7 +1830,6 @@ refill_rx_ring: | |||
1832 | sis_priv->stats.rx_dropped++; | 1830 | sis_priv->stats.rx_dropped++; |
1833 | break; | 1831 | break; |
1834 | } | 1832 | } |
1835 | skb->dev = net_dev; | ||
1836 | sis_priv->rx_skbuff[entry] = skb; | 1833 | sis_priv->rx_skbuff[entry] = skb; |
1837 | sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; | 1834 | sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; |
1838 | sis_priv->rx_ring[entry].bufptr = | 1835 | sis_priv->rx_ring[entry].bufptr = |
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c index e94ab256b540..e0a93005e6dc 100644 --- a/drivers/net/sk98lin/skge.c +++ b/drivers/net/sk98lin/skge.c | |||
@@ -1562,10 +1562,10 @@ struct sk_buff *pMessage) /* pointer to send-message */ | |||
1562 | pTxd->pMBuf = pMessage; | 1562 | pTxd->pMBuf = pMessage; |
1563 | 1563 | ||
1564 | if (pMessage->ip_summed == CHECKSUM_PARTIAL) { | 1564 | if (pMessage->ip_summed == CHECKSUM_PARTIAL) { |
1565 | u16 hdrlen = pMessage->h.raw - pMessage->data; | 1565 | u16 hdrlen = skb_transport_offset(pMessage); |
1566 | u16 offset = hdrlen + pMessage->csum_offset; | 1566 | u16 offset = hdrlen + pMessage->csum_offset; |
1567 | 1567 | ||
1568 | if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) && | 1568 | if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) && |
1569 | (pAC->GIni.GIChipRev == 0) && | 1569 | (pAC->GIni.GIChipRev == 0) && |
1570 | (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { | 1570 | (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { |
1571 | pTxd->TBControl = BMU_TCP_CHECK; | 1571 | pTxd->TBControl = BMU_TCP_CHECK; |
@@ -1681,7 +1681,7 @@ struct sk_buff *pMessage) /* pointer to send-message */ | |||
1681 | ** Does the HW need to evaluate checksum for TCP or UDP packets? | 1681 | ** Does the HW need to evaluate checksum for TCP or UDP packets? |
1682 | */ | 1682 | */ |
1683 | if (pMessage->ip_summed == CHECKSUM_PARTIAL) { | 1683 | if (pMessage->ip_summed == CHECKSUM_PARTIAL) { |
1684 | u16 hdrlen = pMessage->h.raw - pMessage->data; | 1684 | u16 hdrlen = skb_transport_offset(pMessage); |
1685 | u16 offset = hdrlen + pMessage->csum_offset; | 1685 | u16 offset = hdrlen + pMessage->csum_offset; |
1686 | 1686 | ||
1687 | Control = BMU_STFWD; | 1687 | Control = BMU_STFWD; |
@@ -1691,7 +1691,7 @@ struct sk_buff *pMessage) /* pointer to send-message */ | |||
1691 | ** opcode for udp is not working in the hardware yet | 1691 | ** opcode for udp is not working in the hardware yet |
1692 | ** (Revision 2.0) | 1692 | ** (Revision 2.0) |
1693 | */ | 1693 | */ |
1694 | if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) && | 1694 | if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) && |
1695 | (pAC->GIni.GIChipRev == 0) && | 1695 | (pAC->GIni.GIChipRev == 0) && |
1696 | (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { | 1696 | (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { |
1697 | Control |= BMU_TCP_CHECK; | 1697 | Control |= BMU_TCP_CHECK; |
@@ -2127,7 +2127,7 @@ rx_start: | |||
2127 | (dma_addr_t) PhysAddr, | 2127 | (dma_addr_t) PhysAddr, |
2128 | FrameLength, | 2128 | FrameLength, |
2129 | PCI_DMA_FROMDEVICE); | 2129 | PCI_DMA_FROMDEVICE); |
2130 | memcpy(pNewMsg->data, pMsg, FrameLength); | 2130 | skb_copy_to_linear_data(pNewMsg, pMsg, FrameLength); |
2131 | 2131 | ||
2132 | pci_dma_sync_single_for_device(pAC->PciDev, | 2132 | pci_dma_sync_single_for_device(pAC->PciDev, |
2133 | (dma_addr_t) PhysAddr, | 2133 | (dma_addr_t) PhysAddr, |
@@ -2193,7 +2193,6 @@ rx_start: | |||
2193 | SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC, | 2193 | SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC, |
2194 | FrameLength, pRxPort->PortIndex); | 2194 | FrameLength, pRxPort->PortIndex); |
2195 | 2195 | ||
2196 | pMsg->dev = pAC->dev[pRxPort->PortIndex]; | ||
2197 | pMsg->protocol = eth_type_trans(pMsg, | 2196 | pMsg->protocol = eth_type_trans(pMsg, |
2198 | pAC->dev[pRxPort->PortIndex]); | 2197 | pAC->dev[pRxPort->PortIndex]); |
2199 | netif_rx(pMsg); | 2198 | netif_rx(pMsg); |
@@ -2246,7 +2245,6 @@ rx_start: | |||
2246 | (IFF_PROMISC | IFF_ALLMULTI)) != 0 || | 2245 | (IFF_PROMISC | IFF_ALLMULTI)) != 0 || |
2247 | (ForRlmt & SK_RLMT_RX_PROTOCOL) == | 2246 | (ForRlmt & SK_RLMT_RX_PROTOCOL) == |
2248 | SK_RLMT_RX_PROTOCOL) { | 2247 | SK_RLMT_RX_PROTOCOL) { |
2249 | pMsg->dev = pAC->dev[pRxPort->PortIndex]; | ||
2250 | pMsg->protocol = eth_type_trans(pMsg, | 2248 | pMsg->protocol = eth_type_trans(pMsg, |
2251 | pAC->dev[pRxPort->PortIndex]); | 2249 | pAC->dev[pRxPort->PortIndex]); |
2252 | netif_rx(pMsg); | 2250 | netif_rx(pMsg); |
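Annotation: sk98lin computed the checksum-start offset as h.raw - data by hand; skb_transport_offset() is the named equivalent, and ipip_hdr() gives a typed struct iphdr view at the transport header, which is exactly where the old h.ipiph shortcut pointed. Per the 2.6.22 headers (hedged sketch):

static inline int skb_transport_offset(const struct sk_buff *skb)
{
        return skb_transport_header(skb) - skb->data;
}

static inline struct iphdr *ipip_hdr(const struct sk_buff *skb)
{
        return (struct iphdr *)skb_transport_header(skb);
}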
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index 9733a11c6146..a7ef6c8b7721 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c | |||
@@ -1680,7 +1680,6 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd, | |||
1680 | rxd->rxd_os.skb = NULL; | 1680 | rxd->rxd_os.skb = NULL; |
1681 | skb_trim(skb, len); | 1681 | skb_trim(skb, len); |
1682 | skb->protocol = fddi_type_trans(skb, bp->dev); | 1682 | skb->protocol = fddi_type_trans(skb, bp->dev); |
1683 | skb->dev = bp->dev; /* pass up device pointer */ | ||
1684 | 1683 | ||
1685 | netif_rx(skb); | 1684 | netif_rx(skb); |
1686 | bp->dev->last_rx = jiffies; | 1685 | bp->dev->last_rx = jiffies; |
@@ -1938,7 +1937,7 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc, | |||
1938 | } | 1937 | } |
1939 | skb_reserve(skb, 3); | 1938 | skb_reserve(skb, 3); |
1940 | skb_put(skb, len); | 1939 | skb_put(skb, len); |
1941 | memcpy(skb->data, look_ahead, len); | 1940 | skb_copy_to_linear_data(skb, look_ahead, len); |
1942 | 1941 | ||
1943 | // deliver frame to system | 1942 | // deliver frame to system |
1944 | skb->protocol = fddi_type_trans(skb, smc->os.dev); | 1943 | skb->protocol = fddi_type_trans(skb, smc->os.dev); |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index d476a3cc2e94..f1a0e6c0fbdd 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -2654,12 +2654,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
2654 | td->dma_hi = map >> 32; | 2654 | td->dma_hi = map >> 32; |
2655 | 2655 | ||
2656 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2656 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2657 | int offset = skb->h.raw - skb->data; | 2657 | const int offset = skb_transport_offset(skb); |
2658 | 2658 | ||
2659 | /* This seems backwards, but it is what the sk98lin | 2659 | /* This seems backwards, but it is what the sk98lin |
2660 | * does. Looks like hardware is wrong? | 2660 | * does. Looks like hardware is wrong? |
2661 | */ | 2661 | */ |
2662 | if (skb->h.ipiph->protocol == IPPROTO_UDP | 2662 | if (ipip_hdr(skb)->protocol == IPPROTO_UDP |
2663 | && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) | 2663 | && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) |
2664 | control = BMU_TCP_CHECK; | 2664 | control = BMU_TCP_CHECK; |
2665 | else | 2665 | else |
@@ -2950,7 +2950,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
2950 | pci_dma_sync_single_for_cpu(skge->hw->pdev, | 2950 | pci_dma_sync_single_for_cpu(skge->hw->pdev, |
2951 | pci_unmap_addr(e, mapaddr), | 2951 | pci_unmap_addr(e, mapaddr), |
2952 | len, PCI_DMA_FROMDEVICE); | 2952 | len, PCI_DMA_FROMDEVICE); |
2953 | memcpy(skb->data, e->skb->data, len); | 2953 | skb_copy_from_linear_data(e->skb, skb->data, len); |
2954 | pci_dma_sync_single_for_device(skge->hw->pdev, | 2954 | pci_dma_sync_single_for_device(skge->hw->pdev, |
2955 | pci_unmap_addr(e, mapaddr), | 2955 | pci_unmap_addr(e, mapaddr), |
2956 | len, PCI_DMA_FROMDEVICE); | 2956 | len, PCI_DMA_FROMDEVICE); |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index ac36152c68bf..238c2ca34da6 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/ethtool.h> | 32 | #include <linux/ethtool.h> |
33 | #include <linux/pci.h> | 33 | #include <linux/pci.h> |
34 | #include <linux/ip.h> | 34 | #include <linux/ip.h> |
35 | #include <net/ip.h> | ||
35 | #include <linux/tcp.h> | 36 | #include <linux/tcp.h> |
36 | #include <linux/in.h> | 37 | #include <linux/in.h> |
37 | #include <linux/delay.h> | 38 | #include <linux/delay.h> |
@@ -1391,8 +1392,8 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1391 | /* Check for TCP Segmentation Offload */ | 1392 | /* Check for TCP Segmentation Offload */ |
1392 | mss = skb_shinfo(skb)->gso_size; | 1393 | mss = skb_shinfo(skb)->gso_size; |
1393 | if (mss != 0) { | 1394 | if (mss != 0) { |
1394 | mss += ((skb->h.th->doff - 5) * 4); /* TCP options */ | 1395 | mss += tcp_optlen(skb); /* TCP options */ |
1395 | mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); | 1396 | mss += ip_hdrlen(skb) + sizeof(struct tcphdr); |
1396 | mss += ETH_HLEN; | 1397 | mss += ETH_HLEN; |
1397 | 1398 | ||
1398 | if (mss != sky2->tx_last_mss) { | 1399 | if (mss != sky2->tx_last_mss) { |
@@ -1420,14 +1421,14 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1420 | 1421 | ||
1421 | /* Handle TCP checksum offload */ | 1422 | /* Handle TCP checksum offload */ |
1422 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1423 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1423 | unsigned offset = skb->h.raw - skb->data; | 1424 | const unsigned offset = skb_transport_offset(skb); |
1424 | u32 tcpsum; | 1425 | u32 tcpsum; |
1425 | 1426 | ||
1426 | tcpsum = offset << 16; /* sum start */ | 1427 | tcpsum = offset << 16; /* sum start */ |
1427 | tcpsum |= offset + skb->csum_offset; /* sum write */ | 1428 | tcpsum |= offset + skb->csum_offset; /* sum write */ |
1428 | 1429 | ||
1429 | ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; | 1430 | ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; |
1430 | if (skb->nh.iph->protocol == IPPROTO_UDP) | 1431 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) |
1431 | ctrl |= UDPTCP; | 1432 | ctrl |= UDPTCP; |
1432 | 1433 | ||
1433 | if (tcpsum != sky2->tx_tcpsum) { | 1434 | if (tcpsum != sky2->tx_tcpsum) { |
@@ -1970,7 +1971,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2, | |||
1970 | skb_reserve(skb, 2); | 1971 | skb_reserve(skb, 2); |
1971 | pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, | 1972 | pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, |
1972 | length, PCI_DMA_FROMDEVICE); | 1973 | length, PCI_DMA_FROMDEVICE); |
1973 | memcpy(skb->data, re->skb->data, length); | 1974 | skb_copy_from_linear_data(re->skb, skb->data, length); |
1974 | skb->ip_summed = re->skb->ip_summed; | 1975 | skb->ip_summed = re->skb->ip_summed; |
1975 | skb->csum = re->skb->csum; | 1976 | skb->csum = re->skb->csum; |
1976 | pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, | 1977 | pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, |
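Annotation: the sky2 TSO path programs a hardware MSS that covers the headers as well as the payload, so the conversion is a one-for-one substitution: (doff - 5) * 4 is tcp_optlen() and iph->ihl * 4 is ip_hdrlen(). With hypothetical numbers, gso_size = 1448, no TCP options, and a 20-byte IP header, the descriptor value is 1448 + 0 + (20 + 20) + 14 = 1502. The helpers, per the 2.6.22 headers:

static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
        return (tcp_hdr(skb)->doff - 5) * 4;   /* options beyond 20 bytes */
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
        return ip_hdr(skb)->ihl * 4;           /* IHL counts 32-bit words */
}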
diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 2f4b1de7a2b4..65bd20fac820 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c | |||
@@ -363,7 +363,7 @@ sl_bump(struct slip *sl) | |||
363 | } | 363 | } |
364 | skb->dev = sl->dev; | 364 | skb->dev = sl->dev; |
365 | memcpy(skb_put(skb,count), sl->rbuff, count); | 365 | memcpy(skb_put(skb,count), sl->rbuff, count); |
366 | skb->mac.raw=skb->data; | 366 | skb_reset_mac_header(skb); |
367 | skb->protocol=htons(ETH_P_IP); | 367 | skb->protocol=htons(ETH_P_IP); |
368 | netif_rx(skb); | 368 | netif_rx(skb); |
369 | sl->dev->last_rx = jiffies; | 369 | sl->dev->last_rx = jiffies; |
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index c95614131980..8a2109a913b6 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c | |||
@@ -502,7 +502,6 @@ static inline void smc911x_rcv(struct net_device *dev) | |||
502 | DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name,); | 502 | DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name,); |
503 | PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64); | 503 | PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64); |
504 | dev->last_rx = jiffies; | 504 | dev->last_rx = jiffies; |
505 | skb->dev = dev; | ||
506 | skb->protocol = eth_type_trans(skb, dev); | 505 | skb->protocol = eth_type_trans(skb, dev); |
507 | netif_rx(skb); | 506 | netif_rx(skb); |
508 | lp->stats.rx_packets++; | 507 | lp->stats.rx_packets++; |
@@ -1307,7 +1306,6 @@ smc911x_rx_dma_irq(int dma, void *data) | |||
1307 | lp->current_rx_skb = NULL; | 1306 | lp->current_rx_skb = NULL; |
1308 | PRINT_PKT(skb->data, skb->len); | 1307 | PRINT_PKT(skb->data, skb->len); |
1309 | dev->last_rx = jiffies; | 1308 | dev->last_rx = jiffies; |
1310 | skb->dev = dev; | ||
1311 | skb->protocol = eth_type_trans(skb, dev); | 1309 | skb->protocol = eth_type_trans(skb, dev); |
1312 | netif_rx(skb); | 1310 | netif_rx(skb); |
1313 | lp->stats.rx_packets++; | 1311 | lp->stats.rx_packets++; |
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c index bd6e84506c29..36c1ebadbf20 100644 --- a/drivers/net/smc9194.c +++ b/drivers/net/smc9194.c | |||
@@ -1262,7 +1262,6 @@ static void smc_rcv(struct net_device *dev) | |||
1262 | 1262 | ||
1263 | skb_reserve( skb, 2 ); /* 16 bit alignment */ | 1263 | skb_reserve( skb, 2 ); /* 16 bit alignment */ |
1264 | 1264 | ||
1265 | skb->dev = dev; | ||
1266 | data = skb_put( skb, packet_length); | 1265 | data = skb_put( skb, packet_length); |
1267 | 1266 | ||
1268 | #ifdef USE_32_BIT | 1267 | #ifdef USE_32_BIT |
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 49f4b7712ebf..01cc3c742c38 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -568,7 +568,6 @@ static inline void smc_rcv(struct net_device *dev) | |||
568 | PRINT_PKT(data, packet_len - 4); | 568 | PRINT_PKT(data, packet_len - 4); |
569 | 569 | ||
570 | dev->last_rx = jiffies; | 570 | dev->last_rx = jiffies; |
571 | skb->dev = dev; | ||
572 | skb->protocol = eth_type_trans(skb, dev); | 571 | skb->protocol = eth_type_trans(skb, dev); |
573 | netif_rx(skb); | 572 | netif_rx(skb); |
574 | lp->stats.rx_packets++; | 573 | lp->stats.rx_packets++; |
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c index ed7aa0a5acca..c6320c719931 100644 --- a/drivers/net/sonic.c +++ b/drivers/net/sonic.c | |||
@@ -85,7 +85,6 @@ static int sonic_open(struct net_device *dev) | |||
85 | dev->name); | 85 | dev->name); |
86 | return -ENOMEM; | 86 | return -ENOMEM; |
87 | } | 87 | } |
88 | skb->dev = dev; | ||
89 | /* align IP header unless DMA requires otherwise */ | 88 | /* align IP header unless DMA requires otherwise */ |
90 | if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) | 89 | if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) |
91 | skb_reserve(skb, 2); | 90 | skb_reserve(skb, 2); |
@@ -451,7 +450,6 @@ static void sonic_rx(struct net_device *dev) | |||
451 | lp->stats.rx_dropped++; | 450 | lp->stats.rx_dropped++; |
452 | break; | 451 | break; |
453 | } | 452 | } |
454 | new_skb->dev = dev; | ||
455 | /* provide 16 byte IP header alignment unless DMA requires otherwise */ | 453 | /* provide 16 byte IP header alignment unless DMA requires otherwise */ |
456 | if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) | 454 | if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) |
457 | skb_reserve(new_skb, 2); | 455 | skb_reserve(new_skb, 2); |
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index e3019d52c30f..230da14b1b68 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -720,7 +720,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
720 | spin_unlock_irqrestore(&chain->lock, flags); | 720 | spin_unlock_irqrestore(&chain->lock, flags); |
721 | 721 | ||
722 | if (skb->protocol == htons(ETH_P_IP) && skb->ip_summed == CHECKSUM_PARTIAL) | 722 | if (skb->protocol == htons(ETH_P_IP) && skb->ip_summed == CHECKSUM_PARTIAL) |
723 | switch (skb->nh.iph->protocol) { | 723 | switch (ip_hdr(skb)->protocol) { |
724 | case IPPROTO_TCP: | 724 | case IPPROTO_TCP: |
725 | hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; | 725 | hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; |
726 | break; | 726 | break; |
@@ -990,7 +990,6 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, | |||
990 | netdev = card->netdev; | 990 | netdev = card->netdev; |
991 | 991 | ||
992 | skb = descr->skb; | 992 | skb = descr->skb; |
993 | skb->dev = netdev; | ||
994 | skb_put(skb, hwdescr->valid_size); | 993 | skb_put(skb, hwdescr->valid_size); |
995 | 994 | ||
996 | /* the card seems to add 2 bytes of junk in front | 995 | /* the card seems to add 2 bytes of junk in front |
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 8bba2e3da7e1..9d6e454a8f98 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c | |||
@@ -1452,7 +1452,6 @@ static int __netdev_rx(struct net_device *dev, int *quota) | |||
1452 | to a minimally-sized skbuff. */ | 1452 | to a minimally-sized skbuff. */ |
1453 | if (pkt_len < rx_copybreak | 1453 | if (pkt_len < rx_copybreak |
1454 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1454 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1455 | skb->dev = dev; | ||
1456 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1455 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1457 | pci_dma_sync_single_for_cpu(np->pci_dev, | 1456 | pci_dma_sync_single_for_cpu(np->pci_dev, |
1458 | np->rx_info[entry].mapping, | 1457 | np->rx_info[entry].mapping, |
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index 4757aa647c7a..396c3d961f88 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c | |||
@@ -775,7 +775,6 @@ static void sun3_82586_rcv_int(struct net_device *dev) | |||
775 | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); | 775 | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); |
776 | if(skb != NULL) | 776 | if(skb != NULL) |
777 | { | 777 | { |
778 | skb->dev = dev; | ||
779 | skb_reserve(skb,2); | 778 | skb_reserve(skb,2); |
780 | skb_put(skb,totlen); | 779 | skb_put(skb,totlen); |
781 | eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0); | 780 | eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0); |
@@ -1027,7 +1026,7 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
1027 | memset((char *)p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); | 1026 | memset((char *)p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); |
1028 | len = ETH_ZLEN; | 1027 | len = ETH_ZLEN; |
1029 | } | 1028 | } |
1030 | memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); | 1029 | skb_copy_from_linear_data(skb, p->xmit_cbuffs[p->xmit_count], skb->len); |
1031 | 1030 | ||
1032 | #if (NUM_XMIT_BUFFS == 1) | 1031 | #if (NUM_XMIT_BUFFS == 1) |
1033 | # ifdef NO_NOPCOMMANDS | 1032 | # ifdef NO_NOPCOMMANDS |
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c index 7bee45b42a2c..791e081fdc15 100644 --- a/drivers/net/sun3lance.c +++ b/drivers/net/sun3lance.c | |||
@@ -629,7 +629,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) | |||
629 | head->length = (-len) | 0xf000; | 629 | head->length = (-len) | 0xf000; |
630 | head->misc = 0; | 630 | head->misc = 0; |
631 | 631 | ||
632 | memcpy( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); | 632 | skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len); |
633 | if (len != skb->len) | 633 | if (len != skb->len) |
634 | memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); | 634 | memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); |
635 | 635 | ||
@@ -851,10 +851,9 @@ static int lance_rx( struct net_device *dev ) | |||
851 | } | 851 | } |
852 | 852 | ||
853 | 853 | ||
854 | skb->dev = dev; | ||
855 | skb_reserve( skb, 2 ); /* 16 byte align */ | 854 | skb_reserve( skb, 2 ); /* 16 byte align */ |
856 | skb_put( skb, pkt_len ); /* Make room */ | 855 | skb_put( skb, pkt_len ); /* Make room */ |
857 | // memcpy( skb->data, PKTBUF_ADDR(head), pkt_len ); | 856 | // skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len); |
858 | eth_copy_and_sum(skb, | 857 | eth_copy_and_sum(skb, |
859 | PKTBUF_ADDR(head), | 858 | PKTBUF_ADDR(head), |
860 | pkt_len, 0); | 859 | pkt_len, 0); |
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index 18f88853e1e5..2ad8d58dee3b 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c | |||
@@ -855,7 +855,6 @@ static void bigmac_rx(struct bigmac *bp) | |||
855 | drops++; | 855 | drops++; |
856 | goto drop_it; | 856 | goto drop_it; |
857 | } | 857 | } |
858 | copy_skb->dev = bp->dev; | ||
859 | skb_reserve(copy_skb, 2); | 858 | skb_reserve(copy_skb, 2); |
860 | skb_put(copy_skb, len); | 859 | skb_put(copy_skb, len); |
861 | sbus_dma_sync_single_for_cpu(bp->bigmac_sdev, | 860 | sbus_dma_sync_single_for_cpu(bp->bigmac_sdev, |
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index c06ecc8002b9..f51ba31970aa 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c | |||
@@ -1308,7 +1308,6 @@ static void rx_poll(unsigned long data) | |||
1308 | to a minimally-sized skbuff. */ | 1308 | to a minimally-sized skbuff. */ |
1309 | if (pkt_len < rx_copybreak | 1309 | if (pkt_len < rx_copybreak |
1310 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1310 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1311 | skb->dev = dev; | ||
1312 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1311 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1313 | pci_dma_sync_single_for_cpu(np->pci_dev, | 1312 | pci_dma_sync_single_for_cpu(np->pci_dev, |
1314 | desc->frag[0].addr, | 1313 | desc->frag[0].addr, |
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 08ea61db46fe..5da73212ac91 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c | |||
@@ -64,11 +64,9 @@ | |||
64 | #include <asm/uaccess.h> | 64 | #include <asm/uaccess.h> |
65 | #include <asm/irq.h> | 65 | #include <asm/irq.h> |
66 | 66 | ||
67 | #ifdef __sparc__ | 67 | #ifdef CONFIG_SPARC |
68 | #include <asm/idprom.h> | 68 | #include <asm/idprom.h> |
69 | #include <asm/openprom.h> | 69 | #include <asm/prom.h> |
70 | #include <asm/oplib.h> | ||
71 | #include <asm/pbm.h> | ||
72 | #endif | 70 | #endif |
73 | 71 | ||
74 | #ifdef CONFIG_PPC_PMAC | 72 | #ifdef CONFIG_PPC_PMAC |
@@ -845,11 +843,10 @@ static int gem_rx(struct gem *gp, int work_to_do) | |||
845 | goto drop_it; | 843 | goto drop_it; |
846 | } | 844 | } |
847 | 845 | ||
848 | copy_skb->dev = gp->dev; | ||
849 | skb_reserve(copy_skb, 2); | 846 | skb_reserve(copy_skb, 2); |
850 | skb_put(copy_skb, len); | 847 | skb_put(copy_skb, len); |
851 | pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 848 | pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
852 | memcpy(copy_skb->data, skb->data, len); | 849 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
853 | pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 850 | pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
854 | 851 | ||
855 | /* We'll reuse the original ring buffer. */ | 852 | /* We'll reuse the original ring buffer. */ |
@@ -1029,10 +1026,8 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1029 | 1026 | ||
1030 | ctrl = 0; | 1027 | ctrl = 0; |
1031 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1028 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1032 | u64 csum_start_off, csum_stuff_off; | 1029 | const u64 csum_start_off = skb_transport_offset(skb); |
1033 | 1030 | const u64 csum_stuff_off = csum_start_off + skb->csum_offset; | |
1034 | csum_start_off = (u64) (skb->h.raw - skb->data); | ||
1035 | csum_stuff_off = csum_start_off + skb->csum_offset; | ||
1036 | 1031 | ||
1037 | ctrl = (TXDCTRL_CENAB | | 1032 | ctrl = (TXDCTRL_CENAB | |
1038 | (csum_start_off << 15) | | 1033 | (csum_start_off << 15) | |
@@ -2849,7 +2844,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
2849 | return rc; | 2844 | return rc; |
2850 | } | 2845 | } |
2851 | 2846 | ||
2852 | #if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC)) | 2847 | #if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) |
2853 | /* Fetch MAC address from vital product data of PCI ROM. */ | 2848 | /* Fetch MAC address from vital product data of PCI ROM. */ |
2854 | static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) | 2849 | static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) |
2855 | { | 2850 | { |
@@ -2904,36 +2899,19 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) | |||
2904 | 2899 | ||
2905 | static int __devinit gem_get_device_address(struct gem *gp) | 2900 | static int __devinit gem_get_device_address(struct gem *gp) |
2906 | { | 2901 | { |
2907 | #if defined(__sparc__) || defined(CONFIG_PPC_PMAC) | 2902 | #if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) |
2908 | struct net_device *dev = gp->dev; | 2903 | struct net_device *dev = gp->dev; |
2909 | #endif | ||
2910 | |||
2911 | #if defined(__sparc__) | ||
2912 | struct pci_dev *pdev = gp->pdev; | ||
2913 | struct pcidev_cookie *pcp = pdev->sysdata; | ||
2914 | int use_idprom = 1; | ||
2915 | |||
2916 | if (pcp != NULL) { | ||
2917 | unsigned char *addr; | ||
2918 | int len; | ||
2919 | |||
2920 | addr = of_get_property(pcp->prom_node, "local-mac-address", | ||
2921 | &len); | ||
2922 | if (addr && len == 6) { | ||
2923 | use_idprom = 0; | ||
2924 | memcpy(dev->dev_addr, addr, 6); | ||
2925 | } | ||
2926 | } | ||
2927 | if (use_idprom) | ||
2928 | memcpy(dev->dev_addr, idprom->id_ethaddr, 6); | ||
2929 | #elif defined(CONFIG_PPC_PMAC) | ||
2930 | const unsigned char *addr; | 2904 | const unsigned char *addr; |
2931 | 2905 | ||
2932 | addr = get_property(gp->of_node, "local-mac-address", NULL); | 2906 | addr = get_property(gp->of_node, "local-mac-address", NULL); |
2933 | if (addr == NULL) { | 2907 | if (addr == NULL) { |
2908 | #ifdef CONFIG_SPARC | ||
2909 | addr = idprom->id_ethaddr; | ||
2910 | #else | ||
2934 | printk("\n"); | 2911 | printk("\n"); |
2935 | printk(KERN_ERR "%s: can't get mac-address\n", dev->name); | 2912 | printk(KERN_ERR "%s: can't get mac-address\n", dev->name); |
2936 | return -1; | 2913 | return -1; |
2914 | #endif | ||
2937 | } | 2915 | } |
2938 | memcpy(dev->dev_addr, addr, 6); | 2916 | memcpy(dev->dev_addr, addr, 6); |
2939 | #else | 2917 | #else |
@@ -3091,7 +3069,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, | |||
3091 | /* On Apple, we want a reference to the Open Firmware device-tree | 3069 | /* On Apple, we want a reference to the Open Firmware device-tree |
3092 | * node. We use it for clock control. | 3070 | * node. We use it for clock control. |
3093 | */ | 3071 | */ |
3094 | #ifdef CONFIG_PPC_PMAC | 3072 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) |
3095 | gp->of_node = pci_device_to_OF_node(pdev); | 3073 | gp->of_node = pci_device_to_OF_node(pdev); |
3096 | #endif | 3074 | #endif |
3097 | 3075 | ||
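Annotation: the large sungem rewrite works because sparc's PCI code now hangs a device node off the pci_dev just as powerpc does (pci_device_to_OF_node() instead of digging a pcidev_cookie out of pdev->sysdata), so one firmware lookup serves both architectures and only sparc keeps an IDPROM fallback. The merged shape, paraphrasing the diff above (error-path printks omitted):

static int gem_get_device_address(struct gem *gp)
{
        const unsigned char *addr;

        addr = get_property(gp->of_node, "local-mac-address", NULL);
        if (addr == NULL) {
#ifdef CONFIG_SPARC
                addr = idprom->id_ethaddr;   /* sparc IDPROM fallback */
#else
                return -1;                   /* no firmware address found */
#endif
        }
        memcpy(gp->dev->dev_addr, addr, 6);
        return 0;
}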
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index a70067c85cc9..58cf87c5751e 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h | |||
@@ -1025,7 +1025,7 @@ struct gem { | |||
1025 | 1025 | ||
1026 | struct pci_dev *pdev; | 1026 | struct pci_dev *pdev; |
1027 | struct net_device *dev; | 1027 | struct net_device *dev; |
1028 | #ifdef CONFIG_PPC_PMAC | 1028 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) |
1029 | struct device_node *of_node; | 1029 | struct device_node *of_node; |
1030 | #endif | 1030 | #endif |
1031 | }; | 1031 | }; |
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 192bbc91c731..51c3fe2108a3 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c | |||
@@ -55,9 +55,6 @@ | |||
55 | 55 | ||
56 | #ifdef CONFIG_PCI | 56 | #ifdef CONFIG_PCI |
57 | #include <linux/pci.h> | 57 | #include <linux/pci.h> |
58 | #ifdef CONFIG_SPARC | ||
59 | #include <asm/pbm.h> | ||
60 | #endif | ||
61 | #endif | 58 | #endif |
62 | 59 | ||
63 | #include "sunhme.h" | 60 | #include "sunhme.h" |
@@ -2058,11 +2055,10 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) | |||
2058 | goto drop_it; | 2055 | goto drop_it; |
2059 | } | 2056 | } |
2060 | 2057 | ||
2061 | copy_skb->dev = dev; | ||
2062 | skb_reserve(copy_skb, 2); | 2058 | skb_reserve(copy_skb, 2); |
2063 | skb_put(copy_skb, len); | 2059 | skb_put(copy_skb, len); |
2064 | hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE); | 2060 | hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE); |
2065 | memcpy(copy_skb->data, skb->data, len); | 2061 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
2066 | hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE); | 2062 | hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE); |
2067 | 2063 | ||
2068 | /* Reuse original ring buffer. */ | 2064 | /* Reuse original ring buffer. */ |
@@ -2270,10 +2266,8 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2270 | 2266 | ||
2271 | tx_flags = TXFLAG_OWN; | 2267 | tx_flags = TXFLAG_OWN; |
2272 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2268 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2273 | u32 csum_start_off, csum_stuff_off; | 2269 | const u32 csum_start_off = skb_transport_offset(skb); |
2274 | 2270 | const u32 csum_stuff_off = csum_start_off + skb->csum_offset; | |
2275 | csum_start_off = (u32) (skb->h.raw - skb->data); | ||
2276 | csum_stuff_off = csum_start_off + skb->csum_offset; | ||
2277 | 2271 | ||
2278 | tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | | 2272 | tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | |
2279 | ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) | | 2273 | ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) | |
@@ -2704,7 +2698,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe | |||
2704 | dev->dev_addr[i] = macaddr[i]; | 2698 | dev->dev_addr[i] = macaddr[i]; |
2705 | macaddr[5]++; | 2699 | macaddr[5]++; |
2706 | } else { | 2700 | } else { |
2707 | unsigned char *addr; | 2701 | const unsigned char *addr; |
2708 | int len; | 2702 | int len; |
2709 | 2703 | ||
2710 | addr = of_get_property(dp, "local-mac-address", &len); | 2704 | addr = of_get_property(dp, "local-mac-address", &len); |
@@ -2986,7 +2980,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
2986 | { | 2980 | { |
2987 | struct quattro *qp = NULL; | 2981 | struct quattro *qp = NULL; |
2988 | #ifdef CONFIG_SPARC | 2982 | #ifdef CONFIG_SPARC |
2989 | struct pcidev_cookie *pcp; | 2983 | struct device_node *dp; |
2990 | #endif | 2984 | #endif |
2991 | struct happy_meal *hp; | 2985 | struct happy_meal *hp; |
2992 | struct net_device *dev; | 2986 | struct net_device *dev; |
@@ -2998,13 +2992,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
2998 | 2992 | ||
2999 | /* Now make sure pci_dev cookie is there. */ | 2993 | /* Now make sure pci_dev cookie is there. */ |
3000 | #ifdef CONFIG_SPARC | 2994 | #ifdef CONFIG_SPARC |
3001 | pcp = pdev->sysdata; | 2995 | dp = pci_device_to_OF_node(pdev); |
3002 | if (pcp == NULL) { | 2996 | strcpy(prom_name, dp->name); |
3003 | printk(KERN_ERR "happymeal(PCI): Some PCI device info missing\n"); | ||
3004 | return -ENODEV; | ||
3005 | } | ||
3006 | |||
3007 | strcpy(prom_name, pcp->prom_node->name); | ||
3008 | #else | 2997 | #else |
3009 | if (is_quattro_p(pdev)) | 2998 | if (is_quattro_p(pdev)) |
3010 | strcpy(prom_name, "SUNW,qfe"); | 2999 | strcpy(prom_name, "SUNW,qfe"); |
@@ -3081,11 +3070,11 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
3081 | macaddr[5]++; | 3070 | macaddr[5]++; |
3082 | } else { | 3071 | } else { |
3083 | #ifdef CONFIG_SPARC | 3072 | #ifdef CONFIG_SPARC |
3084 | unsigned char *addr; | 3073 | const unsigned char *addr; |
3085 | int len; | 3074 | int len; |
3086 | 3075 | ||
3087 | if (qfe_slot != -1 && | 3076 | if (qfe_slot != -1 && |
3088 | (addr = of_get_property(pcp->prom_node, | 3077 | (addr = of_get_property(dp, |
3089 | "local-mac-address", &len)) != NULL | 3078 | "local-mac-address", &len)) != NULL |
3090 | && len == 6) { | 3079 | && len == 6) { |
3091 | memcpy(dev->dev_addr, addr, 6); | 3080 | memcpy(dev->dev_addr, addr, 6); |
@@ -3105,7 +3094,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, | |||
3105 | hp->tcvregs = (hpreg_base + 0x7000UL); | 3094 | hp->tcvregs = (hpreg_base + 0x7000UL); |
3106 | 3095 | ||
3107 | #ifdef CONFIG_SPARC | 3096 | #ifdef CONFIG_SPARC |
3108 | hp->hm_revision = of_getintprop_default(pcp->prom_node, "hm-rev", 0xff); | 3097 | hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff); |
3109 | if (hp->hm_revision == 0xff) { | 3098 | if (hp->hm_revision == 0xff) { |
3110 | unsigned char prev; | 3099 | unsigned char prev; |
3111 | 3100 | ||
@@ -3300,7 +3289,7 @@ static int __devinit hme_sbus_probe(struct of_device *dev, const struct of_devic | |||
3300 | { | 3289 | { |
3301 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); | 3290 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); |
3302 | struct device_node *dp = dev->node; | 3291 | struct device_node *dp = dev->node; |
3303 | char *model = of_get_property(dp, "model", NULL); | 3292 | const char *model = of_get_property(dp, "model", NULL); |
3304 | int is_qfe = (match->data != NULL); | 3293 | int is_qfe = (match->data != NULL); |
3305 | 3294 | ||
3306 | if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) | 3295 | if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) |
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index b0929a457b60..42722530ab24 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c | |||
@@ -547,7 +547,6 @@ static void lance_rx_dvma(struct net_device *dev) | |||
547 | 547 | ||
548 | lp->stats.rx_bytes += len; | 548 | lp->stats.rx_bytes += len; |
549 | 549 | ||
550 | skb->dev = dev; | ||
551 | skb_reserve(skb, 2); /* 16 byte align */ | 550 | skb_reserve(skb, 2); /* 16 byte align */ |
552 | skb_put(skb, len); /* make room */ | 551 | skb_put(skb, len); /* make room */ |
553 | eth_copy_and_sum(skb, | 552 | eth_copy_and_sum(skb, |
@@ -721,7 +720,6 @@ static void lance_rx_pio(struct net_device *dev) | |||
721 | 720 | ||
722 | lp->stats.rx_bytes += len; | 721 | lp->stats.rx_bytes += len; |
723 | 722 | ||
724 | skb->dev = dev; | ||
725 | skb_reserve (skb, 2); /* 16 byte align */ | 723 | skb_reserve (skb, 2); /* 16 byte align */ |
726 | skb_put(skb, len); /* make room */ | 724 | skb_put(skb, len); /* make room */ |
727 | lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); | 725 | lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); |
@@ -1145,7 +1143,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1145 | struct lance_init_block *ib = lp->init_block_mem; | 1143 | struct lance_init_block *ib = lp->init_block_mem; |
1146 | ib->btx_ring [entry].length = (-len) | 0xf000; | 1144 | ib->btx_ring [entry].length = (-len) | 0xf000; |
1147 | ib->btx_ring [entry].misc = 0; | 1145 | ib->btx_ring [entry].misc = 0; |
1148 | memcpy((char *)&ib->tx_buf [entry][0], skb->data, skblen); | 1146 | skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); |
1149 | if (len != skblen) | 1147 | if (len != skblen) |
1150 | memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); | 1148 | memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); |
1151 | ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); | 1149 | ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); |
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index f3bad56d476a..fa70e0b78af7 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c | |||
@@ -437,7 +437,6 @@ static void qe_rx(struct sunqe *qep) | |||
437 | drops++; | 437 | drops++; |
438 | qep->net_stats.rx_dropped++; | 438 | qep->net_stats.rx_dropped++; |
439 | } else { | 439 | } else { |
440 | skb->dev = qep->dev; | ||
441 | skb_reserve(skb, 2); | 440 | skb_reserve(skb, 2); |
442 | skb_put(skb, len); | 441 | skb_put(skb, len); |
443 | eth_copy_and_sum(skb, (unsigned char *) this_qbuf, | 442 | eth_copy_and_sum(skb, (unsigned char *) this_qbuf, |
@@ -593,7 +592,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
593 | /* Avoid a race... */ | 592 | /* Avoid a race... */ |
594 | qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; | 593 | qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; |
595 | 594 | ||
596 | memcpy(txbuf, skb->data, len); | 595 | skb_copy_from_linear_data(skb, txbuf, len); |
597 | 596 | ||
598 | qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; | 597 | qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; |
599 | qep->qe_block->qe_txd[entry].tx_flags = | 598 | qep->qe_block->qe_txd[entry].tx_flags = |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index e3a7e3ceab77..d7741e23f8de 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -1145,7 +1145,6 @@ tc35815_rx(struct net_device *dev) | |||
1145 | break; | 1145 | break; |
1146 | } | 1146 | } |
1147 | skb_reserve(skb, 2); /* 16 bit alignment */ | 1147 | skb_reserve(skb, 2); /* 16 bit alignment */ |
1148 | skb->dev = dev; | ||
1149 | 1148 | ||
1150 | data = skb_put(skb, pkt_len); | 1149 | data = skb_put(skb, pkt_len); |
1151 | 1150 | ||
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 256969e1300c..9488f49ea569 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -40,16 +40,16 @@ | |||
40 | #include <linux/dma-mapping.h> | 40 | #include <linux/dma-mapping.h> |
41 | 41 | ||
42 | #include <net/checksum.h> | 42 | #include <net/checksum.h> |
43 | #include <net/ip.h> | ||
43 | 44 | ||
44 | #include <asm/system.h> | 45 | #include <asm/system.h> |
45 | #include <asm/io.h> | 46 | #include <asm/io.h> |
46 | #include <asm/byteorder.h> | 47 | #include <asm/byteorder.h> |
47 | #include <asm/uaccess.h> | 48 | #include <asm/uaccess.h> |
48 | 49 | ||
49 | #ifdef CONFIG_SPARC64 | 50 | #ifdef CONFIG_SPARC |
50 | #include <asm/idprom.h> | 51 | #include <asm/idprom.h> |
51 | #include <asm/oplib.h> | 52 | #include <asm/prom.h> |
52 | #include <asm/pbm.h> | ||
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 55 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
@@ -3349,7 +3349,7 @@ static int tg3_rx(struct tg3 *tp, int budget) | |||
3349 | skb_reserve(copy_skb, 2); | 3349 | skb_reserve(copy_skb, 2); |
3350 | skb_put(copy_skb, len); | 3350 | skb_put(copy_skb, len); |
3351 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 3351 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
3352 | memcpy(copy_skb->data, skb->data, len); | 3352 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
3353 | pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 3353 | pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
3354 | 3354 | ||
3355 | /* We'll reuse the original ring buffer. */ | 3355 | /* We'll reuse the original ring buffer. */ |
@@ -3908,20 +3908,20 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3908 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 3908 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
3909 | mss |= (skb_headlen(skb) - ETH_HLEN) << 9; | 3909 | mss |= (skb_headlen(skb) - ETH_HLEN) << 9; |
3910 | else { | 3910 | else { |
3911 | tcp_opt_len = ((skb->h.th->doff - 5) * 4); | 3911 | struct iphdr *iph = ip_hdr(skb); |
3912 | ip_tcp_len = (skb->nh.iph->ihl * 4) + | 3912 | |
3913 | sizeof(struct tcphdr); | 3913 | tcp_opt_len = tcp_optlen(skb); |
3914 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); | ||
3914 | 3915 | ||
3915 | skb->nh.iph->check = 0; | 3916 | iph->check = 0; |
3916 | skb->nh.iph->tot_len = htons(mss + ip_tcp_len + | 3917 | iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); |
3917 | tcp_opt_len); | ||
3918 | mss |= (ip_tcp_len + tcp_opt_len) << 9; | 3918 | mss |= (ip_tcp_len + tcp_opt_len) << 9; |
3919 | } | 3919 | } |
3920 | 3920 | ||
3921 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | | 3921 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | |
3922 | TXD_FLAG_CPU_POST_DMA); | 3922 | TXD_FLAG_CPU_POST_DMA); |
3923 | 3923 | ||
3924 | skb->h.th->check = 0; | 3924 | tcp_hdr(skb)->check = 0; |
3925 | 3925 | ||
3926 | } | 3926 | } |
3927 | else if (skb->ip_summed == CHECKSUM_PARTIAL) | 3927 | else if (skb->ip_summed == CHECKSUM_PARTIAL) |
@@ -4055,6 +4055,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) | |||
4055 | mss = 0; | 4055 | mss = 0; |
4056 | if (skb->len > (tp->dev->mtu + ETH_HLEN) && | 4056 | if (skb->len > (tp->dev->mtu + ETH_HLEN) && |
4057 | (mss = skb_shinfo(skb)->gso_size) != 0) { | 4057 | (mss = skb_shinfo(skb)->gso_size) != 0) { |
4058 | struct iphdr *iph; | ||
4058 | int tcp_opt_len, ip_tcp_len, hdr_len; | 4059 | int tcp_opt_len, ip_tcp_len, hdr_len; |
4059 | 4060 | ||
4060 | if (skb_header_cloned(skb) && | 4061 | if (skb_header_cloned(skb) && |
@@ -4063,8 +4064,8 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) | |||
4063 | goto out_unlock; | 4064 | goto out_unlock; |
4064 | } | 4065 | } |
4065 | 4066 | ||
4066 | tcp_opt_len = ((skb->h.th->doff - 5) * 4); | 4067 | tcp_opt_len = tcp_optlen(skb); |
4067 | ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); | 4068 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); |
4068 | 4069 | ||
4069 | hdr_len = ip_tcp_len + tcp_opt_len; | 4070 | hdr_len = ip_tcp_len + tcp_opt_len; |
4070 | if (unlikely((ETH_HLEN + hdr_len) > 80) && | 4071 | if (unlikely((ETH_HLEN + hdr_len) > 80) && |
@@ -4074,34 +4075,31 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) | |||
4074 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | | 4075 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | |
4075 | TXD_FLAG_CPU_POST_DMA); | 4076 | TXD_FLAG_CPU_POST_DMA); |
4076 | 4077 | ||
4077 | skb->nh.iph->check = 0; | 4078 | iph = ip_hdr(skb); |
4078 | skb->nh.iph->tot_len = htons(mss + hdr_len); | 4079 | iph->check = 0; |
4080 | iph->tot_len = htons(mss + hdr_len); | ||
4079 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { | 4081 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { |
4080 | skb->h.th->check = 0; | 4082 | tcp_hdr(skb)->check = 0; |
4081 | base_flags &= ~TXD_FLAG_TCPUDP_CSUM; | 4083 | base_flags &= ~TXD_FLAG_TCPUDP_CSUM; |
4082 | } | 4084 | } else |
4083 | else { | 4085 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
4084 | skb->h.th->check = | 4086 | iph->daddr, 0, |
4085 | ~csum_tcpudp_magic(skb->nh.iph->saddr, | 4087 | IPPROTO_TCP, |
4086 | skb->nh.iph->daddr, | 4088 | 0); |
4087 | 0, IPPROTO_TCP, 0); | ||
4088 | } | ||
4089 | 4089 | ||
4090 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || | 4090 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || |
4091 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { | 4091 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { |
4092 | if (tcp_opt_len || skb->nh.iph->ihl > 5) { | 4092 | if (tcp_opt_len || iph->ihl > 5) { |
4093 | int tsflags; | 4093 | int tsflags; |
4094 | 4094 | ||
4095 | tsflags = ((skb->nh.iph->ihl - 5) + | 4095 | tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); |
4096 | (tcp_opt_len >> 2)); | ||
4097 | mss |= (tsflags << 11); | 4096 | mss |= (tsflags << 11); |
4098 | } | 4097 | } |
4099 | } else { | 4098 | } else { |
4100 | if (tcp_opt_len || skb->nh.iph->ihl > 5) { | 4099 | if (tcp_opt_len || iph->ihl > 5) { |
4101 | int tsflags; | 4100 | int tsflags; |
4102 | 4101 | ||
4103 | tsflags = ((skb->nh.iph->ihl - 5) + | 4102 | tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); |
4104 | (tcp_opt_len >> 2)); | ||
4105 | base_flags |= tsflags << 12; | 4103 | base_flags |= tsflags << 12; |
4106 | } | 4104 | } |
4107 | } | 4105 | } |
@@ -10988,24 +10986,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
10988 | return err; | 10986 | return err; |
10989 | } | 10987 | } |
10990 | 10988 | ||
10991 | #ifdef CONFIG_SPARC64 | 10989 | #ifdef CONFIG_SPARC |
10992 | static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) | 10990 | static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) |
10993 | { | 10991 | { |
10994 | struct net_device *dev = tp->dev; | 10992 | struct net_device *dev = tp->dev; |
10995 | struct pci_dev *pdev = tp->pdev; | 10993 | struct pci_dev *pdev = tp->pdev; |
10996 | struct pcidev_cookie *pcp = pdev->sysdata; | 10994 | struct device_node *dp = pci_device_to_OF_node(pdev); |
10997 | 10995 | const unsigned char *addr; | |
10998 | if (pcp != NULL) { | 10996 | int len; |
10999 | unsigned char *addr; | 10997 | |
11000 | int len; | 10998 | addr = of_get_property(dp, "local-mac-address", &len); |
11001 | 10999 | if (addr && len == 6) { | |
11002 | addr = of_get_property(pcp->prom_node, "local-mac-address", | 11000 | memcpy(dev->dev_addr, addr, 6); |
11003 | &len); | 11001 | memcpy(dev->perm_addr, dev->dev_addr, 6); |
11004 | if (addr && len == 6) { | 11002 | return 0; |
11005 | memcpy(dev->dev_addr, addr, 6); | ||
11006 | memcpy(dev->perm_addr, dev->dev_addr, 6); | ||
11007 | return 0; | ||
11008 | } | ||
11009 | } | 11003 | } |
11010 | return -ENODEV; | 11004 | return -ENODEV; |
11011 | } | 11005 | } |
@@ -11026,7 +11020,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) | |||
11026 | u32 hi, lo, mac_offset; | 11020 | u32 hi, lo, mac_offset; |
11027 | int addr_ok = 0; | 11021 | int addr_ok = 0; |
11028 | 11022 | ||
11029 | #ifdef CONFIG_SPARC64 | 11023 | #ifdef CONFIG_SPARC |
11030 | if (!tg3_get_macaddr_sparc(tp)) | 11024 | if (!tg3_get_macaddr_sparc(tp)) |
11031 | return 0; | 11025 | return 0; |
11032 | #endif | 11026 | #endif |
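
The second tg3 hunk drops the sparc64-only pcidev_cookie path in favour of the generic Open Firmware property API, and widens the guard from CONFIG_SPARC64 to CONFIG_SPARC, which covers both 32- and 64-bit builds. A hedged sketch of the pattern, assuming an OF-described PCI device (sparc or powerpc; the helper name is illustrative):

    #include <linux/pci.h>
    #include <linux/etherdevice.h>
    #include <linux/string.h>
    #include <asm/prom.h>

    /* Sketch: fetch the firmware-assigned MAC address for a PCI NIC
     * from the Open Firmware device tree. */
    static int sketch_get_of_macaddr(struct pci_dev *pdev, struct net_device *dev)
    {
            struct device_node *dp = pci_device_to_OF_node(pdev);
            const unsigned char *addr;
            int len;

            addr = of_get_property(dp, "local-mac-address", &len);
            if (!addr || len != 6)
                    return -ENODEV;

            memcpy(dev->dev_addr, addr, 6);
            memcpy(dev->perm_addr, dev->dev_addr, 6);
            return 0;
    }
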
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index f85f00251123..106dc1ef0acb 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c | |||
@@ -1112,7 +1112,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) | |||
1112 | 1112 | ||
1113 | if ( bbuf ) { | 1113 | if ( bbuf ) { |
1114 | tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); | 1114 | tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); |
1115 | memcpy( tail_buffer, skb->data, skb->len ); | 1115 | skb_copy_from_linear_data(skb, tail_buffer, skb->len); |
1116 | } else { | 1116 | } else { |
1117 | tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); | 1117 | tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); |
1118 | TLan_StoreSKB(tail_list, skb); | 1118 | TLan_StoreSKB(tail_list, skb); |
@@ -1577,7 +1577,6 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) | |||
1577 | printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n"); | 1577 | printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n"); |
1578 | else { | 1578 | else { |
1579 | head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE); | 1579 | head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE); |
1580 | skb->dev = dev; | ||
1581 | skb_reserve(skb, 2); | 1580 | skb_reserve(skb, 2); |
1582 | t = (void *) skb_put(skb, frameSize); | 1581 | t = (void *) skb_put(skb, frameSize); |
1583 | 1582 | ||
@@ -1608,7 +1607,6 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) | |||
1608 | skb->protocol = eth_type_trans( skb, dev ); | 1607 | skb->protocol = eth_type_trans( skb, dev ); |
1609 | netif_rx( skb ); | 1608 | netif_rx( skb ); |
1610 | 1609 | ||
1611 | new_skb->dev = dev; | ||
1612 | skb_reserve( new_skb, 2 ); | 1610 | skb_reserve( new_skb, 2 ); |
1613 | t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE ); | 1611 | t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE ); |
1614 | head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); | 1612 | head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); |
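
The memcpy()-to-helper substitutions that recur through the rest of this merge are mechanical: skb_copy_from_linear_data() copies out of skb->data and skb_copy_to_linear_data() copies into it, and both are valid only for the linear (non-paged) part of the buffer. Roughly, the non-offset variants reduce to the following (sketch; the sketch_* names are ours):

    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Sketch of what the new helpers boil down to for linear skbs. */
    static inline void sketch_copy_from_linear(const struct sk_buff *skb,
                                               void *to, unsigned int len)
    {
            memcpy(to, skb->data, len);     /* old open-coded form */
    }

    static inline void sketch_copy_to_linear(struct sk_buff *skb,
                                             const void *from, unsigned int len)
    {
            memcpy(skb->data, from, len);   /* old open-coded form */
    }
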
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 7580bdeacadc..e22a3f5333ef 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c | |||
@@ -933,20 +933,21 @@ static void xl_rx(struct net_device *dev) | |||
933 | return ; | 933 | return ; |
934 | } | 934 | } |
935 | 935 | ||
936 | skb->dev = dev ; | ||
937 | |||
938 | while (xl_priv->rx_ring_tail != temp_ring_loc) { | 936 | while (xl_priv->rx_ring_tail != temp_ring_loc) { |
939 | copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ; | 937 | copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ; |
940 | frame_length -= copy_len ; | 938 | frame_length -= copy_len ; |
941 | pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 939 | pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
942 | memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, copy_len) ; | 940 | skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail], |
941 | skb_put(skb, copy_len), | ||
942 | copy_len); | ||
943 | pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 943 | pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
944 | adv_rx_ring(dev) ; | 944 | adv_rx_ring(dev) ; |
945 | } | 945 | } |
946 | 946 | ||
947 | /* Now we have found the last fragment */ | 947 | /* Now we have found the last fragment */ |
948 | pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 948 | pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
949 | memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, frame_length) ; | 949 | skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail], |
950 | skb_put(skb,copy_len), frame_length); | ||
950 | /* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */ | 951 | /* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */ |
951 | pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 952 | pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
952 | adv_rx_ring(dev) ; | 953 | adv_rx_ring(dev) ; |
@@ -967,8 +968,6 @@ static void xl_rx(struct net_device *dev) | |||
967 | return ; | 968 | return ; |
968 | } | 969 | } |
969 | 970 | ||
970 | skb->dev = dev ; | ||
971 | |||
972 | skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ; | 971 | skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ; |
973 | pci_unmap_single(xl_priv->pdev, xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr, xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 972 | pci_unmap_single(xl_priv->pdev, xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr, xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
974 | skb_put(skb2, frame_length) ; | 973 | skb_put(skb2, frame_length) ; |
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 01d55315ee8c..1e8958ee2d0a 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c | |||
@@ -1771,7 +1771,6 @@ static void tr_rx(struct net_device *dev) | |||
1771 | /*BMS again, if she comes in with few but leaves with many */ | 1771 | /*BMS again, if she comes in with few but leaves with many */ |
1772 | skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len); | 1772 | skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len); |
1773 | skb_put(skb, length); | 1773 | skb_put(skb, length); |
1774 | skb->dev = dev; | ||
1775 | data = skb->data; | 1774 | data = skb->data; |
1776 | rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len))); | 1775 | rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len))); |
1777 | rbufdata = rbuf + offsetof(struct rec_buf, data); | 1776 | rbufdata = rbuf + offsetof(struct rec_buf, data); |
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index e999feb8c0bb..5d849c089a3b 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c | |||
@@ -944,8 +944,6 @@ static void streamer_rx(struct net_device *dev) | |||
944 | printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); | 944 | printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); |
945 | streamer_priv->streamer_stats.rx_dropped++; | 945 | streamer_priv->streamer_stats.rx_dropped++; |
946 | } else { /* we allocated an skb OK */ | 946 | } else { /* we allocated an skb OK */ |
947 | skb->dev = dev; | ||
948 | |||
949 | if (buffer_cnt == 1) { | 947 | if (buffer_cnt == 1) { |
950 | /* release the DMA mapping */ | 948 | /* release the DMA mapping */ |
951 | pci_unmap_single(streamer_priv->pci_dev, | 949 | pci_unmap_single(streamer_priv->pci_dev, |
@@ -1607,10 +1605,11 @@ static void streamer_arb_cmd(struct net_device *dev) | |||
1607 | frame_data, buffer_len); | 1605 | frame_data, buffer_len); |
1608 | } while (next_ptr && (buff_off = next_ptr)); | 1606 | } while (next_ptr && (buff_off = next_ptr)); |
1609 | 1607 | ||
1608 | mac_frame->protocol = tr_type_trans(mac_frame, dev); | ||
1610 | #if STREAMER_NETWORK_MONITOR | 1609 | #if STREAMER_NETWORK_MONITOR |
1611 | printk(KERN_WARNING "%s: Received MAC Frame, details: \n", | 1610 | printk(KERN_WARNING "%s: Received MAC Frame, details: \n", |
1612 | dev->name); | 1611 | dev->name); |
1613 | mac_hdr = (struct trh_hdr *) mac_frame->data; | 1612 | mac_hdr = tr_hdr(mac_frame); |
1614 | printk(KERN_WARNING | 1613 | printk(KERN_WARNING |
1615 | "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", | 1614 | "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", |
1616 | dev->name, mac_hdr->daddr[0], mac_hdr->daddr[1], | 1615 | dev->name, mac_hdr->daddr[0], mac_hdr->daddr[1], |
@@ -1622,8 +1621,6 @@ static void streamer_arb_cmd(struct net_device *dev) | |||
1622 | mac_hdr->saddr[2], mac_hdr->saddr[3], | 1621 | mac_hdr->saddr[2], mac_hdr->saddr[3], |
1623 | mac_hdr->saddr[4], mac_hdr->saddr[5]); | 1622 | mac_hdr->saddr[4], mac_hdr->saddr[5]); |
1624 | #endif | 1623 | #endif |
1625 | mac_frame->dev = dev; | ||
1626 | mac_frame->protocol = tr_type_trans(mac_frame, dev); | ||
1627 | netif_rx(mac_frame); | 1624 | netif_rx(mac_frame); |
1628 | 1625 | ||
1629 | /* Now tell the card we have dealt with the received frame */ | 1626 | /* Now tell the card we have dealt with the received frame */ |
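
Note the reordering in streamer_arb_cmd(): tr_type_trans() is now called before the STREAMER_NETWORK_MONITOR block, because tr_hdr() reads the mac-header offset that tr_type_trans() establishes (it also sets skb->dev, which is why the explicit assignment could go). A sketch of the dependency, with an illustrative function name:

    #include <linux/if_tr.h>
    #include <linux/trdevice.h>
    #include <linux/skbuff.h>

    /* Sketch: deliver a token-ring MAC frame. tr_type_trans() must run
     * first so that tr_hdr() (i.e. skb_mac_header()) is valid and
     * skb->dev is set. */
    static void sketch_deliver_tr_frame(struct sk_buff *skb,
                                        struct net_device *dev)
    {
            struct trh_hdr *hdr;

            skb->protocol = tr_type_trans(skb, dev); /* sets mac header + skb->dev */
            hdr = tr_hdr(skb);                       /* safe only after the above */
            pr_debug("dest %02x:...\n", hdr->daddr[0]);
            netif_rx(skb);
    }
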
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index 8f4ecc1109cb..09b3cfb8e809 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c | |||
@@ -814,8 +814,6 @@ static void olympic_rx(struct net_device *dev) | |||
814 | olympic_priv->rx_ring_last_received += i ; | 814 | olympic_priv->rx_ring_last_received += i ; |
815 | olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; | 815 | olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; |
816 | } else { | 816 | } else { |
817 | skb->dev = dev ; | ||
818 | |||
819 | /* Optimise based upon number of buffers used. | 817 | /* Optimise based upon number of buffers used. |
820 | If only one buffer is used we can simply swap the buffers around. | 818 | If only one buffer is used we can simply swap the buffers around. |
821 | If more than one then we must use the new buffer and copy the information | 819 | If more than one then we must use the new buffer and copy the information |
@@ -847,7 +845,9 @@ static void olympic_rx(struct net_device *dev) | |||
847 | pci_dma_sync_single_for_cpu(olympic_priv->pdev, | 845 | pci_dma_sync_single_for_cpu(olympic_priv->pdev, |
848 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), | 846 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), |
849 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 847 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
850 | memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ; | 848 | skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received], |
849 | skb_put(skb,length - 4), | ||
850 | length - 4); | ||
851 | pci_dma_sync_single_for_device(olympic_priv->pdev, | 851 | pci_dma_sync_single_for_device(olympic_priv->pdev, |
852 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), | 852 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), |
853 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 853 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
@@ -864,7 +864,9 @@ static void olympic_rx(struct net_device *dev) | |||
864 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 864 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
865 | rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]); | 865 | rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]); |
866 | cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length)); | 866 | cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length)); |
867 | memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ; | 867 | skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received], |
868 | skb_put(skb, cpy_length), | ||
869 | cpy_length); | ||
868 | pci_dma_sync_single_for_device(olympic_priv->pdev, | 870 | pci_dma_sync_single_for_device(olympic_priv->pdev, |
869 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), | 871 | le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), |
870 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; | 872 | olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; |
@@ -1440,16 +1442,16 @@ static void olympic_arb_cmd(struct net_device *dev) | |||
1440 | next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next)); | 1442 | next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next)); |
1441 | } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr))); | 1443 | } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr))); |
1442 | 1444 | ||
1445 | mac_frame->protocol = tr_type_trans(mac_frame, dev); | ||
1446 | |||
1443 | if (olympic_priv->olympic_network_monitor) { | 1447 | if (olympic_priv->olympic_network_monitor) { |
1444 | struct trh_hdr *mac_hdr ; | 1448 | struct trh_hdr *mac_hdr ; |
1445 | printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ; | 1449 | printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ; |
1446 | mac_hdr = (struct trh_hdr *)mac_frame->data ; | 1450 | mac_hdr = tr_hdr(mac_frame); |
1447 | printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ; | 1451 | printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ; |
1448 | printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ; | 1452 | printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ; |
1449 | } | 1453 | } |
1450 | mac_frame->dev = dev ; | 1454 | netif_rx(mac_frame); |
1451 | mac_frame->protocol = tr_type_trans(mac_frame,dev); | ||
1452 | netif_rx(mac_frame) ; | ||
1453 | dev->last_rx = jiffies; | 1455 | dev->last_rx = jiffies; |
1454 | 1456 | ||
1455 | drop_frame: | 1457 | drop_frame: |
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index cec282a6f62d..9bbea5c8acf4 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c | |||
@@ -3889,14 +3889,13 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, | |||
3889 | 3889 | ||
3890 | /* Slide data into a sleek skb. */ | 3890 | /* Slide data into a sleek skb. */ |
3891 | skb_put(skb, skb->len); | 3891 | skb_put(skb, skb->len); |
3892 | memcpy(skb->data, rmf, skb->len); | 3892 | skb_copy_to_linear_data(skb, rmf, skb->len); |
3893 | 3893 | ||
3894 | /* Update Counters */ | 3894 | /* Update Counters */ |
3895 | tp->MacStat.rx_packets++; | 3895 | tp->MacStat.rx_packets++; |
3896 | tp->MacStat.rx_bytes += skb->len; | 3896 | tp->MacStat.rx_bytes += skb->len; |
3897 | 3897 | ||
3898 | /* Kick the packet on up. */ | 3898 | /* Kick the packet on up. */ |
3899 | skb->dev = dev; | ||
3900 | skb->protocol = tr_type_trans(skb, dev); | 3899 | skb->protocol = tr_type_trans(skb, dev); |
3901 | netif_rx(skb); | 3900 | netif_rx(skb); |
3902 | dev->last_rx = jiffies; | 3901 | dev->last_rx = jiffies; |
@@ -4476,14 +4475,13 @@ static int smctr_rx_frame(struct net_device *dev) | |||
4476 | if (skb) { | 4475 | if (skb) { |
4477 | skb_put(skb, rx_size); | 4476 | skb_put(skb, rx_size); |
4478 | 4477 | ||
4479 | memcpy(skb->data, pbuff, rx_size); | 4478 | skb_copy_to_linear_data(skb, pbuff, rx_size); |
4480 | 4479 | ||
4481 | /* Update Counters */ | 4480 | /* Update Counters */ |
4482 | tp->MacStat.rx_packets++; | 4481 | tp->MacStat.rx_packets++; |
4483 | tp->MacStat.rx_bytes += skb->len; | 4482 | tp->MacStat.rx_bytes += skb->len; |
4484 | 4483 | ||
4485 | /* Kick the packet on up. */ | 4484 | /* Kick the packet on up. */ |
4486 | skb->dev = dev; | ||
4487 | skb->protocol = tr_type_trans(skb, dev); | 4485 | skb->protocol = tr_type_trans(skb, dev); |
4488 | netif_rx(skb); | 4486 | netif_rx(skb); |
4489 | dev->last_rx = jiffies; | 4487 | dev->last_rx = jiffies; |
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c index ea797ca2b988..12bd294045a7 100644 --- a/drivers/net/tokenring/tms380tr.c +++ b/drivers/net/tokenring/tms380tr.c | |||
@@ -644,7 +644,7 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device | |||
644 | dmabuf = 0; | 644 | dmabuf = 0; |
645 | i = tp->TplFree->TPLIndex; | 645 | i = tp->TplFree->TPLIndex; |
646 | buf = tp->LocalTxBuffers[i]; | 646 | buf = tp->LocalTxBuffers[i]; |
647 | memcpy(buf, skb->data, length); | 647 | skb_copy_from_linear_data(skb, buf, length); |
648 | newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer; | 648 | newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer; |
649 | } | 649 | } |
650 | else { | 650 | else { |
@@ -2168,7 +2168,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev) | |||
2168 | } | 2168 | } |
2169 | else | 2169 | else |
2170 | { | 2170 | { |
2171 | skb->dev = dev; | ||
2172 | skb_put(skb, tp->MaxPacketSize); | 2171 | skb_put(skb, tp->MaxPacketSize); |
2173 | rpl->SkbStat = SKB_DATA_COPY; | 2172 | rpl->SkbStat = SKB_DATA_COPY; |
2174 | ReceiveDataPtr = rpl->MData; | 2173 | ReceiveDataPtr = rpl->MData; |
@@ -2179,7 +2178,8 @@ static void tms380tr_rcv_status_irq(struct net_device *dev) | |||
2179 | || rpl->SkbStat == SKB_DMA_DIRECT)) | 2178 | || rpl->SkbStat == SKB_DMA_DIRECT)) |
2180 | { | 2179 | { |
2181 | if(rpl->SkbStat == SKB_DATA_COPY) | 2180 | if(rpl->SkbStat == SKB_DATA_COPY) |
2182 | memcpy(skb->data, ReceiveDataPtr, Length); | 2181 | skb_copy_to_linear_data(skb, ReceiveDataPtr, |
2182 | Length); | ||
2183 | 2183 | ||
2184 | /* Deliver frame to system */ | 2184 | /* Deliver frame to system */ |
2185 | rpl->Skb = NULL; | 2185 | rpl->Skb = NULL; |
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index d92c5c597e16..0bfc2c9c1c08 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c | |||
@@ -788,7 +788,6 @@ static int tsi108_complete_rx(struct net_device *dev, int budget) | |||
788 | printk(".\n"); | 788 | printk(".\n"); |
789 | } | 789 | } |
790 | 790 | ||
791 | skb->dev = dev; | ||
792 | skb_put(skb, data->rxring[rx].len); | 791 | skb_put(skb, data->rxring[rx].len); |
793 | skb->protocol = eth_type_trans(skb, dev); | 792 | skb->protocol = eth_type_trans(skb, dev); |
794 | netif_receive_skb(skb); | 793 | netif_receive_skb(skb); |
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index c82befa209a2..861729806dc1 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -63,7 +63,7 @@ MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number"); | |||
63 | 63 | ||
64 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ | 64 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ |
65 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ | 65 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ |
66 | || defined(__sparc__) || defined(__ia64__) \ | 66 | || defined(CONFIG_SPARC) || defined(__ia64__) \ |
67 | || defined(__sh__) || defined(__mips__) | 67 | || defined(__sh__) || defined(__mips__) |
68 | static int rx_copybreak = 1518; | 68 | static int rx_copybreak = 1518; |
69 | #else | 69 | #else |
@@ -435,7 +435,6 @@ static void de_rx (struct de_private *de) | |||
435 | rx_work = 100; | 435 | rx_work = 100; |
436 | goto rx_next; | 436 | goto rx_next; |
437 | } | 437 | } |
438 | copy_skb->dev = de->dev; | ||
439 | 438 | ||
440 | if (!copying_skb) { | 439 | if (!copying_skb) { |
441 | pci_unmap_single(de->pdev, mapping, | 440 | pci_unmap_single(de->pdev, mapping, |
@@ -450,8 +449,8 @@ static void de_rx (struct de_private *de) | |||
450 | } else { | 449 | } else { |
451 | pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); | 450 | pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); |
452 | skb_reserve(copy_skb, RX_OFFSET); | 451 | skb_reserve(copy_skb, RX_OFFSET); |
453 | memcpy(skb_put(copy_skb, len), skb->data, len); | 452 | skb_copy_from_linear_data(skb, skb_put(copy_skb, len), |
454 | 453 | len); | |
455 | pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); | 454 | pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); |
456 | 455 | ||
457 | /* We'll reuse the original ring buffer. */ | 456 | /* We'll reuse the original ring buffer. */ |
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index 4b3cd3d8b62a..62143f92c231 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c | |||
@@ -1160,7 +1160,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1160 | sprintf(lp->adapter_name,"%s (%s)", name, gendev->bus_id); | 1160 | sprintf(lp->adapter_name,"%s (%s)", name, gendev->bus_id); |
1161 | 1161 | ||
1162 | lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc); | 1162 | lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc); |
1163 | #if defined(__alpha__) || defined(__powerpc__) || defined(__sparc_v9__) || defined(DE4X5_DO_MEMCPY) | 1163 | #if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY) |
1164 | lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN; | 1164 | lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN; |
1165 | #endif | 1165 | #endif |
1166 | lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size, | 1166 | lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size, |
@@ -1175,7 +1175,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1175 | ** Set up the RX descriptor ring (Intels) | 1175 | ** Set up the RX descriptor ring (Intels) |
1176 | ** Allocate contiguous receive buffers, long word aligned (Alphas) | 1176 | ** Allocate contiguous receive buffers, long word aligned (Alphas) |
1177 | */ | 1177 | */ |
1178 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY) | 1178 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) |
1179 | for (i=0; i<NUM_RX_DESC; i++) { | 1179 | for (i=0; i<NUM_RX_DESC; i++) { |
1180 | lp->rx_ring[i].status = 0; | 1180 | lp->rx_ring[i].status = 0; |
1181 | lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); | 1181 | lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); |
@@ -1252,11 +1252,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1252 | mii_get_phy(dev); | 1252 | mii_get_phy(dev); |
1253 | } | 1253 | } |
1254 | 1254 | ||
1255 | #ifndef __sparc_v9__ | ||
1256 | printk(" and requires IRQ%d (provided by %s).\n", dev->irq, | 1255 | printk(" and requires IRQ%d (provided by %s).\n", dev->irq, |
1257 | #else | ||
1258 | printk(" and requires IRQ%x (provided by %s).\n", dev->irq, | ||
1259 | #endif | ||
1260 | ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); | 1256 | ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); |
1261 | } | 1257 | } |
1262 | 1258 | ||
@@ -3627,14 +3623,13 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len) | |||
3627 | struct de4x5_private *lp = netdev_priv(dev); | 3623 | struct de4x5_private *lp = netdev_priv(dev); |
3628 | struct sk_buff *p; | 3624 | struct sk_buff *p; |
3629 | 3625 | ||
3630 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY) | 3626 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) |
3631 | struct sk_buff *ret; | 3627 | struct sk_buff *ret; |
3632 | u_long i=0, tmp; | 3628 | u_long i=0, tmp; |
3633 | 3629 | ||
3634 | p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2); | 3630 | p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2); |
3635 | if (!p) return NULL; | 3631 | if (!p) return NULL; |
3636 | 3632 | ||
3637 | p->dev = dev; | ||
3638 | tmp = virt_to_bus(p->data); | 3633 | tmp = virt_to_bus(p->data); |
3639 | i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp; | 3634 | i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp; |
3640 | skb_reserve(p, i); | 3635 | skb_reserve(p, i); |
@@ -3655,7 +3650,6 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len) | |||
3655 | p = dev_alloc_skb(len + 2); | 3650 | p = dev_alloc_skb(len + 2); |
3656 | if (!p) return NULL; | 3651 | if (!p) return NULL; |
3657 | 3652 | ||
3658 | p->dev = dev; | ||
3659 | skb_reserve(p, 2); /* Align */ | 3653 | skb_reserve(p, 2); /* Align */ |
3660 | if (index < lp->rx_old) { /* Wrapped buffer */ | 3654 | if (index < lp->rx_old) { /* Wrapped buffer */ |
3661 | short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ; | 3655 | short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ; |
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index 9aeac76184f3..b3a64ca98634 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c | |||
@@ -682,7 +682,7 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev) | |||
682 | 682 | ||
683 | /* transmit this packet */ | 683 | /* transmit this packet */ |
684 | txptr = db->tx_insert_ptr; | 684 | txptr = db->tx_insert_ptr; |
685 | memcpy(txptr->tx_buf_ptr, skb->data, skb->len); | 685 | skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len); |
686 | txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); | 686 | txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); |
687 | 687 | ||
688 | /* Point to next transmit free descriptor */ | 688 | /* Point to next transmit free descriptor */ |
@@ -988,14 +988,14 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) | |||
988 | 988 | ||
989 | skb = newskb; | 989 | skb = newskb; |
990 | /* size less than COPY_SIZE, allocate a rxlen SKB */ | 990 | /* size less than COPY_SIZE, allocate a rxlen SKB */ |
991 | skb->dev = dev; | ||
992 | skb_reserve(skb, 2); /* 16byte align */ | 991 | skb_reserve(skb, 2); /* 16byte align */ |
993 | memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen); | 992 | skb_copy_from_linear_data(rxptr->rx_skb_ptr, |
993 | skb_put(skb, rxlen), | ||
994 | rxlen); | ||
994 | dmfe_reuse_skb(db, rxptr->rx_skb_ptr); | 995 | dmfe_reuse_skb(db, rxptr->rx_skb_ptr); |
995 | } else { | 996 | } else |
996 | skb->dev = dev; | ||
997 | skb_put(skb, rxlen); | 997 | skb_put(skb, rxlen); |
998 | } | 998 | |
999 | skb->protocol = eth_type_trans(skb, dev); | 999 | skb->protocol = eth_type_trans(skb, dev); |
1000 | netif_rx(skb); | 1000 | netif_rx(skb); |
1001 | dev->last_rx = jiffies; | 1001 | dev->last_rx = jiffies; |
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c index e3488d7b8ede..e86df07769a1 100644 --- a/drivers/net/tulip/interrupt.c +++ b/drivers/net/tulip/interrupt.c | |||
@@ -192,7 +192,6 @@ int tulip_poll(struct net_device *dev, int *budget) | |||
192 | to a minimally-sized skbuff. */ | 192 | to a minimally-sized skbuff. */ |
193 | if (pkt_len < tulip_rx_copybreak | 193 | if (pkt_len < tulip_rx_copybreak |
194 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 194 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
195 | skb->dev = dev; | ||
196 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 195 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
197 | pci_dma_sync_single_for_cpu(tp->pdev, | 196 | pci_dma_sync_single_for_cpu(tp->pdev, |
198 | tp->rx_buffers[entry].mapping, | 197 | tp->rx_buffers[entry].mapping, |
@@ -416,7 +415,6 @@ static int tulip_rx(struct net_device *dev) | |||
416 | to a minimally-sized skbuff. */ | 415 | to a minimally-sized skbuff. */ |
417 | if (pkt_len < tulip_rx_copybreak | 416 | if (pkt_len < tulip_rx_copybreak |
418 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 417 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
419 | skb->dev = dev; | ||
420 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 418 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
421 | pci_dma_sync_single_for_cpu(tp->pdev, | 419 | pci_dma_sync_single_for_cpu(tp->pdev, |
422 | tp->rx_buffers[entry].mapping, | 420 | tp->rx_buffers[entry].mapping, |
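
The recurring deletion of "skb->dev = dev;" in these receive paths is safe because eth_type_trans() has always set skb->dev itself, and as of this kernel the allocation helpers no longer need the field pre-set. The minimal copybreak receive path thus reduces to the following sketch (error accounting elided; names are illustrative):

    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Sketch: copybreak receive for a small packet sitting in rx_buf. */
    static void sketch_rx_small(struct net_device *dev,
                                const void *rx_buf, unsigned int pkt_len)
    {
            struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);

            if (!skb)
                    return;          /* drop; real drivers bump rx_dropped */
            skb_reserve(skb, 2);     /* 16-byte align the IP header */
            memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
            skb->protocol = eth_type_trans(skb, dev);  /* sets skb->dev */
            netif_rx(skb);
    }
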
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index e3774a522372..e9bf526ec534 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -36,8 +36,8 @@ | |||
36 | #include <asm/unaligned.h> | 36 | #include <asm/unaligned.h> |
37 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
38 | 38 | ||
39 | #ifdef __sparc__ | 39 | #ifdef CONFIG_SPARC |
40 | #include <asm/pbm.h> | 40 | #include <asm/prom.h> |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | static char version[] __devinitdata = | 43 | static char version[] __devinitdata = |
@@ -67,7 +67,7 @@ const char * const medianame[32] = { | |||
67 | 67 | ||
68 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ | 68 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ |
69 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ | 69 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ |
70 | || defined(__sparc__) || defined(__ia64__) \ | 70 | || defined(CONFIG_SPARC) || defined(__ia64__) \ |
71 | || defined(__sh__) || defined(__mips__) | 71 | || defined(__sh__) || defined(__mips__) |
72 | static int rx_copybreak = 1518; | 72 | static int rx_copybreak = 1518; |
73 | #else | 73 | #else |
@@ -91,7 +91,7 @@ static int rx_copybreak = 100; | |||
91 | static int csr0 = 0x01A00000 | 0xE000; | 91 | static int csr0 = 0x01A00000 | 0xE000; |
92 | #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__) | 92 | #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__) |
93 | static int csr0 = 0x01A00000 | 0x8000; | 93 | static int csr0 = 0x01A00000 | 0x8000; |
94 | #elif defined(__sparc__) || defined(__hppa__) | 94 | #elif defined(CONFIG_SPARC) || defined(__hppa__) |
95 | /* The UltraSparc PCI controllers will disconnect at every 64-byte | 95 | /* The UltraSparc PCI controllers will disconnect at every 64-byte |
96 | * crossing anyways so it makes no sense to tell Tulip to burst | 96 | * crossing anyways so it makes no sense to tell Tulip to burst |
97 | * any more than that. | 97 | * any more than that. |
@@ -1315,7 +1315,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1315 | /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ | 1315 | /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ |
1316 | if (tulip_uli_dm_quirk(pdev)) { | 1316 | if (tulip_uli_dm_quirk(pdev)) { |
1317 | csr0 &= ~0x01f100ff; | 1317 | csr0 &= ~0x01f100ff; |
1318 | #if defined(__sparc__) | 1318 | #if defined(CONFIG_SPARC) |
1319 | csr0 = (csr0 & ~0xff00) | 0xe000; | 1319 | csr0 = (csr0 & ~0xff00) | 0xe000; |
1320 | #endif | 1320 | #endif |
1321 | } | 1321 | } |
@@ -1535,23 +1535,19 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1535 | Many PCI BIOSes also incorrectly report the IRQ line, so we correct | 1535 | Many PCI BIOSes also incorrectly report the IRQ line, so we correct |
1536 | that here as well. */ | 1536 | that here as well. */ |
1537 | if (sum == 0 || sum == 6*0xff) { | 1537 | if (sum == 0 || sum == 6*0xff) { |
1538 | #if defined(__sparc__) | 1538 | #if defined(CONFIG_SPARC) |
1539 | struct pcidev_cookie *pcp = pdev->sysdata; | 1539 | struct device_node *dp = pci_device_to_OF_node(pdev); |
1540 | const unsigned char *addr; | ||
1541 | int len; | ||
1540 | #endif | 1542 | #endif |
1541 | eeprom_missing = 1; | 1543 | eeprom_missing = 1; |
1542 | for (i = 0; i < 5; i++) | 1544 | for (i = 0; i < 5; i++) |
1543 | dev->dev_addr[i] = last_phys_addr[i]; | 1545 | dev->dev_addr[i] = last_phys_addr[i]; |
1544 | dev->dev_addr[i] = last_phys_addr[i] + 1; | 1546 | dev->dev_addr[i] = last_phys_addr[i] + 1; |
1545 | #if defined(__sparc__) | 1547 | #if defined(CONFIG_SPARC) |
1546 | if (pcp) { | 1548 | addr = of_get_property(dp, "local-mac-address", &len); |
1547 | unsigned char *addr; | 1549 | if (addr && len == 6) |
1548 | int len; | 1550 | memcpy(dev->dev_addr, addr, 6); |
1549 | |||
1550 | addr = of_get_property(pcp->prom_node, | ||
1551 | "local-mac-address", &len); | ||
1552 | if (addr && len == 6) | ||
1553 | memcpy(dev->dev_addr, addr, 6); | ||
1554 | } | ||
1555 | #endif | 1551 | #endif |
1556 | #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ | 1552 | #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ |
1557 | if (last_irq) | 1553 | if (last_irq) |
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 229158e8e4be..ca2548eb7d63 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c | |||
@@ -583,7 +583,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
583 | 583 | ||
584 | /* transmit this packet */ | 584 | /* transmit this packet */ |
585 | txptr = db->tx_insert_ptr; | 585 | txptr = db->tx_insert_ptr; |
586 | memcpy(txptr->tx_buf_ptr, skb->data, skb->len); | 586 | skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len); |
587 | txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); | 587 | txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); |
588 | 588 | ||
589 | /* Point to next transmit free descriptor */ | 589 | /* Point to next transmit free descriptor */ |
@@ -828,14 +828,14 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info | |||
828 | ( (skb = dev_alloc_skb(rxlen + 2) ) | 828 | ( (skb = dev_alloc_skb(rxlen + 2) ) |
829 | != NULL) ) { | 829 | != NULL) ) { |
830 | /* size less than COPY_SIZE, allocate a rxlen SKB */ | 830 | /* size less than COPY_SIZE, allocate a rxlen SKB */ |
831 | skb->dev = dev; | ||
832 | skb_reserve(skb, 2); /* 16byte align */ | 831 | skb_reserve(skb, 2); /* 16byte align */ |
833 | memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen); | 832 | memcpy(skb_put(skb, rxlen), |
833 | skb_tail_pointer(rxptr->rx_skb_ptr), | ||
834 | rxlen); | ||
834 | uli526x_reuse_skb(db, rxptr->rx_skb_ptr); | 835 | uli526x_reuse_skb(db, rxptr->rx_skb_ptr); |
835 | } else { | 836 | } else |
836 | skb->dev = dev; | ||
837 | skb_put(skb, rxlen); | 837 | skb_put(skb, rxlen); |
838 | } | 838 | |
839 | skb->protocol = eth_type_trans(skb, dev); | 839 | skb->protocol = eth_type_trans(skb, dev); |
840 | netif_rx(skb); | 840 | netif_rx(skb); |
841 | dev->last_rx = jiffies; | 841 | dev->last_rx = jiffies; |
@@ -1177,7 +1177,10 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk | |||
1177 | 1177 | ||
1178 | if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { | 1178 | if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { |
1179 | rxptr->rx_skb_ptr = skb; | 1179 | rxptr->rx_skb_ptr = skb; |
1180 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | 1180 | rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, |
1181 | skb_tail_pointer(skb), | ||
1182 | RX_ALLOC_SIZE, | ||
1183 | PCI_DMA_FROMDEVICE)); | ||
1181 | wmb(); | 1184 | wmb(); |
1182 | rxptr->rdes0 = cpu_to_le32(0x80000000); | 1185 | rxptr->rdes0 = cpu_to_le32(0x80000000); |
1183 | db->rx_avail_cnt++; | 1186 | db->rx_avail_cnt++; |
@@ -1341,7 +1344,10 @@ static void allocate_rx_buffer(struct uli526x_board_info *db) | |||
1341 | if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) | 1344 | if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) |
1342 | break; | 1345 | break; |
1343 | rxptr->rx_skb_ptr = skb; /* FIXME (?) */ | 1346 | rxptr->rx_skb_ptr = skb; /* FIXME (?) */ |
1344 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | 1347 | rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, |
1348 | skb_tail_pointer(skb), | ||
1349 | RX_ALLOC_SIZE, | ||
1350 | PCI_DMA_FROMDEVICE)); | ||
1345 | wmb(); | 1351 | wmb(); |
1346 | rxptr->rdes0 = cpu_to_le32(0x80000000); | 1352 | rxptr->rdes0 = cpu_to_le32(0x80000000); |
1347 | rxptr = rxptr->next_rx_desc; | 1353 | rxptr = rxptr->next_rx_desc; |
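
uli526x's skb->tail accesses become skb_tail_pointer() calls because sk_buff can now store tail (and end) as offsets from skb->head rather than raw pointers on 64-bit builds; the accessor hides the difference. A sketch of a DMA mapping done the new way, with a hypothetical helper name:

    #include <linux/pci.h>
    #include <linux/skbuff.h>

    /* Sketch: map the writable tail room of an rx skb for DMA.
     * skb_tail_pointer() works whether skb->tail is a pointer or an
     * offset from skb->head. */
    static dma_addr_t sketch_map_rx(struct pci_dev *pdev, struct sk_buff *skb,
                                    size_t buf_sz)
    {
            return pci_map_single(pdev, skb_tail_pointer(skb),
                                  buf_sz, PCI_DMA_FROMDEVICE);
    }
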
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 002a05e0722f..5b71ac78bca2 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c | |||
@@ -813,7 +813,6 @@ static void init_rxtx_rings(struct net_device *dev) | |||
813 | np->rx_skbuff[i] = skb; | 813 | np->rx_skbuff[i] = skb; |
814 | if (skb == NULL) | 814 | if (skb == NULL) |
815 | break; | 815 | break; |
816 | skb->dev = dev; /* Mark as being used by this device. */ | ||
817 | np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, | 816 | np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, |
818 | np->rx_buf_sz,PCI_DMA_FROMDEVICE); | 817 | np->rx_buf_sz,PCI_DMA_FROMDEVICE); |
819 | 818 | ||
@@ -903,7 +902,7 @@ static void init_registers(struct net_device *dev) | |||
903 | } | 902 | } |
904 | #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) | 903 | #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) |
905 | i |= 0xE000; | 904 | i |= 0xE000; |
906 | #elif defined(__sparc__) || defined (CONFIG_PARISC) | 905 | #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) |
907 | i |= 0x4800; | 906 | i |= 0x4800; |
908 | #else | 907 | #else |
909 | #warning Processor architecture undefined | 908 | #warning Processor architecture undefined |
@@ -1229,7 +1228,6 @@ static int netdev_rx(struct net_device *dev) | |||
1229 | to a minimally-sized skbuff. */ | 1228 | to a minimally-sized skbuff. */ |
1230 | if (pkt_len < rx_copybreak | 1229 | if (pkt_len < rx_copybreak |
1231 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1230 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1232 | skb->dev = dev; | ||
1233 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1231 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1234 | pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], | 1232 | pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], |
1235 | np->rx_skbuff[entry]->len, | 1233 | np->rx_skbuff[entry]->len, |
@@ -1278,7 +1276,6 @@ static int netdev_rx(struct net_device *dev) | |||
1278 | np->rx_skbuff[entry] = skb; | 1276 | np->rx_skbuff[entry] = skb; |
1279 | if (skb == NULL) | 1277 | if (skb == NULL) |
1280 | break; /* Better luck next round. */ | 1278 | break; /* Better luck next round. */ |
1281 | skb->dev = dev; /* Mark as being used by this device. */ | ||
1282 | np->rx_addr[entry] = pci_map_single(np->pci_dev, | 1279 | np->rx_addr[entry] = pci_map_single(np->pci_dev, |
1283 | skb->data, | 1280 | skb->data, |
1284 | np->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1281 | np->rx_buf_sz, PCI_DMA_FROMDEVICE); |
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index 61d313049dd0..985a1810ca59 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c | |||
@@ -411,9 +411,9 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
411 | sometimes sends more than you ask it to. */ | 411 | sometimes sends more than you ask it to. */ |
412 | 412 | ||
413 | memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); | 413 | memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); |
414 | memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len); | 414 | skb_copy_from_linear_data(skb, |
415 | 415 | &(card->tx_buffer[bufferoffsets[desc] / 4]), | |
416 | 416 | skb->len); | |
417 | /* FIXME: The specification tells us that the length we send HAS to be a multiple of | 417 | /* FIXME: The specification tells us that the length we send HAS to be a multiple of |
418 | 4 bytes. */ | 418 | 4 bytes. */ |
419 | 419 | ||
@@ -1207,7 +1207,6 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri | |||
1207 | card->stats.rx_dropped++; | 1207 | card->stats.rx_dropped++; |
1208 | goto out; | 1208 | goto out; |
1209 | } | 1209 | } |
1210 | skb->dev = dev; | ||
1211 | skb_reserve(skb, 2); | 1210 | skb_reserve(skb, 2); |
1212 | eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0); | 1211 | eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0); |
1213 | skb_put(skb, pkt_len); | 1212 | skb_put(skb, pkt_len); |
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c index a998c5d0ae9c..f64172927377 100644 --- a/drivers/net/tulip/xircom_tulip_cb.c +++ b/drivers/net/tulip/xircom_tulip_cb.c | |||
@@ -65,7 +65,7 @@ static int rx_copybreak = 100; | |||
65 | static int csr0 = 0x01A00000 | 0xE000; | 65 | static int csr0 = 0x01A00000 | 0xE000; |
66 | #elif defined(__powerpc__) | 66 | #elif defined(__powerpc__) |
67 | static int csr0 = 0x01B00000 | 0x8000; | 67 | static int csr0 = 0x01B00000 | 0x8000; |
68 | #elif defined(__sparc__) | 68 | #elif defined(CONFIG_SPARC) |
69 | static int csr0 = 0x01B00080 | 0x8000; | 69 | static int csr0 = 0x01B00080 | 0x8000; |
70 | #elif defined(__i386__) | 70 | #elif defined(__i386__) |
71 | static int csr0 = 0x01A00000 | 0x8000; | 71 | static int csr0 = 0x01A00000 | 0x8000; |
@@ -915,7 +915,9 @@ xircom_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
915 | 915 | ||
916 | tp->tx_skbuff[entry] = skb; | 916 | tp->tx_skbuff[entry] = skb; |
917 | if (tp->chip_id == X3201_3) { | 917 | if (tp->chip_id == X3201_3) { |
918 | memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len); | 918 | skb_copy_from_linear_data(skb, |
919 | tp->tx_aligned_skbuff[entry]->data, | ||
920 | skb->len); | ||
919 | tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data); | 921 | tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data); |
920 | } else | 922 | } else |
921 | tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data); | 923 | tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data); |
@@ -1238,7 +1240,6 @@ xircom_rx(struct net_device *dev) | |||
1238 | to a minimally-sized skbuff. */ | 1240 | to a minimally-sized skbuff. */ |
1239 | if (pkt_len < rx_copybreak | 1241 | if (pkt_len < rx_copybreak |
1240 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1242 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1241 | skb->dev = dev; | ||
1242 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1243 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1243 | #if ! defined(__alpha__) | 1244 | #if ! defined(__alpha__) |
1244 | eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1), | 1245 | eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1), |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 5643d1e84ed6..a2c6caaaae93 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -18,6 +18,10 @@ | |||
18 | /* | 18 | /* |
19 | * Changes: | 19 | * Changes: |
20 | * | 20 | * |
21 | * Brian Braunstein <linuxkernel@bristyle.com> 2007/03/23 | ||
22 | * Fixed hw address handling. Now net_device.dev_addr is kept consistent | ||
23 | * with tun.dev_addr when the address is set by this module. | ||
24 | * | ||
21 | * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 | 25 | * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 |
22 | * Add TUNSETLINK ioctl to set the link encapsulation | 26 | * Add TUNSETLINK ioctl to set the link encapsulation |
23 | * | 27 | * |
@@ -196,7 +200,10 @@ static void tun_net_init(struct net_device *dev) | |||
196 | dev->set_multicast_list = tun_net_mclist; | 200 | dev->set_multicast_list = tun_net_mclist; |
197 | 201 | ||
198 | ether_setup(dev); | 202 | ether_setup(dev); |
199 | random_ether_addr(dev->dev_addr); | 203 | |
204 | /* random address already created for us by tun_set_iff, use it */ | ||
205 | memcpy(dev->dev_addr, tun->dev_addr, min(sizeof(tun->dev_addr), sizeof(dev->dev_addr)) ); | ||
206 | |||
200 | dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ | 207 | dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ |
201 | break; | 208 | break; |
202 | } | 209 | } |
@@ -254,11 +261,11 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, | |||
254 | return -EFAULT; | 261 | return -EFAULT; |
255 | } | 262 | } |
256 | 263 | ||
257 | skb->dev = tun->dev; | ||
258 | switch (tun->flags & TUN_TYPE_MASK) { | 264 | switch (tun->flags & TUN_TYPE_MASK) { |
259 | case TUN_TUN_DEV: | 265 | case TUN_TUN_DEV: |
260 | skb->mac.raw = skb->data; | 266 | skb_reset_mac_header(skb); |
261 | skb->protocol = pi.proto; | 267 | skb->protocol = pi.proto; |
268 | skb->dev = tun->dev; | ||
262 | break; | 269 | break; |
263 | case TUN_TAP_DEV: | 270 | case TUN_TAP_DEV: |
264 | skb->protocol = eth_type_trans(skb, tun->dev); | 271 | skb->protocol = eth_type_trans(skb, tun->dev); |
@@ -386,8 +393,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, | |||
386 | * - we are multicast promiscuous. | 393 | * - we are multicast promiscuous. |
387 | * - we belong to the multicast group. | 394 | * - we belong to the multicast group. |
388 | */ | 395 | */ |
389 | memcpy(addr, skb->data, | 396 | skb_copy_from_linear_data(skb, addr, min_t(size_t, sizeof addr, |
390 | min_t(size_t, sizeof addr, skb->len)); | 397 | skb->len)); |
391 | bit_nr = ether_crc(sizeof addr, addr) >> 26; | 398 | bit_nr = ether_crc(sizeof addr, addr) >> 26; |
392 | if ((tun->if_flags & IFF_PROMISC) || | 399 | if ((tun->if_flags & IFF_PROMISC) || |
393 | memcmp(addr, tun->dev_addr, sizeof addr) == 0 || | 400 | memcmp(addr, tun->dev_addr, sizeof addr) == 0 || |
@@ -636,6 +643,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
636 | return 0; | 643 | return 0; |
637 | 644 | ||
638 | case SIOCGIFHWADDR: | 645 | case SIOCGIFHWADDR: |
646 | /* Note: the actual net device's address may be different */ | ||
639 | memcpy(ifr.ifr_hwaddr.sa_data, tun->dev_addr, | 647 | memcpy(ifr.ifr_hwaddr.sa_data, tun->dev_addr, |
640 | min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr)); | 648 | min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr)); |
641 | if (copy_to_user( argp, &ifr, sizeof ifr)) | 649 | if (copy_to_user( argp, &ifr, sizeof ifr)) |
@@ -643,16 +651,24 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
643 | return 0; | 651 | return 0; |
644 | 652 | ||
645 | case SIOCSIFHWADDR: | 653 | case SIOCSIFHWADDR: |
646 | /** Set the character device's hardware address. This is used when | 654 | { |
647 | * filtering packets being sent from the network device to the character | 655 | /* try to set the actual net device's hw address */ |
648 | * device. */ | 656 | int ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); |
649 | memcpy(tun->dev_addr, ifr.ifr_hwaddr.sa_data, | 657 | |
650 | min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr)); | 658 | if (ret == 0) { |
651 | DBG(KERN_DEBUG "%s: set hardware address: %x:%x:%x:%x:%x:%x\n", | 659 | /** Set the character device's hardware address. This is used when |
652 | tun->dev->name, | 660 | * filtering packets being sent from the network device to the character |
653 | tun->dev_addr[0], tun->dev_addr[1], tun->dev_addr[2], | 661 | * device. */ |
654 | tun->dev_addr[3], tun->dev_addr[4], tun->dev_addr[5]); | 662 | memcpy(tun->dev_addr, ifr.ifr_hwaddr.sa_data, |
655 | return 0; | 663 | min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr)); |
664 | DBG(KERN_DEBUG "%s: set hardware address: %x:%x:%x:%x:%x:%x\n", | ||
665 | tun->dev->name, | ||
666 | tun->dev_addr[0], tun->dev_addr[1], tun->dev_addr[2], | ||
667 | tun->dev_addr[3], tun->dev_addr[4], tun->dev_addr[5]); | ||
668 | } | ||
669 | |||
670 | return ret; | ||
671 | } | ||
656 | 672 | ||
657 | case SIOCADDMULTI: | 673 | case SIOCADDMULTI: |
658 | /** Add the specified group to the character device's multicast filter | 674 | /** Add the specified group to the character device's multicast filter |
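
The tun SIOCSIFHWADDR change makes the character device delegate to the real net device first and only mirror the address into its private filter copy on success. A hedged sketch of the call it now relies on (wrapper name is ours):

    #include <linux/netdevice.h>
    #include <linux/if.h>

    /* Sketch: push a new hardware address down to a net_device.
     * dev_set_mac_address() validates the address through the driver's
     * set_mac_address hook and returns 0 on success; callers normally
     * hold the RTNL lock. */
    static int sketch_set_hwaddr(struct net_device *dev, struct ifreq *ifr)
    {
            return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
    }
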
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 0d91d094edd9..f2dd7763cd0b 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c | |||
@@ -1708,7 +1708,6 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready, | |||
1708 | 1708 | ||
1709 | if(pkt_len < rx_copybreak && | 1709 | if(pkt_len < rx_copybreak && |
1710 | (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1710 | (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1711 | new_skb->dev = tp->dev; | ||
1712 | skb_reserve(new_skb, 2); | 1711 | skb_reserve(new_skb, 2); |
1713 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, | 1712 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, |
1714 | PKT_BUF_SZ, | 1713 | PKT_BUF_SZ, |
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index f3a972e74e9a..adea290a9d5e 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
@@ -1486,7 +1486,6 @@ static int rhine_rx(struct net_device *dev, int limit) | |||
1486 | copying to a minimally-sized skbuff. */ | 1486 | copying to a minimally-sized skbuff. */ |
1487 | if (pkt_len < rx_copybreak && | 1487 | if (pkt_len < rx_copybreak && |
1488 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | 1488 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { |
1489 | skb->dev = dev; | ||
1490 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1489 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1491 | pci_dma_sync_single_for_cpu(rp->pdev, | 1490 | pci_dma_sync_single_for_cpu(rp->pdev, |
1492 | rp->rx_skbuff_dma[entry], | 1491 | rp->rx_skbuff_dma[entry], |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 8e5d82051bd4..25b75b615188 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -1339,7 +1339,8 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, | |||
1339 | if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) | 1339 | if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) |
1340 | skb_reserve(new_skb, 2); | 1340 | skb_reserve(new_skb, 2); |
1341 | 1341 | ||
1342 | memcpy(new_skb->data, rx_skb[0]->data, pkt_size); | 1342 | skb_copy_from_linear_data(rx_skb[0], new_skb->data, |
1343 | pkt_size); | ||
1343 | *rx_skb = new_skb; | 1344 | *rx_skb = new_skb; |
1344 | ret = 0; | 1345 | ret = 0; |
1345 | } | 1346 | } |
@@ -1398,7 +1399,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
1398 | vptr->stats.multicast++; | 1399 | vptr->stats.multicast++; |
1399 | 1400 | ||
1400 | skb = rd_info->skb; | 1401 | skb = rd_info->skb; |
1401 | skb->dev = vptr->dev; | ||
1402 | 1402 | ||
1403 | pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, | 1403 | pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, |
1404 | vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1404 | vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); |
@@ -1428,7 +1428,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
1428 | PCI_DMA_FROMDEVICE); | 1428 | PCI_DMA_FROMDEVICE); |
1429 | 1429 | ||
1430 | skb_put(skb, pkt_len - 4); | 1430 | skb_put(skb, pkt_len - 4); |
1431 | skb->protocol = eth_type_trans(skb, skb->dev); | 1431 | skb->protocol = eth_type_trans(skb, vptr->dev); |
1432 | 1432 | ||
1433 | stats->rx_bytes += pkt_len; | 1433 | stats->rx_bytes += pkt_len; |
1434 | netif_rx(skb); | 1434 | netif_rx(skb); |
@@ -1928,7 +1928,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1928 | if (pktlen < ETH_ZLEN) { | 1928 | if (pktlen < ETH_ZLEN) { |
1929 | /* Cannot occur until ZC support */ | 1929 | /* Cannot occur until ZC support */ |
1930 | pktlen = ETH_ZLEN; | 1930 | pktlen = ETH_ZLEN; |
1931 | memcpy(tdinfo->buf, skb->data, skb->len); | 1931 | skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); |
1932 | memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); | 1932 | memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); |
1933 | tdinfo->skb = skb; | 1933 | tdinfo->skb = skb; |
1934 | tdinfo->skb_dma[0] = tdinfo->buf_dma; | 1934 | tdinfo->skb_dma[0] = tdinfo->buf_dma; |
@@ -1944,7 +1944,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1944 | int nfrags = skb_shinfo(skb)->nr_frags; | 1944 | int nfrags = skb_shinfo(skb)->nr_frags; |
1945 | tdinfo->skb = skb; | 1945 | tdinfo->skb = skb; |
1946 | if (nfrags > 6) { | 1946 | if (nfrags > 6) { |
1947 | memcpy(tdinfo->buf, skb->data, skb->len); | 1947 | skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); |
1948 | tdinfo->skb_dma[0] = tdinfo->buf_dma; | 1948 | tdinfo->skb_dma[0] = tdinfo->buf_dma; |
1949 | td_ptr->tdesc0.pktsize = | 1949 | td_ptr->tdesc0.pktsize = |
1950 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); | 1950 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); |
@@ -2007,7 +2007,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2007 | */ | 2007 | */ |
2008 | if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) | 2008 | if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) |
2009 | && (skb->ip_summed == CHECKSUM_PARTIAL)) { | 2009 | && (skb->ip_summed == CHECKSUM_PARTIAL)) { |
2010 | struct iphdr *ip = skb->nh.iph; | 2010 | const struct iphdr *ip = ip_hdr(skb); |
2011 | if (ip->protocol == IPPROTO_TCP) | 2011 | if (ip->protocol == IPPROTO_TCP) |
2012 | td_ptr->tdesc1.TCR |= TCR0_TCPCK; | 2012 | td_ptr->tdesc1.TCR |= TCR0_TCPCK; |
2013 | else if (ip->protocol == IPPROTO_UDP) | 2013 | else if (ip->protocol == IPPROTO_UDP) |
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 5b82e4fd0d73..23464735fa88 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c | |||
@@ -773,7 +773,7 @@ static int sppp_rx_done(struct channel_data *chan) | |||
773 | } | 773 | } |
774 | chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); | 774 | chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); |
775 | chan->rx_skb->dev = chan->pppdev.dev; | 775 | chan->rx_skb->dev = chan->pppdev.dev; |
776 | chan->rx_skb->mac.raw = chan->rx_skb->data; | 776 | skb_reset_mac_header(chan->rx_skb); |
777 | chan->stats.rx_packets++; | 777 | chan->stats.rx_packets++; |
778 | chan->stats.rx_bytes += chan->cosa->rxsize; | 778 | chan->stats.rx_bytes += chan->cosa->rxsize; |
779 | netif_rx(chan->rx_skb); | 779 | netif_rx(chan->rx_skb); |
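
The WAN drivers' "skb->mac.raw = skb->data;" lines all become skb_reset_mac_header(skb), and "skb->nh.raw = skb->data;" likewise becomes skb_reset_network_header(skb). Both record the current skb->data position as the respective header offset, as in this sketch:

    #include <linux/skbuff.h>

    /* Sketch: mark the current data pointer as the start of the MAC and
     * network headers before handing the frame to the stack. */
    static void sketch_mark_headers(struct sk_buff *skb)
    {
            skb_reset_mac_header(skb);      /* was skb->mac.raw = skb->data */
            skb_reset_network_header(skb);  /* was skb->nh.raw  = skb->data */
    }
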
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c index a631d1c2fa14..016b3ff3ea5e 100644 --- a/drivers/net/wan/cycx_x25.c +++ b/drivers/net/wan/cycx_x25.c | |||
@@ -834,7 +834,7 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd) | |||
834 | ++chan->ifstats.rx_packets; | 834 | ++chan->ifstats.rx_packets; |
835 | chan->ifstats.rx_bytes += pktlen; | 835 | chan->ifstats.rx_bytes += pktlen; |
836 | 836 | ||
837 | skb->mac.raw = skb->data; | 837 | skb_reset_mac_header(skb); |
838 | netif_rx(skb); | 838 | netif_rx(skb); |
839 | dev->last_rx = jiffies; /* timestamp */ | 839 | dev->last_rx = jiffies; /* timestamp */ |
840 | } | 840 | } |
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 736987559432..66be20c292b6 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c | |||
@@ -176,7 +176,7 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev) | |||
176 | if (process) | 176 | if (process) |
177 | { | 177 | { |
178 | /* we've set up the protocol, so discard the header */ | 178 | /* we've set up the protocol, so discard the header */ |
179 | skb->mac.raw = skb->data; | 179 | skb_reset_mac_header(skb); |
180 | skb_pull(skb, header); | 180 | skb_pull(skb, header); |
181 | dlp->stats.rx_bytes += skb->len; | 181 | dlp->stats.rx_bytes += skb->len; |
182 | netif_rx(skb); | 182 | netif_rx(skb); |
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 25021a7992a9..dca024471455 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c | |||
@@ -1904,7 +1904,8 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) | |||
1904 | struct TxFD *tx_fd = dpriv->tx_fd + last; | 1904 | struct TxFD *tx_fd = dpriv->tx_fd + last; |
1905 | 1905 | ||
1906 | skb->len = DUMMY_SKB_SIZE; | 1906 | skb->len = DUMMY_SKB_SIZE; |
1907 | memcpy(skb->data, version, strlen(version)%DUMMY_SKB_SIZE); | 1907 | skb_copy_to_linear_data(skb, version, |
1908 | strlen(version) % DUMMY_SKB_SIZE); | ||
1908 | tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE); | 1909 | tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE); |
1909 | tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data, | 1910 | tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data, |
1910 | DUMMY_SKB_SIZE, PCI_DMA_TODEVICE); | 1911 | DUMMY_SKB_SIZE, PCI_DMA_TODEVICE); |
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index c45d6a83339d..58a53b6d9b42 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c | |||
@@ -864,7 +864,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port, | |||
864 | static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev) | 864 | static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev) |
865 | { | 865 | { |
866 | skb->dev = dev; | 866 | skb->dev = dev; |
867 | skb->mac.raw = skb->data; | 867 | skb_reset_mac_header(skb); |
868 | skb->pkt_type = PACKET_HOST; | 868 | skb->pkt_type = PACKET_HOST; |
869 | return htons(ETH_P_CUST); | 869 | return htons(ETH_P_CUST); |
870 | } | 870 | } |
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index c9664fd8a917..00e0aaadabcc 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
@@ -124,7 +124,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type, | |||
124 | skb_put(skb, sizeof(struct cisco_packet)); | 124 | skb_put(skb, sizeof(struct cisco_packet)); |
125 | skb->priority = TC_PRIO_CONTROL; | 125 | skb->priority = TC_PRIO_CONTROL; |
126 | skb->dev = dev; | 126 | skb->dev = dev; |
127 | skb->nh.raw = skb->data; | 127 | skb_reset_network_header(skb); |
128 | 128 | ||
129 | dev_queue_xmit(skb); | 129 | dev_queue_xmit(skb); |
130 | } | 130 | } |
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index c6c3c757d6f1..aeb2789adf26 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c | |||
@@ -533,7 +533,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) | |||
533 | skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI); | 533 | skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI); |
534 | fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); | 534 | fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); |
535 | } | 535 | } |
536 | data = skb->tail; | 536 | data = skb_tail_pointer(skb); |
537 | data[i++] = LMI_CALLREF; | 537 | data[i++] = LMI_CALLREF; |
538 | data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY; | 538 | data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY; |
539 | if (lmi == LMI_ANSI) | 539 | if (lmi == LMI_ANSI) |
@@ -590,7 +590,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) | |||
590 | skb_put(skb, i); | 590 | skb_put(skb, i); |
591 | skb->priority = TC_PRIO_CONTROL; | 591 | skb->priority = TC_PRIO_CONTROL; |
592 | skb->dev = dev; | 592 | skb->dev = dev; |
593 | skb->nh.raw = skb->data; | 593 | skb_reset_network_header(skb); |
594 | 594 | ||
595 | dev_queue_xmit(skb); | 595 | dev_queue_xmit(skb); |
596 | } | 596 | } |
@@ -1011,7 +1011,6 @@ static int fr_rx(struct sk_buff *skb) | |||
1011 | stats->rx_bytes += skb->len; | 1011 | stats->rx_bytes += skb->len; |
1012 | if (pvc->state.becn) | 1012 | if (pvc->state.becn) |
1013 | stats->rx_compressed++; | 1013 | stats->rx_compressed++; |
1014 | skb->dev = dev; | ||
1015 | netif_rx(skb); | 1014 | netif_rx(skb); |
1016 | return NET_RX_SUCCESS; | 1015 | return NET_RX_SUCCESS; |
1017 | } else { | 1016 | } else { |
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index a02c5fb40567..9ba3e4ee6ec7 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c | |||
@@ -59,7 +59,7 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) | |||
59 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ | 59 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ |
60 | skb_trim(skb, skb->len-2); | 60 | skb_trim(skb, skb->len-2); |
61 | skb->protocol=__constant_htons(ETH_P_WAN_PPP); | 61 | skb->protocol=__constant_htons(ETH_P_WAN_PPP); |
62 | skb->mac.raw=skb->data; | 62 | skb_reset_mac_header(skb); |
63 | skb->dev=c->netdevice; | 63 | skb->dev=c->netdevice; |
64 | /* | 64 | /* |
65 | * Send it to the PPP layer. We don't have time to process | 65 | * Send it to the PPP layer. We don't have time to process |
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 2b54f1bc3a0d..ae132c1c5459 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c | |||
@@ -1636,7 +1636,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1636 | if (nsb) { | 1636 | if (nsb) { |
1637 | sc->lmc_rxq[i] = nsb; | 1637 | sc->lmc_rxq[i] = nsb; |
1638 | nsb->dev = dev; | 1638 | nsb->dev = dev; |
1639 | sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail); | 1639 | sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); |
1640 | } | 1640 | } |
1641 | sc->failed_recv_alloc = 1; | 1641 | sc->failed_recv_alloc = 1; |
1642 | goto skip_packet; | 1642 | goto skip_packet; |
@@ -1667,8 +1667,8 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1667 | skb_put (skb, len); | 1667 | skb_put (skb, len); |
1668 | skb->protocol = lmc_proto_type(sc, skb); | 1668 | skb->protocol = lmc_proto_type(sc, skb); |
1669 | skb->protocol = htons(ETH_P_WAN_PPP); | 1669 | skb->protocol = htons(ETH_P_WAN_PPP); |
1670 | skb->mac.raw = skb->data; | 1670 | skb_reset_mac_header(skb); |
1671 | // skb->nh.raw = skb->data; | 1671 | /* skb_reset_network_header(skb); */ |
1672 | skb->dev = dev; | 1672 | skb->dev = dev; |
1673 | lmc_proto_netif(sc, skb); | 1673 | lmc_proto_netif(sc, skb); |
1674 | 1674 | ||
@@ -1679,7 +1679,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1679 | if (nsb) { | 1679 | if (nsb) { |
1680 | sc->lmc_rxq[i] = nsb; | 1680 | sc->lmc_rxq[i] = nsb; |
1681 | nsb->dev = dev; | 1681 | nsb->dev = dev; |
1682 | sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail); | 1682 | sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); |
1683 | /* Transferred to 21140 below */ | 1683 | /* Transferred to 21140 below */ |
1684 | } | 1684 | } |
1685 | else { | 1685 | else { |
@@ -1702,11 +1702,11 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1702 | if(!nsb) { | 1702 | if(!nsb) { |
1703 | goto give_it_anyways; | 1703 | goto give_it_anyways; |
1704 | } | 1704 | } |
1705 | memcpy(skb_put(nsb, len), skb->data, len); | 1705 | skb_copy_from_linear_data(skb, skb_put(nsb, len), len); |
1706 | 1706 | ||
1707 | nsb->protocol = lmc_proto_type(sc, skb); | 1707 | nsb->protocol = lmc_proto_type(sc, skb); |
1708 | nsb->mac.raw = nsb->data; | 1708 | skb_reset_mac_header(nsb); |
1709 | // nsb->nh.raw = nsb->data; | 1709 | /* skb_reset_network_header(nsb); */ |
1710 | nsb->dev = dev; | 1710 | nsb->dev = dev; |
1711 | lmc_proto_netif(sc, nsb); | 1711 | lmc_proto_netif(sc, nsb); |
1712 | } | 1712 | } |
@@ -1932,7 +1932,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/ | |||
1932 | sc->lmc_rxring[i].status = 0x80000000; | 1932 | sc->lmc_rxring[i].status = 0x80000000; |
1933 | 1933 | ||
1934 | /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */ | 1934 | /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */ |
1935 | sc->lmc_rxring[i].length = skb->end - skb->data; | 1935 | sc->lmc_rxring[i].length = skb_tailroom(skb); |
1936 | 1936 | ||
1937 | /* used to be tail which is dumb since you're thinking why write | 1937 | /* used to be tail which is dumb since you're thinking why write |
1938 | * to the end of the packet but since there's nothing there tail == data | 1938 | * to the end of the packet but since there's nothing there tail == data |
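The lmc_softreset() hunk above swaps the open-coded skb->end - skb->data for skb_tailroom(). The two agree here because the skb is freshly allocated: nothing has been skb_put() into it yet, so tail == data and the tailroom is the entire remaining buffer, which is exactly the RX descriptor length the driver wants. A sketch of the arithmetic, assuming a linear skb:

    #include <linux/skbuff.h>

    /* skb_tailroom() is skb->end - skb->tail for linear skbs; right
     * after allocation tail == data, so this matches the old
     * skb->end - skb->data expression the hunk replaces. */
    static int rx_descriptor_len(const struct sk_buff *skb)
    {
            return skb_tailroom(skb);
    }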
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index 62184dee377c..999bf71937ca 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c | |||
@@ -1755,17 +1755,17 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx) | |||
1755 | 1755 | ||
1756 | skb->dev = dev; | 1756 | skb->dev = dev; |
1757 | skb->protocol = htons(ETH_P_CUST); | 1757 | skb->protocol = htons(ETH_P_CUST); |
1758 | skb->mac.raw = skb->data; | 1758 | skb_reset_mac_header(skb); |
1759 | skb->pkt_type = PACKET_HOST; | 1759 | skb->pkt_type = PACKET_HOST; |
1760 | skb->len = 10 + skb_main->len; | 1760 | skb->len = 10 + skb_main->len; |
1761 | 1761 | ||
1762 | memcpy(skb->data, dev->name, 5); | 1762 | skb_copy_to_linear_data(skb, dev->name, 5); |
1763 | skb->data[5] = '['; | 1763 | skb->data[5] = '['; |
1764 | skb->data[6] = rx_tx; | 1764 | skb->data[6] = rx_tx; |
1765 | skb->data[7] = ']'; | 1765 | skb->data[7] = ']'; |
1766 | skb->data[8] = ':'; | 1766 | skb->data[8] = ':'; |
1767 | skb->data[9] = ' '; | 1767 | skb->data[9] = ' '; |
1768 | memcpy(&skb->data[10], skb_main->data, skb_main->len); | 1768 | skb_copy_from_linear_data(skb_main, &skb->data[10], skb_main->len); |
1769 | 1769 | ||
1770 | netif_rx(skb); | 1770 | netif_rx(skb); |
1771 | } | 1771 | } |
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c index 5873c346e7e9..07dbdfbfc15d 100644 --- a/drivers/net/wan/pc300_tty.c +++ b/drivers/net/wan/pc300_tty.c | |||
@@ -1003,17 +1003,17 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx) | |||
1003 | skb_put (skb, 10 + len); | 1003 | skb_put (skb, 10 + len); |
1004 | skb->dev = dev->dev; | 1004 | skb->dev = dev->dev; |
1005 | skb->protocol = htons(ETH_P_CUST); | 1005 | skb->protocol = htons(ETH_P_CUST); |
1006 | skb->mac.raw = skb->data; | 1006 | skb_reset_mac_header(skb); |
1007 | skb->pkt_type = PACKET_HOST; | 1007 | skb->pkt_type = PACKET_HOST; |
1008 | skb->len = 10 + len; | 1008 | skb->len = 10 + len; |
1009 | 1009 | ||
1010 | memcpy(skb->data,dev->dev->name,5); | 1010 | skb_copy_to_linear_data(skb, dev->dev->name, 5); |
1011 | skb->data[5] = '['; | 1011 | skb->data[5] = '['; |
1012 | skb->data[6] = rxtx; | 1012 | skb->data[6] = rxtx; |
1013 | skb->data[7] = ']'; | 1013 | skb->data[7] = ']'; |
1014 | skb->data[8] = ':'; | 1014 | skb->data[8] = ':'; |
1015 | skb->data[9] = ' '; | 1015 | skb->data[9] = ' '; |
1016 | memcpy(&skb->data[10], buf, len); | 1016 | skb_copy_to_linear_data_offset(skb, 10, buf, len); |
1017 | netif_rx(skb); | 1017 | netif_rx(skb); |
1018 | } | 1018 | } |
1019 | 1019 | ||
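The two pc300 trace routines above use the direction-explicit copy helpers this merge introduces; each is a thin wrapper around memcpy() on the skb's linear buffer, and none of them walks paged fragments. A sketch of the to-variants, reusing the 5- and 10-byte offsets from the hunks above and assuming the skb has already been skb_put() to its final length:

    #include <linux/skbuff.h>

    /* Fill a trace header the way cpc_tty_trace() does above. */
    static void trace_fill(struct sk_buff *skb, const char *name,
                           const void *payload, unsigned int len)
    {
            /* memcpy(skb->data, name, 5) */
            skb_copy_to_linear_data(skb, name, 5);
            /* memcpy(skb->data + 10, payload, len) */
            skb_copy_to_linear_data_offset(skb, 10, payload, len);
    }

The from-variants mirror this: skb_copy_from_linear_data(skb, buf, len) is memcpy(buf, skb->data, len), with the _offset form reading from skb->data + offset instead.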
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index fc5c0c611ffd..35eded7ffb2d 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c | |||
@@ -999,11 +999,6 @@ get_rx_buf( struct net_device *dev ) | |||
999 | if( !skb ) | 999 | if( !skb ) |
1000 | return NULL; | 1000 | return NULL; |
1001 | 1001 | ||
1002 | #ifdef CONFIG_SBNI_MULTILINE | ||
1003 | skb->dev = ((struct net_local *) dev->priv)->master; | ||
1004 | #else | ||
1005 | skb->dev = dev; | ||
1006 | #endif | ||
1007 | skb_reserve( skb, 2 ); /* Align IP on longword boundaries */ | 1002 | skb_reserve( skb, 2 ); /* Align IP on longword boundaries */ |
1008 | return skb; | 1003 | return skb; |
1009 | } | 1004 | } |
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 70fb1b98b1dd..131358108c5a 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c | |||
@@ -61,7 +61,7 @@ static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) | |||
61 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ | 61 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ |
62 | skb_trim(skb, skb->len-2); | 62 | skb_trim(skb, skb->len-2); |
63 | skb->protocol=htons(ETH_P_WAN_PPP); | 63 | skb->protocol=htons(ETH_P_WAN_PPP); |
64 | skb->mac.raw=skb->data; | 64 | skb_reset_mac_header(skb); |
65 | skb->dev=c->netdevice; | 65 | skb->dev=c->netdevice; |
66 | /* | 66 | /* |
67 | * Send it to the PPP layer. We don't have time to process | 67 | * Send it to the PPP layer. We don't have time to process |
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c index 218f7b574ab3..67fc67cfd452 100644 --- a/drivers/net/wan/syncppp.c +++ b/drivers/net/wan/syncppp.c | |||
@@ -227,7 +227,7 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb) | |||
227 | unsigned long flags; | 227 | unsigned long flags; |
228 | 228 | ||
229 | skb->dev=dev; | 229 | skb->dev=dev; |
230 | skb->mac.raw=skb->data; | 230 | skb_reset_mac_header(skb); |
231 | 231 | ||
232 | if (dev->flags & IFF_RUNNING) | 232 | if (dev->flags & IFF_RUNNING) |
233 | { | 233 | { |
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index 8b4540bfc1b0..98ef400908b8 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c | |||
@@ -1656,7 +1656,7 @@ static void z8530_rx_done(struct z8530_channel *c) | |||
1656 | else | 1656 | else |
1657 | { | 1657 | { |
1658 | skb_put(skb, ct); | 1658 | skb_put(skb, ct); |
1659 | memcpy(skb->data, rxb, ct); | 1659 | skb_copy_to_linear_data(skb, rxb, ct); |
1660 | c->stats.rx_packets++; | 1660 | c->stats.rx_packets++; |
1661 | c->stats.rx_bytes+=ct; | 1661 | c->stats.rx_bytes+=ct; |
1662 | } | 1662 | } |
@@ -1782,7 +1782,7 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb) | |||
1782 | */ | 1782 | */ |
1783 | c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used]; | 1783 | c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used]; |
1784 | c->tx_dma_used^=1; /* Flip temp buffer */ | 1784 | c->tx_dma_used^=1; /* Flip temp buffer */ |
1785 | memcpy(c->tx_next_ptr, skb->data, skb->len); | 1785 | skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len); |
1786 | } | 1786 | } |
1787 | else | 1787 | else |
1788 | c->tx_next_ptr=skb->data; | 1788 | c->tx_next_ptr=skb->data; |
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index ece3d9c2dc61..4426841b2be6 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig | |||
@@ -2,47 +2,21 @@ | |||
2 | # Wireless LAN device configuration | 2 | # Wireless LAN device configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | menu "Wireless LAN (non-hamradio)" | 5 | menu "Wireless LAN" |
6 | depends on NETDEVICES | ||
7 | |||
8 | config NET_RADIO | ||
9 | bool "Wireless LAN drivers (non-hamradio) & Wireless Extensions" | ||
10 | select WIRELESS_EXT | ||
11 | ---help--- | ||
12 | Support for wireless LANs and everything having to do with radio, | ||
13 | but not with amateur radio or FM broadcasting. | ||
14 | |||
15 | Saying Y here also enables the Wireless Extensions (creates | ||
16 | /proc/net/wireless and enables iwconfig access). The Wireless | ||
17 | Extension is a generic API allowing a driver to expose to the user | ||
18 | space configuration and statistics specific to common Wireless LANs. | ||
19 | The beauty of it is that a single set of tools can support all the | ||
20 | variations of Wireless LANs, regardless of their type (as long as | ||
21 | the driver supports Wireless Extension). Another advantage is that | ||
22 | these parameters may be changed on the fly without restarting the | ||
23 | driver (or Linux). If you wish to use Wireless Extensions with | ||
24 | wireless PCMCIA (PC-) cards, you need to say Y here; you can fetch | ||
25 | the tools from | ||
26 | <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. | ||
27 | 6 | ||
28 | config NET_WIRELESS_RTNETLINK | 7 | config WLAN_PRE80211 |
29 | bool "Wireless Extension API over RtNetlink" | 8 | bool "Wireless LAN (pre-802.11)" |
30 | depends on NET_RADIO | 9 | depends on NETDEVICES |
31 | ---help--- | 10 | ---help--- |
32 | Support the Wireless Extension API over the RtNetlink socket | 11 | Say Y if you have any pre-802.11 wireless LAN hardware. |
33 | in addition to the traditional ioctl interface (selected above). | ||
34 | 12 | ||
35 | For now, few tools use this facility, but it might grow in the | 13 | This option does not affect the kernel build, it only |
36 | future. The only downside is that it adds 4.5 kB to your kernel. | 14 | lets you choose drivers. |
37 | |||
38 | # Note : the cards are obsolete (can't buy them anymore), but the drivers | ||
39 | # are not, as people are still using them... | ||
40 | comment "Obsolete Wireless cards support (pre-802.11)" | ||
41 | depends on NET_RADIO && (INET || ISA || PCMCIA) | ||
42 | 15 | ||
43 | config STRIP | 16 | config STRIP |
44 | tristate "STRIP (Metricom starmode radio IP)" | 17 | tristate "STRIP (Metricom starmode radio IP)" |
45 | depends on NET_RADIO && INET | 18 | depends on INET && WLAN_PRE80211 |
19 | select WIRELESS_EXT | ||
46 | ---help--- | 20 | ---help--- |
47 | Say Y if you have a Metricom radio and intend to use Starmode Radio | 21 | Say Y if you have a Metricom radio and intend to use Starmode Radio |
48 | IP. STRIP is a radio protocol developed for the MosquitoNet project | 22 | IP. STRIP is a radio protocol developed for the MosquitoNet project |
@@ -65,7 +39,8 @@ config STRIP | |||
65 | 39 | ||
66 | config ARLAN | 40 | config ARLAN |
67 | tristate "Aironet Arlan 655 & IC2200 DS support" | 41 | tristate "Aironet Arlan 655 & IC2200 DS support" |
68 | depends on NET_RADIO && ISA && !64BIT | 42 | depends on ISA && !64BIT && WLAN_PRE80211 |
43 | select WIRELESS_EXT | ||
69 | ---help--- | 44 | ---help--- |
70 | Aironet makes Arlan, a class of wireless LAN adapters. These use the | 45 | Aironet makes Arlan, a class of wireless LAN adapters. These use the |
71 | www.Telxon.com chip, which is also used on several similar cards. | 46 | www.Telxon.com chip, which is also used on several similar cards. |
@@ -80,7 +55,8 @@ config ARLAN | |||
80 | 55 | ||
81 | config WAVELAN | 56 | config WAVELAN |
82 | tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support" | 57 | tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support" |
83 | depends on NET_RADIO && ISA | 58 | depends on ISA && WLAN_PRE80211 |
59 | select WIRELESS_EXT | ||
84 | ---help--- | 60 | ---help--- |
85 | The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is | 61 | The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is |
86 | a Radio LAN (wireless Ethernet-like Local Area Network) using the | 62 | a Radio LAN (wireless Ethernet-like Local Area Network) using the |
@@ -107,7 +83,8 @@ config WAVELAN | |||
107 | 83 | ||
108 | config PCMCIA_WAVELAN | 84 | config PCMCIA_WAVELAN |
109 | tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support" | 85 | tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support" |
110 | depends on NET_RADIO && PCMCIA | 86 | depends on PCMCIA && WLAN_PRE80211 |
87 | select WIRELESS_EXT | ||
111 | help | 88 | help |
112 | Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA | 89 | Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA |
113 | (PC-card) wireless Ethernet networking card to your computer. This | 90 | (PC-card) wireless Ethernet networking card to your computer. This |
@@ -118,7 +95,8 @@ config PCMCIA_WAVELAN | |||
118 | 95 | ||
119 | config PCMCIA_NETWAVE | 96 | config PCMCIA_NETWAVE |
120 | tristate "Xircom Netwave AirSurfer Pcmcia wireless support" | 97 | tristate "Xircom Netwave AirSurfer Pcmcia wireless support" |
121 | depends on NET_RADIO && PCMCIA | 98 | depends on PCMCIA && WLAN_PRE80211 |
99 | select WIRELESS_EXT | ||
122 | help | 100 | help |
123 | Say Y here if you intend to attach this type of PCMCIA (PC-card) | 101 | Say Y here if you intend to attach this type of PCMCIA (PC-card) |
124 | wireless Ethernet networking card to your computer. | 102 | wireless Ethernet networking card to your computer. |
@@ -126,12 +104,20 @@ config PCMCIA_NETWAVE | |||
126 | To compile this driver as a module, choose M here: the module will be | 104 | To compile this driver as a module, choose M here: the module will be |
127 | called netwave_cs. If unsure, say N. | 105 | called netwave_cs. If unsure, say N. |
128 | 106 | ||
129 | comment "Wireless 802.11 Frequency Hopping cards support" | 107 | |
130 | depends on NET_RADIO && PCMCIA | 108 | config WLAN_80211 |
109 | bool "Wireless LAN (IEEE 802.11)" | ||
110 | depends on NETDEVICES | ||
111 | ---help--- | ||
112 | Say Y if you have any 802.11 wireless LAN hardware. | ||
113 | |||
114 | This option does not affect the kernel build, it only | ||
115 | lets you choose drivers. | ||
131 | 116 | ||
132 | config PCMCIA_RAYCS | 117 | config PCMCIA_RAYCS |
133 | tristate "Aviator/Raytheon 2.4MHz wireless support" | 118 | tristate "Aviator/Raytheon 2.4MHz wireless support" |
134 | depends on NET_RADIO && PCMCIA | 119 | depends on PCMCIA && WLAN_80211 |
120 | select WIRELESS_EXT | ||
135 | ---help--- | 121 | ---help--- |
136 | Say Y here if you intend to attach an Aviator/Raytheon PCMCIA | 122 | Say Y here if you intend to attach an Aviator/Raytheon PCMCIA |
137 | (PC-card) wireless Ethernet networking card to your computer. | 123 | (PC-card) wireless Ethernet networking card to your computer. |
@@ -141,12 +127,10 @@ config PCMCIA_RAYCS | |||
141 | To compile this driver as a module, choose M here: the module will be | 127 | To compile this driver as a module, choose M here: the module will be |
142 | called ray_cs. If unsure, say N. | 128 | called ray_cs. If unsure, say N. |
143 | 129 | ||
144 | comment "Wireless 802.11b ISA/PCI cards support" | ||
145 | depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA) | ||
146 | |||
147 | config IPW2100 | 130 | config IPW2100 |
148 | tristate "Intel PRO/Wireless 2100 Network Connection" | 131 | tristate "Intel PRO/Wireless 2100 Network Connection" |
149 | depends on NET_RADIO && PCI | 132 | depends on PCI && WLAN_80211 |
133 | select WIRELESS_EXT | ||
150 | select FW_LOADER | 134 | select FW_LOADER |
151 | select IEEE80211 | 135 | select IEEE80211 |
152 | ---help--- | 136 | ---help--- |
@@ -200,7 +184,8 @@ config IPW2100_DEBUG | |||
200 | 184 | ||
201 | config IPW2200 | 185 | config IPW2200 |
202 | tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" | 186 | tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" |
203 | depends on NET_RADIO && PCI | 187 | depends on PCI && WLAN_80211 |
188 | select WIRELESS_EXT | ||
204 | select FW_LOADER | 189 | select FW_LOADER |
205 | select IEEE80211 | 190 | select IEEE80211 |
206 | ---help--- | 191 | ---help--- |
@@ -282,7 +267,8 @@ config IPW2200_DEBUG | |||
282 | 267 | ||
283 | config AIRO | 268 | config AIRO |
284 | tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" | 269 | tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" |
285 | depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN) | 270 | depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN) |
271 | select WIRELESS_EXT | ||
286 | select CRYPTO | 272 | select CRYPTO |
287 | ---help--- | 273 | ---help--- |
288 | This is the standard Linux driver to support Cisco/Aironet ISA and | 274 | This is the standard Linux driver to support Cisco/Aironet ISA and |
@@ -299,7 +285,8 @@ config AIRO | |||
299 | 285 | ||
300 | config HERMES | 286 | config HERMES |
301 | tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)" | 287 | tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)" |
302 | depends on NET_RADIO && (PPC_PMAC || PCI || PCMCIA) | 288 | depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211 |
289 | select WIRELESS_EXT | ||
303 | ---help--- | 290 | ---help--- |
304 | A driver for 802.11b wireless cards based on the "Hermes" or | 291 | A driver for 802.11b wireless cards based on the "Hermes" or |
305 | Intersil HFA384x (Prism 2) MAC controller. This includes the vast | 292 | Intersil HFA384x (Prism 2) MAC controller. This includes the vast |
@@ -373,7 +360,8 @@ config PCI_HERMES | |||
373 | 360 | ||
374 | config ATMEL | 361 | config ATMEL |
375 | tristate "Atmel at76c50x chipset 802.11b support" | 362 | tristate "Atmel at76c50x chipset 802.11b support" |
376 | depends on NET_RADIO && (PCI || PCMCIA) | 363 | depends on (PCI || PCMCIA) && WLAN_80211 |
364 | select WIRELESS_EXT | ||
377 | select FW_LOADER | 365 | select FW_LOADER |
378 | select CRC32 | 366 | select CRC32 |
379 | ---help--- | 367 | ---help--- |
@@ -394,13 +382,9 @@ config PCI_ATMEL | |||
394 | Enable support for PCI and mini-PCI cards containing the | 382 | Enable support for PCI and mini-PCI cards containing the |
395 | Atmel at76c506 chip. | 383 | Atmel at76c506 chip. |
396 | 384 | ||
397 | # If Pcmcia is compiled in, offer Pcmcia cards... | ||
398 | comment "Wireless 802.11b Pcmcia/Cardbus cards support" | ||
399 | depends on NET_RADIO && PCMCIA | ||
400 | |||
401 | config PCMCIA_HERMES | 385 | config PCMCIA_HERMES |
402 | tristate "Hermes PCMCIA card support" | 386 | tristate "Hermes PCMCIA card support" |
403 | depends on NET_RADIO && PCMCIA && HERMES | 387 | depends on PCMCIA && HERMES |
404 | ---help--- | 388 | ---help--- |
405 | A driver for "Hermes" chipset based PCMCIA wireless adaptors, such | 389 | A driver for "Hermes" chipset based PCMCIA wireless adaptors, such |
406 | as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/ | 390 | as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/ |
@@ -420,7 +404,7 @@ config PCMCIA_HERMES | |||
420 | 404 | ||
421 | config PCMCIA_SPECTRUM | 405 | config PCMCIA_SPECTRUM |
422 | tristate "Symbol Spectrum24 Trilogy PCMCIA card support" | 406 | tristate "Symbol Spectrum24 Trilogy PCMCIA card support" |
423 | depends on NET_RADIO && PCMCIA && HERMES | 407 | depends on PCMCIA && HERMES |
424 | select FW_LOADER | 408 | select FW_LOADER |
425 | ---help--- | 409 | ---help--- |
426 | 410 | ||
@@ -434,7 +418,8 @@ config PCMCIA_SPECTRUM | |||
434 | 418 | ||
435 | config AIRO_CS | 419 | config AIRO_CS |
436 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" | 420 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" |
437 | depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) | 421 | depends on PCMCIA && (BROKEN || !M32R) && WLAN_80211 |
422 | select WIRELESS_EXT | ||
438 | select CRYPTO | 423 | select CRYPTO |
439 | select CRYPTO_AES | 424 | select CRYPTO_AES |
440 | ---help--- | 425 | ---help--- |
@@ -458,7 +443,8 @@ config AIRO_CS | |||
458 | 443 | ||
459 | config PCMCIA_ATMEL | 444 | config PCMCIA_ATMEL |
460 | tristate "Atmel at76c502/at76c504 PCMCIA cards" | 445 | tristate "Atmel at76c502/at76c504 PCMCIA cards" |
461 | depends on NET_RADIO && ATMEL && PCMCIA | 446 | depends on ATMEL && PCMCIA |
447 | select WIRELESS_EXT | ||
462 | select FW_LOADER | 448 | select FW_LOADER |
463 | select CRC32 | 449 | select CRC32 |
464 | ---help--- | 450 | ---help--- |
@@ -467,17 +453,17 @@ config PCMCIA_ATMEL | |||
467 | 453 | ||
468 | config PCMCIA_WL3501 | 454 | config PCMCIA_WL3501 |
469 | tristate "Planet WL3501 PCMCIA cards" | 455 | tristate "Planet WL3501 PCMCIA cards" |
470 | depends on NET_RADIO && EXPERIMENTAL && PCMCIA | 456 | depends on EXPERIMENTAL && PCMCIA && WLAN_80211 |
457 | select WIRELESS_EXT | ||
471 | ---help--- | 458 | ---help--- |
472 | A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet. | 459 | A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet. |
473 | It has basic support for Linux wireless extensions and initial | 460 | It has basic support for Linux wireless extensions and initial |
474 | micro support for ethtool. | 461 | micro support for ethtool. |
475 | 462 | ||
476 | comment "Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support" | ||
477 | depends on NET_RADIO && PCI | ||
478 | config PRISM54 | 463 | config PRISM54 |
479 | tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus' | 464 | tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus' |
480 | depends on PCI && NET_RADIO && EXPERIMENTAL | 465 | depends on PCI && EXPERIMENTAL && WLAN_80211 |
466 | select WIRELESS_EXT | ||
481 | select FW_LOADER | 467 | select FW_LOADER |
482 | ---help--- | 468 | ---help--- |
483 | Enable PCI and Cardbus support for the following chipset based cards: | 469 | Enable PCI and Cardbus support for the following chipset based cards: |
@@ -523,7 +509,8 @@ config PRISM54 | |||
523 | 509 | ||
524 | config USB_ZD1201 | 510 | config USB_ZD1201 |
525 | tristate "USB ZD1201 based Wireless device support" | 511 | tristate "USB ZD1201 based Wireless device support" |
526 | depends on USB && NET_RADIO | 512 | depends on USB && WLAN_80211 |
513 | select WIRELESS_EXT | ||
527 | select FW_LOADER | 514 | select FW_LOADER |
528 | ---help--- | 515 | ---help--- |
529 | Say Y if you want to use wireless LAN adapters based on the ZyDAS | 516 | Say Y if you want to use wireless LAN adapters based on the ZyDAS |
@@ -542,11 +529,4 @@ source "drivers/net/wireless/hostap/Kconfig" | |||
542 | source "drivers/net/wireless/bcm43xx/Kconfig" | 529 | source "drivers/net/wireless/bcm43xx/Kconfig" |
543 | source "drivers/net/wireless/zd1211rw/Kconfig" | 530 | source "drivers/net/wireless/zd1211rw/Kconfig" |
544 | 531 | ||
545 | # yes, this works even when no drivers are selected | ||
546 | config NET_WIRELESS | ||
547 | bool | ||
548 | depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA) | ||
549 | default y | ||
550 | |||
551 | endmenu | 532 | endmenu |
552 | |||
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 2ada76a93cb6..7fe0a61091a6 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -2444,7 +2444,7 @@ static int add_airo_dev( struct net_device *dev ); | |||
2444 | 2444 | ||
2445 | static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr) | 2445 | static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr) |
2446 | { | 2446 | { |
2447 | memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); | 2447 | memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); |
2448 | return ETH_ALEN; | 2448 | return ETH_ALEN; |
2449 | } | 2449 | } |
2450 | 2450 | ||
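The airo parser above now reads the transmitter address through skb_mac_header() rather than the removed skb->mac.raw field; the accessor returns whatever position skb_reset_mac_header() recorded on the receive path. The 10-byte offset skips frame control, duration and addr1 in the 802.11 header. A sketch under those assumptions:

    #include <linux/if_ether.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Extract addr2 (the transmitter) from a frame whose MAC header
     * was recorded earlier with skb_reset_mac_header(). */
    static int sketch_header_parse(const struct sk_buff *skb,
                                   unsigned char *haddr)
    {
            memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN);
            return ETH_ALEN;
    }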
@@ -3411,14 +3411,12 @@ badrx: | |||
3411 | OUT4500( apriv, EVACK, EV_RX); | 3411 | OUT4500( apriv, EVACK, EV_RX); |
3412 | 3412 | ||
3413 | if (test_bit(FLAG_802_11, &apriv->flags)) { | 3413 | if (test_bit(FLAG_802_11, &apriv->flags)) { |
3414 | skb->mac.raw = skb->data; | 3414 | skb_reset_mac_header(skb); |
3415 | skb->pkt_type = PACKET_OTHERHOST; | 3415 | skb->pkt_type = PACKET_OTHERHOST; |
3416 | skb->dev = apriv->wifidev; | 3416 | skb->dev = apriv->wifidev; |
3417 | skb->protocol = htons(ETH_P_802_2); | 3417 | skb->protocol = htons(ETH_P_802_2); |
3418 | } else { | 3418 | } else |
3419 | skb->dev = dev; | ||
3420 | skb->protocol = eth_type_trans(skb,dev); | 3419 | skb->protocol = eth_type_trans(skb,dev); |
3421 | } | ||
3422 | skb->dev->last_rx = jiffies; | 3420 | skb->dev->last_rx = jiffies; |
3423 | skb->ip_summed = CHECKSUM_NONE; | 3421 | skb->ip_summed = CHECKSUM_NONE; |
3424 | 3422 | ||
@@ -3641,7 +3639,6 @@ badmic: | |||
3641 | } | 3639 | } |
3642 | #endif /* WIRELESS_SPY */ | 3640 | #endif /* WIRELESS_SPY */ |
3643 | 3641 | ||
3644 | skb->dev = ai->dev; | ||
3645 | skb->ip_summed = CHECKSUM_NONE; | 3642 | skb->ip_summed = CHECKSUM_NONE; |
3646 | skb->protocol = eth_type_trans(skb, ai->dev); | 3643 | skb->protocol = eth_type_trans(skb, ai->dev); |
3647 | skb->dev->last_rx = jiffies; | 3644 | skb->dev->last_rx = jiffies; |
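The two airo.c hunks above, like matching ones in atmel.c, orinoco.c, netwave_cs.c and ray_cs.c further down, delete an explicit skb->dev assignment ahead of eth_type_trans(). The assignment is redundant because eth_type_trans() records the device on the skb itself before classifying the frame. A sketch of just the lines that make the deletions safe, not the verbatim helper:

    #include <linux/etherdevice.h>
    #include <linux/if_ether.h>
    #include <linux/skbuff.h>

    __be16 sketch_eth_type_trans(struct sk_buff *skb, struct net_device *dev)
    {
            struct ethhdr *eth;

            skb->dev = dev;         /* the line callers no longer need */
            skb_reset_mac_header(skb);
            skb_pull(skb, ETH_HLEN);
            eth = eth_hdr(skb);
            /* ... broadcast/multicast/otherhost classification elided ... */
            return eth->h_proto;
    }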
@@ -3749,7 +3746,7 @@ void mpi_receive_802_11 (struct airo_info *ai) | |||
3749 | wireless_spy_update(ai->dev, sa, &wstats); | 3746 | wireless_spy_update(ai->dev, sa, &wstats); |
3750 | } | 3747 | } |
3751 | #endif /* IW_WIRELESS_SPY */ | 3748 | #endif /* IW_WIRELESS_SPY */ |
3752 | skb->mac.raw = skb->data; | 3749 | skb_reset_mac_header(skb); |
3753 | skb->pkt_type = PACKET_OTHERHOST; | 3750 | skb->pkt_type = PACKET_OTHERHOST; |
3754 | skb->dev = ai->wifidev; | 3751 | skb->dev = ai->wifidev; |
3755 | skb->protocol = htons(ETH_P_802_2); | 3752 | skb->protocol = htons(ETH_P_802_2); |
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c index 4688e56b69c7..498e8486d125 100644 --- a/drivers/net/wireless/arlan-main.c +++ b/drivers/net/wireless/arlan-main.c | |||
@@ -1500,7 +1500,6 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short | |||
1500 | break; | 1500 | break; |
1501 | } | 1501 | } |
1502 | skb_reserve(skb, 2); | 1502 | skb_reserve(skb, 2); |
1503 | skb->dev = dev; | ||
1504 | skbtmp = skb_put(skb, pkt_len); | 1503 | skbtmp = skb_put(skb, pkt_len); |
1505 | 1504 | ||
1506 | memcpy_fromio(skbtmp + ARLAN_FAKE_HDR_LEN, ((char __iomem *) arlan) + rxOffset, pkt_len - ARLAN_FAKE_HDR_LEN); | 1505 | memcpy_fromio(skbtmp + ARLAN_FAKE_HDR_LEN, ((char __iomem *) arlan) + rxOffset, pkt_len - ARLAN_FAKE_HDR_LEN); |
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 23eba698aec5..51a7db53afa5 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c | |||
@@ -827,14 +827,14 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) | |||
827 | if (priv->wep_is_on) | 827 | if (priv->wep_is_on) |
828 | frame_ctl |= IEEE80211_FCTL_PROTECTED; | 828 | frame_ctl |= IEEE80211_FCTL_PROTECTED; |
829 | if (priv->operating_mode == IW_MODE_ADHOC) { | 829 | if (priv->operating_mode == IW_MODE_ADHOC) { |
830 | memcpy(&header.addr1, skb->data, 6); | 830 | skb_copy_from_linear_data(skb, &header.addr1, 6); |
831 | memcpy(&header.addr2, dev->dev_addr, 6); | 831 | memcpy(&header.addr2, dev->dev_addr, 6); |
832 | memcpy(&header.addr3, priv->BSSID, 6); | 832 | memcpy(&header.addr3, priv->BSSID, 6); |
833 | } else { | 833 | } else { |
834 | frame_ctl |= IEEE80211_FCTL_TODS; | 834 | frame_ctl |= IEEE80211_FCTL_TODS; |
835 | memcpy(&header.addr1, priv->CurrentBSSID, 6); | 835 | memcpy(&header.addr1, priv->CurrentBSSID, 6); |
836 | memcpy(&header.addr2, dev->dev_addr, 6); | 836 | memcpy(&header.addr2, dev->dev_addr, 6); |
837 | memcpy(&header.addr3, skb->data, 6); | 837 | skb_copy_from_linear_data(skb, &header.addr3, 6); |
838 | } | 838 | } |
839 | 839 | ||
840 | if (priv->use_wpa) | 840 | if (priv->use_wpa) |
@@ -920,7 +920,6 @@ static void fast_rx_path(struct atmel_private *priv, | |||
920 | memcpy(&skbp[6], header->addr2, 6); /* source address */ | 920 | memcpy(&skbp[6], header->addr2, 6); /* source address */ |
921 | 921 | ||
922 | priv->dev->last_rx = jiffies; | 922 | priv->dev->last_rx = jiffies; |
923 | skb->dev = priv->dev; | ||
924 | skb->protocol = eth_type_trans(skb, priv->dev); | 923 | skb->protocol = eth_type_trans(skb, priv->dev); |
925 | skb->ip_summed = CHECKSUM_NONE; | 924 | skb->ip_summed = CHECKSUM_NONE; |
926 | netif_rx(skb); | 925 | netif_rx(skb); |
@@ -1028,7 +1027,6 @@ static void frag_rx_path(struct atmel_private *priv, | |||
1028 | priv->rx_buf, | 1027 | priv->rx_buf, |
1029 | priv->frag_len + 12); | 1028 | priv->frag_len + 12); |
1030 | priv->dev->last_rx = jiffies; | 1029 | priv->dev->last_rx = jiffies; |
1031 | skb->dev = priv->dev; | ||
1032 | skb->protocol = eth_type_trans(skb, priv->dev); | 1030 | skb->protocol = eth_type_trans(skb, priv->dev); |
1033 | skb->ip_summed = CHECKSUM_NONE; | 1031 | skb->ip_summed = CHECKSUM_NONE; |
1034 | netif_rx(skb); | 1032 | netif_rx(skb); |
diff --git a/drivers/net/wireless/bcm43xx/Kconfig b/drivers/net/wireless/bcm43xx/Kconfig index 533993f538fc..ce397e4284f4 100644 --- a/drivers/net/wireless/bcm43xx/Kconfig +++ b/drivers/net/wireless/bcm43xx/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config BCM43XX | 1 | config BCM43XX |
2 | tristate "Broadcom BCM43xx wireless support" | 2 | tristate "Broadcom BCM43xx wireless support" |
3 | depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL | 3 | depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && WLAN_80211 && EXPERIMENTAL |
4 | select WIRELESS_EXT | ||
4 | select FW_LOADER | 5 | select FW_LOADER |
5 | select HW_RANDOM | 6 | select HW_RANDOM |
6 | ---help--- | 7 | ---help--- |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c index 6e0dc76400e5..e3d2e61a31ee 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c | |||
@@ -998,7 +998,8 @@ static void dma_tx_fragment(struct bcm43xx_dmaring *ring, | |||
998 | assert(0); | 998 | assert(0); |
999 | return; | 999 | return; |
1000 | } | 1000 | } |
1001 | memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); | 1001 | skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len), |
1002 | skb->len); | ||
1002 | dev_kfree_skb_any(skb); | 1003 | dev_kfree_skb_any(skb); |
1003 | skb = bounce_skb; | 1004 | skb = bounce_skb; |
1004 | } | 1005 | } |
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig index 308f773ad566..1fef33169fdd 100644 --- a/drivers/net/wireless/hostap/Kconfig +++ b/drivers/net/wireless/hostap/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config HOSTAP | 1 | config HOSTAP |
2 | tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)" | 2 | tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)" |
3 | depends on NET_RADIO | 3 | depends on WLAN_80211 |
4 | select WIRELESS_EXT | ||
4 | select IEEE80211 | 5 | select IEEE80211 |
5 | select IEEE80211_CRYPT_WEP | 6 | select IEEE80211_CRYPT_WEP |
6 | ---help--- | 7 | ---help--- |
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index 7e04dc94b3bc..cbedc9ee740a 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c | |||
@@ -167,7 +167,7 @@ hdr->f.status = s; hdr->f.len = l; hdr->f.data = d | |||
167 | 167 | ||
168 | ret = skb->len - phdrlen; | 168 | ret = skb->len - phdrlen; |
169 | skb->dev = dev; | 169 | skb->dev = dev; |
170 | skb->mac.raw = skb->data; | 170 | skb_reset_mac_header(skb); |
171 | skb_pull(skb, hdrlen); | 171 | skb_pull(skb, hdrlen); |
172 | if (prism_header) | 172 | if (prism_header) |
173 | skb_pull(skb, phdrlen); | 173 | skb_pull(skb, phdrlen); |
@@ -933,12 +933,14 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, | |||
933 | if (frag == 0) { | 933 | if (frag == 0) { |
934 | /* copy first fragment (including full headers) into | 934 | /* copy first fragment (including full headers) into |
935 | * beginning of the fragment cache skb */ | 935 | * beginning of the fragment cache skb */ |
936 | memcpy(skb_put(frag_skb, flen), skb->data, flen); | 936 | skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), |
937 | flen); | ||
937 | } else { | 938 | } else { |
938 | /* append frame payload to the end of the fragment | 939 | /* append frame payload to the end of the fragment |
939 | * cache skb */ | 940 | * cache skb */ |
940 | memcpy(skb_put(frag_skb, flen), skb->data + hdrlen, | 941 | skb_copy_from_linear_data_offset(skb, hdrlen, |
941 | flen); | 942 | skb_put(frag_skb, |
943 | flen), flen); | ||
942 | } | 944 | } |
943 | dev_kfree_skb(skb); | 945 | dev_kfree_skb(skb); |
944 | skb = NULL; | 946 | skb = NULL; |
@@ -1044,8 +1046,9 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, | |||
1044 | skb->len >= ETH_HLEN + ETH_ALEN) { | 1046 | skb->len >= ETH_HLEN + ETH_ALEN) { |
1045 | /* Non-standard frame: get addr4 from its bogus location after | 1047 | /* Non-standard frame: get addr4 from its bogus location after |
1046 | * the payload */ | 1048 | * the payload */ |
1047 | memcpy(skb->data + ETH_ALEN, | 1049 | skb_copy_from_linear_data_offset(skb, skb->len - ETH_ALEN, |
1048 | skb->data + skb->len - ETH_ALEN, ETH_ALEN); | 1050 | skb->data + ETH_ALEN, |
1051 | ETH_ALEN); | ||
1049 | skb_trim(skb, skb->len - ETH_ALEN); | 1052 | skb_trim(skb, skb->len - ETH_ALEN); |
1050 | } | 1053 | } |
1051 | 1054 | ||
@@ -1073,17 +1076,17 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, | |||
1073 | 1076 | ||
1074 | if (skb2 != NULL) { | 1077 | if (skb2 != NULL) { |
1075 | /* send to wireless media */ | 1078 | /* send to wireless media */ |
1076 | skb2->protocol = __constant_htons(ETH_P_802_3); | ||
1077 | skb2->mac.raw = skb2->nh.raw = skb2->data; | ||
1078 | /* skb2->nh.raw = skb2->data + ETH_HLEN; */ | ||
1079 | skb2->dev = dev; | 1079 | skb2->dev = dev; |
1080 | skb2->protocol = __constant_htons(ETH_P_802_3); | ||
1081 | skb_reset_mac_header(skb2); | ||
1082 | skb_reset_network_header(skb2); | ||
1083 | /* skb2->network_header += ETH_HLEN; */ | ||
1080 | dev_queue_xmit(skb2); | 1084 | dev_queue_xmit(skb2); |
1081 | } | 1085 | } |
1082 | 1086 | ||
1083 | if (skb) { | 1087 | if (skb) { |
1084 | skb->protocol = eth_type_trans(skb, dev); | 1088 | skb->protocol = eth_type_trans(skb, dev); |
1085 | memset(skb->cb, 0, sizeof(skb->cb)); | 1089 | memset(skb->cb, 0, sizeof(skb->cb)); |
1086 | skb->dev = dev; | ||
1087 | netif_rx(skb); | 1090 | netif_rx(skb); |
1088 | } | 1091 | } |
1089 | 1092 | ||
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 4a5be70c0419..246fac0e8001 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c | |||
@@ -146,7 +146,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
146 | fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; | 146 | fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; |
147 | /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA, | 147 | /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA, |
148 | * Addr4 = SA */ | 148 | * Addr4 = SA */ |
149 | memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | 149 | skb_copy_from_linear_data_offset(skb, ETH_ALEN, |
150 | &hdr.addr4, ETH_ALEN); | ||
150 | hdr_len += ETH_ALEN; | 151 | hdr_len += ETH_ALEN; |
151 | } else { | 152 | } else { |
152 | /* bogus 4-addr format to workaround Prism2 station | 153 | /* bogus 4-addr format to workaround Prism2 station |
@@ -159,7 +160,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
159 | /* SA from skb->data + ETH_ALEN will be added after | 160 | /* SA from skb->data + ETH_ALEN will be added after |
160 | * frame payload; use hdr.addr4 as a temporary buffer | 161 | * frame payload; use hdr.addr4 as a temporary buffer |
161 | */ | 162 | */ |
162 | memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | 163 | skb_copy_from_linear_data_offset(skb, ETH_ALEN, |
164 | &hdr.addr4, ETH_ALEN); | ||
163 | need_tailroom += ETH_ALEN; | 165 | need_tailroom += ETH_ALEN; |
164 | } | 166 | } |
165 | 167 | ||
@@ -174,24 +176,27 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
174 | else | 176 | else |
175 | memcpy(&hdr.addr1, local->bssid, ETH_ALEN); | 177 | memcpy(&hdr.addr1, local->bssid, ETH_ALEN); |
176 | memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); | 178 | memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); |
177 | memcpy(&hdr.addr3, skb->data, ETH_ALEN); | 179 | skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); |
178 | } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) { | 180 | } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) { |
179 | fc |= IEEE80211_FCTL_FROMDS; | 181 | fc |= IEEE80211_FCTL_FROMDS; |
180 | /* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */ | 182 | /* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */ |
181 | memcpy(&hdr.addr1, skb->data, ETH_ALEN); | 183 | skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); |
182 | memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); | 184 | memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); |
183 | memcpy(&hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); | 185 | skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3, |
186 | ETH_ALEN); | ||
184 | } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) { | 187 | } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) { |
185 | fc |= IEEE80211_FCTL_TODS; | 188 | fc |= IEEE80211_FCTL_TODS; |
186 | /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ | 189 | /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ |
187 | memcpy(&hdr.addr1, to_assoc_ap ? | 190 | memcpy(&hdr.addr1, to_assoc_ap ? |
188 | local->assoc_ap_addr : local->bssid, ETH_ALEN); | 191 | local->assoc_ap_addr : local->bssid, ETH_ALEN); |
189 | memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); | 192 | skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, |
190 | memcpy(&hdr.addr3, skb->data, ETH_ALEN); | 193 | ETH_ALEN); |
194 | skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); | ||
191 | } else if (local->iw_mode == IW_MODE_ADHOC) { | 195 | } else if (local->iw_mode == IW_MODE_ADHOC) { |
192 | /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ | 196 | /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ |
193 | memcpy(&hdr.addr1, skb->data, ETH_ALEN); | 197 | skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); |
194 | memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); | 198 | skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, |
199 | ETH_ALEN); | ||
195 | memcpy(&hdr.addr3, local->bssid, ETH_ALEN); | 200 | memcpy(&hdr.addr3, local->bssid, ETH_ALEN); |
196 | } | 201 | } |
197 | 202 | ||
@@ -237,7 +242,7 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
237 | iface->stats.tx_packets++; | 242 | iface->stats.tx_packets++; |
238 | iface->stats.tx_bytes += skb->len; | 243 | iface->stats.tx_bytes += skb->len; |
239 | 244 | ||
240 | skb->mac.raw = skb->data; | 245 | skb_reset_mac_header(skb); |
241 | meta = (struct hostap_skb_tx_data *) skb->cb; | 246 | meta = (struct hostap_skb_tx_data *) skb->cb; |
242 | memset(meta, 0, sizeof(*meta)); | 247 | memset(meta, 0, sizeof(*meta)); |
243 | meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; | 248 | meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; |
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index efb8cf3bd8ad..4ca8a27b8c55 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c | |||
@@ -982,7 +982,8 @@ static void prism2_send_mgmt(struct net_device *dev, | |||
982 | meta->tx_cb_idx = tx_cb_idx; | 982 | meta->tx_cb_idx = tx_cb_idx; |
983 | 983 | ||
984 | skb->dev = dev; | 984 | skb->dev = dev; |
985 | skb->mac.raw = skb->nh.raw = skb->data; | 985 | skb_reset_mac_header(skb); |
986 | skb_reset_network_header(skb); | ||
986 | dev_queue_xmit(skb); | 987 | dev_queue_xmit(skb); |
987 | } | 988 | } |
988 | #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ | 989 | #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ |
@@ -1276,8 +1277,8 @@ static char * ap_auth_make_challenge(struct ap_data *ap) | |||
1276 | return NULL; | 1277 | return NULL; |
1277 | } | 1278 | } |
1278 | 1279 | ||
1279 | memcpy(tmpbuf, skb->data + ap->crypt->extra_mpdu_prefix_len, | 1280 | skb_copy_from_linear_data_offset(skb, ap->crypt->extra_mpdu_prefix_len, |
1280 | WLAN_AUTH_CHALLENGE_LEN); | 1281 | tmpbuf, WLAN_AUTH_CHALLENGE_LEN); |
1281 | dev_kfree_skb(skb); | 1282 | dev_kfree_skb(skb); |
1282 | 1283 | ||
1283 | return tmpbuf; | 1284 | return tmpbuf; |
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index 3079378fb8cd..fb01fb95a9f0 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c | |||
@@ -1838,13 +1838,14 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev) | |||
1838 | 1838 | ||
1839 | /* skb->data starts with txdesc->frame_control */ | 1839 | /* skb->data starts with txdesc->frame_control */ |
1840 | hdr_len = 24; | 1840 | hdr_len = 24; |
1841 | memcpy(&txdesc.frame_control, skb->data, hdr_len); | 1841 | skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len); |
1842 | fc = le16_to_cpu(txdesc.frame_control); | 1842 | fc = le16_to_cpu(txdesc.frame_control); |
1843 | if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA && | 1843 | if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA && |
1844 | (fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS) && | 1844 | (fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS) && |
1845 | skb->len >= 30) { | 1845 | skb->len >= 30) { |
1846 | /* Addr4 */ | 1846 | /* Addr4 */ |
1847 | memcpy(txdesc.addr4, skb->data + hdr_len, ETH_ALEN); | 1847 | skb_copy_from_linear_data_offset(skb, hdr_len, txdesc.addr4, |
1848 | ETH_ALEN); | ||
1848 | hdr_len += ETH_ALEN; | 1849 | hdr_len += ETH_ALEN; |
1849 | } | 1850 | } |
1850 | 1851 | ||
@@ -2217,7 +2218,7 @@ static void hostap_tx_callback(local_info_t *local, | |||
2217 | memcpy(skb_put(skb, len), payload, len); | 2218 | memcpy(skb_put(skb, len), payload, len); |
2218 | 2219 | ||
2219 | skb->dev = local->dev; | 2220 | skb->dev = local->dev; |
2220 | skb->mac.raw = skb->data; | 2221 | skb_reset_mac_header(skb); |
2221 | 2222 | ||
2222 | cb->func(skb, ok, cb->data); | 2223 | cb->func(skb, ok, cb->data); |
2223 | } | 2224 | } |
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 9077e6edde34..1f9edd91565d 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c | |||
@@ -590,20 +590,20 @@ void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx) | |||
590 | 590 | ||
591 | int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr) | 591 | int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr) |
592 | { | 592 | { |
593 | memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); /* addr2 */ | 593 | memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ |
594 | return ETH_ALEN; | 594 | return ETH_ALEN; |
595 | } | 595 | } |
596 | 596 | ||
597 | 597 | ||
598 | int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr) | 598 | int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr) |
599 | { | 599 | { |
600 | if (*(u32 *)skb->mac.raw == LWNG_CAP_DID_BASE) { | 600 | const unsigned char *mac = skb_mac_header(skb); |
601 | memcpy(haddr, skb->mac.raw + | 601 | |
602 | sizeof(struct linux_wlan_ng_prism_hdr) + 10, | 602 | if (*(u32 *)mac == LWNG_CAP_DID_BASE) { |
603 | memcpy(haddr, mac + sizeof(struct linux_wlan_ng_prism_hdr) + 10, | ||
603 | ETH_ALEN); /* addr2 */ | 604 | ETH_ALEN); /* addr2 */ |
604 | } else { /* (*(u32 *)skb->mac.raw == htonl(LWNG_CAPHDR_VERSION)) */ | 605 | } else { /* (*(u32 *)mac == htonl(LWNG_CAPHDR_VERSION)) */ |
605 | memcpy(haddr, skb->mac.raw + | 606 | memcpy(haddr, mac + sizeof(struct linux_wlan_ng_cap_hdr) + 10, |
606 | sizeof(struct linux_wlan_ng_cap_hdr) + 10, | ||
607 | ETH_ALEN); /* addr2 */ | 607 | ETH_ALEN); /* addr2 */ |
608 | } | 608 | } |
609 | return ETH_ALEN; | 609 | return ETH_ALEN; |
@@ -1063,7 +1063,8 @@ int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype, | |||
1063 | meta->iface = netdev_priv(dev); | 1063 | meta->iface = netdev_priv(dev); |
1064 | 1064 | ||
1065 | skb->dev = dev; | 1065 | skb->dev = dev; |
1066 | skb->mac.raw = skb->nh.raw = skb->data; | 1066 | skb_reset_mac_header(skb); |
1067 | skb_reset_network_header(skb); | ||
1067 | dev_queue_xmit(skb); | 1068 | dev_queue_xmit(skb); |
1068 | 1069 | ||
1069 | return 0; | 1070 | return 0; |
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index ad6e4a428355..9137a4dd02eb 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -2416,8 +2416,9 @@ static void isr_rx(struct ipw2100_priv *priv, int i, | |||
2416 | #ifdef IPW2100_RX_DEBUG | 2416 | #ifdef IPW2100_RX_DEBUG |
2417 | /* Make a copy of the frame so we can dump it to the logs if | 2417 | /* Make a copy of the frame so we can dump it to the logs if |
2418 | * ieee80211_rx fails */ | 2418 | * ieee80211_rx fails */ |
2419 | memcpy(packet_data, packet->skb->data, | 2419 | skb_copy_from_linear_data(packet->skb, packet_data, |
2420 | min_t(u32, status->frame_size, IPW_RX_NIC_BUFFER_LENGTH)); | 2420 | min_t(u32, status->frame_size, |
2421 | IPW_RX_NIC_BUFFER_LENGTH)); | ||
2421 | #endif | 2422 | #endif |
2422 | 2423 | ||
2423 | if (!ieee80211_rx(priv->ieee, packet->skb, stats)) { | 2424 | if (!ieee80211_rx(priv->ieee, packet->skb, stats)) { |
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index c878a2f3239c..4839a45098cb 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -8133,7 +8133,7 @@ static void ipw_handle_mgmt_packet(struct ipw_priv *priv, | |||
8133 | skb->dev = priv->ieee->dev; | 8133 | skb->dev = priv->ieee->dev; |
8134 | 8134 | ||
8135 | /* Point raw at the ieee80211_stats */ | 8135 | /* Point raw at the ieee80211_stats */ |
8136 | skb->mac.raw = skb->data; | 8136 | skb_reset_mac_header(skb); |
8137 | 8137 | ||
8138 | skb->pkt_type = PACKET_OTHERHOST; | 8138 | skb->pkt_type = PACKET_OTHERHOST; |
8139 | skb->protocol = __constant_htons(ETH_P_80211_STATS); | 8139 | skb->protocol = __constant_htons(ETH_P_80211_STATS); |
@@ -10355,7 +10355,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, | |||
10355 | 10355 | ||
10356 | rt_hdr->it_len = dst->len; | 10356 | rt_hdr->it_len = dst->len; |
10357 | 10357 | ||
10358 | memcpy(skb_put(dst, len), src->data, len); | 10358 | skb_copy_from_linear_data(src, skb_put(dst, len), len); |
10359 | 10359 | ||
10360 | if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats)) | 10360 | if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats)) |
10361 | dev_kfree_skb_any(dst); | 10361 | dev_kfree_skb_any(dst); |
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c index a009ab517710..45b00e13ab2b 100644 --- a/drivers/net/wireless/netwave_cs.c +++ b/drivers/net/wireless/netwave_cs.c | |||
@@ -1283,7 +1283,6 @@ static int netwave_rx(struct net_device *dev) | |||
1283 | 1283 | ||
1284 | skb_reserve( skb, 2); /* Align IP on 16 byte */ | 1284 | skb_reserve( skb, 2); /* Align IP on 16 byte */ |
1285 | skb_put( skb, rcvLen); | 1285 | skb_put( skb, rcvLen); |
1286 | skb->dev = dev; | ||
1287 | 1286 | ||
1288 | /* Copy packet fragments to the skb data area */ | 1287 | /* Copy packet fragments to the skb data area */ |
1289 | ptr = (u_char*) skb->data; | 1288 | ptr = (u_char*) skb->data; |
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 4e7f6cf51436..062286dc8e15 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c | |||
@@ -689,7 +689,7 @@ static void orinoco_stat_gather(struct net_device *dev, | |||
689 | /* Note : gcc will optimise the whole section away if | 689 | /* Note : gcc will optimise the whole section away if |
690 | * WIRELESS_SPY is not defined... - Jean II */ | 690 | * WIRELESS_SPY is not defined... - Jean II */ |
691 | if (SPY_NUMBER(priv)) { | 691 | if (SPY_NUMBER(priv)) { |
692 | orinoco_spy_gather(dev, skb->mac.raw + ETH_ALEN, | 692 | orinoco_spy_gather(dev, skb_mac_header(skb) + ETH_ALEN, |
693 | desc->signal, desc->silence); | 693 | desc->signal, desc->silence); |
694 | } | 694 | } |
695 | } | 695 | } |
@@ -770,7 +770,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid, | |||
770 | 770 | ||
771 | /* Copy the 802.11 header to the skb */ | 771 | /* Copy the 802.11 header to the skb */ |
772 | memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen); | 772 | memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen); |
773 | skb->mac.raw = skb->data; | 773 | skb_reset_mac_header(skb); |
774 | 774 | ||
775 | /* If any, copy the data from the card to the skb */ | 775 | /* If any, copy the data from the card to the skb */ |
776 | if (datalen > 0) { | 776 | if (datalen > 0) { |
@@ -915,7 +915,6 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw) | |||
915 | memcpy(hdr->h_source, desc.addr2, ETH_ALEN); | 915 | memcpy(hdr->h_source, desc.addr2, ETH_ALEN); |
916 | 916 | ||
917 | dev->last_rx = jiffies; | 917 | dev->last_rx = jiffies; |
918 | skb->dev = dev; | ||
919 | skb->protocol = eth_type_trans(skb, dev); | 918 | skb->protocol = eth_type_trans(skb, dev); |
920 | skb->ip_summed = CHECKSUM_NONE; | 919 | skb->ip_summed = CHECKSUM_NONE; |
921 | if (fc & IEEE80211_FCTL_TODS) | 920 | if (fc & IEEE80211_FCTL_TODS) |
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index b1122912ee2d..dd070cccf324 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c | |||
@@ -136,7 +136,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) | |||
136 | printk("islpci_eth_transmit:wds_mac\n"); | 136 | printk("islpci_eth_transmit:wds_mac\n"); |
137 | #endif | 137 | #endif |
138 | memmove(skb->data + 6, src, skb->len); | 138 | memmove(skb->data + 6, src, skb->len); |
139 | memcpy(skb->data, wds_mac, 6); | 139 | skb_copy_to_linear_data(skb, wds_mac, 6); |
140 | } else { | 140 | } else { |
141 | memmove(skb->data, src, skb->len); | 141 | memmove(skb->data, src, skb->len); |
142 | } | 142 | } |
@@ -162,13 +162,16 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) | |||
162 | 162 | ||
163 | skb_put(newskb, init_wds ? skb->len + 6 : skb->len); | 163 | skb_put(newskb, init_wds ? skb->len + 6 : skb->len); |
164 | if (init_wds) { | 164 | if (init_wds) { |
165 | memcpy(newskb->data + 6, skb->data, skb->len); | 165 | skb_copy_from_linear_data(skb, |
166 | memcpy(newskb->data, wds_mac, 6); | 166 | newskb->data + 6, |
167 | skb->len); | ||
168 | skb_copy_to_linear_data(newskb, wds_mac, 6); | ||
167 | #ifdef ISLPCI_ETH_DEBUG | 169 | #ifdef ISLPCI_ETH_DEBUG |
168 | printk("islpci_eth_transmit:wds_mac\n"); | 170 | printk("islpci_eth_transmit:wds_mac\n"); |
169 | #endif | 171 | #endif |
170 | } else | 172 | } else |
171 | memcpy(newskb->data, skb->data, skb->len); | 173 | skb_copy_from_linear_data(skb, newskb->data, |
174 | skb->len); | ||
172 | 175 | ||
173 | #if VERBOSE > SHOW_ERROR_MESSAGES | 176 | #if VERBOSE > SHOW_ERROR_MESSAGES |
174 | DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n", | 177 | DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n", |
@@ -303,7 +306,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb) | |||
303 | skb_pull(*skb, sizeof (struct rfmon_header)); | 306 | skb_pull(*skb, sizeof (struct rfmon_header)); |
304 | 307 | ||
305 | (*skb)->protocol = htons(ETH_P_802_2); | 308 | (*skb)->protocol = htons(ETH_P_802_2); |
306 | (*skb)->mac.raw = (*skb)->data; | 309 | skb_reset_mac_header(*skb); |
307 | (*skb)->pkt_type = PACKET_OTHERHOST; | 310 | (*skb)->pkt_type = PACKET_OTHERHOST; |
308 | 311 | ||
309 | return 0; | 312 | return 0; |
@@ -374,10 +377,6 @@ islpci_eth_receive(islpci_private *priv) | |||
374 | DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data); | 377 | DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data); |
375 | display_buffer((char *) skb->data, skb->len); | 378 | display_buffer((char *) skb->data, skb->len); |
376 | #endif | 379 | #endif |
377 | |||
378 | /* do some additional sk_buff and network layer parameters */ | ||
379 | skb->dev = ndev; | ||
380 | |||
381 | /* take care of monitor mode and spy monitoring. */ | 380 | /* take care of monitor mode and spy monitoring. */ |
382 | if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) | 381 | if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) |
383 | discard = islpci_monitor_rx(priv, &skb); | 382 | discard = islpci_monitor_rx(priv, &skb); |
@@ -398,8 +397,10 @@ islpci_eth_receive(islpci_private *priv) | |||
398 | /* Update spy records */ | 397 | /* Update spy records */ |
399 | wireless_spy_update(ndev, annex->addr2, &wstats); | 398 | wireless_spy_update(ndev, annex->addr2, &wstats); |
400 | 399 | ||
401 | memcpy(skb->data + sizeof (struct rfmon_header), | 400 | skb_copy_from_linear_data(skb, |
402 | skb->data, 2 * ETH_ALEN); | 401 | (skb->data + |
402 | sizeof(struct rfmon_header)), | ||
403 | 2 * ETH_ALEN); | ||
403 | skb_pull(skb, sizeof (struct rfmon_header)); | 404 | skb_pull(skb, sizeof (struct rfmon_header)); |
404 | } | 405 | } |
405 | skb->protocol = eth_type_trans(skb, ndev); | 406 | skb->protocol = eth_type_trans(skb, ndev); |
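The skb_copy_from_linear_data()/skb_copy_to_linear_data() conversions in the prism54 hunks (and in ray_cs and zd1201 below) are behavior-preserving: the helpers are thin wrappers around memcpy() on the linear data area that make the copy direction explicit. Roughly, from the skbuff.h of this era (simplified sketch):

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to, const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}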
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 47b2ccb6a633..3be624295a1f 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c | |||
@@ -2232,7 +2232,6 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i | |||
2232 | return; | 2232 | return; |
2233 | } | 2233 | } |
2234 | skb_reserve( skb, 2); /* Align IP on 16 byte (TBD check this)*/ | 2234 | skb_reserve( skb, 2); /* Align IP on 16 byte (TBD check this)*/ |
2235 | skb->dev = dev; | ||
2236 | 2235 | ||
2237 | DEBUG(4,"ray_cs rx_data total_len = %x, rx_len = %x\n",total_len,rx_len); | 2236 | DEBUG(4,"ray_cs rx_data total_len = %x, rx_len = %x\n",total_len,rx_len); |
2238 | 2237 | ||
@@ -2243,7 +2242,8 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i | |||
2243 | rx_ptr += copy_from_rx_buff(local, rx_ptr, pkt_addr & RX_BUFF_END, rx_len); | 2242 | rx_ptr += copy_from_rx_buff(local, rx_ptr, pkt_addr & RX_BUFF_END, rx_len); |
2244 | /* Get source address */ | 2243 | /* Get source address */ |
2245 | #ifdef WIRELESS_SPY | 2244 | #ifdef WIRELESS_SPY |
2246 | memcpy(linksrcaddr, ((struct mac_header *)skb->data)->addr_2, ETH_ALEN); | 2245 | skb_copy_from_linear_data_offset(skb, offsetof(struct mac_header, addr_2), |
2246 | linksrcaddr, ETH_ALEN); | ||
2247 | #endif | 2247 | #endif |
2248 | /* Now, deal with encapsulation/translation/sniffer */ | 2248 | /* Now, deal with encapsulation/translation/sniffer */ |
2249 | if (!sniffer) { | 2249 | if (!sniffer) { |
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index f5ce1c6063d8..2a299a0676a6 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c | |||
@@ -2009,7 +2009,7 @@ static void deliver_packet(struct strip *strip_info, STRIP_Header * header, | |||
2009 | packetlen); | 2009 | packetlen); |
2010 | skb->dev = get_strip_dev(strip_info); | 2010 | skb->dev = get_strip_dev(strip_info); |
2011 | skb->protocol = header->protocol; | 2011 | skb->protocol = header->protocol; |
2012 | skb->mac.raw = skb->data; | 2012 | skb_reset_mac_header(skb); |
2013 | 2013 | ||
2014 | /* Having put a fake header on the front of the sk_buff for the */ | 2014 | /* Having put a fake header on the front of the sk_buff for the */ |
2015 | /* benefit of tools like tcpdump, skb_pull now 'consumes' that */ | 2015 | /* benefit of tools like tcpdump, skb_pull now 'consumes' that */ |
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c index 2aa3c761dd83..1cf090d60edc 100644 --- a/drivers/net/wireless/wavelan.c +++ b/drivers/net/wireless/wavelan.c | |||
@@ -2512,14 +2512,13 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) | |||
2512 | return; | 2512 | return; |
2513 | } | 2513 | } |
2514 | 2514 | ||
2515 | skb->dev = dev; | ||
2516 | |||
2517 | /* Copy the packet to the buffer. */ | 2515 | /* Copy the packet to the buffer. */ |
2518 | obram_read(ioaddr, buf_off, skb_put(skb, sksize), sksize); | 2516 | obram_read(ioaddr, buf_off, skb_put(skb, sksize), sksize); |
2519 | skb->protocol = eth_type_trans(skb, dev); | 2517 | skb->protocol = eth_type_trans(skb, dev); |
2520 | 2518 | ||
2521 | #ifdef DEBUG_RX_INFO | 2519 | #ifdef DEBUG_RX_INFO |
2522 | wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read"); | 2520 | wv_packet_info(skb_mac_header(skb), sksize, dev->name, |
2521 | "wv_packet_read"); | ||
2523 | #endif /* DEBUG_RX_INFO */ | 2522 | #endif /* DEBUG_RX_INFO */ |
2524 | 2523 | ||
2525 | /* Statistics-gathering and associated stuff. | 2524 | /* Statistics-gathering and associated stuff. |
@@ -2555,7 +2554,7 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) | |||
2555 | 2554 | ||
2556 | /* Spying stuff */ | 2555 | /* Spying stuff */ |
2557 | #ifdef IW_WIRELESS_SPY | 2556 | #ifdef IW_WIRELESS_SPY |
2558 | wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, | 2557 | wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE, |
2559 | stats); | 2558 | stats); |
2560 | #endif /* IW_WIRELESS_SPY */ | 2559 | #endif /* IW_WIRELESS_SPY */ |
2561 | #ifdef HISTOGRAM | 2560 | #ifdef HISTOGRAM |
@@ -2939,7 +2938,7 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev) | |||
2939 | * need to pad. Jean II */ | 2938 | * need to pad. Jean II */ |
2940 | if (skb->len < ETH_ZLEN) { | 2939 | if (skb->len < ETH_ZLEN) { |
2941 | memset(data, 0, ETH_ZLEN); | 2940 | memset(data, 0, ETH_ZLEN); |
2942 | memcpy(data, skb->data, skb->len); | 2941 | skb_copy_from_linear_data(skb, data, skb->len); |
2943 | /* Write packet on the card */ | 2942 | /* Write packet on the card */ |
2944 | if(wv_packet_write(dev, data, ETH_ZLEN)) | 2943 | if(wv_packet_write(dev, data, ETH_ZLEN)) |
2945 | return 1; /* We failed */ | 2944 | return 1; /* We failed */ |
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index b04239792f63..67b867f837ca 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c | |||
@@ -2884,14 +2884,12 @@ wv_packet_read(struct net_device * dev, | |||
2884 | return; | 2884 | return; |
2885 | } | 2885 | } |
2886 | 2886 | ||
2887 | skb->dev = dev; | ||
2888 | |||
2889 | skb_reserve(skb, 2); | 2887 | skb_reserve(skb, 2); |
2890 | fd_p = read_ringbuf(dev, fd_p, (char *) skb_put(skb, sksize), sksize); | 2888 | fd_p = read_ringbuf(dev, fd_p, (char *) skb_put(skb, sksize), sksize); |
2891 | skb->protocol = eth_type_trans(skb, dev); | 2889 | skb->protocol = eth_type_trans(skb, dev); |
2892 | 2890 | ||
2893 | #ifdef DEBUG_RX_INFO | 2891 | #ifdef DEBUG_RX_INFO |
2894 | wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read"); | 2892 | wv_packet_info(skb_mac_header(skb), sksize, dev->name, "wv_packet_read"); |
2895 | #endif /* DEBUG_RX_INFO */ | 2893 | #endif /* DEBUG_RX_INFO */ |
2896 | 2894 | ||
2897 | /* Statistics gathering & stuff associated. | 2895 | /* Statistics gathering & stuff associated. |
@@ -2925,7 +2923,7 @@ wv_packet_read(struct net_device * dev, | |||
2925 | #endif /* WAVELAN_ROAMING */ | 2923 | #endif /* WAVELAN_ROAMING */ |
2926 | 2924 | ||
2927 | #ifdef WIRELESS_SPY | 2925 | #ifdef WIRELESS_SPY |
2928 | wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, stats); | 2926 | wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE, stats); |
2929 | #endif /* WIRELESS_SPY */ | 2927 | #endif /* WIRELESS_SPY */ |
2930 | #ifdef HISTOGRAM | 2928 | #ifdef HISTOGRAM |
2931 | wl_his_gather(dev, stats); | 2929 | wl_his_gather(dev, stats); |
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 6cb66a356c96..935b144d9b56 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c | |||
@@ -327,7 +327,6 @@ static void zd1201_usbrx(struct urb *urb) | |||
327 | memcpy(skb_put(skb, 6), &data[datalen-8], 6); | 327 | memcpy(skb_put(skb, 6), &data[datalen-8], 6); |
328 | memcpy(skb_put(skb, 2), &data[datalen-24], 2); | 328 | memcpy(skb_put(skb, 2), &data[datalen-24], 2); |
329 | memcpy(skb_put(skb, len), data, len); | 329 | memcpy(skb_put(skb, len), data, len); |
330 | skb->dev = zd->dev; | ||
331 | skb->dev->last_rx = jiffies; | 330 | skb->dev->last_rx = jiffies; |
332 | skb->protocol = eth_type_trans(skb, zd->dev); | 331 | skb->protocol = eth_type_trans(skb, zd->dev); |
333 | zd->stats.rx_packets++; | 332 | zd->stats.rx_packets++; |
@@ -385,7 +384,6 @@ static void zd1201_usbrx(struct urb *urb) | |||
385 | memcpy(skb_put(skb, 2), &data[6], 2); | 384 | memcpy(skb_put(skb, 2), &data[6], 2); |
386 | memcpy(skb_put(skb, len), data+8, len); | 385 | memcpy(skb_put(skb, len), data+8, len); |
387 | } | 386 | } |
388 | skb->dev = zd->dev; | ||
389 | skb->dev->last_rx = jiffies; | 387 | skb->dev->last_rx = jiffies; |
390 | skb->protocol = eth_type_trans(skb, zd->dev); | 388 | skb->protocol = eth_type_trans(skb, zd->dev); |
391 | zd->stats.rx_packets++; | 389 | zd->stats.rx_packets++; |
@@ -809,10 +807,10 @@ static int zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
809 | txbuf[4] = 0x00; | 807 | txbuf[4] = 0x00; |
810 | txbuf[5] = 0x00; | 808 | txbuf[5] = 0x00; |
811 | 809 | ||
812 | memcpy(txbuf+6, skb->data+12, skb->len-12); | 810 | skb_copy_from_linear_data_offset(skb, 12, txbuf + 6, skb->len - 12); |
813 | if (pad) | 811 | if (pad) |
814 | txbuf[skb->len-12+6]=0; | 812 | txbuf[skb->len-12+6]=0; |
815 | memcpy(txbuf+skb->len-12+6+pad, skb->data, 12); | 813 | skb_copy_from_linear_data(skb, txbuf + skb->len - 12 + 6 + pad, 12); |
816 | *(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6); | 814 | *(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6); |
817 | txbuf[txbuflen-1] = 0; | 815 | txbuf[txbuflen-1] = 0; |
818 | 816 | ||
diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zd1211rw/Kconfig index 66ed55bc5460..d1ab24a95630 100644 --- a/drivers/net/wireless/zd1211rw/Kconfig +++ b/drivers/net/wireless/zd1211rw/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config ZD1211RW | 1 | config ZD1211RW |
2 | tristate "ZyDAS ZD1211/ZD1211B USB-wireless support" | 2 | tristate "ZyDAS ZD1211/ZD1211B USB-wireless support" |
3 | depends on USB && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL | 3 | depends on USB && IEEE80211_SOFTMAC && WLAN_80211 && EXPERIMENTAL |
4 | select WIRELESS_EXT | ||
4 | select FW_LOADER | 5 | select FW_LOADER |
5 | ---help--- | 6 | ---help--- |
6 | This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless | 7 | This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless |
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 2412ce4917f2..3f4a7cf9efea 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c | |||
@@ -1137,7 +1137,6 @@ static int yellowfin_rx(struct net_device *dev) | |||
1137 | skb = dev_alloc_skb(pkt_len + 2); | 1137 | skb = dev_alloc_skb(pkt_len + 2); |
1138 | if (skb == NULL) | 1138 | if (skb == NULL) |
1139 | break; | 1139 | break; |
1140 | skb->dev = dev; | ||
1141 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | 1140 | skb_reserve(skb, 2); /* 16 byte align the IP header */ |
1142 | eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0); | 1141 | eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0); |
1143 | skb_put(skb, pkt_len); | 1142 | skb_put(skb, pkt_len); |
diff --git a/drivers/net/znet.c b/drivers/net/znet.c index b24b0727108c..4032e9f6f9b0 100644 --- a/drivers/net/znet.c +++ b/drivers/net/znet.c | |||
@@ -774,7 +774,6 @@ static void znet_rx(struct net_device *dev) | |||
774 | znet->stats.rx_dropped++; | 774 | znet->stats.rx_dropped++; |
775 | break; | 775 | break; |
776 | } | 776 | } |
777 | skb->dev = dev; | ||
778 | 777 | ||
779 | if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) { | 778 | if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) { |
780 | int semi_cnt = (znet->rx_end - znet->rx_cur)<<1; | 779 | int semi_cnt = (znet->rx_end - znet->rx_cur)<<1; |
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index d190c05d87ed..453e6829756c 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c | |||
@@ -372,9 +372,9 @@ static __inline__ int led_get_net_activity(void) | |||
372 | continue; | 372 | continue; |
373 | if (LOOPBACK(in_dev->ifa_list->ifa_local)) | 373 | if (LOOPBACK(in_dev->ifa_list->ifa_local)) |
374 | continue; | 374 | continue; |
375 | if (!dev->get_stats) | ||
376 | continue; | ||
377 | stats = dev->get_stats(dev); | 375 | stats = dev->get_stats(dev); |
376 | if (!stats) | ||
377 | continue; | ||
378 | rx_total += stats->rx_packets; | 378 | rx_total += stats->rx_packets; |
379 | tx_total += stats->tx_packets; | 379 | tx_total += stats->tx_packets; |
380 | } | 380 | } |
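The led.c change inverts the NULL check, presumably because the networking core now guarantees a default get_stats implementation: the method pointer is assumed callable, and it is the returned statistics pointer that needs validating. The resulting idiom, in isolation:

struct net_device_stats *stats;

stats = dev->get_stats(dev);	/* method assumed present */
if (!stats)
	continue;		/* but its result may still be NULL */
rx_total += stats->rx_packets;
tx_total += stats->tx_packets;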
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index eb5dc62f0d9c..e71929db8b06 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -398,6 +398,9 @@ dasd_change_state(struct dasd_device *device) | |||
398 | 398 | ||
399 | if (device->state == device->target) | 399 | if (device->state == device->target) |
400 | wake_up(&dasd_init_waitq); | 400 | wake_up(&dasd_init_waitq); |
401 | |||
402 | /* let user-space know that the device status changed */ | ||
403 | kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); | ||
401 | } | 404 | } |
402 | 405 | ||
403 | /* | 406 | /* |
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index ed70852cc915..6a89cefe99bb 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <asm/debug.h> | 20 | #include <asm/debug.h> |
21 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
22 | #include <asm/ipl.h> | ||
22 | 23 | ||
23 | /* This is ugly... */ | 24 | /* This is ugly... */ |
24 | #define PRINTK_HEADER "dasd_devmap:" | 25 | #define PRINTK_HEADER "dasd_devmap:" |
@@ -133,6 +134,8 @@ dasd_call_setup(char *str) | |||
133 | __setup ("dasd=", dasd_call_setup); | 134 | __setup ("dasd=", dasd_call_setup); |
134 | #endif /* #ifndef MODULE */ | 135 | #endif /* #ifndef MODULE */ |
135 | 136 | ||
137 | #define DASD_IPLDEV "ipldev" | ||
138 | |||
136 | /* | 139 | /* |
137 | * Read a device busid/devno from a string. | 140 | * Read a device busid/devno from a string. |
138 | */ | 141 | */ |
@@ -141,6 +144,20 @@ dasd_busid(char **str, int *id0, int *id1, int *devno) | |||
141 | { | 144 | { |
142 | int val, old_style; | 145 | int val, old_style; |
143 | 146 | ||
147 | /* Interpret ipldev busid */ | ||
148 | if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) { | ||
149 | if (ipl_info.type != IPL_TYPE_CCW) { | ||
150 | MESSAGE(KERN_ERR, "%s", "ipl device is not a ccw " | ||
151 | "device"); | ||
152 | return -EINVAL; | ||
153 | } | ||
154 | *id0 = 0; | ||
155 | *id1 = ipl_info.data.ccw.dev_id.ssid; | ||
156 | *devno = ipl_info.data.ccw.dev_id.devno; | ||
157 | *str += strlen(DASD_IPLDEV); | ||
158 | |||
159 | return 0; | ||
160 | } | ||
144 | /* check for leading '0x' */ | 161 | /* check for leading '0x' */ |
145 | old_style = 0; | 162 | old_style = 0; |
146 | if ((*str)[0] == '0' && (*str)[1] == 'x') { | 163 | if ((*str)[0] == '0' && (*str)[1] == 'x') { |
@@ -829,6 +846,46 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr, | |||
829 | static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); | 846 | static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); |
830 | 847 | ||
831 | static ssize_t | 848 | static ssize_t |
849 | dasd_device_status_show(struct device *dev, struct device_attribute *attr, | ||
850 | char *buf) | ||
851 | { | ||
852 | struct dasd_device *device; | ||
853 | ssize_t len; | ||
854 | |||
855 | device = dasd_device_from_cdev(to_ccwdev(dev)); | ||
856 | if (!IS_ERR(device)) { | ||
857 | switch (device->state) { | ||
858 | case DASD_STATE_NEW: | ||
859 | len = snprintf(buf, PAGE_SIZE, "new\n"); | ||
860 | break; | ||
861 | case DASD_STATE_KNOWN: | ||
862 | len = snprintf(buf, PAGE_SIZE, "detected\n"); | ||
863 | break; | ||
864 | case DASD_STATE_BASIC: | ||
865 | len = snprintf(buf, PAGE_SIZE, "basic\n"); | ||
866 | break; | ||
867 | case DASD_STATE_UNFMT: | ||
868 | len = snprintf(buf, PAGE_SIZE, "unformatted\n"); | ||
869 | break; | ||
870 | case DASD_STATE_READY: | ||
871 | len = snprintf(buf, PAGE_SIZE, "ready\n"); | ||
872 | break; | ||
873 | case DASD_STATE_ONLINE: | ||
874 | len = snprintf(buf, PAGE_SIZE, "online\n"); | ||
875 | break; | ||
876 | default: | ||
877 | len = snprintf(buf, PAGE_SIZE, "no stat\n"); | ||
878 | break; | ||
879 | } | ||
880 | dasd_put_device(device); | ||
881 | } else | ||
882 | len = snprintf(buf, PAGE_SIZE, "unknown\n"); | ||
883 | return len; | ||
884 | } | ||
885 | |||
886 | static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL); | ||
887 | |||
888 | static ssize_t | ||
832 | dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf) | 889 | dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf) |
833 | { | 890 | { |
834 | struct dasd_devmap *devmap; | 891 | struct dasd_devmap *devmap; |
@@ -939,6 +996,7 @@ static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store); | |||
939 | static struct attribute * dasd_attrs[] = { | 996 | static struct attribute * dasd_attrs[] = { |
940 | &dev_attr_readonly.attr, | 997 | &dev_attr_readonly.attr, |
941 | &dev_attr_discipline.attr, | 998 | &dev_attr_discipline.attr, |
999 | &dev_attr_status.attr, | ||
942 | &dev_attr_alias.attr, | 1000 | &dev_attr_alias.attr, |
943 | &dev_attr_vendor.attr, | 1001 | &dev_attr_vendor.attr, |
944 | &dev_attr_uid.attr, | 1002 | &dev_attr_uid.attr, |
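The new status attribute follows the standard read-only sysfs pattern: a show callback formats into the PAGE_SIZE buffer, and DEVICE_ATTR() wires it into the dasd_attrs[] group. Stripped to its skeleton (hypothetical attribute name):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* buf is a full page; never write more than PAGE_SIZE bytes */
	return snprintf(buf, PAGE_SIZE, "%s\n", "example");
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

Since DASD devices sit on the ccw bus, the attribute should surface as a "status" file under the device's sysfs directory, which udev can re-read when it sees the KOBJ_CHANGE uevent added in dasd.c above.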
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 293e667b50f2..c210784bdf46 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ | 5 | obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ |
6 | sclp_info.o | 6 | sclp_info.o sclp_config.o sclp_chp.o |
7 | 7 | ||
8 | obj-$(CONFIG_TN3270) += raw3270.o | 8 | obj-$(CONFIG_TN3270) += raw3270.o |
9 | obj-$(CONFIG_TN3270_CONSOLE) += con3270.o | 9 | obj-$(CONFIG_TN3270_CONSOLE) += con3270.o |
@@ -29,3 +29,6 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o | |||
29 | obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o | 29 | obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o |
30 | obj-$(CONFIG_MONREADER) += monreader.o | 30 | obj-$(CONFIG_MONREADER) += monreader.o |
31 | obj-$(CONFIG_MONWRITER) += monwriter.o | 31 | obj-$(CONFIG_MONWRITER) += monwriter.o |
32 | |||
33 | zcore_mod-objs := sclp_sdias.o zcore.o | ||
34 | obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o | ||
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 9a328f14a641..6000bdee4082 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
@@ -813,12 +813,6 @@ con3215_unblank(void) | |||
813 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); | 813 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); |
814 | } | 814 | } |
815 | 815 | ||
816 | static int __init | ||
817 | con3215_consetup(struct console *co, char *options) | ||
818 | { | ||
819 | return 0; | ||
820 | } | ||
821 | |||
822 | /* | 816 | /* |
823 | * The console structure for the 3215 console | 817 | * The console structure for the 3215 console |
824 | */ | 818 | */ |
@@ -827,7 +821,6 @@ static struct console con3215 = { | |||
827 | .write = con3215_write, | 821 | .write = con3215_write, |
828 | .device = con3215_device, | 822 | .device = con3215_device, |
829 | .unblank = con3215_unblank, | 823 | .unblank = con3215_unblank, |
830 | .setup = con3215_consetup, | ||
831 | .flags = CON_PRINTBUFFER, | 824 | .flags = CON_PRINTBUFFER, |
832 | }; | 825 | }; |
833 | 826 | ||
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 8e7f2d7633d6..fd3479119eb4 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c | |||
@@ -555,12 +555,6 @@ con3270_unblank(void) | |||
555 | spin_unlock_irqrestore(&cp->view.lock, flags); | 555 | spin_unlock_irqrestore(&cp->view.lock, flags); |
556 | } | 556 | } |
557 | 557 | ||
558 | static int __init | ||
559 | con3270_consetup(struct console *co, char *options) | ||
560 | { | ||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | /* | 558 | /* |
565 | * The console structure for the 3270 console | 559 | * The console structure for the 3270 console |
566 | */ | 560 | */ |
@@ -569,7 +563,6 @@ static struct console con3270 = { | |||
569 | .write = con3270_write, | 563 | .write = con3270_write, |
570 | .device = con3270_device, | 564 | .device = con3270_device, |
571 | .unblank = con3270_unblank, | 565 | .unblank = con3270_unblank, |
572 | .setup = con3270_consetup, | ||
573 | .flags = CON_PRINTBUFFER, | 566 | .flags = CON_PRINTBUFFER, |
574 | }; | 567 | }; |
575 | 568 | ||
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index f171de3b0b11..fa62e6944057 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/timer.h> | 15 | #include <linux/timer.h> |
16 | #include <linux/reboot.h> | 16 | #include <linux/reboot.h> |
17 | #include <linux/jiffies.h> | 17 | #include <linux/jiffies.h> |
18 | #include <linux/init.h> | ||
18 | #include <asm/types.h> | 19 | #include <asm/types.h> |
19 | #include <asm/s390_ext.h> | 20 | #include <asm/s390_ext.h> |
20 | 21 | ||
@@ -510,7 +511,7 @@ sclp_state_change_cb(struct evbuf_header *evbuf) | |||
510 | } | 511 | } |
511 | 512 | ||
512 | static struct sclp_register sclp_state_change_event = { | 513 | static struct sclp_register sclp_state_change_event = { |
513 | .receive_mask = EvTyp_StateChange_Mask, | 514 | .receive_mask = EVTYP_STATECHANGE_MASK, |
514 | .receiver_fn = sclp_state_change_cb | 515 | .receiver_fn = sclp_state_change_cb |
515 | }; | 516 | }; |
516 | 517 | ||
@@ -930,3 +931,10 @@ sclp_init(void) | |||
930 | sclp_init_mask(1); | 931 | sclp_init_mask(1); |
931 | return 0; | 932 | return 0; |
932 | } | 933 | } |
934 | |||
935 | static __init int sclp_initcall(void) | ||
936 | { | ||
937 | return sclp_init(); | ||
938 | } | ||
939 | |||
940 | arch_initcall(sclp_initcall); | ||
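Registering sclp_init() as an arch_initcall pins it early in the boot sequence, ahead of the subsystem and device initcalls that depend on a working SCLP interface. For orientation, the initcall levels of this era run in this order (comment sketch, per init/main.c conventions):

/*
 * core_initcall      (1)
 * postcore_initcall  (2)
 * arch_initcall      (3)  <- sclp_initcall() lands here
 * subsys_initcall    (4)
 * fs_initcall        (5)
 * device_initcall    (6)  <- plain __initcall()/module_init()
 * late_initcall      (7)
 */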
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 7d29ab45a6ed..87ac4a3ad49d 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h | |||
@@ -19,33 +19,37 @@ | |||
19 | #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) | 19 | #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) |
20 | #define MAX_CONSOLE_PAGES 4 | 20 | #define MAX_CONSOLE_PAGES 4 |
21 | 21 | ||
22 | #define EvTyp_OpCmd 0x01 | 22 | #define EVTYP_OPCMD 0x01 |
23 | #define EvTyp_Msg 0x02 | 23 | #define EVTYP_MSG 0x02 |
24 | #define EvTyp_StateChange 0x08 | 24 | #define EVTYP_STATECHANGE 0x08 |
25 | #define EvTyp_PMsgCmd 0x09 | 25 | #define EVTYP_PMSGCMD 0x09 |
26 | #define EvTyp_CntlProgOpCmd 0x20 | 26 | #define EVTYP_CNTLPROGOPCMD 0x20 |
27 | #define EvTyp_CntlProgIdent 0x0B | 27 | #define EVTYP_CNTLPROGIDENT 0x0B |
28 | #define EvTyp_SigQuiesce 0x1D | 28 | #define EVTYP_SIGQUIESCE 0x1D |
29 | #define EvTyp_VT220Msg 0x1A | 29 | #define EVTYP_VT220MSG 0x1A |
30 | 30 | #define EVTYP_CONFMGMDATA 0x04 | |
31 | #define EvTyp_OpCmd_Mask 0x80000000 | 31 | #define EVTYP_SDIAS 0x1C |
32 | #define EvTyp_Msg_Mask 0x40000000 | 32 | |
33 | #define EvTyp_StateChange_Mask 0x01000000 | 33 | #define EVTYP_OPCMD_MASK 0x80000000 |
34 | #define EvTyp_PMsgCmd_Mask 0x00800000 | 34 | #define EVTYP_MSG_MASK 0x40000000 |
35 | #define EvTyp_CtlProgOpCmd_Mask 0x00000001 | 35 | #define EVTYP_STATECHANGE_MASK 0x01000000 |
36 | #define EvTyp_CtlProgIdent_Mask 0x00200000 | 36 | #define EVTYP_PMSGCMD_MASK 0x00800000 |
37 | #define EvTyp_SigQuiesce_Mask 0x00000008 | 37 | #define EVTYP_CTLPROGOPCMD_MASK 0x00000001 |
38 | #define EvTyp_VT220Msg_Mask 0x00000040 | 38 | #define EVTYP_CTLPROGIDENT_MASK 0x00200000 |
39 | 39 | #define EVTYP_SIGQUIESCE_MASK 0x00000008 | |
40 | #define GnrlMsgFlgs_DOM 0x8000 | 40 | #define EVTYP_VT220MSG_MASK 0x00000040 |
41 | #define GnrlMsgFlgs_SndAlrm 0x4000 | 41 | #define EVTYP_CONFMGMDATA_MASK 0x10000000 |
42 | #define GnrlMsgFlgs_HoldMsg 0x2000 | 42 | #define EVTYP_SDIAS_MASK 0x00000010 |
43 | 43 | ||
44 | #define LnTpFlgs_CntlText 0x8000 | 44 | #define GNRLMSGFLGS_DOM 0x8000 |
45 | #define LnTpFlgs_LabelText 0x4000 | 45 | #define GNRLMSGFLGS_SNDALRM 0x4000 |
46 | #define LnTpFlgs_DataText 0x2000 | 46 | #define GNRLMSGFLGS_HOLDMSG 0x2000 |
47 | #define LnTpFlgs_EndText 0x1000 | 47 | |
48 | #define LnTpFlgs_PromptText 0x0800 | 48 | #define LNTPFLGS_CNTLTEXT 0x8000 |
49 | #define LNTPFLGS_LABELTEXT 0x4000 | ||
50 | #define LNTPFLGS_DATATEXT 0x2000 | ||
51 | #define LNTPFLGS_ENDTEXT 0x1000 | ||
52 | #define LNTPFLGS_PROMPTTEXT 0x0800 | ||
49 | 53 | ||
50 | typedef unsigned int sclp_cmdw_t; | 54 | typedef unsigned int sclp_cmdw_t; |
51 | 55 | ||
@@ -56,15 +60,15 @@ typedef unsigned int sclp_cmdw_t; | |||
56 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 | 60 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 |
57 | 61 | ||
58 | #define GDS_ID_MDSMU 0x1310 | 62 | #define GDS_ID_MDSMU 0x1310 |
59 | #define GDS_ID_MDSRouteInfo 0x1311 | 63 | #define GDS_ID_MDSROUTEINFO 0x1311 |
60 | #define GDS_ID_AgUnWrkCorr 0x1549 | 64 | #define GDS_ID_AGUNWRKCORR 0x1549 |
61 | #define GDS_ID_SNACondReport 0x1532 | 65 | #define GDS_ID_SNACONDREPORT 0x1532 |
62 | #define GDS_ID_CPMSU 0x1212 | 66 | #define GDS_ID_CPMSU 0x1212 |
63 | #define GDS_ID_RoutTargInstr 0x154D | 67 | #define GDS_ID_ROUTTARGINSTR 0x154D |
64 | #define GDS_ID_OpReq 0x8070 | 68 | #define GDS_ID_OPREQ 0x8070 |
65 | #define GDS_ID_TextCmd 0x1320 | 69 | #define GDS_ID_TEXTCMD 0x1320 |
66 | 70 | ||
67 | #define GDS_KEY_SelfDefTextMsg 0x31 | 71 | #define GDS_KEY_SELFDEFTEXTMSG 0x31 |
68 | 72 | ||
69 | typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ | 73 | typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ |
70 | 74 | ||
diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c new file mode 100644 index 000000000000..a66b914519b5 --- /dev/null +++ b/drivers/s390/char/sclp_chp.c | |||
@@ -0,0 +1,196 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/sclp_chp.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/gfp.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <linux/completion.h> | ||
12 | #include <asm/sclp.h> | ||
13 | #include <asm/chpid.h> | ||
14 | |||
15 | #include "sclp.h" | ||
16 | |||
17 | #define TAG "sclp_chp: " | ||
18 | |||
19 | #define SCLP_CMDW_CONFIGURE_CHANNEL_PATH 0x000f0001 | ||
20 | #define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH 0x000e0001 | ||
21 | #define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION 0x00030001 | ||
22 | |||
23 | static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid) | ||
24 | { | ||
25 | return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8; | ||
26 | } | ||
27 | |||
28 | static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid) | ||
29 | { | ||
30 | return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8; | ||
31 | } | ||
32 | |||
33 | static void chp_callback(struct sclp_req *req, void *data) | ||
34 | { | ||
35 | struct completion *completion = data; | ||
36 | |||
37 | complete(completion); | ||
38 | } | ||
39 | |||
40 | struct chp_cfg_sccb { | ||
41 | struct sccb_header header; | ||
42 | u8 ccm; | ||
43 | u8 reserved[6]; | ||
44 | u8 cssid; | ||
45 | } __attribute__((packed)); | ||
46 | |||
47 | struct chp_cfg_data { | ||
48 | struct chp_cfg_sccb sccb; | ||
49 | struct sclp_req req; | ||
50 | struct completion completion; | ||
51 | } __attribute__((packed)); | ||
52 | |||
53 | static int do_configure(sclp_cmdw_t cmd) | ||
54 | { | ||
55 | struct chp_cfg_data *data; | ||
56 | int rc; | ||
57 | |||
58 | /* Prepare sccb. */ | ||
59 | data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
60 | if (!data) | ||
61 | return -ENOMEM; | ||
62 | data->sccb.header.length = sizeof(struct chp_cfg_sccb); | ||
63 | data->req.command = cmd; | ||
64 | data->req.sccb = &(data->sccb); | ||
65 | data->req.status = SCLP_REQ_FILLED; | ||
66 | data->req.callback = chp_callback; | ||
67 | data->req.callback_data = &(data->completion); | ||
68 | init_completion(&data->completion); | ||
69 | |||
70 | /* Perform sclp request. */ | ||
71 | rc = sclp_add_request(&(data->req)); | ||
72 | if (rc) | ||
73 | goto out; | ||
74 | wait_for_completion(&data->completion); | ||
75 | |||
76 | /* Check response. */ | ||
77 | if (data->req.status != SCLP_REQ_DONE) { | ||
78 | printk(KERN_WARNING TAG "configure channel-path request failed " | ||
79 | "(status=0x%02x)\n", data->req.status); | ||
80 | rc = -EIO; | ||
81 | goto out; | ||
82 | } | ||
83 | switch (data->sccb.header.response_code) { | ||
84 | case 0x0020: | ||
85 | case 0x0120: | ||
86 | case 0x0440: | ||
87 | case 0x0450: | ||
88 | break; | ||
89 | default: | ||
90 | printk(KERN_WARNING TAG "configure channel-path failed " | ||
91 | "(cmd=0x%08x, response=0x%04x)\n", cmd, | ||
92 | data->sccb.header.response_code); | ||
93 | rc = -EIO; | ||
94 | break; | ||
95 | } | ||
96 | out: | ||
97 | free_page((unsigned long) data); | ||
98 | |||
99 | return rc; | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * sclp_chp_configure - perform configure channel-path sclp command | ||
104 | * @chpid: channel-path ID | ||
105 | * | ||
106 | * Perform the configure channel-path sclp command for the specified chpid. | ||
107 | * Return 0 after the command has finished successfully, non-zero otherwise. | ||
108 | */ | ||
109 | int sclp_chp_configure(struct chp_id chpid) | ||
110 | { | ||
111 | return do_configure(get_configure_cmdw(chpid)); | ||
112 | } | ||
113 | |||
114 | /** | ||
115 | * sclp_chp_deconfigure - perform deconfigure channel-path sclp command | ||
116 | * @chpid: channel-path ID | ||
117 | * | ||
118 | * Perform the deconfigure channel-path sclp command for the specified chpid | ||
119 | * and wait for completion. On success return 0; return non-zero otherwise. | ||
120 | */ | ||
121 | int sclp_chp_deconfigure(struct chp_id chpid) | ||
122 | { | ||
123 | return do_configure(get_deconfigure_cmdw(chpid)); | ||
124 | } | ||
125 | |||
126 | struct chp_info_sccb { | ||
127 | struct sccb_header header; | ||
128 | u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; | ||
129 | u8 standby[SCLP_CHP_INFO_MASK_SIZE]; | ||
130 | u8 configured[SCLP_CHP_INFO_MASK_SIZE]; | ||
131 | u8 ccm; | ||
132 | u8 reserved[6]; | ||
133 | u8 cssid; | ||
134 | } __attribute__((packed)); | ||
135 | |||
136 | struct chp_info_data { | ||
137 | struct chp_info_sccb sccb; | ||
138 | struct sclp_req req; | ||
139 | struct completion completion; | ||
140 | } __attribute__((packed)); | ||
141 | |||
142 | /** | ||
143 | * sclp_chp_read_info - perform read channel-path information sclp command | ||
144 | * @info: resulting channel-path information data | ||
145 | * | ||
146 | * Perform the read channel-path information sclp command and wait for completion. | ||
147 | * On success, store channel-path information in @info and return 0. Return | ||
148 | * non-zero otherwise. | ||
149 | */ | ||
150 | int sclp_chp_read_info(struct sclp_chp_info *info) | ||
151 | { | ||
152 | struct chp_info_data *data; | ||
153 | int rc; | ||
154 | |||
155 | /* Prepare sccb. */ | ||
156 | data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
157 | if (!data) | ||
158 | return -ENOMEM; | ||
159 | data->sccb.header.length = sizeof(struct chp_info_sccb); | ||
160 | data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION; | ||
161 | data->req.sccb = &(data->sccb); | ||
162 | data->req.status = SCLP_REQ_FILLED; | ||
163 | data->req.callback = chp_callback; | ||
164 | data->req.callback_data = &(data->completion); | ||
165 | init_completion(&data->completion); | ||
166 | |||
167 | /* Perform sclp request. */ | ||
168 | rc = sclp_add_request(&(data->req)); | ||
169 | if (rc) | ||
170 | goto out; | ||
171 | wait_for_completion(&data->completion); | ||
172 | |||
173 | /* Check response. */ | ||
174 | if (data->req.status != SCLP_REQ_DONE) { | ||
175 | printk(KERN_WARNING TAG "read channel-path info request failed " | ||
176 | "(status=0x%02x)\n", data->req.status); | ||
177 | rc = -EIO; | ||
178 | goto out; | ||
179 | } | ||
180 | if (data->sccb.header.response_code != 0x0010) { | ||
181 | printk(KERN_WARNING TAG "read channel-path info failed " | ||
182 | "(response=0x%04x)\n", data->sccb.header.response_code); | ||
183 | rc = -EIO; | ||
184 | goto out; | ||
185 | } | ||
186 | memcpy(info->recognized, data->sccb.recognized, | ||
187 | SCLP_CHP_INFO_MASK_SIZE); | ||
188 | memcpy(info->standby, data->sccb.standby, | ||
189 | SCLP_CHP_INFO_MASK_SIZE); | ||
190 | memcpy(info->configured, data->sccb.configured, | ||
191 | SCLP_CHP_INFO_MASK_SIZE); | ||
192 | out: | ||
193 | free_page((unsigned long) data); | ||
194 | |||
195 | return rc; | ||
196 | } | ||
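A hedged caller-side sketch of the three new entry points (the function below is illustrative, not part of the patch; struct chp_id and struct sclp_chp_info come from asm/chpid.h and asm/sclp.h):

static int example_vary_chp_on(struct chp_id chpid)
{
	struct sclp_chp_info info;
	int rc;

	rc = sclp_chp_configure(chpid);		/* blocks until completion */
	if (rc)
		return rc;
	rc = sclp_chp_read_info(&info);
	if (rc)
		return rc;
	/* info.recognized/standby/configured now hold fresh masks */
	return 0;
}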
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c new file mode 100644 index 000000000000..5322e5e54a98 --- /dev/null +++ b/drivers/s390/char/sclp_config.c | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/sclp_config.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/cpu.h> | ||
11 | #include <linux/sysdev.h> | ||
12 | #include <linux/workqueue.h> | ||
13 | #include "sclp.h" | ||
14 | |||
15 | #define TAG "sclp_config: " | ||
16 | |||
17 | struct conf_mgm_data { | ||
18 | u8 reserved; | ||
19 | u8 ev_qualifier; | ||
20 | } __attribute__((packed)); | ||
21 | |||
22 | #define EV_QUAL_CAP_CHANGE 3 | ||
23 | |||
24 | static struct work_struct sclp_cpu_capability_work; | ||
25 | |||
26 | static void sclp_cpu_capability_notify(struct work_struct *work) | ||
27 | { | ||
28 | int cpu; | ||
29 | struct sys_device *sysdev; | ||
30 | |||
31 | printk(KERN_WARNING TAG "cpu capability changed.\n"); | ||
32 | lock_cpu_hotplug(); | ||
33 | for_each_online_cpu(cpu) { | ||
34 | sysdev = get_cpu_sysdev(cpu); | ||
35 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); | ||
36 | } | ||
37 | unlock_cpu_hotplug(); | ||
38 | } | ||
39 | |||
40 | static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) | ||
41 | { | ||
42 | struct conf_mgm_data *cdata; | ||
43 | |||
44 | cdata = (struct conf_mgm_data *)(evbuf + 1); | ||
45 | if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE) | ||
46 | schedule_work(&sclp_cpu_capability_work); | ||
47 | } | ||
48 | |||
49 | static struct sclp_register sclp_conf_register = | ||
50 | { | ||
51 | .receive_mask = EVTYP_CONFMGMDATA_MASK, | ||
52 | .receiver_fn = sclp_conf_receiver_fn, | ||
53 | }; | ||
54 | |||
55 | static int __init sclp_conf_init(void) | ||
56 | { | ||
57 | int rc; | ||
58 | |||
59 | INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify); | ||
60 | |||
61 | rc = sclp_register(&sclp_conf_register); | ||
62 | if (rc) { | ||
63 | printk(KERN_ERR TAG "failed to register (%d).\n", rc); | ||
64 | return rc; | ||
65 | } | ||
66 | |||
67 | if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) { | ||
68 | printk(KERN_WARNING TAG "no configuration management.\n"); | ||
69 | sclp_unregister(&sclp_conf_register); | ||
70 | rc = -ENOSYS; | ||
71 | } | ||
72 | return rc; | ||
73 | } | ||
74 | |||
75 | __initcall(sclp_conf_init); | ||
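The indirection through a work item is the point of this file's structure: the SCLP receiver callback runs in a context that must not sleep, while lock_cpu_hotplug() and kobject_uevent() may. Reduced to the bare hand-off (sketch, illustrative function name):

/* atomic side: just queue, never sleep */
static void on_sclp_event(void)
{
	schedule_work(&sclp_cpu_capability_work);
}
/* sclp_cpu_capability_notify() later runs from the shared workqueue,
 * where sleeping locks and uevent emission are safe. */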
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c index 65aa2c85737f..29fe2a5ec2fe 100644 --- a/drivers/s390/char/sclp_cpi.c +++ b/drivers/s390/char/sclp_cpi.c | |||
@@ -46,7 +46,7 @@ struct cpi_sccb { | |||
46 | /* Event type structure for write message and write priority message */ | 46 | /* Event type structure for write message and write priority message */ |
47 | static struct sclp_register sclp_cpi_event = | 47 | static struct sclp_register sclp_cpi_event = |
48 | { | 48 | { |
49 | .send_mask = EvTyp_CtlProgIdent_Mask | 49 | .send_mask = EVTYP_CTLPROGIDENT_MASK |
50 | }; | 50 | }; |
51 | 51 | ||
52 | MODULE_LICENSE("GPL"); | 52 | MODULE_LICENSE("GPL"); |
@@ -201,7 +201,7 @@ cpi_module_init(void) | |||
201 | "console.\n"); | 201 | "console.\n"); |
202 | return -EINVAL; | 202 | return -EINVAL; |
203 | } | 203 | } |
204 | if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) { | 204 | if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) { |
205 | printk(KERN_WARNING "cpi: no control program identification " | 205 | printk(KERN_WARNING "cpi: no control program identification " |
206 | "support\n"); | 206 | "support\n"); |
207 | sclp_unregister(&sclp_cpi_event); | 207 | sclp_unregister(&sclp_cpi_event); |
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index baa8fe669ed2..45ff25e787cb 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c | |||
@@ -43,7 +43,7 @@ sclp_quiesce_handler(struct evbuf_header *evbuf) | |||
43 | } | 43 | } |
44 | 44 | ||
45 | static struct sclp_register sclp_quiesce_event = { | 45 | static struct sclp_register sclp_quiesce_event = { |
46 | .receive_mask = EvTyp_SigQuiesce_Mask, | 46 | .receive_mask = EVTYP_SIGQUIESCE_MASK, |
47 | .receiver_fn = sclp_quiesce_handler | 47 | .receiver_fn = sclp_quiesce_handler |
48 | }; | 48 | }; |
49 | 49 | ||
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 2486783ea58e..bbd5b8b66f42 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | /* Event type structure for write message and write priority message */ | 31 | /* Event type structure for write message and write priority message */ |
32 | static struct sclp_register sclp_rw_event = { | 32 | static struct sclp_register sclp_rw_event = { |
33 | .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask | 33 | .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK |
34 | }; | 34 | }; |
35 | 35 | ||
36 | /* | 36 | /* |
@@ -64,7 +64,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab) | |||
64 | memset(sccb, 0, sizeof(struct write_sccb)); | 64 | memset(sccb, 0, sizeof(struct write_sccb)); |
65 | sccb->header.length = sizeof(struct write_sccb); | 65 | sccb->header.length = sizeof(struct write_sccb); |
66 | sccb->msg_buf.header.length = sizeof(struct msg_buf); | 66 | sccb->msg_buf.header.length = sizeof(struct msg_buf); |
67 | sccb->msg_buf.header.type = EvTyp_Msg; | 67 | sccb->msg_buf.header.type = EVTYP_MSG; |
68 | sccb->msg_buf.mdb.header.length = sizeof(struct mdb); | 68 | sccb->msg_buf.mdb.header.length = sizeof(struct mdb); |
69 | sccb->msg_buf.mdb.header.type = 1; | 69 | sccb->msg_buf.mdb.header.type = 1; |
70 | sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ | 70 | sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ |
@@ -114,7 +114,7 @@ sclp_initialize_mto(struct sclp_buffer *buffer, int max_len) | |||
114 | memset(mto, 0, sizeof(struct mto)); | 114 | memset(mto, 0, sizeof(struct mto)); |
115 | mto->length = sizeof(struct mto); | 115 | mto->length = sizeof(struct mto); |
116 | mto->type = 4; /* message text object */ | 116 | mto->type = 4; /* message text object */ |
117 | mto->line_type_flags = LnTpFlgs_EndText; /* end text */ | 117 | mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */ |
118 | 118 | ||
119 | /* set pointer to first byte after struct mto. */ | 119 | /* set pointer to first byte after struct mto. */ |
120 | buffer->current_line = (char *) (mto + 1); | 120 | buffer->current_line = (char *) (mto + 1); |
@@ -215,7 +215,7 @@ sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count) | |||
215 | case '\a': /* bell, one for several times */ | 215 | case '\a': /* bell, one for several times */ |
216 | /* set SCLP sound alarm bit in General Object */ | 216 | /* set SCLP sound alarm bit in General Object */ |
217 | buffer->sccb->msg_buf.mdb.go.general_msg_flags |= | 217 | buffer->sccb->msg_buf.mdb.go.general_msg_flags |= |
218 | GnrlMsgFlgs_SndAlrm; | 218 | GNRLMSGFLGS_SNDALRM; |
219 | break; | 219 | break; |
220 | case '\t': /* horizontal tabulator */ | 220 | case '\t': /* horizontal tabulator */ |
221 | /* check if new mto needs to be created */ | 221 | /* check if new mto needs to be created */ |
@@ -452,12 +452,12 @@ sclp_emit_buffer(struct sclp_buffer *buffer, | |||
452 | return -EIO; | 452 | return -EIO; |
453 | 453 | ||
454 | sccb = buffer->sccb; | 454 | sccb = buffer->sccb; |
455 | if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask) | 455 | if (sclp_rw_event.sclp_send_mask & EVTYP_MSG_MASK) |
456 | /* Use normal write message */ | 456 | /* Use normal write message */ |
457 | sccb->msg_buf.header.type = EvTyp_Msg; | 457 | sccb->msg_buf.header.type = EVTYP_MSG; |
458 | else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask) | 458 | else if (sclp_rw_event.sclp_send_mask & EVTYP_PMSGCMD_MASK) |
459 | /* Use write priority message */ | 459 | /* Use write priority message */ |
460 | sccb->msg_buf.header.type = EvTyp_PMsgCmd; | 460 | sccb->msg_buf.header.type = EVTYP_PMSGCMD; |
461 | else | 461 | else |
462 | return -ENOSYS; | 462 | return -ENOSYS; |
463 | buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; | 463 | buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; |
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c new file mode 100644 index 000000000000..52283daddaef --- /dev/null +++ b/drivers/s390/char/sclp_sdias.c | |||
@@ -0,0 +1,255 @@ | |||
1 | /* | ||
2 | * Sclp "store data in absolute storage" | ||
3 | * | ||
4 | * Copyright IBM Corp. 2003,2007 | ||
5 | * Author(s): Michael Holzheu | ||
6 | */ | ||
7 | |||
8 | #include <linux/sched.h> | ||
9 | #include <asm/sclp.h> | ||
10 | #include <asm/debug.h> | ||
11 | #include <asm/ipl.h> | ||
12 | #include "sclp.h" | ||
13 | #include "sclp_rw.h" | ||
14 | |||
15 | #define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) | ||
16 | #define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x ) | ||
17 | |||
18 | #define SDIAS_RETRIES 300 | ||
19 | #define SDIAS_SLEEP_TICKS 50 | ||
20 | |||
21 | #define EQ_STORE_DATA 0x0 | ||
22 | #define EQ_SIZE 0x1 | ||
23 | #define DI_FCP_DUMP 0x0 | ||
24 | #define ASA_SIZE_32 0x0 | ||
25 | #define ASA_SIZE_64 0x1 | ||
26 | #define EVSTATE_ALL_STORED 0x0 | ||
27 | #define EVSTATE_NO_DATA 0x3 | ||
28 | #define EVSTATE_PART_STORED 0x10 | ||
29 | |||
30 | static struct debug_info *sdias_dbf; | ||
31 | |||
32 | static struct sclp_register sclp_sdias_register = { | ||
33 | .send_mask = EVTYP_SDIAS_MASK, | ||
34 | }; | ||
35 | |||
36 | struct sdias_evbuf { | ||
37 | struct evbuf_header hdr; | ||
38 | u8 event_qual; | ||
39 | u8 data_id; | ||
40 | u64 reserved2; | ||
41 | u32 event_id; | ||
42 | u16 reserved3; | ||
43 | u8 asa_size; | ||
44 | u8 event_status; | ||
45 | u32 reserved4; | ||
46 | u32 blk_cnt; | ||
47 | u64 asa; | ||
48 | u32 reserved5; | ||
49 | u32 fbn; | ||
50 | u32 reserved6; | ||
51 | u32 lbn; | ||
52 | u16 reserved7; | ||
53 | u16 dbs; | ||
54 | } __attribute__((packed)); | ||
55 | |||
56 | struct sdias_sccb { | ||
57 | struct sccb_header hdr; | ||
58 | struct sdias_evbuf evbuf; | ||
59 | } __attribute__((packed)); | ||
60 | |||
61 | static struct sdias_sccb sccb __attribute__((aligned(4096))); | ||
62 | |||
63 | static int sclp_req_done; | ||
64 | static wait_queue_head_t sdias_wq; | ||
65 | static DEFINE_MUTEX(sdias_mutex); | ||
66 | |||
67 | static void sdias_callback(struct sclp_req *request, void *data) | ||
68 | { | ||
69 | struct sdias_sccb *sccb; | ||
70 | |||
71 | sccb = (struct sdias_sccb *) request->sccb; | ||
72 | sclp_req_done = 1; | ||
73 | wake_up(&sdias_wq); /* Inform caller that request is complete */ | ||
74 | TRACE("callback done\n"); | ||
75 | } | ||
76 | |||
77 | static int sdias_sclp_send(struct sclp_req *req) | ||
78 | { | ||
79 | int retries; | ||
80 | int rc; | ||
81 | |||
82 | for (retries = SDIAS_RETRIES; retries; retries--) { | ||
83 | sclp_req_done = 0; | ||
84 | TRACE("add request\n"); | ||
85 | rc = sclp_add_request(req); | ||
86 | if (rc) { | ||
87 | /* not initiated, wait some time and retry */ | ||
88 | set_current_state(TASK_INTERRUPTIBLE); | ||
89 | TRACE("add request failed: rc = %i\n",rc); | ||
90 | schedule_timeout(SDIAS_SLEEP_TICKS); | ||
91 | continue; | ||
92 | } | ||
93 | /* initiated, wait for completion of service call */ | ||
94 | wait_event(sdias_wq, (sclp_req_done == 1)); | ||
95 | if (req->status == SCLP_REQ_FAILED) { | ||
96 | TRACE("sclp request failed\n"); | ||
97 | rc = -EIO; | ||
98 | continue; | ||
99 | } | ||
100 | TRACE("request done\n"); | ||
101 | break; | ||
102 | } | ||
103 | return rc; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Get number of blocks (4K) available in the HSA | ||
108 | */ | ||
109 | int sclp_sdias_blk_count(void) | ||
110 | { | ||
111 | struct sclp_req request; | ||
112 | int rc; | ||
113 | |||
114 | mutex_lock(&sdias_mutex); | ||
115 | |||
116 | memset(&sccb, 0, sizeof(sccb)); | ||
117 | memset(&request, 0, sizeof(request)); | ||
118 | |||
119 | sccb.hdr.length = sizeof(sccb); | ||
120 | sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); | ||
121 | sccb.evbuf.hdr.type = EVTYP_SDIAS; | ||
122 | sccb.evbuf.event_qual = EQ_SIZE; | ||
123 | sccb.evbuf.data_id = DI_FCP_DUMP; | ||
124 | sccb.evbuf.event_id = 4712; | ||
125 | sccb.evbuf.dbs = 1; | ||
126 | |||
127 | request.sccb = &sccb; | ||
128 | request.command = SCLP_CMDW_WRITE_EVENT_DATA; | ||
129 | request.status = SCLP_REQ_FILLED; | ||
130 | request.callback = sdias_callback; | ||
131 | |||
132 | rc = sdias_sclp_send(&request); | ||
133 | if (rc) { | ||
134 | ERROR_MSG("sclp_send failed for get_nr_blocks\n"); | ||
135 | goto out; | ||
136 | } | ||
137 | if (sccb.hdr.response_code != 0x0020) { | ||
138 | TRACE("send failed: %x\n", sccb.hdr.response_code); | ||
139 | rc = -EIO; | ||
140 | goto out; | ||
141 | } | ||
142 | |||
143 | switch (sccb.evbuf.event_status) { | ||
144 | case 0: | ||
145 | rc = sccb.evbuf.blk_cnt; | ||
146 | break; | ||
147 | default: | ||
148 | ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status); | ||
149 | rc = -EIO; | ||
150 | goto out; | ||
151 | } | ||
152 | TRACE("%i blocks\n", rc); | ||
153 | out: | ||
154 | mutex_unlock(&sdias_mutex); | ||
155 | return rc; | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * Copy from HSA to absolute storage (not reentrant): | ||
160 | * | ||
161 | * @dest : Address of buffer where data should be copied | ||
162 | * @start_blk: Start Block (beginning with 1) | ||
163 | * @nr_blks : Number of 4K blocks to copy | ||
164 | * | ||
165 | * Return Value: 0 : Requested 'number' of blocks of data copied | ||
166 | * <0: ERROR - negative event status | ||
167 | */ | ||
168 | int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) | ||
169 | { | ||
170 | struct sclp_req request; | ||
171 | int rc; | ||
172 | |||
173 | mutex_lock(&sdias_mutex); | ||
174 | |||
175 | memset(&sccb, 0, sizeof(sccb)); | ||
176 | memset(&request, 0, sizeof(request)); | ||
177 | |||
178 | sccb.hdr.length = sizeof(sccb); | ||
179 | sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); | ||
180 | sccb.evbuf.hdr.type = EVTYP_SDIAS; | ||
181 | sccb.evbuf.hdr.flags = 0; | ||
182 | sccb.evbuf.event_qual = EQ_STORE_DATA; | ||
183 | sccb.evbuf.data_id = DI_FCP_DUMP; | ||
184 | sccb.evbuf.event_id = 4712; | ||
185 | #ifdef __s390x__ | ||
186 | sccb.evbuf.asa_size = ASA_SIZE_64; | ||
187 | #else | ||
188 | sccb.evbuf.asa_size = ASA_SIZE_32; | ||
189 | #endif | ||
190 | sccb.evbuf.event_status = 0; | ||
191 | sccb.evbuf.blk_cnt = nr_blks; | ||
192 | sccb.evbuf.asa = (unsigned long)dest; | ||
193 | sccb.evbuf.fbn = start_blk; | ||
194 | sccb.evbuf.lbn = 0; | ||
195 | sccb.evbuf.dbs = 1; | ||
196 | |||
197 | request.sccb = &sccb; | ||
198 | request.command = SCLP_CMDW_WRITE_EVENT_DATA; | ||
199 | request.status = SCLP_REQ_FILLED; | ||
200 | request.callback = sdias_callback; | ||
201 | |||
202 | rc = sdias_sclp_send(&request); | ||
203 | if (rc) { | ||
204 | ERROR_MSG("sclp_send failed: %x\n", rc); | ||
205 | goto out; | ||
206 | } | ||
207 | if (sccb.hdr.response_code != 0x0020) { | ||
208 | TRACE("copy failed: %x\n", sccb.hdr.response_code); | ||
209 | rc = -EIO; | ||
210 | goto out; | ||
211 | } | ||
212 | |||
213 | switch (sccb.evbuf.event_status) { | ||
214 | case EVSTATE_ALL_STORED: | ||
215 | TRACE("all stored\n"); | ||
216 | case EVSTATE_PART_STORED: | ||
217 | TRACE("part stored: %i\n", sccb.evbuf.blk_cnt); | ||
218 | break; | ||
219 | case EVSTATE_NO_DATA: | ||
220 | TRACE("no data\n"); | ||
221 | default: | ||
222 | ERROR_MSG("Error from SCLP while copying hsa. " | ||
223 | "Event status = %x\n", | ||
224 | sccb.evbuf.event_status); | ||
225 | rc = -EIO; | ||
226 | } | ||
227 | out: | ||
228 | mutex_unlock(&sdias_mutex); | ||
229 | return rc; | ||
230 | } | ||
231 | |||
232 | int __init sdias_init(void) | ||
233 | { | ||
234 | int rc; | ||
235 | |||
236 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | ||
237 | return 0; | ||
238 | sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long)); | ||
239 | debug_register_view(sdias_dbf, &debug_sprintf_view); | ||
240 | debug_set_level(sdias_dbf, 6); | ||
241 | rc = sclp_register(&sclp_sdias_register); | ||
242 | if (rc) { | ||
243 | ERROR_MSG("sclp register failed\n"); | ||
244 | return rc; | ||
245 | } | ||
246 | init_waitqueue_head(&sdias_wq); | ||
247 | TRACE("init done\n"); | ||
248 | return 0; | ||
249 | } | ||
250 | |||
251 | void __exit sdias_exit(void) | ||
252 | { | ||
253 | debug_unregister(sdias_dbf); | ||
254 | sclp_unregister(&sclp_sdias_register); | ||
255 | } | ||
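Caller-side sketch for the two exported sdias entry points (hypothetical wrapper; the real consumer is zcore.c below). Note that sclp_sdias_copy() counts blocks from 1 and the destination is passed as an absolute address:

static int example_read_first_hsa_block(void *page)	/* one 4K page */
{
	int blocks = sclp_sdias_blk_count();

	if (blocks < 0)
		return blocks;		/* -EIO etc. from the request */
	if (blocks == 0)
		return -ENODEV;		/* no HSA dump data available */
	return sclp_sdias_copy(page, 1, 1);	/* block numbers are 1-based */
}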
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 076816b9d528..e3b3d390b4a3 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c | |||
@@ -648,7 +648,7 @@ sclp_eval_textcmd(struct gds_subvector *start, | |||
648 | subvec = start; | 648 | subvec = start; |
649 | while (subvec < end) { | 649 | while (subvec < end) { |
650 | subvec = find_gds_subvector(subvec, end, | 650 | subvec = find_gds_subvector(subvec, end, |
651 | GDS_KEY_SelfDefTextMsg); | 651 | GDS_KEY_SELFDEFTEXTMSG); |
652 | if (!subvec) | 652 | if (!subvec) |
653 | break; | 653 | break; |
654 | sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1), | 654 | sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1), |
@@ -664,7 +664,7 @@ sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end) | |||
664 | 664 | ||
665 | vec = start; | 665 | vec = start; |
666 | while (vec < end) { | 666 | while (vec < end) { |
667 | vec = find_gds_vector(vec, end, GDS_ID_TextCmd); | 667 | vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD); |
668 | if (!vec) | 668 | if (!vec) |
669 | break; | 669 | break; |
670 | sclp_eval_textcmd((struct gds_subvector *)(vec + 1), | 670 | sclp_eval_textcmd((struct gds_subvector *)(vec + 1), |
@@ -703,7 +703,7 @@ sclp_tty_state_change(struct sclp_register *reg) | |||
703 | 703 | ||
704 | static struct sclp_register sclp_input_event = | 704 | static struct sclp_register sclp_input_event = |
705 | { | 705 | { |
706 | .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask, | 706 | .receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK, |
707 | .state_change_fn = sclp_tty_state_change, | 707 | .state_change_fn = sclp_tty_state_change, |
708 | .receiver_fn = sclp_tty_receiver | 708 | .receiver_fn = sclp_tty_receiver |
709 | }; | 709 | }; |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index f77dc33b5f8d..726334757bbf 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -99,8 +99,8 @@ static void sclp_vt220_emit_current(void); | |||
99 | 99 | ||
100 | /* Registration structure for our interest in SCLP event buffers */ | 100 | /* Registration structure for our interest in SCLP event buffers */ |
101 | static struct sclp_register sclp_vt220_register = { | 101 | static struct sclp_register sclp_vt220_register = { |
102 | .send_mask = EvTyp_VT220Msg_Mask, | 102 | .send_mask = EVTYP_VT220MSG_MASK, |
103 | .receive_mask = EvTyp_VT220Msg_Mask, | 103 | .receive_mask = EVTYP_VT220MSG_MASK, |
104 | .state_change_fn = NULL, | 104 | .state_change_fn = NULL, |
105 | .receiver_fn = sclp_vt220_receiver_fn | 105 | .receiver_fn = sclp_vt220_receiver_fn |
106 | }; | 106 | }; |
@@ -202,7 +202,7 @@ sclp_vt220_callback(struct sclp_req *request, void *data) | |||
202 | static int | 202 | static int |
203 | __sclp_vt220_emit(struct sclp_vt220_request *request) | 203 | __sclp_vt220_emit(struct sclp_vt220_request *request) |
204 | { | 204 | { |
205 | if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) { | 205 | if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) { |
206 | request->sclp_req.status = SCLP_REQ_FAILED; | 206 | request->sclp_req.status = SCLP_REQ_FAILED; |
207 | return -EIO; | 207 | return -EIO; |
208 | } | 208 | } |
@@ -284,7 +284,7 @@ sclp_vt220_initialize_page(void *page) | |||
284 | sccb->header.length = sizeof(struct sclp_vt220_sccb); | 284 | sccb->header.length = sizeof(struct sclp_vt220_sccb); |
285 | sccb->header.function_code = SCLP_NORMAL_WRITE; | 285 | sccb->header.function_code = SCLP_NORMAL_WRITE; |
286 | sccb->header.response_code = 0x0000; | 286 | sccb->header.response_code = 0x0000; |
287 | sccb->evbuf.type = EvTyp_VT220Msg; | 287 | sccb->evbuf.type = EVTYP_VT220MSG; |
288 | sccb->evbuf.length = sizeof(struct evbuf_header); | 288 | sccb->evbuf.length = sizeof(struct evbuf_header); |
289 | 289 | ||
290 | return request; | 290 | return request; |
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index b87d3b019936..a5a00e9ae4d0 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -125,7 +125,7 @@ static struct vmlogrdr_priv_t sys_ser[] = { | |||
125 | .recording_name = "EREP", | 125 | .recording_name = "EREP", |
126 | .minor_num = 0, | 126 | .minor_num = 0, |
127 | .buffer_free = 1, | 127 | .buffer_free = 1, |
128 | .priv_lock = SPIN_LOCK_UNLOCKED, | 128 | .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock), |
129 | .autorecording = 1, | 129 | .autorecording = 1, |
130 | .autopurge = 1, | 130 | .autopurge = 1, |
131 | }, | 131 | }, |
@@ -134,7 +134,7 @@ static struct vmlogrdr_priv_t sys_ser[] = { | |||
134 | .recording_name = "ACCOUNT", | 134 | .recording_name = "ACCOUNT", |
135 | .minor_num = 1, | 135 | .minor_num = 1, |
136 | .buffer_free = 1, | 136 | .buffer_free = 1, |
137 | .priv_lock = SPIN_LOCK_UNLOCKED, | 137 | .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock), |
138 | .autorecording = 1, | 138 | .autorecording = 1, |
139 | .autopurge = 1, | 139 | .autopurge = 1, |
140 | }, | 140 | }, |
@@ -143,7 +143,7 @@ static struct vmlogrdr_priv_t sys_ser[] = { | |||
143 | .recording_name = "SYMPTOM", | 143 | .recording_name = "SYMPTOM", |
144 | .minor_num = 2, | 144 | .minor_num = 2, |
145 | .buffer_free = 1, | 145 | .buffer_free = 1, |
146 | .priv_lock = SPIN_LOCK_UNLOCKED, | 146 | .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock), |
147 | .autorecording = 1, | 147 | .autorecording = 1, |
148 | .autopurge = 1, | 148 | .autopurge = 1, |
149 | } | 149 | } |
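The initializer change in these three hunks replaces the old global SPIN_LOCK_UNLOCKED, which gave every statically initialized lock the same lockdep class, with __SPIN_LOCK_UNLOCKED(name), which keys each lock individually. A minimal sketch of the two idioms (struct and function names are hypothetical):

    struct my_dev {
            spinlock_t lock;
    };

    /* Static initialization: each lock gets its own lockdep key. */
    static struct my_dev dev0 = {
            .lock = __SPIN_LOCK_UNLOCKED(dev0.lock),
    };

    /* Equivalent runtime initialization for dynamically set-up objects. */
    static void my_dev_init(struct my_dev *d)
    {
            spin_lock_init(&d->lock);
    }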
@@ -385,6 +385,9 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp) | |||
385 | 385 | ||
386 | struct vmlogrdr_priv_t * logptr = filp->private_data; | 386 | struct vmlogrdr_priv_t * logptr = filp->private_data; |
387 | 387 | ||
388 | iucv_path_sever(logptr->path, NULL); | ||
389 | kfree(logptr->path); | ||
390 | logptr->path = NULL; | ||
388 | if (logptr->autorecording) { | 391 | if (logptr->autorecording) { |
389 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); | 392 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); |
390 | if (ret) | 393 | if (ret) |
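The three added lines put the teardown in a safe order: sever the IUCV path so no further interrupts or data can arrive, free it, clear the pointer, and only then run the autorecording shutdown. Sketched as a generic pattern (quiesce the data source before releasing the state it feeds):

    /* Sketch; vmlogrdr_priv_t fields as used in the hunk above. */
    static void quiesce_then_teardown(struct vmlogrdr_priv_t *logptr)
    {
            iucv_path_sever(logptr->path, NULL); /* stop inbound traffic */
            kfree(logptr->path);
            logptr->path = NULL;                 /* no dangling pointer */
            /* ... autorecording shutdown follows, as in the hunk ... */
    }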
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c new file mode 100644 index 000000000000..89d439316a53 --- /dev/null +++ b/drivers/s390/char/zcore.c | |||
@@ -0,0 +1,651 @@ | |||
1 | /* | ||
2 | * zcore module to export memory content and register sets for creating system | ||
3 | * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same | ||
4 | * dump format as s390 standalone dumps. | ||
5 | * | ||
6 | * For more information please refer to Documentation/s390/zfcpdump.txt | ||
7 | * | ||
8 | * Copyright IBM Corp. 2003,2007 | ||
9 | * Author(s): Michael Holzheu | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/miscdevice.h> | ||
14 | #include <linux/utsname.h> | ||
15 | #include <linux/debugfs.h> | ||
16 | #include <asm/ipl.h> | ||
17 | #include <asm/sclp.h> | ||
18 | #include <asm/setup.h> | ||
19 | #include <asm/sigp.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | #include <asm/debug.h> | ||
22 | #include <asm/processor.h> | ||
23 | #include <asm/irqflags.h> | ||
24 | |||
25 | #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) | ||
26 | #define MSG(x...) printk( KERN_ALERT x ) | ||
27 | #define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x ) | ||
28 | |||
29 | #define TO_USER 0 | ||
30 | #define TO_KERNEL 1 | ||
31 | |||
32 | enum arch_id { | ||
33 | ARCH_S390 = 0, | ||
34 | ARCH_S390X = 1, | ||
35 | }; | ||
36 | |||
37 | /* dump system info */ | ||
38 | |||
39 | struct sys_info { | ||
40 | enum arch_id arch; | ||
41 | unsigned long sa_base; | ||
42 | u32 sa_size; | ||
43 | int cpu_map[NR_CPUS]; | ||
44 | unsigned long mem_size; | ||
45 | union save_area lc_mask; | ||
46 | }; | ||
47 | |||
48 | static struct sys_info sys_info; | ||
49 | static struct debug_info *zcore_dbf; | ||
50 | static int hsa_available; | ||
51 | static struct dentry *zcore_dir; | ||
52 | static struct dentry *zcore_file; | ||
53 | |||
54 | /* | ||
55 | * Copy memory from HSA to kernel or user memory (not reentrant): | ||
56 | * | ||
57 | * @dest: Kernel or user buffer where memory should be copied to | ||
58 | * @src: Start address within HSA where data should be copied | ||
59 | * @count: Size of buffer, which should be copied | ||
60 | * @mode: Either TO_KERNEL or TO_USER | ||
61 | */ | ||
62 | static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) | ||
63 | { | ||
64 | int offs, blk_num; | ||
65 | static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); | ||
66 | |||
67 | if (count == 0) | ||
68 | return 0; | ||
69 | |||
70 | /* copy first block */ | ||
71 | offs = 0; | ||
72 | if ((src % PAGE_SIZE) != 0) { | ||
73 | blk_num = src / PAGE_SIZE + 2; | ||
74 | if (sclp_sdias_copy(buf, blk_num, 1)) { | ||
75 | TRACE("sclp_sdias_copy() failed\n"); | ||
76 | return -EIO; | ||
77 | } | ||
78 | offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count); | ||
79 | if (mode == TO_USER) { | ||
80 | if (copy_to_user((__force __user void*) dest, | ||
81 | buf + (src % PAGE_SIZE), offs)) | ||
82 | return -EFAULT; | ||
83 | } else | ||
84 | memcpy(dest, buf + (src % PAGE_SIZE), offs); | ||
85 | } | ||
86 | if (offs == count) | ||
87 | goto out; | ||
88 | |||
89 | /* copy middle */ | ||
90 | for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) { | ||
91 | blk_num = (src + offs) / PAGE_SIZE + 2; | ||
92 | if (sclp_sdias_copy(buf, blk_num, 1)) { | ||
93 | TRACE("sclp_sdias_copy() failed\n"); | ||
94 | return -EIO; | ||
95 | } | ||
96 | if (mode == TO_USER) { | ||
97 | if (copy_to_user((__force __user void*) dest + offs, | ||
98 | buf, PAGE_SIZE)) | ||
99 | return -EFAULT; | ||
100 | } else | ||
101 | memcpy(dest + offs, buf, PAGE_SIZE); | ||
102 | } | ||
103 | if (offs == count) | ||
104 | goto out; | ||
105 | |||
106 | /* copy last block */ | ||
107 | blk_num = (src + offs) / PAGE_SIZE + 2; | ||
108 | if (sclp_sdias_copy(buf, blk_num, 1)) { | ||
109 | TRACE("sclp_sdias_copy() failed\n"); | ||
110 | return -EIO; | ||
111 | } | ||
112 | if (mode == TO_USER) { | ||
113 | if (copy_to_user((__force __user void*) dest + offs, buf, | ||
114 | count - offs)) | ||
115 | return -EFAULT; | ||
116 | } else | ||
117 | memcpy(dest + offs, buf, count - offs); | ||
118 | out: | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count) | ||
123 | { | ||
124 | return memcpy_hsa((void __force *) dest, src, count, TO_USER); | ||
125 | } | ||
126 | |||
127 | static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count) | ||
128 | { | ||
129 | return memcpy_hsa(dest, src, count, TO_KERNEL); | ||
130 | } | ||
131 | |||
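A worked trace of the three-phase copy may help; it only restates the arithmetic above, assuming PAGE_SIZE is 4096:

    /*
     * memcpy_hsa(dest, src = 0x1800, count = 0x2000, mode):
     *   head:   blk_num = 0x1800 / 4096 + 2 = 3; copies 4096 - 0x800 = 0x800
     *   middle: blk_num = (0x1800 + 0x800) / 4096 + 2 = 4; copies a full page
     *   tail:   blk_num = (0x1800 + 0x1800) / 4096 + 2 = 5; copies the last 0x800
     * The "+ 2" converts a byte offset into the block numbering that
     * sclp_sdias_copy() expects for the HSA.
     */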
132 | static int memcpy_real(void *dest, unsigned long src, size_t count) | ||
133 | { | ||
134 | unsigned long flags; | ||
135 | int rc = -EFAULT; | ||
136 | register unsigned long _dest asm("2") = (unsigned long) dest; | ||
137 | register unsigned long _len1 asm("3") = (unsigned long) count; | ||
138 | register unsigned long _src asm("4") = src; | ||
139 | register unsigned long _len2 asm("5") = (unsigned long) count; | ||
140 | |||
141 | if (count == 0) | ||
142 | return 0; | ||
143 | flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */ | ||
144 | asm volatile ( | ||
145 | "0: mvcle %1,%2,0x0\n" | ||
146 | "1: jo 0b\n" | ||
147 | " lhi %0,0x0\n" | ||
148 | "2:\n" | ||
149 | EX_TABLE(1b,2b) | ||
150 | : "+d" (rc) | ||
151 | : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2) | ||
152 | : "cc", "memory"); | ||
153 | __raw_local_irq_ssm(flags); | ||
154 | |||
155 | return rc; | ||
156 | } | ||
157 | |||
158 | static int memcpy_real_user(__user void *dest, unsigned long src, size_t count) | ||
159 | { | ||
160 | static char buf[4096]; | ||
161 | int offs = 0, size; | ||
162 | |||
163 | while (offs < count) { | ||
164 | size = min(sizeof(buf), count - offs); | ||
165 | if (memcpy_real(buf, src + offs, size)) | ||
166 | return -EFAULT; | ||
167 | if (copy_to_user(dest + offs, buf, size)) | ||
168 | return -EFAULT; | ||
169 | offs += size; | ||
170 | } | ||
171 | return 0; | ||
172 | } | ||
173 | |||
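memcpy_real_user() exists because copy_to_user() cannot run while the PSW is switched to real mode, so data is staged through a static kernel bounce buffer one chunk at a time. The same pattern in isolation, as a plain C sketch with hypothetical names:

    #include <stddef.h>
    #include <string.h>

    /* Stage data through a bounded buffer so the producer never touches
     * the consumer's address space directly; commit() stands in for
     * copy_to_user(). */
    static int staged_copy(char *dst, const char *src, size_t count,
                           int (*commit)(char *, const char *, size_t))
    {
            static char buf[4096];
            size_t offs = 0;

            while (offs < count) {
                    size_t n = count - offs < sizeof(buf) ?
                               count - offs : sizeof(buf);
                    memcpy(buf, src + offs, n);      /* stage into buffer */
                    if (commit(dst + offs, buf, n))  /* publish chunk */
                            return -1;
                    offs += n;
            }
            return 0;
    }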
174 | #ifdef __s390x__ | ||
175 | /* | ||
176 | * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info | ||
177 | */ | ||
178 | static void __init s390x_to_s390_regs(union save_area *out, union save_area *in, | ||
179 | int cpu) | ||
180 | { | ||
181 | int i; | ||
182 | |||
183 | for (i = 0; i < 16; i++) { | ||
184 | out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff; | ||
185 | out->s390.acc_regs[i] = in->s390x.acc_regs[i]; | ||
186 | out->s390.ctrl_regs[i] = | ||
187 | in->s390x.ctrl_regs[i] & 0x00000000ffffffff; | ||
188 | } | ||
189 | /* lowcore for 31 bit has only space for fpregs 0,2,4,6 */ | ||
190 | out->s390.fp_regs[0] = in->s390x.fp_regs[0]; | ||
191 | out->s390.fp_regs[1] = in->s390x.fp_regs[2]; | ||
192 | out->s390.fp_regs[2] = in->s390x.fp_regs[4]; | ||
193 | out->s390.fp_regs[3] = in->s390x.fp_regs[6]; | ||
194 | memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4); | ||
195 | out->s390.psw[1] |= 0x8; /* set bit 12 */ | ||
196 | memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4); | ||
197 | out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */ | ||
198 | out->s390.pref_reg = in->s390x.pref_reg; | ||
199 | out->s390.timer = in->s390x.timer; | ||
200 | out->s390.clk_cmp = in->s390x.clk_cmp; | ||
201 | } | ||
202 | |||
203 | static void __init s390x_to_s390_save_areas(void) | ||
204 | { | ||
205 | int i = 1; | ||
206 | static union save_area tmp; | ||
207 | |||
208 | while (zfcpdump_save_areas[i]) { | ||
209 | s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i); | ||
210 | memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp)); | ||
211 | i++; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | #endif /* __s390x__ */ | ||
216 | |||
217 | static int __init init_cpu_info(enum arch_id arch) | ||
218 | { | ||
219 | union save_area *sa; | ||
220 | |||
221 | /* get info for boot cpu from lowcore, stored in the HSA */ | ||
222 | |||
223 | sa = kmalloc(sizeof(*sa), GFP_KERNEL); | ||
224 | if (!sa) { | ||
225 | ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__); | ||
226 | return -ENOMEM; | ||
227 | } | ||
228 | if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { | ||
229 | ERROR_MSG("could not copy from HSA\n"); | ||
230 | kfree(sa); | ||
231 | return -EIO; | ||
232 | } | ||
233 | zfcpdump_save_areas[0] = sa; | ||
234 | |||
235 | #ifdef __s390x__ | ||
236 | /* convert s390x regs to s390, if we are dumping an s390 Linux */ | ||
237 | |||
238 | if (arch == ARCH_S390) | ||
239 | s390x_to_s390_save_areas(); | ||
240 | #endif | ||
241 | |||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | static DEFINE_MUTEX(zcore_mutex); | ||
246 | |||
247 | #define DUMP_VERSION 0x3 | ||
248 | #define DUMP_MAGIC 0xa8190173618f23fdULL | ||
249 | #define DUMP_ARCH_S390X 2 | ||
250 | #define DUMP_ARCH_S390 1 | ||
251 | #define HEADER_SIZE 4096 | ||
252 | |||
253 | /* dump header dumped according to s390 crash dump format */ | ||
254 | |||
255 | struct zcore_header { | ||
256 | u64 magic; | ||
257 | u32 version; | ||
258 | u32 header_size; | ||
259 | u32 dump_level; | ||
260 | u32 page_size; | ||
261 | u64 mem_size; | ||
262 | u64 mem_start; | ||
263 | u64 mem_end; | ||
264 | u32 num_pages; | ||
265 | u32 pad1; | ||
266 | u64 tod; | ||
267 | cpuid_t cpu_id; | ||
268 | u32 arch_id; | ||
269 | u32 build_arch; | ||
270 | char pad2[4016]; | ||
271 | } __attribute__((packed,__aligned__(16))); | ||
272 | |||
273 | static struct zcore_header zcore_header = { | ||
274 | .magic = DUMP_MAGIC, | ||
275 | .version = DUMP_VERSION, | ||
276 | .header_size = 4096, | ||
277 | .dump_level = 0, | ||
278 | .page_size = PAGE_SIZE, | ||
279 | .mem_start = 0, | ||
280 | #ifdef __s390x__ | ||
281 | .build_arch = DUMP_ARCH_S390X, | ||
282 | #else | ||
283 | .build_arch = DUMP_ARCH_S390, | ||
284 | #endif | ||
285 | }; | ||
286 | |||
287 | /* | ||
288 | * Copy lowcore info to buffer. Use map in order to copy only register parts. | ||
289 | * | ||
290 | * @buf: User buffer | ||
291 | * @sa: Pointer to save area | ||
292 | * @sa_off: Offset in save area to copy | ||
293 | * @len: Number of bytes to copy | ||
294 | */ | ||
295 | static int copy_lc(void __user *buf, void *sa, int sa_off, int len) | ||
296 | { | ||
297 | int i; | ||
298 | char *lc_mask = (char*)&sys_info.lc_mask; | ||
299 | |||
300 | for (i = 0; i < len; i++) { | ||
301 | if (!lc_mask[i + sa_off]) | ||
302 | continue; | ||
303 | if (copy_to_user(buf + i, sa + sa_off + i, 1)) | ||
304 | return -EFAULT; | ||
305 | } | ||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | /* | ||
310 | * Copy lowcores info to memory, if necessary | ||
311 | * | ||
312 | * @buf: User buffer | ||
313 | * @addr: Start address of buffer in dump memory | ||
314 | * @count: Size of buffer | ||
315 | */ | ||
316 | static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) | ||
317 | { | ||
318 | unsigned long end; | ||
319 | int i = 0; | ||
320 | |||
321 | if (count == 0) | ||
322 | return 0; | ||
323 | |||
324 | end = start + count; | ||
325 | while (zfcpdump_save_areas[i]) { | ||
326 | unsigned long cp_start, cp_end; /* copy range */ | ||
327 | unsigned long sa_start, sa_end; /* save area range */ | ||
328 | unsigned long prefix; | ||
329 | unsigned long sa_off, len, buf_off; | ||
330 | |||
331 | if (sys_info.arch == ARCH_S390) | ||
332 | prefix = zfcpdump_save_areas[i]->s390.pref_reg; | ||
333 | else | ||
334 | prefix = zfcpdump_save_areas[i]->s390x.pref_reg; | ||
335 | |||
336 | sa_start = prefix + sys_info.sa_base; | ||
337 | sa_end = prefix + sys_info.sa_base + sys_info.sa_size; | ||
338 | |||
339 | if ((end < sa_start) || (start > sa_end)) | ||
340 | goto next; | ||
341 | cp_start = max(start, sa_start); | ||
342 | cp_end = min(end, sa_end); | ||
343 | |||
344 | buf_off = cp_start - start; | ||
345 | sa_off = cp_start - sa_start; | ||
346 | len = cp_end - cp_start; | ||
347 | |||
348 | TRACE("copy_lc for: %lx\n", start); | ||
349 | if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len)) | ||
350 | return -EFAULT; | ||
351 | next: | ||
352 | i++; | ||
353 | } | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | /* | ||
358 | * Read routine for zcore character device | ||
359 | * First 4K are dump header | ||
360 | * Next 32MB are HSA Memory | ||
361 | * Rest is read from absolute Memory | ||
362 | */ | ||
363 | static ssize_t zcore_read(struct file *file, char __user *buf, size_t count, | ||
364 | loff_t *ppos) | ||
365 | { | ||
366 | unsigned long mem_start; /* Start address in memory */ | ||
367 | size_t mem_offs; /* Offset in dump memory */ | ||
368 | size_t hdr_count; /* Size of header part of output buffer */ | ||
369 | size_t size; | ||
370 | int rc; | ||
371 | |||
372 | mutex_lock(&zcore_mutex); | ||
373 | |||
374 | if (*ppos > (sys_info.mem_size + HEADER_SIZE)) { | ||
375 | rc = -EINVAL; | ||
376 | goto fail; | ||
377 | } | ||
378 | |||
379 | count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos)); | ||
380 | |||
381 | /* Copy dump header */ | ||
382 | if (*ppos < HEADER_SIZE) { | ||
383 | size = min(count, (size_t) (HEADER_SIZE - *ppos)); | ||
384 | if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) { | ||
385 | rc = -EFAULT; | ||
386 | goto fail; | ||
387 | } | ||
388 | hdr_count = size; | ||
389 | mem_start = 0; | ||
390 | } else { | ||
391 | hdr_count = 0; | ||
392 | mem_start = *ppos - HEADER_SIZE; | ||
393 | } | ||
394 | |||
395 | mem_offs = 0; | ||
396 | |||
397 | /* Copy from HSA data */ | ||
398 | if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) { | ||
399 | size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE | ||
400 | - mem_start)); | ||
401 | rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); | ||
402 | if (rc) | ||
403 | goto fail; | ||
404 | |||
405 | mem_offs += size; | ||
406 | } | ||
407 | |||
408 | /* Copy from real mem */ | ||
409 | size = count - mem_offs - hdr_count; | ||
410 | rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs, | ||
411 | size); | ||
412 | if (rc) | ||
413 | goto fail; | ||
414 | |||
415 | /* | ||
416 | * Since s390 dump analysis tools like lcrash or crash | ||
417 | * expect register sets in the prefix pages of the cpus, | ||
418 | * we copy them into the read buffer, if necessary. | ||
419 | * buf + hdr_count: Start of memory part of output buffer | ||
420 | * mem_start: Start memory address to copy from | ||
421 | * count - hdr_count: Size of memory area to copy | ||
422 | */ | ||
423 | if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) { | ||
424 | rc = -EFAULT; | ||
425 | goto fail; | ||
426 | } | ||
427 | *ppos += count; | ||
428 | fail: | ||
429 | mutex_unlock(&zcore_mutex); | ||
430 | return (rc < 0) ? rc : count; | ||
431 | } | ||
432 | |||
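zcore_read() splits every request across up to three backing regions selected by *ppos: the synthetic dump header, HSA-backed memory, and real memory beyond it. The selection logic in isolation (HEADER_SIZE is 4096 per the defines above; the 32 MiB HSA size comes from the comment, the code uses ZFCPDUMP_HSA_SIZE):

    enum zcore_region { REG_HEADER, REG_HSA, REG_REAL };

    static enum zcore_region region_for(unsigned long long pos)
    {
            const unsigned long long hdr = 4096;          /* HEADER_SIZE */
            const unsigned long long hsa = 32ULL << 20;   /* assumed HSA size */

            if (pos < hdr)
                    return REG_HEADER;
            if (pos < hdr + hsa)
                    return REG_HSA;
            return REG_REAL;
    }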
433 | static int zcore_open(struct inode *inode, struct file *filp) | ||
434 | { | ||
435 | if (!hsa_available) | ||
436 | return -ENODATA; | ||
437 | else | ||
438 | return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; | ||
439 | } | ||
440 | |||
441 | static int zcore_release(struct inode *inode, struct file *filep) | ||
442 | { | ||
443 | diag308(DIAG308_REL_HSA, NULL); | ||
444 | hsa_available = 0; | ||
445 | return 0; | ||
446 | } | ||
447 | |||
448 | static loff_t zcore_lseek(struct file *file, loff_t offset, int orig) | ||
449 | { | ||
450 | loff_t rc; | ||
451 | |||
452 | mutex_lock(&zcore_mutex); | ||
453 | switch (orig) { | ||
454 | case 0: | ||
455 | file->f_pos = offset; | ||
456 | rc = file->f_pos; | ||
457 | break; | ||
458 | case 1: | ||
459 | file->f_pos += offset; | ||
460 | rc = file->f_pos; | ||
461 | break; | ||
462 | default: | ||
463 | rc = -EINVAL; | ||
464 | } | ||
465 | mutex_unlock(&zcore_mutex); | ||
466 | return rc; | ||
467 | } | ||
468 | |||
469 | static struct file_operations zcore_fops = { | ||
470 | .owner = THIS_MODULE, | ||
471 | .llseek = zcore_lseek, | ||
472 | .read = zcore_read, | ||
473 | .open = zcore_open, | ||
474 | .release = zcore_release, | ||
475 | }; | ||
476 | |||
477 | |||
478 | static void __init set_s390_lc_mask(union save_area *map) | ||
479 | { | ||
480 | memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save)); | ||
481 | memset(&map->s390.timer, 0xff, sizeof(map->s390.timer)); | ||
482 | memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp)); | ||
483 | memset(&map->s390.psw, 0xff, sizeof(map->s390.psw)); | ||
484 | memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg)); | ||
485 | memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs)); | ||
486 | memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs)); | ||
487 | memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs)); | ||
488 | memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs)); | ||
489 | } | ||
490 | |||
491 | static void __init set_s390x_lc_mask(union save_area *map) | ||
492 | { | ||
493 | memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs)); | ||
494 | memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs)); | ||
495 | memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw)); | ||
496 | memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg)); | ||
497 | memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg)); | ||
498 | memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg)); | ||
499 | memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer)); | ||
500 | memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp)); | ||
501 | memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs)); | ||
502 | memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs)); | ||
503 | } | ||
504 | |||
505 | /* | ||
506 | * Initialize dump globals for a given architecture | ||
507 | */ | ||
508 | static int __init sys_info_init(enum arch_id arch) | ||
509 | { | ||
510 | switch (arch) { | ||
511 | case ARCH_S390X: | ||
512 | MSG("DETECTED 'S390X (64 bit) OS'\n"); | ||
513 | sys_info.sa_base = SAVE_AREA_BASE_S390X; | ||
514 | sys_info.sa_size = sizeof(struct save_area_s390x); | ||
515 | set_s390x_lc_mask(&sys_info.lc_mask); | ||
516 | break; | ||
517 | case ARCH_S390: | ||
518 | MSG("DETECTED 'S390 (32 bit) OS'\n"); | ||
519 | sys_info.sa_base = SAVE_AREA_BASE_S390; | ||
520 | sys_info.sa_size = sizeof(struct save_area_s390); | ||
521 | set_s390_lc_mask(&sys_info.lc_mask); | ||
522 | break; | ||
523 | default: | ||
524 | ERROR_MSG("unknown architecture 0x%x.\n",arch); | ||
525 | return -EINVAL; | ||
526 | } | ||
527 | sys_info.arch = arch; | ||
528 | if (init_cpu_info(arch)) { | ||
529 | ERROR_MSG("get cpu info failed\n"); | ||
530 | return -ENOMEM; | ||
531 | } | ||
532 | sys_info.mem_size = real_memory_size; | ||
533 | |||
534 | return 0; | ||
535 | } | ||
536 | |||
537 | static int __init check_sdias(void) | ||
538 | { | ||
539 | int rc, act_hsa_size; | ||
540 | |||
541 | rc = sclp_sdias_blk_count(); | ||
542 | if (rc < 0) { | ||
543 | ERROR_MSG("Could not determine HSA size\n"); | ||
544 | return rc; | ||
545 | } | ||
546 | act_hsa_size = (rc - 1) * PAGE_SIZE; | ||
547 | if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { | ||
548 | ERROR_MSG("HSA size too small: %i\n", act_hsa_size); | ||
549 | return -EINVAL; | ||
550 | } | ||
551 | return 0; | ||
552 | } | ||
553 | |||
554 | static void __init zcore_header_init(int arch, struct zcore_header *hdr) | ||
555 | { | ||
556 | if (arch == ARCH_S390X) | ||
557 | hdr->arch_id = DUMP_ARCH_S390X; | ||
558 | else | ||
559 | hdr->arch_id = DUMP_ARCH_S390; | ||
560 | hdr->mem_size = sys_info.mem_size; | ||
561 | hdr->mem_end = sys_info.mem_size; | ||
562 | hdr->num_pages = sys_info.mem_size / PAGE_SIZE; | ||
563 | hdr->tod = get_clock(); | ||
564 | get_cpu_id(&hdr->cpu_id); | ||
565 | } | ||
566 | |||
567 | extern int sdias_init(void); | ||
568 | |||
569 | static int __init zcore_init(void) | ||
570 | { | ||
571 | unsigned char arch; | ||
572 | int rc; | ||
573 | |||
574 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | ||
575 | return -ENODATA; | ||
576 | |||
577 | zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); | ||
578 | debug_register_view(zcore_dbf, &debug_sprintf_view); | ||
579 | debug_set_level(zcore_dbf, 6); | ||
580 | |||
581 | TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno); | ||
582 | TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn); | ||
583 | TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun); | ||
584 | |||
585 | rc = sdias_init(); | ||
586 | if (rc) | ||
587 | goto fail; | ||
588 | |||
589 | rc = check_sdias(); | ||
590 | if (rc) { | ||
591 | ERROR_MSG("Dump initialization failed\n"); | ||
592 | goto fail; | ||
593 | } | ||
594 | |||
595 | rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); | ||
596 | if (rc) { | ||
597 | ERROR_MSG("sdial memcpy for arch id failed\n"); | ||
598 | goto fail; | ||
599 | } | ||
600 | |||
601 | #ifndef __s390x__ | ||
602 | if (arch == ARCH_S390X) { | ||
603 | ERROR_MSG("32 bit dumper can't dump 64 bit system!\n"); | ||
604 | rc = -EINVAL; | ||
605 | goto fail; | ||
606 | } | ||
607 | #endif | ||
608 | |||
609 | rc = sys_info_init(arch); | ||
610 | if (rc) { | ||
611 | ERROR_MSG("arch init failed\n"); | ||
612 | goto fail; | ||
613 | } | ||
614 | |||
615 | zcore_header_init(arch, &zcore_header); | ||
616 | |||
617 | zcore_dir = debugfs_create_dir("zcore" , NULL); | ||
618 | if (!zcore_dir) { | ||
619 | rc = -ENOMEM; | ||
620 | goto fail; | ||
621 | } | ||
622 | zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL, | ||
623 | &zcore_fops); | ||
624 | if (!zcore_file) { | ||
625 | debugfs_remove(zcore_dir); | ||
626 | rc = -ENOMEM; | ||
627 | goto fail; | ||
628 | } | ||
629 | hsa_available = 1; | ||
630 | return 0; | ||
631 | |||
632 | fail: | ||
633 | diag308(DIAG308_REL_HSA, NULL); | ||
634 | return rc; | ||
635 | } | ||
636 | |||
637 | extern void sdias_exit(void); | ||
638 | |||
639 | static void __exit zcore_exit(void) | ||
640 | { | ||
641 | debug_unregister(zcore_dbf); | ||
642 | sdias_exit(); | ||
643 | diag308(DIAG308_REL_HSA, NULL); | ||
644 | } | ||
645 | |||
646 | MODULE_AUTHOR("Copyright IBM Corp. 2003,2007"); | ||
647 | MODULE_DESCRIPTION("zcore module for zfcpdump support"); | ||
648 | MODULE_LICENSE("GPL"); | ||
649 | |||
650 | subsys_initcall(zcore_init); | ||
651 | module_exit(zcore_exit); | ||
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index c490c2a1c2fc..cfaf77b320f5 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the S/390 common i/o drivers | 2 | # Makefile for the S/390 common i/o drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o | 5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o |
6 | ccw_device-objs += device.o device_fsm.o device_ops.o | 6 | ccw_device-objs += device.o device_fsm.o device_ops.o |
7 | ccw_device-objs += device_id.o device_pgid.o device_status.o | 7 | ccw_device-objs += device_id.o device_pgid.o device_status.o |
8 | obj-y += ccw_device.o cmf.o | 8 | obj-y += ccw_device.o cmf.o |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 5aeb68e732b0..e5ccda63e883 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev) | |||
75 | { | 75 | { |
76 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); | 76 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); |
77 | 77 | ||
78 | mutex_lock(&gdev->reg_mutex); | ||
78 | __ccwgroup_remove_symlinks(gdev); | 79 | __ccwgroup_remove_symlinks(gdev); |
79 | device_unregister(dev); | 80 | device_unregister(dev); |
81 | mutex_unlock(&gdev->reg_mutex); | ||
80 | } | 82 | } |
81 | 83 | ||
82 | static ssize_t | 84 | static ssize_t |
@@ -173,7 +175,8 @@ ccwgroup_create(struct device *root, | |||
173 | return -ENOMEM; | 175 | return -ENOMEM; |
174 | 176 | ||
175 | atomic_set(&gdev->onoff, 0); | 177 | atomic_set(&gdev->onoff, 0); |
176 | 178 | mutex_init(&gdev->reg_mutex); | |
179 | mutex_lock(&gdev->reg_mutex); | ||
177 | for (i = 0; i < argc; i++) { | 180 | for (i = 0; i < argc; i++) { |
178 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); | 181 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); |
179 | 182 | ||
@@ -183,12 +186,12 @@ ccwgroup_create(struct device *root, | |||
183 | || gdev->cdev[i]->id.driver_info != | 186 | || gdev->cdev[i]->id.driver_info != |
184 | gdev->cdev[0]->id.driver_info) { | 187 | gdev->cdev[0]->id.driver_info) { |
185 | rc = -EINVAL; | 188 | rc = -EINVAL; |
186 | goto free_dev; | 189 | goto error; |
187 | } | 190 | } |
188 | /* Don't allow a device to belong to more than one group. */ | 191 | /* Don't allow a device to belong to more than one group. */ |
189 | if (gdev->cdev[i]->dev.driver_data) { | 192 | if (gdev->cdev[i]->dev.driver_data) { |
190 | rc = -EINVAL; | 193 | rc = -EINVAL; |
191 | goto free_dev; | 194 | goto error; |
192 | } | 195 | } |
193 | gdev->cdev[i]->dev.driver_data = gdev; | 196 | gdev->cdev[i]->dev.driver_data = gdev; |
194 | } | 197 | } |
@@ -203,9 +206,8 @@ ccwgroup_create(struct device *root, | |||
203 | gdev->cdev[0]->dev.bus_id); | 206 | gdev->cdev[0]->dev.bus_id); |
204 | 207 | ||
205 | rc = device_register(&gdev->dev); | 208 | rc = device_register(&gdev->dev); |
206 | |||
207 | if (rc) | 209 | if (rc) |
208 | goto free_dev; | 210 | goto error; |
209 | get_device(&gdev->dev); | 211 | get_device(&gdev->dev); |
210 | rc = device_create_file(&gdev->dev, &dev_attr_ungroup); | 212 | rc = device_create_file(&gdev->dev, &dev_attr_ungroup); |
211 | 213 | ||
@@ -216,6 +218,7 @@ ccwgroup_create(struct device *root, | |||
216 | 218 | ||
217 | rc = __ccwgroup_create_symlinks(gdev); | 219 | rc = __ccwgroup_create_symlinks(gdev); |
218 | if (!rc) { | 220 | if (!rc) { |
221 | mutex_unlock(&gdev->reg_mutex); | ||
219 | put_device(&gdev->dev); | 222 | put_device(&gdev->dev); |
220 | return 0; | 223 | return 0; |
221 | } | 224 | } |
@@ -224,19 +227,12 @@ ccwgroup_create(struct device *root, | |||
224 | error: | 227 | error: |
225 | for (i = 0; i < argc; i++) | 228 | for (i = 0; i < argc; i++) |
226 | if (gdev->cdev[i]) { | 229 | if (gdev->cdev[i]) { |
227 | put_device(&gdev->cdev[i]->dev); | ||
228 | gdev->cdev[i]->dev.driver_data = NULL; | ||
229 | } | ||
230 | put_device(&gdev->dev); | ||
231 | return rc; | ||
232 | free_dev: | ||
233 | for (i = 0; i < argc; i++) | ||
234 | if (gdev->cdev[i]) { | ||
235 | if (gdev->cdev[i]->dev.driver_data == gdev) | 230 | if (gdev->cdev[i]->dev.driver_data == gdev) |
236 | gdev->cdev[i]->dev.driver_data = NULL; | 231 | gdev->cdev[i]->dev.driver_data = NULL; |
237 | put_device(&gdev->cdev[i]->dev); | 232 | put_device(&gdev->cdev[i]->dev); |
238 | } | 233 | } |
239 | kfree(gdev); | 234 | mutex_unlock(&gdev->reg_mutex); |
235 | put_device(&gdev->dev); | ||
240 | return rc; | 236 | return rc; |
241 | } | 237 | } |
242 | 238 | ||
@@ -422,8 +418,12 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver) | |||
422 | get_driver(&cdriver->driver); | 418 | get_driver(&cdriver->driver); |
423 | while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, | 419 | while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, |
424 | __ccwgroup_match_all))) { | 420 | __ccwgroup_match_all))) { |
425 | __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); | 421 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); |
422 | |||
423 | mutex_lock(&gdev->reg_mutex); | ||
424 | __ccwgroup_remove_symlinks(gdev); | ||
426 | device_unregister(dev); | 425 | device_unregister(dev); |
426 | mutex_unlock(&gdev->reg_mutex); | ||
427 | put_device(dev); | 427 | put_device(dev); |
428 | } | 428 | } |
429 | put_driver(&cdriver->driver); | 429 | put_driver(&cdriver->driver); |
@@ -444,8 +444,10 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) | |||
444 | if (cdev->dev.driver_data) { | 444 | if (cdev->dev.driver_data) { |
445 | gdev = (struct ccwgroup_device *)cdev->dev.driver_data; | 445 | gdev = (struct ccwgroup_device *)cdev->dev.driver_data; |
446 | if (get_device(&gdev->dev)) { | 446 | if (get_device(&gdev->dev)) { |
447 | mutex_lock(&gdev->reg_mutex); | ||
447 | if (device_is_registered(&gdev->dev)) | 448 | if (device_is_registered(&gdev->dev)) |
448 | return gdev; | 449 | return gdev; |
450 | mutex_unlock(&gdev->reg_mutex); | ||
449 | put_device(&gdev->dev); | 451 | put_device(&gdev->dev); |
450 | } | 452 | } |
451 | return NULL; | 453 | return NULL; |
@@ -465,6 +467,7 @@ ccwgroup_remove_ccwdev(struct ccw_device *cdev) | |||
465 | if (gdev) { | 467 | if (gdev) { |
466 | __ccwgroup_remove_symlinks(gdev); | 468 | __ccwgroup_remove_symlinks(gdev); |
467 | device_unregister(&gdev->dev); | 469 | device_unregister(&gdev->dev); |
470 | mutex_unlock(&gdev->reg_mutex); | ||
468 | put_device(&gdev->dev); | 471 | put_device(&gdev->dev); |
469 | } | 472 | } |
470 | } | 473 | } |
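The new reg_mutex closes a race between registration, ungrouping and driver unregistration: symlink removal and device_unregister() now always run under the same lock. Note the handoff in __ccwgroup_get_gdev_by_cdev(): on success it returns with reg_mutex held, and ccwgroup_remove_ccwdev() drops it after unregistering. The handoff in isolation (hypothetical names):

    struct obj {
            struct mutex reg_mutex;
            int registered;
    };

    /* Returns the object with reg_mutex held, or NULL with it released,
     * so the caller's unregister step cannot race a concurrent ungroup. */
    static struct obj *lookup_locked(struct obj *o)
    {
            mutex_lock(&o->reg_mutex);
            if (o->registered)
                    return o;       /* caller unlocks after unregister */
            mutex_unlock(&o->reg_mutex);
            return NULL;
    }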
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c new file mode 100644 index 000000000000..ac289e6eadfe --- /dev/null +++ b/drivers/s390/cio/chp.c | |||
@@ -0,0 +1,683 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/chp.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 1999,2007 | ||
5 | * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) | ||
6 | * Arnd Bergmann (arndb@de.ibm.com) | ||
7 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/bug.h> | ||
11 | #include <linux/workqueue.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/jiffies.h> | ||
15 | #include <linux/wait.h> | ||
16 | #include <linux/mutex.h> | ||
17 | #include <asm/errno.h> | ||
18 | #include <asm/chpid.h> | ||
19 | #include <asm/sclp.h> | ||
20 | |||
21 | #include "cio.h" | ||
22 | #include "css.h" | ||
23 | #include "ioasm.h" | ||
24 | #include "cio_debug.h" | ||
25 | #include "chp.h" | ||
26 | |||
27 | #define to_channelpath(device) container_of(device, struct channel_path, dev) | ||
28 | #define CHP_INFO_UPDATE_INTERVAL 1*HZ | ||
29 | |||
30 | enum cfg_task_t { | ||
31 | cfg_none, | ||
32 | cfg_configure, | ||
33 | cfg_deconfigure | ||
34 | }; | ||
35 | |||
36 | /* Map for pending configure tasks. */ | ||
37 | static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1]; | ||
38 | static DEFINE_MUTEX(cfg_lock); | ||
39 | static int cfg_busy; | ||
40 | |||
41 | /* Map for channel-path status. */ | ||
42 | static struct sclp_chp_info chp_info; | ||
43 | static DEFINE_MUTEX(info_lock); | ||
44 | |||
45 | /* Time after which channel-path status may be outdated. */ | ||
46 | static unsigned long chp_info_expires; | ||
47 | |||
48 | /* Workqueue to perform pending configure tasks. */ | ||
49 | static struct workqueue_struct *chp_wq; | ||
50 | static struct work_struct cfg_work; | ||
51 | |||
52 | /* Wait queue for configure completion events. */ | ||
53 | static wait_queue_head_t cfg_wait_queue; | ||
54 | |||
55 | /* Return channel_path struct for given chpid. */ | ||
56 | static inline struct channel_path *chpid_to_chp(struct chp_id chpid) | ||
57 | { | ||
58 | return css[chpid.cssid]->chps[chpid.id]; | ||
59 | } | ||
60 | |||
61 | /* Set vary state for given chpid. */ | ||
62 | static void set_chp_logically_online(struct chp_id chpid, int onoff) | ||
63 | { | ||
64 | chpid_to_chp(chpid)->state = onoff; | ||
65 | } | ||
66 | |||
67 | /* On success return 0 if channel-path is varied offline, 1 if it is varied | ||
68 | * online. Return -ENODEV if channel-path is not registered. */ | ||
69 | int chp_get_status(struct chp_id chpid) | ||
70 | { | ||
71 | return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV); | ||
72 | } | ||
73 | |||
74 | /** | ||
75 | * chp_get_sch_opm - return opm for subchannel | ||
76 | * @sch: subchannel | ||
77 | * | ||
78 | * Calculate and return the operational path mask (opm) based on the chpids | ||
79 | * used by the subchannel and the status of the associated channel-paths. | ||
80 | */ | ||
81 | u8 chp_get_sch_opm(struct subchannel *sch) | ||
82 | { | ||
83 | struct chp_id chpid; | ||
84 | int opm; | ||
85 | int i; | ||
86 | |||
87 | opm = 0; | ||
88 | chp_id_init(&chpid); | ||
89 | for (i=0; i < 8; i++) { | ||
90 | opm <<= 1; | ||
91 | chpid.id = sch->schib.pmcw.chpid[i]; | ||
92 | if (chp_get_status(chpid) != 0) | ||
93 | opm |= 1; | ||
94 | } | ||
95 | return opm; | ||
96 | } | ||
97 | |||
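A worked example of the mask construction: the loop shifts opm left once per path and then sets bit 0, so schib.pmcw.chpid[i] ends up at bit (0x80 >> i). With paths 0 and 2 online and the rest offline the result is:

    /*
     * i = 0: opm = 0x01      (online)
     * i = 1: opm = 0x02      (offline)
     * i = 2: opm = 0x05      (online)
     * i = 3..7: five more shifts, all offline
     * opm = 0x05 << 5 = 0xA0, i.e. bits 0x80 and 0x20 set
     */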
98 | /** | ||
99 | * chp_is_registered - check if a channel-path is registered | ||
100 | * @chpid: channel-path ID | ||
101 | * | ||
102 | * Return non-zero if a channel-path with the given chpid is registered, | ||
103 | * zero otherwise. | ||
104 | */ | ||
105 | int chp_is_registered(struct chp_id chpid) | ||
106 | { | ||
107 | return chpid_to_chp(chpid) != NULL; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * Function: s390_vary_chpid | ||
112 | * Varies the specified chpid online or offline | ||
113 | */ | ||
114 | static int s390_vary_chpid(struct chp_id chpid, int on) | ||
115 | { | ||
116 | char dbf_text[15]; | ||
117 | int status; | ||
118 | |||
119 | sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid, | ||
120 | chpid.id); | ||
121 | CIO_TRACE_EVENT( 2, dbf_text); | ||
122 | |||
123 | status = chp_get_status(chpid); | ||
124 | if (status < 0) { | ||
125 | printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n", | ||
126 | chpid.cssid, chpid.id); | ||
127 | return -EINVAL; | ||
128 | } | ||
129 | |||
130 | if (!on && !status) { | ||
131 | printk(KERN_ERR "chpid %x.%02x is already offline\n", | ||
132 | chpid.cssid, chpid.id); | ||
133 | return -EINVAL; | ||
134 | } | ||
135 | |||
136 | set_chp_logically_online(chpid, on); | ||
137 | chsc_chp_vary(chpid, on); | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * Channel measurement related functions | ||
143 | */ | ||
144 | static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf, | ||
145 | loff_t off, size_t count) | ||
146 | { | ||
147 | struct channel_path *chp; | ||
148 | unsigned int size; | ||
149 | |||
150 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
151 | if (!chp->cmg_chars) | ||
152 | return 0; | ||
153 | |||
154 | size = sizeof(struct cmg_chars); | ||
155 | |||
156 | if (off > size) | ||
157 | return 0; | ||
158 | if (off + count > size) | ||
159 | count = size - off; | ||
160 | memcpy(buf, chp->cmg_chars + off, count); | ||
161 | return count; | ||
162 | } | ||
163 | |||
164 | static struct bin_attribute chp_measurement_chars_attr = { | ||
165 | .attr = { | ||
166 | .name = "measurement_chars", | ||
167 | .mode = S_IRUSR, | ||
168 | .owner = THIS_MODULE, | ||
169 | }, | ||
170 | .size = sizeof(struct cmg_chars), | ||
171 | .read = chp_measurement_chars_read, | ||
172 | }; | ||
173 | |||
174 | static void chp_measurement_copy_block(struct cmg_entry *buf, | ||
175 | struct channel_subsystem *css, | ||
176 | struct chp_id chpid) | ||
177 | { | ||
178 | void *area; | ||
179 | struct cmg_entry *entry, reference_buf; | ||
180 | int idx; | ||
181 | |||
182 | if (chpid.id < 128) { | ||
183 | area = css->cub_addr1; | ||
184 | idx = chpid.id; | ||
185 | } else { | ||
186 | area = css->cub_addr2; | ||
187 | idx = chpid.id - 128; | ||
188 | } | ||
189 | entry = area + (idx * sizeof(struct cmg_entry)); | ||
190 | do { | ||
191 | memcpy(buf, entry, sizeof(*entry)); | ||
192 | memcpy(&reference_buf, entry, sizeof(*entry)); | ||
193 | } while (reference_buf.values[0] != buf->values[0]); | ||
194 | } | ||
195 | |||
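The do/while above is a lock-free consistent read: the measurement block is updated by the channel subsystem concurrently, so the entry is copied twice and retried until both snapshots agree on values[0]. The retry-until-stable idiom in isolation (hypothetical names; this assumes, as the loop does, that comparing the first word is enough to detect a torn read):

    #include <string.h>

    struct sample { unsigned int values[8]; };

    static void stable_read(const struct sample *src, struct sample *out)
    {
            struct sample check;

            do {
                    memcpy(out, src, sizeof(*out));     /* first snapshot */
                    memcpy(&check, src, sizeof(check)); /* second snapshot */
            } while (check.values[0] != out->values[0]);
    }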
196 | static ssize_t chp_measurement_read(struct kobject *kobj, char *buf, | ||
197 | loff_t off, size_t count) | ||
198 | { | ||
199 | struct channel_path *chp; | ||
200 | struct channel_subsystem *css; | ||
201 | unsigned int size; | ||
202 | |||
203 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
204 | css = to_css(chp->dev.parent); | ||
205 | |||
206 | size = sizeof(struct cmg_entry); | ||
207 | |||
208 | /* Only allow single reads. */ | ||
209 | if (off || count < size) | ||
210 | return 0; | ||
211 | chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid); | ||
212 | count = size; | ||
213 | return count; | ||
214 | } | ||
215 | |||
216 | static struct bin_attribute chp_measurement_attr = { | ||
217 | .attr = { | ||
218 | .name = "measurement", | ||
219 | .mode = S_IRUSR, | ||
220 | .owner = THIS_MODULE, | ||
221 | }, | ||
222 | .size = sizeof(struct cmg_entry), | ||
223 | .read = chp_measurement_read, | ||
224 | }; | ||
225 | |||
226 | void chp_remove_cmg_attr(struct channel_path *chp) | ||
227 | { | ||
228 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
229 | device_remove_bin_file(&chp->dev, &chp_measurement_attr); | ||
230 | } | ||
231 | |||
232 | int chp_add_cmg_attr(struct channel_path *chp) | ||
233 | { | ||
234 | int ret; | ||
235 | |||
236 | ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
237 | if (ret) | ||
238 | return ret; | ||
239 | ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); | ||
240 | if (ret) | ||
241 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
242 | return ret; | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Files for the channel path entries. | ||
247 | */ | ||
248 | static ssize_t chp_status_show(struct device *dev, | ||
249 | struct device_attribute *attr, char *buf) | ||
250 | { | ||
251 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
252 | |||
253 | if (!chp) | ||
254 | return 0; | ||
255 | return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") : | ||
256 | sprintf(buf, "offline\n")); | ||
257 | } | ||
258 | |||
259 | static ssize_t chp_status_write(struct device *dev, | ||
260 | struct device_attribute *attr, | ||
261 | const char *buf, size_t count) | ||
262 | { | ||
263 | struct channel_path *cp = container_of(dev, struct channel_path, dev); | ||
264 | char cmd[10]; | ||
265 | int num_args; | ||
266 | int error; | ||
267 | |||
268 | num_args = sscanf(buf, "%5s", cmd); | ||
269 | if (!num_args) | ||
270 | return count; | ||
271 | |||
272 | if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) | ||
273 | error = s390_vary_chpid(cp->chpid, 1); | ||
274 | else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) | ||
275 | error = s390_vary_chpid(cp->chpid, 0); | ||
276 | else | ||
277 | error = -EINVAL; | ||
278 | |||
279 | return error < 0 ? error : count; | ||
280 | |||
281 | } | ||
282 | |||
283 | static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); | ||
284 | |||
285 | static ssize_t chp_configure_show(struct device *dev, | ||
286 | struct device_attribute *attr, char *buf) | ||
287 | { | ||
288 | struct channel_path *cp; | ||
289 | int status; | ||
290 | |||
291 | cp = container_of(dev, struct channel_path, dev); | ||
292 | status = chp_info_get_status(cp->chpid); | ||
293 | if (status < 0) | ||
294 | return status; | ||
295 | |||
296 | return snprintf(buf, PAGE_SIZE, "%d\n", status); | ||
297 | } | ||
298 | |||
299 | static int cfg_wait_idle(void); | ||
300 | |||
301 | static ssize_t chp_configure_write(struct device *dev, | ||
302 | struct device_attribute *attr, | ||
303 | const char *buf, size_t count) | ||
304 | { | ||
305 | struct channel_path *cp; | ||
306 | int val; | ||
307 | char delim; | ||
308 | |||
309 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | ||
310 | return -EINVAL; | ||
311 | if (val != 0 && val != 1) | ||
312 | return -EINVAL; | ||
313 | cp = container_of(dev, struct channel_path, dev); | ||
314 | chp_cfg_schedule(cp->chpid, val); | ||
315 | cfg_wait_idle(); | ||
316 | |||
317 | return count; | ||
318 | } | ||
319 | |||
320 | static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write); | ||
321 | |||
322 | static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr, | ||
323 | char *buf) | ||
324 | { | ||
325 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
326 | |||
327 | if (!chp) | ||
328 | return 0; | ||
329 | return sprintf(buf, "%x\n", chp->desc.desc); | ||
330 | } | ||
331 | |||
332 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); | ||
333 | |||
334 | static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr, | ||
335 | char *buf) | ||
336 | { | ||
337 | struct channel_path *chp = to_channelpath(dev); | ||
338 | |||
339 | if (!chp) | ||
340 | return 0; | ||
341 | if (chp->cmg == -1) /* channel measurements not available */ | ||
342 | return sprintf(buf, "unknown\n"); | ||
343 | return sprintf(buf, "%x\n", chp->cmg); | ||
344 | } | ||
345 | |||
346 | static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); | ||
347 | |||
348 | static ssize_t chp_shared_show(struct device *dev, | ||
349 | struct device_attribute *attr, char *buf) | ||
350 | { | ||
351 | struct channel_path *chp = to_channelpath(dev); | ||
352 | |||
353 | if (!chp) | ||
354 | return 0; | ||
355 | if (chp->shared == -1) /* channel measurements not available */ | ||
356 | return sprintf(buf, "unknown\n"); | ||
357 | return sprintf(buf, "%x\n", chp->shared); | ||
358 | } | ||
359 | |||
360 | static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); | ||
361 | |||
362 | static struct attribute * chp_attrs[] = { | ||
363 | &dev_attr_status.attr, | ||
364 | &dev_attr_configure.attr, | ||
365 | &dev_attr_type.attr, | ||
366 | &dev_attr_cmg.attr, | ||
367 | &dev_attr_shared.attr, | ||
368 | NULL, | ||
369 | }; | ||
370 | |||
371 | static struct attribute_group chp_attr_group = { | ||
372 | .attrs = chp_attrs, | ||
373 | }; | ||
374 | |||
375 | static void chp_release(struct device *dev) | ||
376 | { | ||
377 | struct channel_path *cp; | ||
378 | |||
379 | cp = container_of(dev, struct channel_path, dev); | ||
380 | kfree(cp); | ||
381 | } | ||
382 | |||
383 | /** | ||
384 | * chp_new - register a new channel-path | ||
385 | * @chpid - channel-path ID | ||
386 | * | ||
387 | * Create and register data structure representing new channel-path. Return | ||
388 | * zero on success, non-zero otherwise. | ||
389 | */ | ||
390 | int chp_new(struct chp_id chpid) | ||
391 | { | ||
392 | struct channel_path *chp; | ||
393 | int ret; | ||
394 | |||
395 | if (chp_is_registered(chpid)) | ||
396 | return 0; | ||
397 | chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); | ||
398 | if (!chp) | ||
399 | return -ENOMEM; | ||
400 | |||
401 | /* fill in status, etc. */ | ||
402 | chp->chpid = chpid; | ||
403 | chp->state = 1; | ||
404 | chp->dev.parent = &css[chpid.cssid]->device; | ||
405 | chp->dev.release = chp_release; | ||
406 | snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid, | ||
407 | chpid.id); | ||
408 | |||
409 | /* Obtain channel path description and fill it in. */ | ||
410 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); | ||
411 | if (ret) | ||
412 | goto out_free; | ||
413 | if ((chp->desc.flags & 0x80) == 0) { | ||
414 | ret = -ENODEV; | ||
415 | goto out_free; | ||
416 | } | ||
417 | /* Get channel-measurement characteristics. */ | ||
418 | if (css_characteristics_avail && css_chsc_characteristics.scmc | ||
419 | && css_chsc_characteristics.secm) { | ||
420 | ret = chsc_get_channel_measurement_chars(chp); | ||
421 | if (ret) | ||
422 | goto out_free; | ||
423 | } else { | ||
424 | static int msg_done; | ||
425 | |||
426 | if (!msg_done) { | ||
427 | printk(KERN_WARNING "cio: Channel measurements not " | ||
428 | "available, continuing.\n"); | ||
429 | msg_done = 1; | ||
430 | } | ||
431 | chp->cmg = -1; | ||
432 | } | ||
433 | |||
434 | /* make it known to the system */ | ||
435 | ret = device_register(&chp->dev); | ||
436 | if (ret) { | ||
437 | printk(KERN_WARNING "%s: could not register %x.%02x\n", | ||
438 | __func__, chpid.cssid, chpid.id); | ||
439 | goto out_free; | ||
440 | } | ||
441 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); | ||
442 | if (ret) { | ||
443 | device_unregister(&chp->dev); | ||
444 | goto out_free; | ||
445 | } | ||
446 | mutex_lock(&css[chpid.cssid]->mutex); | ||
447 | if (css[chpid.cssid]->cm_enabled) { | ||
448 | ret = chp_add_cmg_attr(chp); | ||
449 | if (ret) { | ||
450 | sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); | ||
451 | device_unregister(&chp->dev); | ||
452 | mutex_unlock(&css[chpid.cssid]->mutex); | ||
453 | goto out_free; | ||
454 | } | ||
455 | } | ||
456 | css[chpid.cssid]->chps[chpid.id] = chp; | ||
457 | mutex_unlock(&css[chpid.cssid]->mutex); | ||
458 | return ret; | ||
459 | out_free: | ||
460 | kfree(chp); | ||
461 | return ret; | ||
462 | } | ||
463 | |||
464 | /** | ||
465 | * chp_get_chp_desc - return newly allocated channel-path description | ||
466 | * @chpid: channel-path ID | ||
467 | * | ||
468 | * On success return a newly allocated copy of the channel-path description | ||
469 | * data associated with the given channel-path ID. Return %NULL on error. | ||
470 | */ | ||
471 | void *chp_get_chp_desc(struct chp_id chpid) | ||
472 | { | ||
473 | struct channel_path *chp; | ||
474 | struct channel_path_desc *desc; | ||
475 | |||
476 | chp = chpid_to_chp(chpid); | ||
477 | if (!chp) | ||
478 | return NULL; | ||
479 | desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); | ||
480 | if (!desc) | ||
481 | return NULL; | ||
482 | memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); | ||
483 | return desc; | ||
484 | } | ||
485 | |||
486 | /** | ||
487 | * chp_process_crw - process channel-path status change | ||
488 | * @id: channel-path ID number | ||
489 | * @status: non-zero if channel-path has become available, zero otherwise | ||
490 | * | ||
491 | * Handle channel-report-words indicating that the status of a channel-path | ||
492 | * has changed. | ||
493 | */ | ||
494 | void chp_process_crw(int id, int status) | ||
495 | { | ||
496 | struct chp_id chpid; | ||
497 | |||
498 | chp_id_init(&chpid); | ||
499 | chpid.id = id; | ||
500 | if (status) { | ||
501 | if (!chp_is_registered(chpid)) | ||
502 | chp_new(chpid); | ||
503 | chsc_chp_online(chpid); | ||
504 | } else | ||
505 | chsc_chp_offline(chpid); | ||
506 | } | ||
507 | |||
508 | static inline int info_bit_num(struct chp_id id) | ||
509 | { | ||
510 | return id.id + id.cssid * (__MAX_CHPID + 1); | ||
511 | } | ||
512 | |||
513 | /* Force chp_info refresh on next call to info_validate(). */ | ||
514 | static void info_expire(void) | ||
515 | { | ||
516 | mutex_lock(&info_lock); | ||
517 | chp_info_expires = jiffies - 1; | ||
518 | mutex_unlock(&info_lock); | ||
519 | } | ||
520 | |||
521 | /* Ensure that chp_info is up-to-date. */ | ||
522 | static int info_update(void) | ||
523 | { | ||
524 | int rc; | ||
525 | |||
526 | mutex_lock(&info_lock); | ||
527 | rc = 0; | ||
528 | if (time_after(jiffies, chp_info_expires)) { | ||
529 | /* Data is too old, update. */ | ||
530 | rc = sclp_chp_read_info(&chp_info); | ||
531 | chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL; | ||
532 | } | ||
533 | mutex_unlock(&info_lock); | ||
534 | |||
535 | return rc; | ||
536 | } | ||
537 | |||
538 | /** | ||
539 | * chp_info_get_status - retrieve configure status of a channel-path | ||
540 | * @chpid: channel-path ID | ||
541 | * | ||
542 | * On success, return 0 for standby, 1 for configured, 2 for reserved, | ||
543 | * 3 for not recognized. Return negative error code on error. | ||
544 | */ | ||
545 | int chp_info_get_status(struct chp_id chpid) | ||
546 | { | ||
547 | int rc; | ||
548 | int bit; | ||
549 | |||
550 | rc = info_update(); | ||
551 | if (rc) | ||
552 | return rc; | ||
553 | |||
554 | bit = info_bit_num(chpid); | ||
555 | mutex_lock(&info_lock); | ||
556 | if (!chp_test_bit(chp_info.recognized, bit)) | ||
557 | rc = CHP_STATUS_NOT_RECOGNIZED; | ||
558 | else if (chp_test_bit(chp_info.configured, bit)) | ||
559 | rc = CHP_STATUS_CONFIGURED; | ||
560 | else if (chp_test_bit(chp_info.standby, bit)) | ||
561 | rc = CHP_STATUS_STANDBY; | ||
562 | else | ||
563 | rc = CHP_STATUS_RESERVED; | ||
564 | mutex_unlock(&info_lock); | ||
565 | |||
566 | return rc; | ||
567 | } | ||
568 | |||
569 | /* Return configure task for chpid. */ | ||
570 | static enum cfg_task_t cfg_get_task(struct chp_id chpid) | ||
571 | { | ||
572 | return chp_cfg_task[chpid.cssid][chpid.id]; | ||
573 | } | ||
574 | |||
575 | /* Set configure task for chpid. */ | ||
576 | static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg) | ||
577 | { | ||
578 | chp_cfg_task[chpid.cssid][chpid.id] = cfg; | ||
579 | } | ||
580 | |||
581 | /* Perform one configure/deconfigure request. Reschedule work function until | ||
582 | * last request. */ | ||
583 | static void cfg_func(struct work_struct *work) | ||
584 | { | ||
585 | struct chp_id chpid; | ||
586 | enum cfg_task_t t; | ||
587 | |||
588 | mutex_lock(&cfg_lock); | ||
589 | t = cfg_none; | ||
590 | chp_id_for_each(&chpid) { | ||
591 | t = cfg_get_task(chpid); | ||
592 | if (t != cfg_none) { | ||
593 | cfg_set_task(chpid, cfg_none); | ||
594 | break; | ||
595 | } | ||
596 | } | ||
597 | mutex_unlock(&cfg_lock); | ||
598 | |||
599 | switch (t) { | ||
600 | case cfg_configure: | ||
601 | sclp_chp_configure(chpid); | ||
602 | info_expire(); | ||
603 | chsc_chp_online(chpid); | ||
604 | break; | ||
605 | case cfg_deconfigure: | ||
606 | sclp_chp_deconfigure(chpid); | ||
607 | info_expire(); | ||
608 | chsc_chp_offline(chpid); | ||
609 | break; | ||
610 | case cfg_none: | ||
611 | /* Get updated information after last change. */ | ||
612 | info_update(); | ||
613 | mutex_lock(&cfg_lock); | ||
614 | cfg_busy = 0; | ||
615 | mutex_unlock(&cfg_lock); | ||
616 | wake_up_interruptible(&cfg_wait_queue); | ||
617 | return; | ||
618 | } | ||
619 | queue_work(chp_wq, &cfg_work); | ||
620 | } | ||
621 | |||
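cfg_func() drains exactly one pending task per invocation: it picks a chpid under cfg_lock, acts on it with the lock dropped, and requeues itself; only the empty case clears cfg_busy and wakes waiters, which doubles as the completion signal for cfg_wait_idle(). The shape of the pattern (take_one(), perform() and signal_idle() are hypothetical stand-ins):

    static struct workqueue_struct *wq;

    static void drain_one(struct work_struct *work)
    {
            int task = take_one();        /* one item, under the lock */

            if (task < 0) {
                    signal_idle();        /* nothing left: wake waiters */
                    return;
            }
            perform(task);                /* slow work, lock not held */
            queue_work(wq, work);         /* reschedule for the next item */
    }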
622 | /** | ||
623 | * chp_cfg_schedule - schedule chpid configuration request | ||
624 | * @chpid - channel-path ID | ||
625 | * @configure - Non-zero for configure, zero for deconfigure | ||
626 | * | ||
627 | * Schedule a channel-path configuration/deconfiguration request. | ||
628 | */ | ||
629 | void chp_cfg_schedule(struct chp_id chpid, int configure) | ||
630 | { | ||
631 | CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id, | ||
632 | configure); | ||
633 | mutex_lock(&cfg_lock); | ||
634 | cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure); | ||
635 | cfg_busy = 1; | ||
636 | mutex_unlock(&cfg_lock); | ||
637 | queue_work(chp_wq, &cfg_work); | ||
638 | } | ||
639 | |||
640 | /** | ||
641 | * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request | ||
642 | * @chpid - channel-path ID | ||
643 | * | ||
644 | * Cancel an active channel-path deconfiguration request if it has not yet | ||
645 | * been performed. | ||
646 | */ | ||
647 | void chp_cfg_cancel_deconfigure(struct chp_id chpid) | ||
648 | { | ||
649 | CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id); | ||
650 | mutex_lock(&cfg_lock); | ||
651 | if (cfg_get_task(chpid) == cfg_deconfigure) | ||
652 | cfg_set_task(chpid, cfg_none); | ||
653 | mutex_unlock(&cfg_lock); | ||
654 | } | ||
655 | |||
656 | static int cfg_wait_idle(void) | ||
657 | { | ||
658 | if (wait_event_interruptible(cfg_wait_queue, !cfg_busy)) | ||
659 | return -ERESTARTSYS; | ||
660 | return 0; | ||
661 | } | ||
662 | |||
663 | static int __init chp_init(void) | ||
664 | { | ||
665 | struct chp_id chpid; | ||
666 | |||
667 | chp_wq = create_singlethread_workqueue("cio_chp"); | ||
668 | if (!chp_wq) | ||
669 | return -ENOMEM; | ||
670 | INIT_WORK(&cfg_work, cfg_func); | ||
671 | init_waitqueue_head(&cfg_wait_queue); | ||
672 | if (info_update()) | ||
673 | return 0; | ||
674 | /* Register available channel-paths. */ | ||
675 | chp_id_for_each(&chpid) { | ||
676 | if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED) | ||
677 | chp_new(chpid); | ||
678 | } | ||
679 | |||
680 | return 0; | ||
681 | } | ||
682 | |||
683 | subsys_initcall(chp_init); | ||
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h new file mode 100644 index 000000000000..65286563c592 --- /dev/null +++ b/drivers/s390/cio/chp.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/chp.h | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef S390_CHP_H | ||
9 | #define S390_CHP_H S390_CHP_H | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <linux/device.h> | ||
13 | #include <asm/chpid.h> | ||
14 | #include "chsc.h" | ||
15 | |||
16 | #define CHP_STATUS_STANDBY 0 | ||
17 | #define CHP_STATUS_CONFIGURED 1 | ||
18 | #define CHP_STATUS_RESERVED 2 | ||
19 | #define CHP_STATUS_NOT_RECOGNIZED 3 | ||
20 | |||
21 | static inline int chp_test_bit(u8 *bitmap, int num) | ||
22 | { | ||
23 | int byte = num >> 3; | ||
24 | int mask = 128 >> (num & 7); | ||
25 | |||
26 | return (bitmap[byte] & mask) ? 1 : 0; | ||
27 | } | ||
28 | |||
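A worked example of the bit addressing: chp_test_bit(bitmap, 10) inspects bitmap[1] & 0x20, since 10 >> 3 = 1 and 128 >> (10 & 7) = 0x20. Bit numbering is therefore MSB-first within each byte (bit 0 is 0x80), matching the layout of the SCLP-provided bitmaps this helper is used on:

    /* chp_test_bit(bitmap, 10):
     *   byte = 10 >> 3         = 1
     *   mask = 128 >> (10 & 7) = 128 >> 2 = 0x20
     */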
29 | |||
30 | struct channel_path { | ||
31 | struct chp_id chpid; | ||
32 | int state; | ||
33 | struct channel_path_desc desc; | ||
34 | /* Channel-measurement related stuff: */ | ||
35 | int cmg; | ||
36 | int shared; | ||
37 | void *cmg_chars; | ||
38 | struct device dev; | ||
39 | }; | ||
40 | |||
41 | int chp_get_status(struct chp_id chpid); | ||
42 | u8 chp_get_sch_opm(struct subchannel *sch); | ||
43 | int chp_is_registered(struct chp_id chpid); | ||
44 | void *chp_get_chp_desc(struct chp_id chpid); | ||
45 | void chp_process_crw(int id, int available); | ||
46 | void chp_remove_cmg_attr(struct channel_path *chp); | ||
47 | int chp_add_cmg_attr(struct channel_path *chp); | ||
48 | int chp_new(struct chp_id chpid); | ||
49 | void chp_cfg_schedule(struct chp_id chpid, int configure); | ||
50 | void chp_cfg_cancel_deconfigure(struct chp_id chpid); | ||
51 | int chp_info_get_status(struct chp_id chpid); | ||
52 | |||
53 | #endif /* S390_CHP_H */ | ||
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 6f05a44e3817..ea92ac4d6577 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -15,202 +15,124 @@ | |||
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | 16 | ||
17 | #include <asm/cio.h> | 17 | #include <asm/cio.h> |
18 | #include <asm/chpid.h> | ||
18 | 19 | ||
19 | #include "css.h" | 20 | #include "css.h" |
20 | #include "cio.h" | 21 | #include "cio.h" |
21 | #include "cio_debug.h" | 22 | #include "cio_debug.h" |
22 | #include "ioasm.h" | 23 | #include "ioasm.h" |
24 | #include "chp.h" | ||
23 | #include "chsc.h" | 25 | #include "chsc.h" |
24 | 26 | ||
25 | static void *sei_page; | 27 | static void *sei_page; |
26 | 28 | ||
27 | static int new_channel_path(int chpid); | 29 | struct chsc_ssd_area { |
28 | 30 | struct chsc_header request; | |
29 | static inline void | 31 | u16 :10; |
30 | set_chp_logically_online(int chp, int onoff) | 32 | u16 ssid:2; |
31 | { | 33 | u16 :4; |
32 | css[0]->chps[chp]->state = onoff; | 34 | u16 f_sch; /* first subchannel */ |
33 | } | 35 | u16 :16; |
34 | 36 | u16 l_sch; /* last subchannel */ | |
35 | static int | 37 | u32 :32; |
36 | get_chp_status(int chp) | 38 | struct chsc_header response; |
37 | { | 39 | u32 :32; |
38 | return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV); | 40 | u8 sch_valid : 1; |
39 | } | 41 | u8 dev_valid : 1; |
40 | 42 | u8 st : 3; /* subchannel type */ | |
41 | void | 43 | u8 zeroes : 3; |
42 | chsc_validate_chpids(struct subchannel *sch) | 44 | u8 unit_addr; /* unit address */ |
43 | { | 45 | u16 devno; /* device number */ |
44 | int mask, chp; | 46 | u8 path_mask; |
45 | 47 | u8 fla_valid_mask; | |
46 | for (chp = 0; chp <= 7; chp++) { | 48 | u16 sch; /* subchannel */ |
47 | mask = 0x80 >> chp; | 49 | u8 chpid[8]; /* chpids 0-7 */ |
48 | if (!get_chp_status(sch->schib.pmcw.chpid[chp])) | 50 | u16 fla[8]; /* full link addresses 0-7 */ |
49 | /* disable using this path */ | 51 | } __attribute__ ((packed)); |
50 | sch->opm &= ~mask; | ||
51 | } | ||
52 | } | ||
53 | |||
54 | void | ||
55 | chpid_is_actually_online(int chp) | ||
56 | { | ||
57 | int state; | ||
58 | |||
59 | state = get_chp_status(chp); | ||
60 | if (state < 0) { | ||
61 | need_rescan = 1; | ||
62 | queue_work(slow_path_wq, &slow_path_work); | ||
63 | } else | ||
64 | WARN_ON(!state); | ||
65 | } | ||
66 | 52 | ||
67 | /* FIXME: this is _always_ called for every subchannel. shouldn't we | 53 | int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) |
68 | * process more than one at a time? */ | ||
69 | static int | ||
70 | chsc_get_sch_desc_irq(struct subchannel *sch, void *page) | ||
71 | { | 54 | { |
72 | int ccode, j; | 55 | unsigned long page; |
73 | 56 | struct chsc_ssd_area *ssd_area; | |
74 | struct { | 57 | int ccode; |
75 | struct chsc_header request; | 58 | int ret; |
76 | u16 reserved1a:10; | 59 | int i; |
77 | u16 ssid:2; | 60 | int mask; |
78 | u16 reserved1b:4; | ||
79 | u16 f_sch; /* first subchannel */ | ||
80 | u16 reserved2; | ||
81 | u16 l_sch; /* last subchannel */ | ||
82 | u32 reserved3; | ||
83 | struct chsc_header response; | ||
84 | u32 reserved4; | ||
85 | u8 sch_valid : 1; | ||
86 | u8 dev_valid : 1; | ||
87 | u8 st : 3; /* subchannel type */ | ||
88 | u8 zeroes : 3; | ||
89 | u8 unit_addr; /* unit address */ | ||
90 | u16 devno; /* device number */ | ||
91 | u8 path_mask; | ||
92 | u8 fla_valid_mask; | ||
93 | u16 sch; /* subchannel */ | ||
94 | u8 chpid[8]; /* chpids 0-7 */ | ||
95 | u16 fla[8]; /* full link addresses 0-7 */ | ||
96 | } __attribute__ ((packed)) *ssd_area; | ||
97 | |||
98 | ssd_area = page; | ||
99 | 61 | ||
62 | page = get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
63 | if (!page) | ||
64 | return -ENOMEM; | ||
65 | ssd_area = (struct chsc_ssd_area *) page; | ||
100 | ssd_area->request.length = 0x0010; | 66 | ssd_area->request.length = 0x0010; |
101 | ssd_area->request.code = 0x0004; | 67 | ssd_area->request.code = 0x0004; |
102 | 68 | ssd_area->ssid = schid.ssid; | |
103 | ssd_area->ssid = sch->schid.ssid; | 69 | ssd_area->f_sch = schid.sch_no; |
104 | ssd_area->f_sch = sch->schid.sch_no; | 70 | ssd_area->l_sch = schid.sch_no; |
105 | ssd_area->l_sch = sch->schid.sch_no; | ||
106 | 71 | ||
107 | ccode = chsc(ssd_area); | 72 | ccode = chsc(ssd_area); |
73 | /* Check response. */ | ||
108 | if (ccode > 0) { | 74 | if (ccode > 0) { |
109 | pr_debug("chsc returned with ccode = %d\n", ccode); | 75 | ret = (ccode == 3) ? -ENODEV : -EBUSY; |
110 | return (ccode == 3) ? -ENODEV : -EBUSY; | 76 | goto out_free; |
111 | } | 77 | } |
112 | 78 | if (ssd_area->response.code != 0x0001) { | |
113 | switch (ssd_area->response.code) { | 79 | CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", |
114 | case 0x0001: /* everything ok */ | 80 | schid.ssid, schid.sch_no, |
115 | break; | ||
116 | case 0x0002: | ||
117 | CIO_CRW_EVENT(2, "Invalid command!\n"); | ||
118 | return -EINVAL; | ||
119 | case 0x0003: | ||
120 | CIO_CRW_EVENT(2, "Error in chsc request block!\n"); | ||
121 | return -EINVAL; | ||
122 | case 0x0004: | ||
123 | CIO_CRW_EVENT(2, "Model does not provide ssd\n"); | ||
124 | return -EOPNOTSUPP; | ||
125 | default: | ||
126 | CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", | ||
127 | ssd_area->response.code); | 81 | ssd_area->response.code); |
128 | return -EIO; | 82 | ret = -EIO; |
83 | goto out_free; | ||
129 | } | 84 | } |
130 | 85 | if (!ssd_area->sch_valid) { | |
131 | /* | 86 | ret = -ENODEV; |
132 | * ssd_area->st stores the type of the detected | 87 | goto out_free; |
133 | * subchannel, with the following definitions: | ||
134 | * | ||
135 | * 0: I/O subchannel: All fields have meaning | ||
136 | * 1: CHSC subchannel: Only sch_val, st and sch | ||
137 | * have meaning | ||
138 | * 2: Message subchannel: All fields except unit_addr | ||
139 | * have meaning | ||
140 | * 3: ADM subchannel: Only sch_val, st and sch | ||
141 | * have meaning | ||
142 | * | ||
143 | * Other types are currently undefined. | ||
144 | */ | ||
145 | if (ssd_area->st > 3) { /* uhm, that looks strange... */ | ||
146 | CIO_CRW_EVENT(0, "Strange subchannel type %d" | ||
147 | " for sch 0.%x.%04x\n", ssd_area->st, | ||
148 | sch->schid.ssid, sch->schid.sch_no); | ||
149 | /* | ||
150 | * There may have been a new subchannel type defined in the | ||
151 | * time since this code was written; since we don't know which | ||
152 | * fields have meaning and what to do with it we just jump out | ||
153 | */ | ||
154 | return 0; | ||
155 | } else { | ||
156 | const char *type[4] = {"I/O", "chsc", "message", "ADM"}; | ||
157 | CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n", | ||
158 | sch->schid.ssid, sch->schid.sch_no, | ||
159 | type[ssd_area->st]); | ||
160 | |||
161 | sch->ssd_info.valid = 1; | ||
162 | sch->ssd_info.type = ssd_area->st; | ||
163 | } | 88 | } |
164 | 89 | /* Copy data */ | |
165 | if (ssd_area->st == 0 || ssd_area->st == 2) { | 90 | ret = 0; |
166 | for (j = 0; j < 8; j++) { | 91 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); |
167 | if (!((0x80 >> j) & ssd_area->path_mask & | 92 | if ((ssd_area->st != 0) && (ssd_area->st != 2)) |
168 | ssd_area->fla_valid_mask)) | 93 | goto out_free; |
169 | continue; | 94 | ssd->path_mask = ssd_area->path_mask; |
170 | sch->ssd_info.chpid[j] = ssd_area->chpid[j]; | 95 | ssd->fla_valid_mask = ssd_area->fla_valid_mask; |
171 | sch->ssd_info.fla[j] = ssd_area->fla[j]; | 96 | for (i = 0; i < 8; i++) { |
97 | mask = 0x80 >> i; | ||
98 | if (ssd_area->path_mask & mask) { | ||
99 | chp_id_init(&ssd->chpid[i]); | ||
100 | ssd->chpid[i].id = ssd_area->chpid[i]; | ||
172 | } | 101 | } |
102 | if (ssd_area->fla_valid_mask & mask) | ||
103 | ssd->fla[i] = ssd_area->fla[i]; | ||
173 | } | 104 | } |
174 | return 0; | 105 | out_free: |
106 | free_page(page); | ||
107 | return ret; | ||
175 | } | 108 | } |
176 | 109 | ||
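The rewritten chsc_get_ssd_info() above allocates its own DMA-capable page per call and copies the result into a caller-supplied struct chsc_ssd_info, so callers no longer hold a page or the subchannel lock across the CHSC. A hedged sketch of a caller (illustrative, not the in-tree call site):

	/* Sketch only; relies on the chsc.h declarations from this patch. */
	static int fill_ssd(struct subchannel *sch)
	{
		int ret;

		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)	/* -ENOMEM, -ENODEV, -EBUSY or -EIO */
			return ret;
		/* path_mask flags the valid chpid[] slots, fla_valid_mask
		 * the fla[] entries that carry link addresses. */
		return 0;
	}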
177 | int | 110 | static int check_for_io_on_path(struct subchannel *sch, int mask) |
178 | css_get_ssd_info(struct subchannel *sch) | ||
179 | { | 111 | { |
180 | int ret; | 112 | int cc; |
181 | void *page; | ||
182 | 113 | ||
183 | page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 114 | cc = stsch(sch->schid, &sch->schib); |
184 | if (!page) | 115 | if (cc) |
185 | return -ENOMEM; | 116 | return 0; |
186 | spin_lock_irq(sch->lock); | 117 | if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask) |
187 | ret = chsc_get_sch_desc_irq(sch, page); | 118 | return 1; |
188 | if (ret) { | 119 | return 0; |
189 | static int cio_chsc_err_msg; | 120 | } |
190 | 121 | ||
191 | if (!cio_chsc_err_msg) { | 122 | static void terminate_internal_io(struct subchannel *sch) |
192 | printk(KERN_ERR | 123 | { |
193 | "chsc_get_sch_descriptions:" | 124 | if (cio_clear(sch)) { |
194 | " Error %d while doing chsc; " | 125 | /* Recheck device in case clear failed. */ |
195 | "processing some machine checks may " | 126 | sch->lpm = 0; |
196 | "not work\n", ret); | 127 | if (device_trigger_verify(sch) != 0) |
197 | cio_chsc_err_msg = 1; | 128 | css_schedule_eval(sch->schid); |
198 | } | 129 | return; |
199 | } | ||
200 | spin_unlock_irq(sch->lock); | ||
201 | free_page((unsigned long)page); | ||
202 | if (!ret) { | ||
203 | int j, chpid, mask; | ||
204 | /* Allocate channel path structures, if needed. */ | ||
205 | for (j = 0; j < 8; j++) { | ||
206 | mask = 0x80 >> j; | ||
207 | chpid = sch->ssd_info.chpid[j]; | ||
208 | if ((sch->schib.pmcw.pim & mask) && | ||
209 | (get_chp_status(chpid) < 0)) | ||
210 | new_channel_path(chpid); | ||
211 | } | ||
212 | } | 130 | } |
213 | return ret; | 131 | /* Request retry of internal operation. */ |
132 | device_set_intretry(sch); | ||
133 | /* Call handler. */ | ||
134 | if (sch->driver && sch->driver->termination) | ||
135 | sch->driver->termination(&sch->dev); | ||
214 | } | 136 | } |
215 | 137 | ||
216 | static int | 138 | static int |
@@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) | |||
219 | int j; | 141 | int j; |
220 | int mask; | 142 | int mask; |
221 | struct subchannel *sch; | 143 | struct subchannel *sch; |
222 | struct channel_path *chpid; | 144 | struct chp_id *chpid; |
223 | struct schib schib; | 145 | struct schib schib; |
224 | 146 | ||
225 | sch = to_subchannel(dev); | 147 | sch = to_subchannel(dev); |
@@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) | |||
243 | if (sch->schib.pmcw.pim == 0x80) | 165 | if (sch->schib.pmcw.pim == 0x80) |
244 | goto out_unreg; | 166 | goto out_unreg; |
245 | 167 | ||
246 | if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && | 168 | if (check_for_io_on_path(sch, mask)) { |
247 | (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && | 169 | if (device_is_online(sch)) |
248 | (sch->schib.pmcw.lpum == mask)) { | 170 | device_kill_io(sch); |
249 | int cc; | 171 | else { |
250 | 172 | terminate_internal_io(sch); | |
251 | cc = cio_clear(sch); | 173 | /* Re-start path verification. */ |
252 | if (cc == -ENODEV) | 174 | if (sch->driver && sch->driver->verify) |
175 | sch->driver->verify(&sch->dev); | ||
176 | } | ||
177 | } else { | ||
178 | /* trigger path verification. */ | ||
179 | if (sch->driver && sch->driver->verify) | ||
180 | sch->driver->verify(&sch->dev); | ||
181 | else if (sch->lpm == mask) | ||
253 | goto out_unreg; | 182 | goto out_unreg; |
254 | /* Request retry of internal operation. */ | ||
255 | device_set_intretry(sch); | ||
256 | /* Call handler. */ | ||
257 | if (sch->driver && sch->driver->termination) | ||
258 | sch->driver->termination(&sch->dev); | ||
259 | goto out_unlock; | ||
260 | } | 183 | } |
261 | 184 | ||
262 | /* trigger path verification. */ | ||
263 | if (sch->driver && sch->driver->verify) | ||
264 | sch->driver->verify(&sch->dev); | ||
265 | else if (sch->lpm == mask) | ||
266 | goto out_unreg; | ||
267 | out_unlock: | ||
268 | spin_unlock_irq(sch->lock); | 185 | spin_unlock_irq(sch->lock); |
269 | return 0; | 186 | return 0; |
187 | |||
270 | out_unreg: | 188 | out_unreg: |
271 | spin_unlock_irq(sch->lock); | ||
272 | sch->lpm = 0; | 189 | sch->lpm = 0; |
273 | if (css_enqueue_subchannel_slow(sch->schid)) { | 190 | spin_unlock_irq(sch->lock); |
274 | css_clear_subchannel_slow_list(); | 191 | css_schedule_eval(sch->schid); |
275 | need_rescan = 1; | ||
276 | } | ||
277 | return 0; | 192 | return 0; |
278 | } | 193 | } |
279 | 194 | ||
280 | static void | 195 | void chsc_chp_offline(struct chp_id chpid) |
281 | s390_set_chpid_offline( __u8 chpid) | ||
282 | { | 196 | { |
283 | char dbf_txt[15]; | 197 | char dbf_txt[15]; |
284 | struct device *dev; | ||
285 | 198 | ||
286 | sprintf(dbf_txt, "chpr%x", chpid); | 199 | sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); |
287 | CIO_TRACE_EVENT(2, dbf_txt); | 200 | CIO_TRACE_EVENT(2, dbf_txt); |
288 | 201 | ||
289 | if (get_chp_status(chpid) <= 0) | 202 | if (chp_get_status(chpid) <= 0) |
290 | return; | 203 | return; |
291 | dev = get_device(&css[0]->chps[chpid]->dev); | 204 | bus_for_each_dev(&css_bus_type, NULL, &chpid, |
292 | bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev), | ||
293 | s390_subchannel_remove_chpid); | 205 | s390_subchannel_remove_chpid); |
294 | |||
295 | if (need_rescan || css_slow_subchannels_exist()) | ||
296 | queue_work(slow_path_wq, &slow_path_work); | ||
297 | put_device(dev); | ||
298 | } | ||
299 | |||
300 | struct res_acc_data { | ||
301 | struct channel_path *chp; | ||
302 | u32 fla_mask; | ||
303 | u16 fla; | ||
304 | }; | ||
305 | |||
306 | static int | ||
307 | s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch) | ||
308 | { | ||
309 | int found; | ||
310 | int chp; | ||
311 | int ccode; | ||
312 | |||
313 | found = 0; | ||
314 | for (chp = 0; chp <= 7; chp++) | ||
315 | /* | ||
316 | * check if chpid is in information updated by ssd | ||
317 | */ | ||
318 | if (sch->ssd_info.valid && | ||
319 | sch->ssd_info.chpid[chp] == res_data->chp->id && | ||
320 | (sch->ssd_info.fla[chp] & res_data->fla_mask) | ||
321 | == res_data->fla) { | ||
322 | found = 1; | ||
323 | break; | ||
324 | } | ||
325 | |||
326 | if (found == 0) | ||
327 | return 0; | ||
328 | |||
329 | /* | ||
330 | * Do a stsch to update our subchannel structure with the | ||
331 | * new path information and eventually check for logically | ||
332 | * offline chpids. | ||
333 | */ | ||
334 | ccode = stsch(sch->schid, &sch->schib); | ||
335 | if (ccode > 0) | ||
336 | return 0; | ||
337 | |||
338 | return 0x80 >> chp; | ||
339 | } | 206 | } |
340 | 207 | ||
341 | static int | 208 | static int |
342 | s390_process_res_acc_new_sch(struct subchannel_id schid) | 209 | s390_process_res_acc_new_sch(struct subchannel_id schid) |
343 | { | 210 | { |
344 | struct schib schib; | 211 | struct schib schib; |
345 | int ret; | ||
346 | /* | 212 | /* |
347 | * We don't know the device yet, but since a path | 213 | * We don't know the device yet, but since a path |
348 | * may be available now to the device we'll have | 214 | * may be available now to the device we'll have |
@@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid) | |||
353 | */ | 219 | */ |
354 | if (stsch_err(schid, &schib)) | 220 | if (stsch_err(schid, &schib)) |
355 | /* We're through */ | 221 | /* We're through */ |
356 | return need_rescan ? -EAGAIN : -ENXIO; | 222 | return -ENXIO; |
357 | 223 | ||
358 | /* Put it on the slow path. */ | 224 | /* Put it on the slow path. */ |
359 | ret = css_enqueue_subchannel_slow(schid); | 225 | css_schedule_eval(schid); |
360 | if (ret) { | 226 | return 0; |
361 | css_clear_subchannel_slow_list(); | 227 | } |
362 | need_rescan = 1; | 228 | |
363 | return -EAGAIN; | 229 | struct res_acc_data { |
230 | struct chp_id chpid; | ||
231 | u32 fla_mask; | ||
232 | u16 fla; | ||
233 | }; | ||
234 | |||
235 | static int get_res_chpid_mask(struct chsc_ssd_info *ssd, | ||
236 | struct res_acc_data *data) | ||
237 | { | ||
238 | int i; | ||
239 | int mask; | ||
240 | |||
241 | for (i = 0; i < 8; i++) { | ||
242 | mask = 0x80 >> i; | ||
243 | if (!(ssd->path_mask & mask)) | ||
244 | continue; | ||
245 | if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid)) | ||
246 | continue; | ||
247 | if ((ssd->fla_valid_mask & mask) && | ||
248 | ((ssd->fla[i] & data->fla_mask) != data->fla)) | ||
249 | continue; | ||
250 | return mask; | ||
364 | } | 251 | } |
365 | return 0; | 252 | return 0; |
366 | } | 253 | } |
@@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) | |||
379 | return s390_process_res_acc_new_sch(schid); | 266 | return s390_process_res_acc_new_sch(schid); |
380 | 267 | ||
381 | spin_lock_irq(sch->lock); | 268 | spin_lock_irq(sch->lock); |
382 | 269 | chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); | |
383 | chp_mask = s390_process_res_acc_sch(res_data, sch); | 270 | if (chp_mask == 0) |
384 | 271 | goto out; | |
385 | if (chp_mask == 0) { | 272 | if (stsch(sch->schid, &sch->schib)) |
386 | spin_unlock_irq(sch->lock); | 273 | goto out; |
387 | put_device(&sch->dev); | ||
388 | return 0; | ||
389 | } | ||
390 | old_lpm = sch->lpm; | 274 | old_lpm = sch->lpm; |
391 | sch->lpm = ((sch->schib.pmcw.pim & | 275 | sch->lpm = ((sch->schib.pmcw.pim & |
392 | sch->schib.pmcw.pam & | 276 | sch->schib.pmcw.pam & |
@@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) | |||
396 | device_trigger_reprobe(sch); | 280 | device_trigger_reprobe(sch); |
397 | else if (sch->driver && sch->driver->verify) | 281 | else if (sch->driver && sch->driver->verify) |
398 | sch->driver->verify(&sch->dev); | 282 | sch->driver->verify(&sch->dev); |
399 | 283 | out: | |
400 | spin_unlock_irq(sch->lock); | 284 | spin_unlock_irq(sch->lock); |
401 | put_device(&sch->dev); | 285 | put_device(&sch->dev); |
402 | return 0; | 286 | return 0; |
403 | } | 287 | } |
404 | 288 | ||
405 | 289 | static void s390_process_res_acc (struct res_acc_data *res_data) | |
406 | static int | ||
407 | s390_process_res_acc (struct res_acc_data *res_data) | ||
408 | { | 290 | { |
409 | int rc; | ||
410 | char dbf_txt[15]; | 291 | char dbf_txt[15]; |
411 | 292 | ||
412 | sprintf(dbf_txt, "accpr%x", res_data->chp->id); | 293 | sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid, |
294 | res_data->chpid.id); | ||
413 | CIO_TRACE_EVENT( 2, dbf_txt); | 295 | CIO_TRACE_EVENT( 2, dbf_txt); |
414 | if (res_data->fla != 0) { | 296 | if (res_data->fla != 0) { |
415 | sprintf(dbf_txt, "fla%x", res_data->fla); | 297 | sprintf(dbf_txt, "fla%x", res_data->fla); |
@@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data) | |||
423 | * The more information we have (info), the less scanning | 305 | * The more information we have (info), the less scanning |
424 | * will we have to do. | 306 | * will we have to do. |
425 | */ | 307 | */ |
426 | rc = for_each_subchannel(__s390_process_res_acc, res_data); | 308 | for_each_subchannel(__s390_process_res_acc, res_data); |
427 | if (css_slow_subchannels_exist()) | ||
428 | rc = -EAGAIN; | ||
429 | else if (rc != -EAGAIN) | ||
430 | rc = 0; | ||
431 | return rc; | ||
432 | } | 309 | } |
433 | 310 | ||
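In get_res_chpid_mask() above, fla_mask controls how precisely a reported resource must match the stored full link address: 0xffff requires all 16 bits to agree, while 0xff00 compares only the link-address byte (chsc_process_sei_res_acc() below picks the mask from the event's validity flags). A standalone sketch of that comparison, with illustrative values:

	#include <stdio.h>

	/* Mirrors (ssd->fla[i] & data->fla_mask) == data->fla. */
	static int fla_matches(unsigned short stored, unsigned short event,
			       unsigned int fla_mask)
	{
		return (stored & fla_mask) == event;
	}

	int main(void)
	{
		printf("%d\n", fla_matches(0x1234, 0x1234, 0xffff));	/* 1 */
		printf("%d\n", fla_matches(0x1234, 0x1200, 0xff00));	/* 1 */
		printf("%d\n", fla_matches(0x5634, 0x1200, 0xff00));	/* 0 */
		return 0;
	}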
434 | static int | 311 | static int |
@@ -480,43 +357,45 @@ struct chsc_sei_area { | |||
480 | /* ccdf has to be big enough for a link-incident record */ | 357 | /* ccdf has to be big enough for a link-incident record */ |
481 | } __attribute__ ((packed)); | 358 | } __attribute__ ((packed)); |
482 | 359 | ||
483 | static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) | 360 | static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) |
484 | { | 361 | { |
485 | int chpid; | 362 | struct chp_id chpid; |
363 | int id; | ||
486 | 364 | ||
487 | CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", | 365 | CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", |
488 | sei_area->rs, sei_area->rsid); | 366 | sei_area->rs, sei_area->rsid); |
489 | if (sei_area->rs != 4) | 367 | if (sei_area->rs != 4) |
490 | return 0; | 368 | return; |
491 | chpid = __get_chpid_from_lir(sei_area->ccdf); | 369 | id = __get_chpid_from_lir(sei_area->ccdf); |
492 | if (chpid < 0) | 370 | if (id < 0) |
493 | CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); | 371 | CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); |
494 | else | 372 | else { |
495 | s390_set_chpid_offline(chpid); | 373 | chp_id_init(&chpid); |
496 | 374 | chpid.id = id; | |
497 | return 0; | 375 | chsc_chp_offline(chpid); |
376 | } | ||
498 | } | 377 | } |
499 | 378 | ||
500 | static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | 379 | static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) |
501 | { | 380 | { |
502 | struct res_acc_data res_data; | 381 | struct res_acc_data res_data; |
503 | struct device *dev; | 382 | struct chp_id chpid; |
504 | int status; | 383 | int status; |
505 | int rc; | ||
506 | 384 | ||
507 | CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " | 385 | CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " |
508 | "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); | 386 | "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); |
509 | if (sei_area->rs != 4) | 387 | if (sei_area->rs != 4) |
510 | return 0; | 388 | return; |
389 | chp_id_init(&chpid); | ||
390 | chpid.id = sei_area->rsid; | ||
511 | /* allocate a new channel path structure, if needed */ | 391 | /* allocate a new channel path structure, if needed */ |
512 | status = get_chp_status(sei_area->rsid); | 392 | status = chp_get_status(chpid); |
513 | if (status < 0) | 393 | if (status < 0) |
514 | new_channel_path(sei_area->rsid); | 394 | chp_new(chpid); |
515 | else if (!status) | 395 | else if (!status) |
516 | return 0; | 396 | return; |
517 | dev = get_device(&css[0]->chps[sei_area->rsid]->dev); | ||
518 | memset(&res_data, 0, sizeof(struct res_acc_data)); | 397 | memset(&res_data, 0, sizeof(struct res_acc_data)); |
519 | res_data.chp = to_channelpath(dev); | 398 | res_data.chpid = chpid; |
520 | if ((sei_area->vf & 0xc0) != 0) { | 399 | if ((sei_area->vf & 0xc0) != 0) { |
521 | res_data.fla = sei_area->fla; | 400 | res_data.fla = sei_area->fla; |
522 | if ((sei_area->vf & 0xc0) == 0xc0) | 401 | if ((sei_area->vf & 0xc0) == 0xc0) |
@@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | |||
526 | /* link address */ | 405 | /* link address */ |
527 | res_data.fla_mask = 0xff00; | 406 | res_data.fla_mask = 0xff00; |
528 | } | 407 | } |
529 | rc = s390_process_res_acc(&res_data); | 408 | s390_process_res_acc(&res_data); |
530 | put_device(dev); | ||
531 | |||
532 | return rc; | ||
533 | } | 409 | } |
534 | 410 | ||
535 | static int chsc_process_sei(struct chsc_sei_area *sei_area) | 411 | struct chp_config_data { |
412 | u8 map[32]; | ||
413 | u8 op; | ||
414 | u8 pc; | ||
415 | }; | ||
416 | |||
417 | static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) | ||
536 | { | 418 | { |
537 | int rc; | 419 | struct chp_config_data *data; |
420 | struct chp_id chpid; | ||
421 | int num; | ||
422 | |||
423 | CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); | ||
424 | if (sei_area->rs != 0) | ||
425 | return; | ||
426 | data = (struct chp_config_data *) &(sei_area->ccdf); | ||
427 | chp_id_init(&chpid); | ||
428 | for (num = 0; num <= __MAX_CHPID; num++) { | ||
429 | if (!chp_test_bit(data->map, num)) | ||
430 | continue; | ||
431 | chpid.id = num; | ||
432 | printk(KERN_WARNING "cio: processing configure event %d for " | ||
433 | "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id); | ||
434 | switch (data->op) { | ||
435 | case 0: | ||
436 | chp_cfg_schedule(chpid, 1); | ||
437 | break; | ||
438 | case 1: | ||
439 | chp_cfg_schedule(chpid, 0); | ||
440 | break; | ||
441 | case 2: | ||
442 | chp_cfg_cancel_deconfigure(chpid); | ||
443 | break; | ||
444 | } | ||
445 | } | ||
446 | } | ||
538 | 447 | ||
448 | static void chsc_process_sei(struct chsc_sei_area *sei_area) | ||
449 | { | ||
539 | /* Check if we might have lost some information. */ | 450 | /* Check if we might have lost some information. */ |
540 | if (sei_area->flags & 0x40) | 451 | if (sei_area->flags & 0x40) { |
541 | CIO_CRW_EVENT(2, "chsc: event overflow\n"); | 452 | CIO_CRW_EVENT(2, "chsc: event overflow\n"); |
453 | css_schedule_eval_all(); | ||
454 | } | ||
542 | /* which kind of information was stored? */ | 455 | /* which kind of information was stored? */ |
543 | rc = 0; | ||
544 | switch (sei_area->cc) { | 456 | switch (sei_area->cc) { |
545 | case 1: /* link incident*/ | 457 | case 1: /* link incident*/ |
546 | rc = chsc_process_sei_link_incident(sei_area); | 458 | chsc_process_sei_link_incident(sei_area); |
547 | break; | 459 | break; |
548 | case 2: /* i/o resource accessibiliy */ | 460 | case 2: /* i/o resource accessibiliy */ |
549 | rc = chsc_process_sei_res_acc(sei_area); | 461 | chsc_process_sei_res_acc(sei_area); |
462 | break; | ||
463 | case 8: /* channel-path-configuration notification */ | ||
464 | chsc_process_sei_chp_config(sei_area); | ||
550 | break; | 465 | break; |
551 | default: /* other stuff */ | 466 | default: /* other stuff */ |
552 | CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", | 467 | CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", |
553 | sei_area->cc); | 468 | sei_area->cc); |
554 | break; | 469 | break; |
555 | } | 470 | } |
556 | |||
557 | return rc; | ||
558 | } | 471 | } |
559 | 472 | ||
560 | int chsc_process_crw(void) | 473 | void chsc_process_crw(void) |
561 | { | 474 | { |
562 | struct chsc_sei_area *sei_area; | 475 | struct chsc_sei_area *sei_area; |
563 | int ret; | ||
564 | int rc; | ||
565 | 476 | ||
566 | if (!sei_page) | 477 | if (!sei_page) |
567 | return 0; | 478 | return; |
568 | /* Access to sei_page is serialized through machine check handler | 479 | /* Access to sei_page is serialized through machine check handler |
569 | * thread, so no need for locking. */ | 480 | * thread, so no need for locking. */ |
570 | sei_area = sei_page; | 481 | sei_area = sei_page; |
571 | 482 | ||
572 | CIO_TRACE_EVENT( 2, "prcss"); | 483 | CIO_TRACE_EVENT( 2, "prcss"); |
573 | ret = 0; | ||
574 | do { | 484 | do { |
575 | memset(sei_area, 0, sizeof(*sei_area)); | 485 | memset(sei_area, 0, sizeof(*sei_area)); |
576 | sei_area->request.length = 0x0010; | 486 | sei_area->request.length = 0x0010; |
@@ -580,37 +490,26 @@ int chsc_process_crw(void) | |||
580 | 490 | ||
581 | if (sei_area->response.code == 0x0001) { | 491 | if (sei_area->response.code == 0x0001) { |
582 | CIO_CRW_EVENT(4, "chsc: sei successful\n"); | 492 | CIO_CRW_EVENT(4, "chsc: sei successful\n"); |
583 | rc = chsc_process_sei(sei_area); | 493 | chsc_process_sei(sei_area); |
584 | if (rc) | ||
585 | ret = rc; | ||
586 | } else { | 494 | } else { |
587 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", | 495 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", |
588 | sei_area->response.code); | 496 | sei_area->response.code); |
589 | ret = 0; | ||
590 | break; | 497 | break; |
591 | } | 498 | } |
592 | } while (sei_area->flags & 0x80); | 499 | } while (sei_area->flags & 0x80); |
593 | |||
594 | return ret; | ||
595 | } | 500 | } |
596 | 501 | ||
597 | static int | 502 | static int |
598 | __chp_add_new_sch(struct subchannel_id schid) | 503 | __chp_add_new_sch(struct subchannel_id schid) |
599 | { | 504 | { |
600 | struct schib schib; | 505 | struct schib schib; |
601 | int ret; | ||
602 | 506 | ||
603 | if (stsch_err(schid, &schib)) | 507 | if (stsch_err(schid, &schib)) |
604 | /* We're through */ | 508 | /* We're through */ |
605 | return need_rescan ? -EAGAIN : -ENXIO; | 509 | return -ENXIO; |
606 | 510 | ||
607 | /* Put it on the slow path. */ | 511 | /* Put it on the slow path. */ |
608 | ret = css_enqueue_subchannel_slow(schid); | 512 | css_schedule_eval(schid); |
609 | if (ret) { | ||
610 | css_clear_subchannel_slow_list(); | ||
611 | need_rescan = 1; | ||
612 | return -EAGAIN; | ||
613 | } | ||
614 | return 0; | 513 | return 0; |
615 | } | 514 | } |
616 | 515 | ||
@@ -619,10 +518,10 @@ static int | |||
619 | __chp_add(struct subchannel_id schid, void *data) | 518 | __chp_add(struct subchannel_id schid, void *data) |
620 | { | 519 | { |
621 | int i, mask; | 520 | int i, mask; |
622 | struct channel_path *chp; | 521 | struct chp_id *chpid; |
623 | struct subchannel *sch; | 522 | struct subchannel *sch; |
624 | 523 | ||
625 | chp = data; | 524 | chpid = data; |
626 | sch = get_subchannel_by_schid(schid); | 525 | sch = get_subchannel_by_schid(schid); |
627 | if (!sch) | 526 | if (!sch) |
628 | /* Check if the subchannel is now available. */ | 527 | /* Check if the subchannel is now available. */ |
@@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data) | |||
631 | for (i=0; i<8; i++) { | 530 | for (i=0; i<8; i++) { |
632 | mask = 0x80 >> i; | 531 | mask = 0x80 >> i; |
633 | if ((sch->schib.pmcw.pim & mask) && | 532 | if ((sch->schib.pmcw.pim & mask) && |
634 | (sch->schib.pmcw.chpid[i] == chp->id)) { | 533 | (sch->schib.pmcw.chpid[i] == chpid->id)) { |
635 | if (stsch(sch->schid, &sch->schib) != 0) { | 534 | if (stsch(sch->schid, &sch->schib) != 0) { |
636 | /* Endgame. */ | 535 | /* Endgame. */ |
637 | spin_unlock_irq(sch->lock); | 536 | spin_unlock_irq(sch->lock); |
@@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data) | |||
657 | return 0; | 556 | return 0; |
658 | } | 557 | } |
659 | 558 | ||
660 | static int | 559 | void chsc_chp_online(struct chp_id chpid) |
661 | chp_add(int chpid) | ||
662 | { | 560 | { |
663 | int rc; | ||
664 | char dbf_txt[15]; | 561 | char dbf_txt[15]; |
665 | struct device *dev; | ||
666 | 562 | ||
667 | if (!get_chp_status(chpid)) | 563 | sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); |
668 | return 0; /* no need to do the rest */ | ||
669 | |||
670 | sprintf(dbf_txt, "cadd%x", chpid); | ||
671 | CIO_TRACE_EVENT(2, dbf_txt); | 564 | CIO_TRACE_EVENT(2, dbf_txt); |
672 | 565 | ||
673 | dev = get_device(&css[0]->chps[chpid]->dev); | 566 | if (chp_get_status(chpid) != 0) |
674 | rc = for_each_subchannel(__chp_add, to_channelpath(dev)); | 567 | for_each_subchannel(__chp_add, &chpid); |
675 | if (css_slow_subchannels_exist()) | ||
676 | rc = -EAGAIN; | ||
677 | if (rc != -EAGAIN) | ||
678 | rc = 0; | ||
679 | put_device(dev); | ||
680 | return rc; | ||
681 | } | 568 | } |
682 | 569 | ||
683 | /* | 570 | static void __s390_subchannel_vary_chpid(struct subchannel *sch, |
684 | * Handling of crw machine checks with channel path source. | 571 | struct chp_id chpid, int on) |
685 | */ | ||
686 | int | ||
687 | chp_process_crw(int chpid, int on) | ||
688 | { | ||
689 | if (on == 0) { | ||
690 | /* Path has gone. We use the link incident routine.*/ | ||
691 | s390_set_chpid_offline(chpid); | ||
692 | return 0; /* De-register is async anyway. */ | ||
693 | } | ||
694 | /* | ||
695 | * Path has come. Allocate a new channel path structure, | ||
696 | * if needed. | ||
697 | */ | ||
698 | if (get_chp_status(chpid) < 0) | ||
699 | new_channel_path(chpid); | ||
700 | /* Avoid the extra overhead in process_rec_acc. */ | ||
701 | return chp_add(chpid); | ||
702 | } | ||
703 | |||
704 | static int check_for_io_on_path(struct subchannel *sch, int index) | ||
705 | { | ||
706 | int cc; | ||
707 | |||
708 | cc = stsch(sch->schid, &sch->schib); | ||
709 | if (cc) | ||
710 | return 0; | ||
711 | if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) | ||
712 | return 1; | ||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | static void terminate_internal_io(struct subchannel *sch) | ||
717 | { | ||
718 | if (cio_clear(sch)) { | ||
719 | /* Recheck device in case clear failed. */ | ||
720 | sch->lpm = 0; | ||
721 | if (device_trigger_verify(sch) != 0) { | ||
722 | if(css_enqueue_subchannel_slow(sch->schid)) { | ||
723 | css_clear_subchannel_slow_list(); | ||
724 | need_rescan = 1; | ||
725 | } | ||
726 | } | ||
727 | return; | ||
728 | } | ||
729 | /* Request retry of internal operation. */ | ||
730 | device_set_intretry(sch); | ||
731 | /* Call handler. */ | ||
732 | if (sch->driver && sch->driver->termination) | ||
733 | sch->driver->termination(&sch->dev); | ||
734 | } | ||
735 | |||
736 | static void | ||
737 | __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) | ||
738 | { | 572 | { |
739 | int chp, old_lpm; | 573 | int chp, old_lpm; |
574 | int mask; | ||
740 | unsigned long flags; | 575 | unsigned long flags; |
741 | 576 | ||
742 | if (!sch->ssd_info.valid) | ||
743 | return; | ||
744 | |||
745 | spin_lock_irqsave(sch->lock, flags); | 577 | spin_lock_irqsave(sch->lock, flags); |
746 | old_lpm = sch->lpm; | 578 | old_lpm = sch->lpm; |
747 | for (chp = 0; chp < 8; chp++) { | 579 | for (chp = 0; chp < 8; chp++) { |
748 | if (sch->ssd_info.chpid[chp] != chpid) | 580 | mask = 0x80 >> chp; |
581 | if (!(sch->ssd_info.path_mask & mask)) | ||
582 | continue; | ||
583 | if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid)) | ||
749 | continue; | 584 | continue; |
750 | 585 | ||
751 | if (on) { | 586 | if (on) { |
752 | sch->opm |= (0x80 >> chp); | 587 | sch->opm |= mask; |
753 | sch->lpm |= (0x80 >> chp); | 588 | sch->lpm |= mask; |
754 | if (!old_lpm) | 589 | if (!old_lpm) |
755 | device_trigger_reprobe(sch); | 590 | device_trigger_reprobe(sch); |
756 | else if (sch->driver && sch->driver->verify) | 591 | else if (sch->driver && sch->driver->verify) |
757 | sch->driver->verify(&sch->dev); | 592 | sch->driver->verify(&sch->dev); |
758 | break; | 593 | break; |
759 | } | 594 | } |
760 | sch->opm &= ~(0x80 >> chp); | 595 | sch->opm &= ~mask; |
761 | sch->lpm &= ~(0x80 >> chp); | 596 | sch->lpm &= ~mask; |
762 | if (check_for_io_on_path(sch, chp)) { | 597 | if (check_for_io_on_path(sch, mask)) { |
763 | if (device_is_online(sch)) | 598 | if (device_is_online(sch)) |
764 | /* Path verification is done after killing. */ | 599 | /* Path verification is done after killing. */ |
765 | device_kill_io(sch); | 600 | device_kill_io(sch); |
766 | else | 601 | else { |
767 | /* Kill and retry internal I/O. */ | 602 | /* Kill and retry internal I/O. */ |
768 | terminate_internal_io(sch); | 603 | terminate_internal_io(sch); |
769 | } else if (!sch->lpm) { | 604 | /* Re-start path verification. */ |
770 | if (device_trigger_verify(sch) != 0) { | 605 | if (sch->driver && sch->driver->verify) |
771 | if (css_enqueue_subchannel_slow(sch->schid)) { | 606 | sch->driver->verify(&sch->dev); |
772 | css_clear_subchannel_slow_list(); | ||
773 | need_rescan = 1; | ||
774 | } | ||
775 | } | 607 | } |
608 | } else if (!sch->lpm) { | ||
609 | if (device_trigger_verify(sch) != 0) | ||
610 | css_schedule_eval(sch->schid); | ||
776 | } else if (sch->driver && sch->driver->verify) | 611 | } else if (sch->driver && sch->driver->verify) |
777 | sch->driver->verify(&sch->dev); | 612 | sch->driver->verify(&sch->dev); |
778 | break; | 613 | break; |
@@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) | |||
780 | spin_unlock_irqrestore(sch->lock, flags); | 615 | spin_unlock_irqrestore(sch->lock, flags); |
781 | } | 616 | } |
782 | 617 | ||
783 | static int | 618 | static int s390_subchannel_vary_chpid_off(struct device *dev, void *data) |
784 | s390_subchannel_vary_chpid_off(struct device *dev, void *data) | ||
785 | { | 619 | { |
786 | struct subchannel *sch; | 620 | struct subchannel *sch; |
787 | __u8 *chpid; | 621 | struct chp_id *chpid; |
788 | 622 | ||
789 | sch = to_subchannel(dev); | 623 | sch = to_subchannel(dev); |
790 | chpid = data; | 624 | chpid = data; |
@@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data) | |||
793 | return 0; | 627 | return 0; |
794 | } | 628 | } |
795 | 629 | ||
796 | static int | 630 | static int s390_subchannel_vary_chpid_on(struct device *dev, void *data) |
797 | s390_subchannel_vary_chpid_on(struct device *dev, void *data) | ||
798 | { | 631 | { |
799 | struct subchannel *sch; | 632 | struct subchannel *sch; |
800 | __u8 *chpid; | 633 | struct chp_id *chpid; |
801 | 634 | ||
802 | sch = to_subchannel(dev); | 635 | sch = to_subchannel(dev); |
803 | chpid = data; | 636 | chpid = data; |
@@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) | |||
821 | /* We're through */ | 654 | /* We're through */ |
822 | return -ENXIO; | 655 | return -ENXIO; |
823 | /* Put it on the slow path. */ | 656 | /* Put it on the slow path. */ |
824 | if (css_enqueue_subchannel_slow(schid)) { | 657 | css_schedule_eval(schid); |
825 | css_clear_subchannel_slow_list(); | ||
826 | need_rescan = 1; | ||
827 | return -EAGAIN; | ||
828 | } | ||
829 | return 0; | 658 | return 0; |
830 | } | 659 | } |
831 | 660 | ||
832 | /* | 661 | /** |
833 | * Function: s390_vary_chpid | 662 | * chsc_chp_vary - propagate channel-path vary operation to subchannels |
834 | * Varies the specified chpid online or offline | 663 | * @chpid: channel-path ID |
664 | * @on: non-zero for vary online, zero for vary offline | ||
835 | */ | 665 | */ |
836 | static int | 666 | int chsc_chp_vary(struct chp_id chpid, int on) |
837 | s390_vary_chpid( __u8 chpid, int on) | ||
838 | { | 667 | { |
839 | char dbf_text[15]; | ||
840 | int status; | ||
841 | |||
842 | sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); | ||
843 | CIO_TRACE_EVENT( 2, dbf_text); | ||
844 | |||
845 | status = get_chp_status(chpid); | ||
846 | if (status < 0) { | ||
847 | printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid); | ||
848 | return -EINVAL; | ||
849 | } | ||
850 | |||
851 | if (!on && !status) { | ||
852 | printk(KERN_ERR "chpid %x is already offline\n", chpid); | ||
853 | return -EINVAL; | ||
854 | } | ||
855 | |||
856 | set_chp_logically_online(chpid, on); | ||
857 | |||
858 | /* | 668 | /* |
859 | * Redo PathVerification on the devices the chpid connects to | 669 | * Redo PathVerification on the devices the chpid connects to |
860 | */ | 670 | */ |
@@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on) | |||
865 | if (on) | 675 | if (on) |
866 | /* Scan for new devices on varied on path. */ | 676 | /* Scan for new devices on varied on path. */ |
867 | for_each_subchannel(__s390_vary_chpid_on, NULL); | 677 | for_each_subchannel(__s390_vary_chpid_on, NULL); |
868 | if (need_rescan || css_slow_subchannels_exist()) | ||
869 | queue_work(slow_path_wq, &slow_path_work); | ||
870 | return 0; | 678 | return 0; |
871 | } | 679 | } |
872 | 680 | ||
873 | /* | ||
874 | * Channel measurement related functions | ||
875 | */ | ||
876 | static ssize_t | ||
877 | chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off, | ||
878 | size_t count) | ||
879 | { | ||
880 | struct channel_path *chp; | ||
881 | unsigned int size; | ||
882 | |||
883 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
884 | if (!chp->cmg_chars) | ||
885 | return 0; | ||
886 | |||
887 | size = sizeof(struct cmg_chars); | ||
888 | |||
889 | if (off > size) | ||
890 | return 0; | ||
891 | if (off + count > size) | ||
892 | count = size - off; | ||
893 | memcpy(buf, chp->cmg_chars + off, count); | ||
894 | return count; | ||
895 | } | ||
896 | |||
897 | static struct bin_attribute chp_measurement_chars_attr = { | ||
898 | .attr = { | ||
899 | .name = "measurement_chars", | ||
900 | .mode = S_IRUSR, | ||
901 | .owner = THIS_MODULE, | ||
902 | }, | ||
903 | .size = sizeof(struct cmg_chars), | ||
904 | .read = chp_measurement_chars_read, | ||
905 | }; | ||
906 | |||
907 | static void | ||
908 | chp_measurement_copy_block(struct cmg_entry *buf, | ||
909 | struct channel_subsystem *css, int chpid) | ||
910 | { | ||
911 | void *area; | ||
912 | struct cmg_entry *entry, reference_buf; | ||
913 | int idx; | ||
914 | |||
915 | if (chpid < 128) { | ||
916 | area = css->cub_addr1; | ||
917 | idx = chpid; | ||
918 | } else { | ||
919 | area = css->cub_addr2; | ||
920 | idx = chpid - 128; | ||
921 | } | ||
922 | entry = area + (idx * sizeof(struct cmg_entry)); | ||
923 | do { | ||
924 | memcpy(buf, entry, sizeof(*entry)); | ||
925 | memcpy(&reference_buf, entry, sizeof(*entry)); | ||
926 | } while (reference_buf.values[0] != buf->values[0]); | ||
927 | } | ||
928 | |||
929 | static ssize_t | ||
930 | chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count) | ||
931 | { | ||
932 | struct channel_path *chp; | ||
933 | struct channel_subsystem *css; | ||
934 | unsigned int size; | ||
935 | |||
936 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
937 | css = to_css(chp->dev.parent); | ||
938 | |||
939 | size = sizeof(struct cmg_entry); | ||
940 | |||
941 | /* Only allow single reads. */ | ||
942 | if (off || count < size) | ||
943 | return 0; | ||
944 | chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id); | ||
945 | count = size; | ||
946 | return count; | ||
947 | } | ||
948 | |||
949 | static struct bin_attribute chp_measurement_attr = { | ||
950 | .attr = { | ||
951 | .name = "measurement", | ||
952 | .mode = S_IRUSR, | ||
953 | .owner = THIS_MODULE, | ||
954 | }, | ||
955 | .size = sizeof(struct cmg_entry), | ||
956 | .read = chp_measurement_read, | ||
957 | }; | ||
958 | |||
959 | static void | ||
960 | chsc_remove_chp_cmg_attr(struct channel_path *chp) | ||
961 | { | ||
962 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
963 | device_remove_bin_file(&chp->dev, &chp_measurement_attr); | ||
964 | } | ||
965 | |||
966 | static int | ||
967 | chsc_add_chp_cmg_attr(struct channel_path *chp) | ||
968 | { | ||
969 | int ret; | ||
970 | |||
971 | ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
972 | if (ret) | ||
973 | return ret; | ||
974 | ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); | ||
975 | if (ret) | ||
976 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
977 | return ret; | ||
978 | } | ||
979 | |||
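The removed chp_measurement_copy_block() (relocated to the new chp.c along with the other measurement attributes, judging by the chp_add_cmg_attr()/chp_remove_cmg_attr() calls below) reads an entry the hardware may update concurrently: it copies the entry twice and retries until both copies agree on values[0]. A standalone sketch of that consistent-read pattern (illustrative, simplified types):

	#include <string.h>

	struct entry { unsigned int values[8]; };

	/* Re-read until two successive copies agree on the word the
	 * updater writes last, as the removed code did. */
	static void copy_consistent(struct entry *dst,
				    const volatile struct entry *src)
	{
		struct entry ref;

		do {
			memcpy(dst, (const void *)src, sizeof(*dst));
			memcpy(&ref, (const void *)src, sizeof(ref));
		} while (ref.values[0] != dst->values[0]);
	}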
980 | static void | 681 | static void |
981 | chsc_remove_cmg_attr(struct channel_subsystem *css) | 682 | chsc_remove_cmg_attr(struct channel_subsystem *css) |
982 | { | 683 | { |
@@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css) | |||
985 | for (i = 0; i <= __MAX_CHPID; i++) { | 686 | for (i = 0; i <= __MAX_CHPID; i++) { |
986 | if (!css->chps[i]) | 687 | if (!css->chps[i]) |
987 | continue; | 688 | continue; |
988 | chsc_remove_chp_cmg_attr(css->chps[i]); | 689 | chp_remove_cmg_attr(css->chps[i]); |
989 | } | 690 | } |
990 | } | 691 | } |
991 | 692 | ||
@@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css) | |||
998 | for (i = 0; i <= __MAX_CHPID; i++) { | 699 | for (i = 0; i <= __MAX_CHPID; i++) { |
999 | if (!css->chps[i]) | 700 | if (!css->chps[i]) |
1000 | continue; | 701 | continue; |
1001 | ret = chsc_add_chp_cmg_attr(css->chps[i]); | 702 | ret = chp_add_cmg_attr(css->chps[i]); |
1002 | if (ret) | 703 | if (ret) |
1003 | goto cleanup; | 704 | goto cleanup; |
1004 | } | 705 | } |
@@ -1007,12 +708,11 @@ cleanup: | |||
1007 | for (--i; i >= 0; i--) { | 708 | for (--i; i >= 0; i--) { |
1008 | if (!css->chps[i]) | 709 | if (!css->chps[i]) |
1009 | continue; | 710 | continue; |
1010 | chsc_remove_chp_cmg_attr(css->chps[i]); | 711 | chp_remove_cmg_attr(css->chps[i]); |
1011 | } | 712 | } |
1012 | return ret; | 713 | return ret; |
1013 | } | 714 | } |
1014 | 715 | ||
1015 | |||
1016 | static int | 716 | static int |
1017 | __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | 717 | __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) |
1018 | { | 718 | { |
@@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable) | |||
1118 | } else | 818 | } else |
1119 | chsc_remove_cmg_attr(css); | 819 | chsc_remove_cmg_attr(css); |
1120 | } | 820 | } |
1121 | if (enable && !css->cm_enabled) { | 821 | if (!css->cm_enabled) { |
1122 | free_page((unsigned long)css->cub_addr1); | 822 | free_page((unsigned long)css->cub_addr1); |
1123 | free_page((unsigned long)css->cub_addr2); | 823 | free_page((unsigned long)css->cub_addr2); |
1124 | } | 824 | } |
@@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable) | |||
1127 | return ret; | 827 | return ret; |
1128 | } | 828 | } |
1129 | 829 | ||
1130 | /* | 830 | int chsc_determine_channel_path_description(struct chp_id chpid, |
1131 | * Files for the channel path entries. | 831 | struct channel_path_desc *desc) |
1132 | */ | ||
1133 | static ssize_t | ||
1134 | chp_status_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1135 | { | ||
1136 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
1137 | |||
1138 | if (!chp) | ||
1139 | return 0; | ||
1140 | return (get_chp_status(chp->id) ? sprintf(buf, "online\n") : | ||
1141 | sprintf(buf, "offline\n")); | ||
1142 | } | ||
1143 | |||
1144 | static ssize_t | ||
1145 | chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | ||
1146 | { | ||
1147 | struct channel_path *cp = container_of(dev, struct channel_path, dev); | ||
1148 | char cmd[10]; | ||
1149 | int num_args; | ||
1150 | int error; | ||
1151 | |||
1152 | num_args = sscanf(buf, "%5s", cmd); | ||
1153 | if (!num_args) | ||
1154 | return count; | ||
1155 | |||
1156 | if (!strnicmp(cmd, "on", 2)) | ||
1157 | error = s390_vary_chpid(cp->id, 1); | ||
1158 | else if (!strnicmp(cmd, "off", 3)) | ||
1159 | error = s390_vary_chpid(cp->id, 0); | ||
1160 | else | ||
1161 | error = -EINVAL; | ||
1162 | |||
1163 | return error < 0 ? error : count; | ||
1164 | |||
1165 | } | ||
1166 | |||
1167 | static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); | ||
1168 | |||
1169 | static ssize_t | ||
1170 | chp_type_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1171 | { | ||
1172 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
1173 | |||
1174 | if (!chp) | ||
1175 | return 0; | ||
1176 | return sprintf(buf, "%x\n", chp->desc.desc); | ||
1177 | } | ||
1178 | |||
1179 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); | ||
1180 | |||
1181 | static ssize_t | ||
1182 | chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1183 | { | ||
1184 | struct channel_path *chp = to_channelpath(dev); | ||
1185 | |||
1186 | if (!chp) | ||
1187 | return 0; | ||
1188 | if (chp->cmg == -1) /* channel measurements not available */ | ||
1189 | return sprintf(buf, "unknown\n"); | ||
1190 | return sprintf(buf, "%x\n", chp->cmg); | ||
1191 | } | ||
1192 | |||
1193 | static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); | ||
1194 | |||
1195 | static ssize_t | ||
1196 | chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1197 | { | ||
1198 | struct channel_path *chp = to_channelpath(dev); | ||
1199 | |||
1200 | if (!chp) | ||
1201 | return 0; | ||
1202 | if (chp->shared == -1) /* channel measurements not available */ | ||
1203 | return sprintf(buf, "unknown\n"); | ||
1204 | return sprintf(buf, "%x\n", chp->shared); | ||
1205 | } | ||
1206 | |||
1207 | static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); | ||
1208 | |||
1209 | static struct attribute * chp_attrs[] = { | ||
1210 | &dev_attr_status.attr, | ||
1211 | &dev_attr_type.attr, | ||
1212 | &dev_attr_cmg.attr, | ||
1213 | &dev_attr_shared.attr, | ||
1214 | NULL, | ||
1215 | }; | ||
1216 | |||
1217 | static struct attribute_group chp_attr_group = { | ||
1218 | .attrs = chp_attrs, | ||
1219 | }; | ||
1220 | |||
1221 | static void | ||
1222 | chp_release(struct device *dev) | ||
1223 | { | ||
1224 | struct channel_path *cp; | ||
1225 | |||
1226 | cp = container_of(dev, struct channel_path, dev); | ||
1227 | kfree(cp); | ||
1228 | } | ||
1229 | |||
1230 | static int | ||
1231 | chsc_determine_channel_path_description(int chpid, | ||
1232 | struct channel_path_desc *desc) | ||
1233 | { | 832 | { |
1234 | int ccode, ret; | 833 | int ccode, ret; |
1235 | 834 | ||
@@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid, | |||
1252 | scpd_area->request.length = 0x0010; | 851 | scpd_area->request.length = 0x0010; |
1253 | scpd_area->request.code = 0x0002; | 852 | scpd_area->request.code = 0x0002; |
1254 | 853 | ||
1255 | scpd_area->first_chpid = chpid; | 854 | scpd_area->first_chpid = chpid.id; |
1256 | scpd_area->last_chpid = chpid; | 855 | scpd_area->last_chpid = chpid.id; |
1257 | 856 | ||
1258 | ccode = chsc(scpd_area); | 857 | ccode = chsc(scpd_area); |
1259 | if (ccode > 0) { | 858 | if (ccode > 0) { |
@@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, | |||
1316 | } | 915 | } |
1317 | } | 916 | } |
1318 | 917 | ||
1319 | static int | 918 | int chsc_get_channel_measurement_chars(struct channel_path *chp) |
1320 | chsc_get_channel_measurement_chars(struct channel_path *chp) | ||
1321 | { | 919 | { |
1322 | int ccode, ret; | 920 | int ccode, ret; |
1323 | 921 | ||
@@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
1349 | scmc_area->request.length = 0x0010; | 947 | scmc_area->request.length = 0x0010; |
1350 | scmc_area->request.code = 0x0022; | 948 | scmc_area->request.code = 0x0022; |
1351 | 949 | ||
1352 | scmc_area->first_chpid = chp->id; | 950 | scmc_area->first_chpid = chp->chpid.id; |
1353 | scmc_area->last_chpid = chp->id; | 951 | scmc_area->last_chpid = chp->chpid.id; |
1354 | 952 | ||
1355 | ccode = chsc(scmc_area); | 953 | ccode = chsc(scmc_area); |
1356 | if (ccode > 0) { | 954 | if (ccode > 0) { |
@@ -1392,94 +990,6 @@ out: | |||
1392 | return ret; | 990 | return ret; |
1393 | } | 991 | } |
1394 | 992 | ||
1395 | /* | ||
1396 | * Entries for chpids on the system bus. | ||
1397 | * This replaces /proc/chpids. | ||
1398 | */ | ||
1399 | static int | ||
1400 | new_channel_path(int chpid) | ||
1401 | { | ||
1402 | struct channel_path *chp; | ||
1403 | int ret; | ||
1404 | |||
1405 | chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); | ||
1406 | if (!chp) | ||
1407 | return -ENOMEM; | ||
1408 | |||
1409 | /* fill in status, etc. */ | ||
1410 | chp->id = chpid; | ||
1411 | chp->state = 1; | ||
1412 | chp->dev.parent = &css[0]->device; | ||
1413 | chp->dev.release = chp_release; | ||
1414 | snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); | ||
1415 | |||
1416 | /* Obtain channel path description and fill it in. */ | ||
1417 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); | ||
1418 | if (ret) | ||
1419 | goto out_free; | ||
1420 | /* Get channel-measurement characteristics. */ | ||
1421 | if (css_characteristics_avail && css_chsc_characteristics.scmc | ||
1422 | && css_chsc_characteristics.secm) { | ||
1423 | ret = chsc_get_channel_measurement_chars(chp); | ||
1424 | if (ret) | ||
1425 | goto out_free; | ||
1426 | } else { | ||
1427 | static int msg_done; | ||
1428 | |||
1429 | if (!msg_done) { | ||
1430 | printk(KERN_WARNING "cio: Channel measurements not " | ||
1431 | "available, continuing.\n"); | ||
1432 | msg_done = 1; | ||
1433 | } | ||
1434 | chp->cmg = -1; | ||
1435 | } | ||
1436 | |||
1437 | /* make it known to the system */ | ||
1438 | ret = device_register(&chp->dev); | ||
1439 | if (ret) { | ||
1440 | printk(KERN_WARNING "%s: could not register %02x\n", | ||
1441 | __func__, chpid); | ||
1442 | goto out_free; | ||
1443 | } | ||
1444 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); | ||
1445 | if (ret) { | ||
1446 | device_unregister(&chp->dev); | ||
1447 | goto out_free; | ||
1448 | } | ||
1449 | mutex_lock(&css[0]->mutex); | ||
1450 | if (css[0]->cm_enabled) { | ||
1451 | ret = chsc_add_chp_cmg_attr(chp); | ||
1452 | if (ret) { | ||
1453 | sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); | ||
1454 | device_unregister(&chp->dev); | ||
1455 | mutex_unlock(&css[0]->mutex); | ||
1456 | goto out_free; | ||
1457 | } | ||
1458 | } | ||
1459 | css[0]->chps[chpid] = chp; | ||
1460 | mutex_unlock(&css[0]->mutex); | ||
1461 | return ret; | ||
1462 | out_free: | ||
1463 | kfree(chp); | ||
1464 | return ret; | ||
1465 | } | ||
1466 | |||
1467 | void * | ||
1468 | chsc_get_chp_desc(struct subchannel *sch, int chp_no) | ||
1469 | { | ||
1470 | struct channel_path *chp; | ||
1471 | struct channel_path_desc *desc; | ||
1472 | |||
1473 | chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]]; | ||
1474 | if (!chp) | ||
1475 | return NULL; | ||
1476 | desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); | ||
1477 | if (!desc) | ||
1478 | return NULL; | ||
1479 | memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); | ||
1480 | return desc; | ||
1481 | } | ||
1482 | |||
1483 | static int __init | 993 | static int __init |
1484 | chsc_alloc_sei_area(void) | 994 | chsc_alloc_sei_area(void) |
1485 | { | 995 | { |
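With the sysfs attribute code gone from chsc.c, chsc_chp_vary() above is the remaining vary entry point here; the status bookkeeping and the sysfs "status" file now live in chp.c. A hedged sketch of how a caller might look (illustrative; the chp.c side is not part of this hunk):

	/* Sketch only; assumes <asm/chpid.h> and the chsc.h declarations. */
	static int vary_chpid(u8 id, int on)
	{
		struct chp_id chpid;

		chp_id_init(&chpid);	/* zero-initialized, css 0 assumed */
		chpid.id = id;
		return chsc_chp_vary(chpid, on);
	}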
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 0fb2b024208f..2ad81d11cf7b 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -1,9 +1,10 @@ | |||
1 | #ifndef S390_CHSC_H | 1 | #ifndef S390_CHSC_H |
2 | #define S390_CHSC_H | 2 | #define S390_CHSC_H |
3 | 3 | ||
4 | #define CHSC_SEI_ACC_CHPID 1 | 4 | #include <linux/types.h> |
5 | #define CHSC_SEI_ACC_LINKADDR 2 | 5 | #include <linux/device.h> |
6 | #define CHSC_SEI_ACC_FULLLINKADDR 3 | 6 | #include <asm/chpid.h> |
7 | #include "schid.h" | ||
7 | 8 | ||
8 | #define CHSC_SDA_OC_MSS 0x2 | 9 | #define CHSC_SDA_OC_MSS 0x2 |
9 | 10 | ||
@@ -33,23 +34,9 @@ struct channel_path_desc { | |||
33 | u8 chpp; | 34 | u8 chpp; |
34 | } __attribute__ ((packed)); | 35 | } __attribute__ ((packed)); |
35 | 36 | ||
36 | struct channel_path { | 37 | struct channel_path; |
37 | int id; | ||
38 | int state; | ||
39 | struct channel_path_desc desc; | ||
40 | /* Channel-measurement related stuff: */ | ||
41 | int cmg; | ||
42 | int shared; | ||
43 | void *cmg_chars; | ||
44 | struct device dev; | ||
45 | }; | ||
46 | 38 | ||
47 | extern void s390_process_css( void ); | 39 | extern void chsc_process_crw(void); |
48 | extern void chsc_validate_chpids(struct subchannel *); | ||
49 | extern void chpid_is_actually_online(int); | ||
50 | extern int css_get_ssd_info(struct subchannel *); | ||
51 | extern int chsc_process_crw(void); | ||
52 | extern int chp_process_crw(int, int); | ||
53 | 40 | ||
54 | struct css_general_char { | 41 | struct css_general_char { |
55 | u64 : 41; | 42 | u64 : 41; |
@@ -82,15 +69,26 @@ struct css_chsc_char { | |||
82 | extern struct css_general_char css_general_characteristics; | 69 | extern struct css_general_char css_general_characteristics; |
83 | extern struct css_chsc_char css_chsc_characteristics; | 70 | extern struct css_chsc_char css_chsc_characteristics; |
84 | 71 | ||
72 | struct chsc_ssd_info { | ||
73 | u8 path_mask; | ||
74 | u8 fla_valid_mask; | ||
75 | struct chp_id chpid[8]; | ||
76 | u16 fla[8]; | ||
77 | }; | ||
78 | extern int chsc_get_ssd_info(struct subchannel_id schid, | ||
79 | struct chsc_ssd_info *ssd); | ||
85 | extern int chsc_determine_css_characteristics(void); | 80 | extern int chsc_determine_css_characteristics(void); |
86 | extern int css_characteristics_avail; | 81 | extern int css_characteristics_avail; |
87 | 82 | ||
88 | extern void *chsc_get_chp_desc(struct subchannel*, int); | ||
89 | |||
90 | extern int chsc_enable_facility(int); | 83 | extern int chsc_enable_facility(int); |
91 | struct channel_subsystem; | 84 | struct channel_subsystem; |
92 | extern int chsc_secm(struct channel_subsystem *, int); | 85 | extern int chsc_secm(struct channel_subsystem *, int); |
93 | 86 | ||
94 | #define to_channelpath(device) container_of(device, struct channel_path, dev) | 87 | int chsc_chp_vary(struct chp_id chpid, int on); |
88 | int chsc_determine_channel_path_description(struct chp_id chpid, | ||
89 | struct channel_path_desc *desc); | ||
90 | void chsc_chp_online(struct chp_id chpid); | ||
91 | void chsc_chp_offline(struct chp_id chpid); | ||
92 | int chsc_get_channel_measurement_chars(struct channel_path *chp); | ||
95 | 93 | ||
96 | #endif | 94 | #endif |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 9cb129ab5be5..ea1defba5693 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/setup.h> | 22 | #include <asm/setup.h> |
23 | #include <asm/reset.h> | 23 | #include <asm/reset.h> |
24 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
25 | #include <asm/chpid.h> | ||
25 | #include "airq.h" | 26 | #include "airq.h" |
26 | #include "cio.h" | 27 | #include "cio.h" |
27 | #include "css.h" | 28 | #include "css.h" |
@@ -29,6 +30,7 @@ | |||
29 | #include "ioasm.h" | 30 | #include "ioasm.h" |
30 | #include "blacklist.h" | 31 | #include "blacklist.h" |
31 | #include "cio_debug.h" | 32 | #include "cio_debug.h" |
33 | #include "chp.h" | ||
32 | #include "../s390mach.h" | 34 | #include "../s390mach.h" |
33 | 35 | ||
34 | debug_info_t *cio_debug_msg_id; | 36 | debug_info_t *cio_debug_msg_id; |
@@ -592,9 +594,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
592 | err = -ENODEV; | 594 | err = -ENODEV; |
593 | goto out; | 595 | goto out; |
594 | } | 596 | } |
595 | sch->opm = 0xff; | 597 | if (cio_is_console(sch->schid)) |
596 | if (!cio_is_console(sch->schid)) | 598 | sch->opm = 0xff; |
597 | chsc_validate_chpids(sch); | 599 | else |
600 | sch->opm = chp_get_sch_opm(sch); | ||
598 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | 601 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
599 | 602 | ||
600 | CIO_DEBUG(KERN_INFO, 0, | 603 | CIO_DEBUG(KERN_INFO, 0, |
@@ -954,6 +957,7 @@ static void css_reset(void) | |||
954 | { | 957 | { |
955 | int i, ret; | 958 | int i, ret; |
956 | unsigned long long timeout; | 959 | unsigned long long timeout; |
960 | struct chp_id chpid; | ||
957 | 961 | ||
958 | /* Reset subchannels. */ | 962 | /* Reset subchannels. */ |
959 | for_each_subchannel(__shutdown_subchannel_easy, NULL); | 963 | for_each_subchannel(__shutdown_subchannel_easy, NULL); |
@@ -963,8 +967,10 @@ static void css_reset(void) | |||
963 | __ctl_set_bit(14, 28); | 967 | __ctl_set_bit(14, 28); |
964 | /* Temporarily reenable machine checks. */ | 968 | /* Temporarily reenable machine checks. */ |
965 | local_mcck_enable(); | 969 | local_mcck_enable(); |
970 | chp_id_init(&chpid); | ||
966 | for (i = 0; i <= __MAX_CHPID; i++) { | 971 | for (i = 0; i <= __MAX_CHPID; i++) { |
967 | ret = rchp(i); | 972 | chpid.id = i; |
973 | ret = rchp(chpid); | ||
968 | if ((ret == 0) || (ret == 2)) | 974 | if ((ret == 0) || (ret == 2)) |
969 | /* | 975 | /* |
970 | * rchp either succeeded, or another rchp is already | 976 | * rchp either succeeded, or another rchp is already |
@@ -1048,37 +1054,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid) | |||
1048 | do_reipl_asm(*((__u32*)&schid)); | 1054 | do_reipl_asm(*((__u32*)&schid)); |
1049 | } | 1055 | } |
1050 | 1056 | ||
1051 | static struct schib __initdata ipl_schib; | 1057 | int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) |
1052 | |||
1053 | /* | ||
1054 | * ipl_save_parameters gets called very early. It is not allowed to access | ||
1055 | * anything in the bss section at all. The bss section is not cleared yet, | ||
1056 | * but may contain some ipl parameters written by the firmware. | ||
1057 | * These parameters (if present) are copied to 0x2000. | ||
1058 | * To avoid corruption of the ipl parameters, all variables used by this | ||
1059 | * function must reside on the stack or in the data section. | ||
1060 | */ | ||
1061 | void ipl_save_parameters(void) | ||
1062 | { | 1058 | { |
1063 | struct subchannel_id schid; | 1059 | struct subchannel_id schid; |
1064 | unsigned int *ipl_ptr; | 1060 | struct schib schib; |
1065 | void *src, *dst; | ||
1066 | 1061 | ||
1067 | schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; | 1062 | schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; |
1068 | if (!schid.one) | 1063 | if (!schid.one) |
1069 | return; | 1064 | return -ENODEV; |
1070 | if (stsch(schid, &ipl_schib)) | 1065 | if (stsch(schid, &schib)) |
1071 | return; | 1066 | return -ENODEV; |
1072 | if (!ipl_schib.pmcw.dnv) | 1067 | if (!schib.pmcw.dnv) |
1073 | return; | 1068 | return -ENODEV; |
1074 | ipl_devno = ipl_schib.pmcw.dev; | 1069 | iplinfo->devno = schib.pmcw.dev; |
1075 | ipl_flags |= IPL_DEVNO_VALID; | 1070 | iplinfo->is_qdio = schib.pmcw.qf; |
1076 | if (!ipl_schib.pmcw.qf) | 1071 | return 0; |
1077 | return; | ||
1078 | ipl_flags |= IPL_PARMBLOCK_VALID; | ||
1079 | ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; | ||
1080 | src = (void *)(unsigned long)*ipl_ptr; | ||
1081 | dst = (void *)IPL_PARMBLOCK_ORIGIN; | ||
1082 | memmove(dst, src, PAGE_SIZE); | ||
1083 | *ipl_ptr = IPL_PARMBLOCK_ORIGIN; | ||
1084 | } | 1072 | } |
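
The cio.c hunk above replaces the side-effecting ipl_save_parameters() with a plain query: cio_get_iplinfo() reports the IPL device number and whether that device is QDIO-capable, and leaves all parameter-block copying to the IPL layer. A minimal sketch of a consumer, assuming struct cio_iplinfo carries only the two fields set in this hunk (devno, is_qdio):

#include <linux/kernel.h>
#include <asm/cio.h>

/* Hypothetical early-boot caller; the function name is illustrative. */
static int __init example_report_ipl_device(void)
{
	struct cio_iplinfo iplinfo;

	if (cio_get_iplinfo(&iplinfo))
		return -ENODEV;	/* not IPLed from a CCW device */
	printk(KERN_INFO "IPL device 0x%04x%s\n", iplinfo.devno,
	       iplinfo.is_qdio ? " (QDIO)" : "");
	return 0;
}
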
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 35154a210357..7446c39951a7 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
@@ -1,18 +1,11 @@ | |||
1 | #ifndef S390_CIO_H | 1 | #ifndef S390_CIO_H |
2 | #define S390_CIO_H | 2 | #define S390_CIO_H |
3 | 3 | ||
4 | #include "schid.h" | ||
5 | #include <linux/mutex.h> | 4 | #include <linux/mutex.h> |
6 | 5 | #include <linux/device.h> | |
7 | /* | 6 | #include <asm/chpid.h> |
8 | * where we put the ssd info | 7 | #include "chsc.h" |
9 | */ | 8 | #include "schid.h" |
10 | struct ssd_info { | ||
11 | __u8 valid:1; | ||
12 | __u8 type:7; /* subchannel type */ | ||
13 | __u8 chpid[8]; /* chpids */ | ||
14 | __u16 fla[8]; /* full link addresses */ | ||
15 | } __attribute__ ((packed)); | ||
16 | 9 | ||
17 | /* | 10 | /* |
18 | * path management control word | 11 | * path management control word |
@@ -108,7 +101,7 @@ struct subchannel { | |||
108 | struct schib schib; /* subchannel information block */ | 101 | struct schib schib; /* subchannel information block */ |
109 | struct orb orb; /* operation request block */ | 102 | struct orb orb; /* operation request block */ |
110 | struct ccw1 sense_ccw; /* static ccw for sense command */ | 103 | struct ccw1 sense_ccw; /* static ccw for sense command */ |
111 | struct ssd_info ssd_info; /* subchannel description */ | 104 | struct chsc_ssd_info ssd_info; /* subchannel description */ |
112 | struct device dev; /* entry in device tree */ | 105 | struct device dev; /* entry in device tree */ |
113 | struct css_driver *driver; | 106 | struct css_driver *driver; |
114 | } __attribute__ ((aligned(8))); | 107 | } __attribute__ ((aligned(8))); |
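
With struct ssd_info gone, a subchannel now embeds struct chsc_ssd_info, whose chpid[] entries are full struct chp_id values qualified by a path_mask (bit 0x80 >> i guards entry i, as seen in ssd_from_pmcw() later in this merge). A sketch of walking the valid paths under the new layout:

/* Sketch only; assumes the path_mask/chpid[] layout used by this patch. */
static void example_walk_paths(struct subchannel *sch)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (!(sch->ssd_info.path_mask & (0x80 >> i)))
			continue;	/* no channel path in this slot */
		printk(KERN_DEBUG "path %d -> chpid %02x\n",
		       i, sch->ssd_info.chpid[i].id);
	}
}
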
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 90b22faabbf7..28abd697be1a 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
@@ -476,7 +476,7 @@ struct cmb_area { | |||
476 | }; | 476 | }; |
477 | 477 | ||
478 | static struct cmb_area cmb_area = { | 478 | static struct cmb_area cmb_area = { |
479 | .lock = SPIN_LOCK_UNLOCKED, | 479 | .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock), |
480 | .list = LIST_HEAD_INIT(cmb_area.list), | 480 | .list = LIST_HEAD_INIT(cmb_area.list), |
481 | .num_channels = 1024, | 481 | .num_channels = 1024, |
482 | }; | 482 | }; |
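
The cmf.c change is a lockdep fix rather than a functional one: the legacy SPIN_LOCK_UNLOCKED initializer puts every statically initialized spinlock into the same lock class, whereas __SPIN_LOCK_UNLOCKED(name) keys the class to the named lock. The same pattern applies to any static object with an embedded lock:

#include <linux/spinlock.h>
#include <linux/list.h>

/* Generic shape of the fix: naming the lock lets lockdep distinguish
 * it from other statically initialized spinlocks. */
static struct example_area {
	spinlock_t lock;
	struct list_head list;
} example_area = {
	.lock = __SPIN_LOCK_UNLOCKED(example_area.lock),
	.list = LIST_HEAD_INIT(example_area.list),
};
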
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index fe0ace7aece8..27c6d9e55b23 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -20,8 +20,9 @@ | |||
20 | #include "ioasm.h" | 20 | #include "ioasm.h" |
21 | #include "chsc.h" | 21 | #include "chsc.h" |
22 | #include "device.h" | 22 | #include "device.h" |
23 | #include "idset.h" | ||
24 | #include "chp.h" | ||
23 | 25 | ||
24 | int need_rescan = 0; | ||
25 | int css_init_done = 0; | 26 | int css_init_done = 0; |
26 | static int need_reprobe = 0; | 27 | static int need_reprobe = 0; |
27 | static int max_ssid = 0; | 28 | static int max_ssid = 0; |
@@ -125,8 +126,52 @@ void css_sch_device_unregister(struct subchannel *sch) | |||
125 | mutex_unlock(&sch->reg_mutex); | 126 | mutex_unlock(&sch->reg_mutex); |
126 | } | 127 | } |
127 | 128 | ||
128 | static int | 129 | static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) |
129 | css_register_subchannel(struct subchannel *sch) | 130 | { |
131 | int i; | ||
132 | int mask; | ||
133 | |||
134 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); | ||
135 | ssd->path_mask = pmcw->pim; | ||
136 | for (i = 0; i < 8; i++) { | ||
137 | mask = 0x80 >> i; | ||
138 | if (pmcw->pim & mask) { | ||
139 | chp_id_init(&ssd->chpid[i]); | ||
140 | ssd->chpid[i].id = pmcw->chpid[i]; | ||
141 | } | ||
142 | } | ||
143 | } | ||
144 | |||
145 | static void ssd_register_chpids(struct chsc_ssd_info *ssd) | ||
146 | { | ||
147 | int i; | ||
148 | int mask; | ||
149 | |||
150 | for (i = 0; i < 8; i++) { | ||
151 | mask = 0x80 >> i; | ||
152 | if (ssd->path_mask & mask) | ||
153 | if (!chp_is_registered(ssd->chpid[i])) | ||
154 | chp_new(ssd->chpid[i]); | ||
155 | } | ||
156 | } | ||
157 | |||
158 | void css_update_ssd_info(struct subchannel *sch) | ||
159 | { | ||
160 | int ret; | ||
161 | |||
162 | if (cio_is_console(sch->schid)) { | ||
163 | /* Console is initialized too early for functions requiring | ||
164 | * memory allocation. */ | ||
165 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); | ||
166 | } else { | ||
167 | ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); | ||
168 | if (ret) | ||
169 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); | ||
170 | ssd_register_chpids(&sch->ssd_info); | ||
171 | } | ||
172 | } | ||
173 | |||
174 | static int css_register_subchannel(struct subchannel *sch) | ||
130 | { | 175 | { |
131 | int ret; | 176 | int ret; |
132 | 177 | ||
@@ -135,9 +180,7 @@ css_register_subchannel(struct subchannel *sch) | |||
135 | sch->dev.bus = &css_bus_type; | 180 | sch->dev.bus = &css_bus_type; |
136 | sch->dev.release = &css_subchannel_release; | 181 | sch->dev.release = &css_subchannel_release; |
137 | sch->dev.groups = subch_attr_groups; | 182 | sch->dev.groups = subch_attr_groups; |
138 | 183 | css_update_ssd_info(sch); | |
139 | css_get_ssd_info(sch); | ||
140 | |||
141 | /* make it known to the system */ | 184 | /* make it known to the system */ |
142 | ret = css_sch_device_register(sch); | 185 | ret = css_sch_device_register(sch); |
143 | if (ret) { | 186 | if (ret) { |
@@ -306,7 +349,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | |||
306 | return css_probe_device(schid); | 349 | return css_probe_device(schid); |
307 | } | 350 | } |
308 | 351 | ||
309 | static int css_evaluate_subchannel(struct subchannel_id schid, int slow) | 352 | static void css_evaluate_subchannel(struct subchannel_id schid, int slow) |
310 | { | 353 | { |
311 | struct subchannel *sch; | 354 | struct subchannel *sch; |
312 | int ret; | 355 | int ret; |
@@ -317,53 +360,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow) | |||
317 | put_device(&sch->dev); | 360 | put_device(&sch->dev); |
318 | } else | 361 | } else |
319 | ret = css_evaluate_new_subchannel(schid, slow); | 362 | ret = css_evaluate_new_subchannel(schid, slow); |
320 | 363 | if (ret == -EAGAIN) | |
321 | return ret; | 364 | css_schedule_eval(schid); |
322 | } | 365 | } |
323 | 366 | ||
324 | static int | 367 | static struct idset *slow_subchannel_set; |
325 | css_rescan_devices(struct subchannel_id schid, void *data) | 368 | static spinlock_t slow_subchannel_lock; |
369 | |||
370 | static int __init slow_subchannel_init(void) | ||
326 | { | 371 | { |
327 | return css_evaluate_subchannel(schid, 1); | 372 | spin_lock_init(&slow_subchannel_lock); |
373 | slow_subchannel_set = idset_sch_new(); | ||
374 | if (!slow_subchannel_set) { | ||
375 | printk(KERN_WARNING "cio: could not allocate slow subchannel " | ||
376 | "set\n"); | ||
377 | return -ENOMEM; | ||
378 | } | ||
379 | return 0; | ||
328 | } | 380 | } |
329 | 381 | ||
330 | struct slow_subchannel { | 382 | subsys_initcall(slow_subchannel_init); |
331 | struct list_head slow_list; | ||
332 | struct subchannel_id schid; | ||
333 | }; | ||
334 | |||
335 | static LIST_HEAD(slow_subchannels_head); | ||
336 | static DEFINE_SPINLOCK(slow_subchannel_lock); | ||
337 | 383 | ||
338 | static void | 384 | static void css_slow_path_func(struct work_struct *unused) |
339 | css_trigger_slow_path(struct work_struct *unused) | ||
340 | { | 385 | { |
341 | CIO_TRACE_EVENT(4, "slowpath"); | 386 | struct subchannel_id schid; |
342 | |||
343 | if (need_rescan) { | ||
344 | need_rescan = 0; | ||
345 | for_each_subchannel(css_rescan_devices, NULL); | ||
346 | return; | ||
347 | } | ||
348 | 387 | ||
388 | CIO_TRACE_EVENT(4, "slowpath"); | ||
349 | spin_lock_irq(&slow_subchannel_lock); | 389 | spin_lock_irq(&slow_subchannel_lock); |
350 | while (!list_empty(&slow_subchannels_head)) { | 390 | init_subchannel_id(&schid); |
351 | struct slow_subchannel *slow_sch = | 391 | while (idset_sch_get_first(slow_subchannel_set, &schid)) { |
352 | list_entry(slow_subchannels_head.next, | 392 | idset_sch_del(slow_subchannel_set, schid); |
353 | struct slow_subchannel, slow_list); | ||
354 | |||
355 | list_del_init(slow_subchannels_head.next); | ||
356 | spin_unlock_irq(&slow_subchannel_lock); | 393 | spin_unlock_irq(&slow_subchannel_lock); |
357 | css_evaluate_subchannel(slow_sch->schid, 1); | 394 | css_evaluate_subchannel(schid, 1); |
358 | spin_lock_irq(&slow_subchannel_lock); | 395 | spin_lock_irq(&slow_subchannel_lock); |
359 | kfree(slow_sch); | ||
360 | } | 396 | } |
361 | spin_unlock_irq(&slow_subchannel_lock); | 397 | spin_unlock_irq(&slow_subchannel_lock); |
362 | } | 398 | } |
363 | 399 | ||
364 | DECLARE_WORK(slow_path_work, css_trigger_slow_path); | 400 | static DECLARE_WORK(slow_path_work, css_slow_path_func); |
365 | struct workqueue_struct *slow_path_wq; | 401 | struct workqueue_struct *slow_path_wq; |
366 | 402 | ||
403 | void css_schedule_eval(struct subchannel_id schid) | ||
404 | { | ||
405 | unsigned long flags; | ||
406 | |||
407 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
408 | idset_sch_add(slow_subchannel_set, schid); | ||
409 | queue_work(slow_path_wq, &slow_path_work); | ||
410 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
411 | } | ||
412 | |||
413 | void css_schedule_eval_all(void) | ||
414 | { | ||
415 | unsigned long flags; | ||
416 | |||
417 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
418 | idset_fill(slow_subchannel_set); | ||
419 | queue_work(slow_path_wq, &slow_path_work); | ||
420 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
421 | } | ||
422 | |||
367 | /* Reprobe subchannel if unregistered. */ | 423 | /* Reprobe subchannel if unregistered. */ |
368 | static int reprobe_subchannel(struct subchannel_id schid, void *data) | 424 | static int reprobe_subchannel(struct subchannel_id schid, void *data) |
369 | { | 425 | { |
@@ -426,33 +482,14 @@ void css_schedule_reprobe(void) | |||
426 | EXPORT_SYMBOL_GPL(css_schedule_reprobe); | 482 | EXPORT_SYMBOL_GPL(css_schedule_reprobe); |
427 | 483 | ||
428 | /* | 484 | /* |
429 | * Rescan for new devices. FIXME: This is slow. | ||
430 | * This function is called when we have lost CRWs due to overflows and we have | ||
431 | * to do subchannel housekeeping. | ||
432 | */ | ||
433 | void | ||
434 | css_reiterate_subchannels(void) | ||
435 | { | ||
436 | css_clear_subchannel_slow_list(); | ||
437 | need_rescan = 1; | ||
438 | } | ||
439 | |||
440 | /* | ||
441 | * Called from the machine check handler for subchannel report words. | 485 | * Called from the machine check handler for subchannel report words. |
442 | */ | 486 | */ |
443 | int | 487 | void css_process_crw(int rsid1, int rsid2) |
444 | css_process_crw(int rsid1, int rsid2) | ||
445 | { | 488 | { |
446 | int ret; | ||
447 | struct subchannel_id mchk_schid; | 489 | struct subchannel_id mchk_schid; |
448 | 490 | ||
449 | CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", | 491 | CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", |
450 | rsid1, rsid2); | 492 | rsid1, rsid2); |
451 | |||
452 | if (need_rescan) | ||
453 | /* We need to iterate all subchannels anyway. */ | ||
454 | return -EAGAIN; | ||
455 | |||
456 | init_subchannel_id(&mchk_schid); | 493 | init_subchannel_id(&mchk_schid); |
457 | mchk_schid.sch_no = rsid1; | 494 | mchk_schid.sch_no = rsid1; |
458 | if (rsid2 != 0) | 495 | if (rsid2 != 0) |
@@ -463,14 +500,7 @@ css_process_crw(int rsid1, int rsid2) | |||
463 | * use stsch() to find out if the subchannel in question has come | 500 | * use stsch() to find out if the subchannel in question has come |
464 | * or gone. | 501 | * or gone. |
465 | */ | 502 | */ |
466 | ret = css_evaluate_subchannel(mchk_schid, 0); | 503 | css_evaluate_subchannel(mchk_schid, 0); |
467 | if (ret == -EAGAIN) { | ||
468 | if (css_enqueue_subchannel_slow(mchk_schid)) { | ||
469 | css_clear_subchannel_slow_list(); | ||
470 | need_rescan = 1; | ||
471 | } | ||
472 | } | ||
473 | return ret; | ||
474 | } | 504 | } |
475 | 505 | ||
476 | static int __init | 506 | static int __init |
@@ -745,47 +775,6 @@ struct bus_type css_bus_type = { | |||
745 | 775 | ||
746 | subsys_initcall(init_channel_subsystem); | 776 | subsys_initcall(init_channel_subsystem); |
747 | 777 | ||
748 | int | ||
749 | css_enqueue_subchannel_slow(struct subchannel_id schid) | ||
750 | { | ||
751 | struct slow_subchannel *new_slow_sch; | ||
752 | unsigned long flags; | ||
753 | |||
754 | new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC); | ||
755 | if (!new_slow_sch) | ||
756 | return -ENOMEM; | ||
757 | new_slow_sch->schid = schid; | ||
758 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
759 | list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head); | ||
760 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
761 | return 0; | ||
762 | } | ||
763 | |||
764 | void | ||
765 | css_clear_subchannel_slow_list(void) | ||
766 | { | ||
767 | unsigned long flags; | ||
768 | |||
769 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
770 | while (!list_empty(&slow_subchannels_head)) { | ||
771 | struct slow_subchannel *slow_sch = | ||
772 | list_entry(slow_subchannels_head.next, | ||
773 | struct slow_subchannel, slow_list); | ||
774 | |||
775 | list_del_init(slow_subchannels_head.next); | ||
776 | kfree(slow_sch); | ||
777 | } | ||
778 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
779 | } | ||
780 | |||
781 | |||
782 | |||
783 | int | ||
784 | css_slow_subchannels_exist(void) | ||
785 | { | ||
786 | return (!list_empty(&slow_subchannels_head)); | ||
787 | } | ||
788 | |||
789 | MODULE_LICENSE("GPL"); | 778 | MODULE_LICENSE("GPL"); |
790 | EXPORT_SYMBOL(css_bus_type); | 779 | EXPORT_SYMBOL(css_bus_type); |
791 | EXPORT_SYMBOL_GPL(css_characteristics_avail); | 780 | EXPORT_SYMBOL_GPL(css_characteristics_avail); |
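
The css.c rework replaces the kmalloc-per-entry slow_subchannel list and the global need_rescan flag with a preallocated idset bitmap, so deferring a subchannel evaluation can no longer fail with -ENOMEM in machine-check context. The two deferral paths callers now use, roughly:

/* Sketch of the new evaluation API introduced above. */
static void example_defer_one(struct subchannel_id schid)
{
	/* mark one subchannel in slow_subchannel_set, kick the worker */
	css_schedule_eval(schid);
}

static void example_defer_everything(void)
{
	/* replaces need_rescan + css_reiterate_subchannels(): fill the
	 * whole set and let slow_path_work visit every subchannel */
	css_schedule_eval_all();
}
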
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index ca2bab932a8a..71fcfdc42800 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
@@ -4,8 +4,11 @@ | |||
4 | #include <linux/mutex.h> | 4 | #include <linux/mutex.h> |
5 | #include <linux/wait.h> | 5 | #include <linux/wait.h> |
6 | #include <linux/workqueue.h> | 6 | #include <linux/workqueue.h> |
7 | #include <linux/device.h> | ||
8 | #include <linux/types.h> | ||
7 | 9 | ||
8 | #include <asm/cio.h> | 10 | #include <asm/cio.h> |
11 | #include <asm/chpid.h> | ||
9 | 12 | ||
10 | #include "schid.h" | 13 | #include "schid.h" |
11 | 14 | ||
@@ -143,13 +146,12 @@ extern void css_sch_device_unregister(struct subchannel *); | |||
143 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); | 146 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); |
144 | extern int css_init_done; | 147 | extern int css_init_done; |
145 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); | 148 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); |
146 | extern int css_process_crw(int, int); | 149 | extern void css_process_crw(int, int); |
147 | extern void css_reiterate_subchannels(void); | 150 | extern void css_reiterate_subchannels(void); |
151 | void css_update_ssd_info(struct subchannel *sch); | ||
148 | 152 | ||
149 | #define __MAX_SUBCHANNEL 65535 | 153 | #define __MAX_SUBCHANNEL 65535 |
150 | #define __MAX_SSID 3 | 154 | #define __MAX_SSID 3 |
151 | #define __MAX_CHPID 255 | ||
152 | #define __MAX_CSSID 0 | ||
153 | 155 | ||
154 | struct channel_subsystem { | 156 | struct channel_subsystem { |
155 | u8 cssid; | 157 | u8 cssid; |
@@ -185,16 +187,12 @@ int device_trigger_verify(struct subchannel *sch); | |||
185 | void device_kill_pending_timer(struct subchannel *); | 187 | void device_kill_pending_timer(struct subchannel *); |
186 | 188 | ||
187 | /* Helper functions to build lists for the slow path. */ | 189 | /* Helper functions to build lists for the slow path. */ |
188 | extern int css_enqueue_subchannel_slow(struct subchannel_id schid); | 190 | void css_schedule_eval(struct subchannel_id schid); |
189 | void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); | 191 | void css_schedule_eval_all(void); |
190 | void css_clear_subchannel_slow_list(void); | ||
191 | int css_slow_subchannels_exist(void); | ||
192 | extern int need_rescan; | ||
193 | 192 | ||
194 | int sch_is_pseudo_sch(struct subchannel *); | 193 | int sch_is_pseudo_sch(struct subchannel *); |
195 | 194 | ||
196 | extern struct workqueue_struct *slow_path_wq; | 195 | extern struct workqueue_struct *slow_path_wq; |
197 | extern struct work_struct slow_path_work; | ||
198 | 196 | ||
199 | int subchannel_add_files (struct device *); | 197 | int subchannel_add_files (struct device *); |
200 | extern struct attribute_group *subch_attr_groups[]; | 198 | extern struct attribute_group *subch_attr_groups[]; |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e322111fb369..03355902c582 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -56,13 +56,12 @@ ccw_bus_match (struct device * dev, struct device_driver * drv) | |||
56 | /* Store modalias string delimited by prefix/suffix string into buffer with | 56 | /* Store modalias string delimited by prefix/suffix string into buffer with |
57 | * specified size. Return length of resulting string (excluding trailing '\0') | 57 | * specified size. Return length of resulting string (excluding trailing '\0') |
58 | * even if string doesn't fit buffer (snprintf semantics). */ | 58 | * even if string doesn't fit buffer (snprintf semantics). */ |
59 | static int snprint_alias(char *buf, size_t size, const char *prefix, | 59 | static int snprint_alias(char *buf, size_t size, |
60 | struct ccw_device_id *id, const char *suffix) | 60 | struct ccw_device_id *id, const char *suffix) |
61 | { | 61 | { |
62 | int len; | 62 | int len; |
63 | 63 | ||
64 | len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type, | 64 | len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model); |
65 | id->cu_model); | ||
66 | if (len > size) | 65 | if (len > size) |
67 | return len; | 66 | return len; |
68 | buf += len; | 67 | buf += len; |
@@ -85,53 +84,40 @@ static int ccw_uevent(struct device *dev, char **envp, int num_envp, | |||
85 | struct ccw_device *cdev = to_ccwdev(dev); | 84 | struct ccw_device *cdev = to_ccwdev(dev); |
86 | struct ccw_device_id *id = &(cdev->id); | 85 | struct ccw_device_id *id = &(cdev->id); |
87 | int i = 0; | 86 | int i = 0; |
88 | int len; | 87 | int len = 0; |
88 | int ret; | ||
89 | char modalias_buf[30]; | ||
89 | 90 | ||
90 | /* CU_TYPE= */ | 91 | /* CU_TYPE= */ |
91 | len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1; | 92 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
92 | if (len > buffer_size || i >= num_envp) | 93 | "CU_TYPE=%04X", id->cu_type); |
93 | return -ENOMEM; | 94 | if (ret) |
94 | envp[i++] = buffer; | 95 | return ret; |
95 | buffer += len; | ||
96 | buffer_size -= len; | ||
97 | 96 | ||
98 | /* CU_MODEL= */ | 97 | /* CU_MODEL= */ |
99 | len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1; | 98 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
100 | if (len > buffer_size || i >= num_envp) | 99 | "CU_MODEL=%02X", id->cu_model); |
101 | return -ENOMEM; | 100 | if (ret) |
102 | envp[i++] = buffer; | 101 | return ret; |
103 | buffer += len; | ||
104 | buffer_size -= len; | ||
105 | 102 | ||
106 | /* The next two can be zero, that's ok for us */ | 103 | /* The next two can be zero, that's ok for us */ |
107 | /* DEV_TYPE= */ | 104 | /* DEV_TYPE= */ |
108 | len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1; | 105 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
109 | if (len > buffer_size || i >= num_envp) | 106 | "DEV_TYPE=%04X", id->dev_type); |
110 | return -ENOMEM; | 107 | if (ret) |
111 | envp[i++] = buffer; | 108 | return ret; |
112 | buffer += len; | ||
113 | buffer_size -= len; | ||
114 | 109 | ||
115 | /* DEV_MODEL= */ | 110 | /* DEV_MODEL= */ |
116 | len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X", | 111 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
117 | (unsigned char) id->dev_model) + 1; | 112 | "DEV_MODEL=%02X", id->dev_model); |
118 | if (len > buffer_size || i >= num_envp) | 113 | if (ret) |
119 | return -ENOMEM; | 114 | return ret; |
120 | envp[i++] = buffer; | ||
121 | buffer += len; | ||
122 | buffer_size -= len; | ||
123 | 115 | ||
124 | /* MODALIAS= */ | 116 | /* MODALIAS= */ |
125 | len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1; | 117 | snprint_alias(modalias_buf, sizeof(modalias_buf), id, ""); |
126 | if (len > buffer_size || i >= num_envp) | 118 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
127 | return -ENOMEM; | 119 | "MODALIAS=%s", modalias_buf); |
128 | envp[i++] = buffer; | 120 | return ret; |
129 | buffer += len; | ||
130 | buffer_size -= len; | ||
131 | |||
132 | envp[i] = NULL; | ||
133 | |||
134 | return 0; | ||
135 | } | 121 | } |
136 | 122 | ||
137 | struct bus_type ccw_bus_type; | 123 | struct bus_type ccw_bus_type; |
@@ -230,12 +216,18 @@ static ssize_t | |||
230 | chpids_show (struct device * dev, struct device_attribute *attr, char * buf) | 216 | chpids_show (struct device * dev, struct device_attribute *attr, char * buf) |
231 | { | 217 | { |
232 | struct subchannel *sch = to_subchannel(dev); | 218 | struct subchannel *sch = to_subchannel(dev); |
233 | struct ssd_info *ssd = &sch->ssd_info; | 219 | struct chsc_ssd_info *ssd = &sch->ssd_info; |
234 | ssize_t ret = 0; | 220 | ssize_t ret = 0; |
235 | int chp; | 221 | int chp; |
222 | int mask; | ||
236 | 223 | ||
237 | for (chp = 0; chp < 8; chp++) | 224 | for (chp = 0; chp < 8; chp++) { |
238 | ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); | 225 | mask = 0x80 >> chp; |
226 | if (ssd->path_mask & mask) | ||
227 | ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); | ||
228 | else | ||
229 | ret += sprintf(buf + ret, "00 "); | ||
230 | } | ||
239 | ret += sprintf (buf+ret, "\n"); | 231 | ret += sprintf (buf+ret, "\n"); |
240 | return min((ssize_t)PAGE_SIZE, ret); | 232 | return min((ssize_t)PAGE_SIZE, ret); |
241 | } | 233 | } |
@@ -280,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
280 | struct ccw_device_id *id = &(cdev->id); | 272 | struct ccw_device_id *id = &(cdev->id); |
281 | int len; | 273 | int len; |
282 | 274 | ||
283 | len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1; | 275 | len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1; |
284 | 276 | ||
285 | return len > PAGE_SIZE ? PAGE_SIZE : len; | 277 | return len > PAGE_SIZE ? PAGE_SIZE : len; |
286 | } | 278 | } |
@@ -298,16 +290,10 @@ int ccw_device_is_orphan(struct ccw_device *cdev) | |||
298 | return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); | 290 | return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); |
299 | } | 291 | } |
300 | 292 | ||
301 | static void ccw_device_unregister(struct work_struct *work) | 293 | static void ccw_device_unregister(struct ccw_device *cdev) |
302 | { | 294 | { |
303 | struct ccw_device_private *priv; | ||
304 | struct ccw_device *cdev; | ||
305 | |||
306 | priv = container_of(work, struct ccw_device_private, kick_work); | ||
307 | cdev = priv->cdev; | ||
308 | if (test_and_clear_bit(1, &cdev->private->registered)) | 295 | if (test_and_clear_bit(1, &cdev->private->registered)) |
309 | device_unregister(&cdev->dev); | 296 | device_del(&cdev->dev); |
310 | put_device(&cdev->dev); | ||
311 | } | 297 | } |
312 | 298 | ||
313 | static void | 299 | static void |
@@ -324,11 +310,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev) | |||
324 | spin_lock_irqsave(cdev->ccwlock, flags); | 310 | spin_lock_irqsave(cdev->ccwlock, flags); |
325 | cdev->private->state = DEV_STATE_NOT_OPER; | 311 | cdev->private->state = DEV_STATE_NOT_OPER; |
326 | spin_unlock_irqrestore(cdev->ccwlock, flags); | 312 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
327 | if (get_device(&cdev->dev)) { | 313 | ccw_device_unregister(cdev); |
328 | PREPARE_WORK(&cdev->private->kick_work, | 314 | put_device(&cdev->dev); |
329 | ccw_device_unregister); | ||
330 | queue_work(ccw_device_work, &cdev->private->kick_work); | ||
331 | } | ||
332 | return ; | 315 | return ; |
333 | } | 316 | } |
334 | sch = to_subchannel(cdev->dev.parent); | 317 | sch = to_subchannel(cdev->dev.parent); |
@@ -413,11 +396,60 @@ ccw_device_set_online(struct ccw_device *cdev) | |||
413 | return (ret == 0) ? -ENODEV : ret; | 396 | return (ret == 0) ? -ENODEV : ret; |
414 | } | 397 | } |
415 | 398 | ||
416 | static ssize_t | 399 | static void online_store_handle_offline(struct ccw_device *cdev) |
417 | online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 400 | { |
401 | if (cdev->private->state == DEV_STATE_DISCONNECTED) | ||
402 | ccw_device_remove_disconnected(cdev); | ||
403 | else if (cdev->drv && cdev->drv->set_offline) | ||
404 | ccw_device_set_offline(cdev); | ||
405 | } | ||
406 | |||
407 | static int online_store_recog_and_online(struct ccw_device *cdev) | ||
408 | { | ||
409 | int ret; | ||
410 | |||
411 | /* Do device recognition, if needed. */ | ||
412 | if (cdev->id.cu_type == 0) { | ||
413 | ret = ccw_device_recognition(cdev); | ||
414 | if (ret) { | ||
415 | printk(KERN_WARNING"Couldn't start recognition " | ||
416 | "for device %s (ret=%d)\n", | ||
417 | cdev->dev.bus_id, ret); | ||
418 | return ret; | ||
419 | } | ||
420 | wait_event(cdev->private->wait_q, | ||
421 | cdev->private->flags.recog_done); | ||
422 | } | ||
423 | if (cdev->drv && cdev->drv->set_online) | ||
424 | ccw_device_set_online(cdev); | ||
425 | return 0; | ||
426 | } | ||
427 | static void online_store_handle_online(struct ccw_device *cdev, int force) | ||
428 | { | ||
429 | int ret; | ||
430 | |||
431 | ret = online_store_recog_and_online(cdev); | ||
432 | if (ret) | ||
433 | return; | ||
434 | if (force && cdev->private->state == DEV_STATE_BOXED) { | ||
435 | ret = ccw_device_stlck(cdev); | ||
436 | if (ret) { | ||
437 | printk(KERN_WARNING"ccw_device_stlck for device %s " | ||
438 | "returned %d!\n", cdev->dev.bus_id, ret); | ||
439 | return; | ||
440 | } | ||
441 | if (cdev->id.cu_type == 0) | ||
442 | cdev->private->state = DEV_STATE_NOT_OPER; | ||
443 | online_store_recog_and_online(cdev); | ||
444 | } | ||
445 | |||
446 | } | ||
447 | |||
448 | static ssize_t online_store (struct device *dev, struct device_attribute *attr, | ||
449 | const char *buf, size_t count) | ||
418 | { | 450 | { |
419 | struct ccw_device *cdev = to_ccwdev(dev); | 451 | struct ccw_device *cdev = to_ccwdev(dev); |
420 | int i, force, ret; | 452 | int i, force; |
421 | char *tmp; | 453 | char *tmp; |
422 | 454 | ||
423 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) | 455 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) |
@@ -434,51 +466,17 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf | |||
434 | force = 0; | 466 | force = 0; |
435 | i = simple_strtoul(buf, &tmp, 16); | 467 | i = simple_strtoul(buf, &tmp, 16); |
436 | } | 468 | } |
437 | if (i == 1) { | 469 | |
438 | /* Do device recognition, if needed. */ | 470 | switch (i) { |
439 | if (cdev->id.cu_type == 0) { | 471 | case 0: |
440 | ret = ccw_device_recognition(cdev); | 472 | online_store_handle_offline(cdev); |
441 | if (ret) { | 473 | break; |
442 | printk(KERN_WARNING"Couldn't start recognition " | 474 | case 1: |
443 | "for device %s (ret=%d)\n", | 475 | online_store_handle_online(cdev, force); |
444 | cdev->dev.bus_id, ret); | 476 | break; |
445 | goto out; | 477 | default: |
446 | } | 478 | count = -EINVAL; |
447 | wait_event(cdev->private->wait_q, | ||
448 | cdev->private->flags.recog_done); | ||
449 | } | ||
450 | if (cdev->drv && cdev->drv->set_online) | ||
451 | ccw_device_set_online(cdev); | ||
452 | } else if (i == 0) { | ||
453 | if (cdev->private->state == DEV_STATE_DISCONNECTED) | ||
454 | ccw_device_remove_disconnected(cdev); | ||
455 | else if (cdev->drv && cdev->drv->set_offline) | ||
456 | ccw_device_set_offline(cdev); | ||
457 | } | ||
458 | if (force && cdev->private->state == DEV_STATE_BOXED) { | ||
459 | ret = ccw_device_stlck(cdev); | ||
460 | if (ret) { | ||
461 | printk(KERN_WARNING"ccw_device_stlck for device %s " | ||
462 | "returned %d!\n", cdev->dev.bus_id, ret); | ||
463 | goto out; | ||
464 | } | ||
465 | /* Do device recognition, if needed. */ | ||
466 | if (cdev->id.cu_type == 0) { | ||
467 | cdev->private->state = DEV_STATE_NOT_OPER; | ||
468 | ret = ccw_device_recognition(cdev); | ||
469 | if (ret) { | ||
470 | printk(KERN_WARNING"Couldn't start recognition " | ||
471 | "for device %s (ret=%d)\n", | ||
472 | cdev->dev.bus_id, ret); | ||
473 | goto out; | ||
474 | } | ||
475 | wait_event(cdev->private->wait_q, | ||
476 | cdev->private->flags.recog_done); | ||
477 | } | ||
478 | if (cdev->drv && cdev->drv->set_online) | ||
479 | ccw_device_set_online(cdev); | ||
480 | } | 479 | } |
481 | out: | ||
482 | if (cdev->drv) | 480 | if (cdev->drv) |
483 | module_put(cdev->drv->owner); | 481 | module_put(cdev->drv->owner); |
484 | atomic_set(&cdev->private->onoff, 0); | 482 | atomic_set(&cdev->private->onoff, 0); |
@@ -548,17 +546,10 @@ static struct attribute_group ccwdev_attr_group = { | |||
548 | .attrs = ccwdev_attrs, | 546 | .attrs = ccwdev_attrs, |
549 | }; | 547 | }; |
550 | 548 | ||
551 | static int | 549 | struct attribute_group *ccwdev_attr_groups[] = { |
552 | device_add_files (struct device *dev) | 550 | &ccwdev_attr_group, |
553 | { | 551 | NULL, |
554 | return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); | 552 | }; |
555 | } | ||
556 | |||
557 | static void | ||
558 | device_remove_files(struct device *dev) | ||
559 | { | ||
560 | sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); | ||
561 | } | ||
562 | 553 | ||
563 | /* this is a simple abstraction for device_register that sets the | 554 | /* this is a simple abstraction for device_register that sets the |
564 | * correct bus type and adds the bus specific files */ | 555 | * correct bus type and adds the bus specific files */ |
@@ -573,10 +564,6 @@ static int ccw_device_register(struct ccw_device *cdev) | |||
573 | return ret; | 564 | return ret; |
574 | 565 | ||
575 | set_bit(1, &cdev->private->registered); | 566 | set_bit(1, &cdev->private->registered); |
576 | if ((ret = device_add_files(dev))) { | ||
577 | if (test_and_clear_bit(1, &cdev->private->registered)) | ||
578 | device_del(dev); | ||
579 | } | ||
580 | return ret; | 567 | return ret; |
581 | } | 568 | } |
582 | 569 | ||
@@ -648,10 +635,6 @@ ccw_device_add_changed(struct work_struct *work) | |||
648 | return; | 635 | return; |
649 | } | 636 | } |
650 | set_bit(1, &cdev->private->registered); | 637 | set_bit(1, &cdev->private->registered); |
651 | if (device_add_files(&cdev->dev)) { | ||
652 | if (test_and_clear_bit(1, &cdev->private->registered)) | ||
653 | device_unregister(&cdev->dev); | ||
654 | } | ||
655 | } | 638 | } |
656 | 639 | ||
657 | void ccw_device_do_unreg_rereg(struct work_struct *work) | 640 | void ccw_device_do_unreg_rereg(struct work_struct *work) |
@@ -664,9 +647,7 @@ void ccw_device_do_unreg_rereg(struct work_struct *work) | |||
664 | cdev = priv->cdev; | 647 | cdev = priv->cdev; |
665 | sch = to_subchannel(cdev->dev.parent); | 648 | sch = to_subchannel(cdev->dev.parent); |
666 | 649 | ||
667 | device_remove_files(&cdev->dev); | 650 | ccw_device_unregister(cdev); |
668 | if (test_and_clear_bit(1, &cdev->private->registered)) | ||
669 | device_del(&cdev->dev); | ||
670 | PREPARE_WORK(&cdev->private->kick_work, | 651 | PREPARE_WORK(&cdev->private->kick_work, |
671 | ccw_device_add_changed); | 652 | ccw_device_add_changed); |
672 | queue_work(ccw_device_work, &cdev->private->kick_work); | 653 | queue_work(ccw_device_work, &cdev->private->kick_work); |
@@ -705,6 +686,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, | |||
705 | cdev->dev.parent = &sch->dev; | 686 | cdev->dev.parent = &sch->dev; |
706 | cdev->dev.release = ccw_device_release; | 687 | cdev->dev.release = ccw_device_release; |
707 | INIT_LIST_HEAD(&cdev->private->kick_work.entry); | 688 | INIT_LIST_HEAD(&cdev->private->kick_work.entry); |
689 | cdev->dev.groups = ccwdev_attr_groups; | ||
708 | /* Do first half of device_register. */ | 690 | /* Do first half of device_register. */ |
709 | device_initialize(&cdev->dev); | 691 | device_initialize(&cdev->dev); |
710 | if (!get_device(&sch->dev)) { | 692 | if (!get_device(&sch->dev)) { |
@@ -736,6 +718,7 @@ static int io_subchannel_recog(struct ccw_device *, struct subchannel *); | |||
736 | static void sch_attach_device(struct subchannel *sch, | 718 | static void sch_attach_device(struct subchannel *sch, |
737 | struct ccw_device *cdev) | 719 | struct ccw_device *cdev) |
738 | { | 720 | { |
721 | css_update_ssd_info(sch); | ||
739 | spin_lock_irq(sch->lock); | 722 | spin_lock_irq(sch->lock); |
740 | sch->dev.driver_data = cdev; | 723 | sch->dev.driver_data = cdev; |
741 | cdev->private->schid = sch->schid; | 724 | cdev->private->schid = sch->schid; |
@@ -871,7 +854,7 @@ io_subchannel_register(struct work_struct *work) | |||
871 | priv = container_of(work, struct ccw_device_private, kick_work); | 854 | priv = container_of(work, struct ccw_device_private, kick_work); |
872 | cdev = priv->cdev; | 855 | cdev = priv->cdev; |
873 | sch = to_subchannel(cdev->dev.parent); | 856 | sch = to_subchannel(cdev->dev.parent); |
874 | 857 | css_update_ssd_info(sch); | |
875 | /* | 858 | /* |
876 | * io_subchannel_register() will also be called after device | 859 | * io_subchannel_register() will also be called after device |
877 | * recognition has been done for a boxed device (which will already | 860 | * recognition has been done for a boxed device (which will already |
@@ -1133,15 +1116,8 @@ io_subchannel_remove (struct subchannel *sch) | |||
1133 | sch->dev.driver_data = NULL; | 1116 | sch->dev.driver_data = NULL; |
1134 | cdev->private->state = DEV_STATE_NOT_OPER; | 1117 | cdev->private->state = DEV_STATE_NOT_OPER; |
1135 | spin_unlock_irqrestore(cdev->ccwlock, flags); | 1118 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
1136 | /* | 1119 | ccw_device_unregister(cdev); |
1137 | * Put unregistration on workqueue to avoid livelocks on the css bus | 1120 | put_device(&cdev->dev); |
1138 | * semaphore. | ||
1139 | */ | ||
1140 | if (get_device(&cdev->dev)) { | ||
1141 | PREPARE_WORK(&cdev->private->kick_work, | ||
1142 | ccw_device_unregister); | ||
1143 | queue_work(ccw_device_work, &cdev->private->kick_work); | ||
1144 | } | ||
1145 | return 0; | 1121 | return 0; |
1146 | } | 1122 | } |
1147 | 1123 | ||
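
Three independent cleanups land in device.c: ccw_uevent is converted to the add_uevent_var() helper, which bounds-checks the envp array and buffer in one place; the sysfs attribute group moves into dev->groups so the driver core creates and removes the files during device_add()/device_del(); and online_store is split into small offline/online helpers driven by a switch on the parsed value. The helper's calling convention in this kernel generation, as used above:

#include <linux/device.h>

/* Sketch: i and len are cursors into envp[] and buffer; a nonzero
 * return means the uevent environment is full. */
static int example_uevent(struct device *dev, char **envp, int num_envp,
			  char *buffer, int buffer_size)
{
	int i = 0;
	int len = 0;

	return add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
			      &len, "EXAMPLE_KEY=%04X", 0x1234);
}
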
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 089a3ddd6265..898ec3b2bebb 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <asm/ccwdev.h> | 16 | #include <asm/ccwdev.h> |
17 | #include <asm/cio.h> | 17 | #include <asm/cio.h> |
18 | #include <asm/chpid.h> | ||
18 | 19 | ||
19 | #include "cio.h" | 20 | #include "cio.h" |
20 | #include "cio_debug.h" | 21 | #include "cio_debug.h" |
@@ -22,6 +23,7 @@ | |||
22 | #include "device.h" | 23 | #include "device.h" |
23 | #include "chsc.h" | 24 | #include "chsc.h" |
24 | #include "ioasm.h" | 25 | #include "ioasm.h" |
26 | #include "chp.h" | ||
25 | 27 | ||
26 | int | 28 | int |
27 | device_is_online(struct subchannel *sch) | 29 | device_is_online(struct subchannel *sch) |
@@ -210,14 +212,18 @@ static void | |||
210 | __recover_lost_chpids(struct subchannel *sch, int old_lpm) | 212 | __recover_lost_chpids(struct subchannel *sch, int old_lpm) |
211 | { | 213 | { |
212 | int mask, i; | 214 | int mask, i; |
215 | struct chp_id chpid; | ||
213 | 216 | ||
217 | chp_id_init(&chpid); | ||
214 | for (i = 0; i<8; i++) { | 218 | for (i = 0; i<8; i++) { |
215 | mask = 0x80 >> i; | 219 | mask = 0x80 >> i; |
216 | if (!(sch->lpm & mask)) | 220 | if (!(sch->lpm & mask)) |
217 | continue; | 221 | continue; |
218 | if (old_lpm & mask) | 222 | if (old_lpm & mask) |
219 | continue; | 223 | continue; |
220 | chpid_is_actually_online(sch->schib.pmcw.chpid[i]); | 224 | chpid.id = sch->schib.pmcw.chpid[i]; |
225 | if (!chp_is_registered(chpid)) | ||
226 | css_schedule_eval_all(); | ||
221 | } | 227 | } |
222 | } | 228 | } |
223 | 229 | ||
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 7c7775aae38a..16f59fcb66b1 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -16,12 +16,14 @@ | |||
16 | 16 | ||
17 | #include <asm/ccwdev.h> | 17 | #include <asm/ccwdev.h> |
18 | #include <asm/idals.h> | 18 | #include <asm/idals.h> |
19 | #include <asm/chpid.h> | ||
19 | 20 | ||
20 | #include "cio.h" | 21 | #include "cio.h" |
21 | #include "cio_debug.h" | 22 | #include "cio_debug.h" |
22 | #include "css.h" | 23 | #include "css.h" |
23 | #include "chsc.h" | 24 | #include "chsc.h" |
24 | #include "device.h" | 25 | #include "device.h" |
26 | #include "chp.h" | ||
25 | 27 | ||
26 | int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) | 28 | int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) |
27 | { | 29 | { |
@@ -606,9 +608,12 @@ void * | |||
606 | ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) | 608 | ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) |
607 | { | 609 | { |
608 | struct subchannel *sch; | 610 | struct subchannel *sch; |
611 | struct chp_id chpid; | ||
609 | 612 | ||
610 | sch = to_subchannel(cdev->dev.parent); | 613 | sch = to_subchannel(cdev->dev.parent); |
611 | return chsc_get_chp_desc(sch, chp_no); | 614 | chp_id_init(&chpid); |
615 | chpid.id = sch->schib.pmcw.chpid[chp_no]; | ||
616 | return chp_get_chp_desc(chpid); | ||
612 | } | 617 | } |
613 | 618 | ||
614 | // FIXME: these have to go: | 619 | // FIXME: these have to go: |
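
ccw_device_get_chp_desc() now resolves the descriptor through the new channel-path layer (chp_get_chp_desc()) instead of calling into chsc with the subchannel. A hedged usage sketch; the assumption that the returned buffer is kmalloc'ed and owned by the caller matches the pre-patch chsc_get_chp_desc() behaviour but is not shown in this hunk:

/* Driver-side sketch: query the descriptor of the first path. */
void *desc;

desc = ccw_device_get_chp_desc(cdev, 0);
if (desc) {
	/* ... inspect the channel-path descriptor ... */
	kfree(desc);	/* assumption: caller owns the allocation */
}
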
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c new file mode 100644 index 000000000000..16ea828e99f7 --- /dev/null +++ b/drivers/s390/cio/idset.c | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/idset.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/slab.h> | ||
9 | #include <asm/bitops.h> | ||
10 | #include "idset.h" | ||
11 | #include "css.h" | ||
12 | |||
13 | struct idset { | ||
14 | int num_ssid; | ||
15 | int num_id; | ||
16 | unsigned long bitmap[0]; | ||
17 | }; | ||
18 | |||
19 | static inline unsigned long bitmap_size(int num_ssid, int num_id) | ||
20 | { | ||
21 | return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long); | ||
22 | } | ||
23 | |||
24 | static struct idset *idset_new(int num_ssid, int num_id) | ||
25 | { | ||
26 | struct idset *set; | ||
27 | |||
28 | set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id), | ||
29 | GFP_KERNEL); | ||
30 | if (set) { | ||
31 | set->num_ssid = num_ssid; | ||
32 | set->num_id = num_id; | ||
33 | } | ||
34 | return set; | ||
35 | } | ||
36 | |||
37 | void idset_free(struct idset *set) | ||
38 | { | ||
39 | kfree(set); | ||
40 | } | ||
41 | |||
42 | void idset_clear(struct idset *set) | ||
43 | { | ||
44 | memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id)); | ||
45 | } | ||
46 | |||
47 | void idset_fill(struct idset *set) | ||
48 | { | ||
49 | memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id)); | ||
50 | } | ||
51 | |||
52 | static inline void idset_add(struct idset *set, int ssid, int id) | ||
53 | { | ||
54 | set_bit(ssid * set->num_id + id, set->bitmap); | ||
55 | } | ||
56 | |||
57 | static inline void idset_del(struct idset *set, int ssid, int id) | ||
58 | { | ||
59 | clear_bit(ssid * set->num_id + id, set->bitmap); | ||
60 | } | ||
61 | |||
62 | static inline int idset_contains(struct idset *set, int ssid, int id) | ||
63 | { | ||
64 | return test_bit(ssid * set->num_id + id, set->bitmap); | ||
65 | } | ||
66 | |||
67 | static inline int idset_get_first(struct idset *set, int *ssid, int *id) | ||
68 | { | ||
69 | int bitnum; | ||
70 | |||
71 | bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id); | ||
72 | if (bitnum >= set->num_ssid * set->num_id) | ||
73 | return 0; | ||
74 | *ssid = bitnum / set->num_id; | ||
75 | *id = bitnum % set->num_id; | ||
76 | return 1; | ||
77 | } | ||
78 | |||
79 | struct idset *idset_sch_new(void) | ||
80 | { | ||
81 | return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1); | ||
82 | } | ||
83 | |||
84 | void idset_sch_add(struct idset *set, struct subchannel_id schid) | ||
85 | { | ||
86 | idset_add(set, schid.ssid, schid.sch_no); | ||
87 | } | ||
88 | |||
89 | void idset_sch_del(struct idset *set, struct subchannel_id schid) | ||
90 | { | ||
91 | idset_del(set, schid.ssid, schid.sch_no); | ||
92 | } | ||
93 | |||
94 | int idset_sch_contains(struct idset *set, struct subchannel_id schid) | ||
95 | { | ||
96 | return idset_contains(set, schid.ssid, schid.sch_no); | ||
97 | } | ||
98 | |||
99 | int idset_sch_get_first(struct idset *set, struct subchannel_id *schid) | ||
100 | { | ||
101 | int ssid = 0; | ||
102 | int id = 0; | ||
103 | int rc; | ||
104 | |||
105 | rc = idset_get_first(set, &ssid, &id); | ||
106 | if (rc) { | ||
107 | init_subchannel_id(schid); | ||
108 | schid->ssid = ssid; | ||
109 | schid->sch_no = id; | ||
110 | } | ||
111 | return rc; | ||
112 | } | ||
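
idset is a flat bitmap over (ssid, id) pairs, bit number ssid * num_id + id. The subchannel variant spans (__MAX_SSID + 1) * (__MAX_SUBCHANNEL + 1) = 4 * 65536 bits, a fixed 32 KB allocation covering every possible subchannel, which is what lets the slow path drop its per-entry GFP_ATOMIC list nodes. A consumer loop mirroring css_slow_path_func():

/* Usage sketch for the idset API defined above. */
struct idset *set;
struct subchannel_id schid;

set = idset_sch_new();
if (set) {
	idset_fill(set);		/* mark every subchannel */
	while (idset_sch_get_first(set, &schid)) {
		idset_sch_del(set, schid);
		/* ... evaluate schid ... */
	}
	idset_free(set);
}
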
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h new file mode 100644 index 000000000000..144466ab8c15 --- /dev/null +++ b/drivers/s390/cio/idset.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/idset.h | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef S390_IDSET_H | ||
9 | #define S390_IDSET_H S390_IDSET_H | ||
10 | |||
11 | #include "schid.h" | ||
12 | |||
13 | struct idset; | ||
14 | |||
15 | void idset_free(struct idset *set); | ||
16 | void idset_clear(struct idset *set); | ||
17 | void idset_fill(struct idset *set); | ||
18 | |||
19 | struct idset *idset_sch_new(void); | ||
20 | void idset_sch_add(struct idset *set, struct subchannel_id id); | ||
21 | void idset_sch_del(struct idset *set, struct subchannel_id id); | ||
22 | int idset_sch_contains(struct idset *set, struct subchannel_id id); | ||
23 | int idset_sch_get_first(struct idset *set, struct subchannel_id *id); | ||
24 | |||
25 | #endif /* S390_IDSET_H */ | ||
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index ad6d82940069..7153dd959082 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef S390_CIO_IOASM_H | 1 | #ifndef S390_CIO_IOASM_H |
2 | #define S390_CIO_IOASM_H | 2 | #define S390_CIO_IOASM_H |
3 | 3 | ||
4 | #include <asm/chpid.h> | ||
4 | #include "schid.h" | 5 | #include "schid.h" |
5 | 6 | ||
6 | /* | 7 | /* |
@@ -189,9 +190,9 @@ static inline int chsc(void *chsc_area) | |||
189 | return cc; | 190 | return cc; |
190 | } | 191 | } |
191 | 192 | ||
192 | static inline int rchp(int chpid) | 193 | static inline int rchp(struct chp_id chpid) |
193 | { | 194 | { |
194 | register unsigned int reg1 asm ("1") = chpid; | 195 | register struct chp_id reg1 asm ("1") = chpid; |
195 | int ccode; | 196 | int ccode; |
196 | 197 | ||
197 | asm volatile( | 198 | asm volatile( |
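
rchp() now takes a struct chp_id by value; the structure is register-sized, so it can still be bound to general register 1 for the RCHP instruction. Caller sketch, with the condition codes interpreted as in the css_reset() hunk earlier (0: reset initiated, 2: a reset is already pending):

/* Sketch: issuing a channel-path reset with the new signature. */
struct chp_id chpid;
int cc;

chp_id_init(&chpid);
chpid.id = 0x40;		/* hypothetical CHPID */
cc = rchp(chpid);
if (cc == 0 || cc == 2)
	; /* reset in progress, poll or wait for completion */
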
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 7809a79feec7..6dd64d0c8d45 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -3525,8 +3525,8 @@ unpack_next: | |||
3525 | memcpy(skb_put(skb,len_of_data), | 3525 | memcpy(skb_put(skb,len_of_data), |
3526 | privptr->p_mtc_envelope, | 3526 | privptr->p_mtc_envelope, |
3527 | len_of_data); | 3527 | len_of_data); |
3528 | skb->mac.raw=skb->data; | ||
3529 | skb->dev=dev; | 3528 | skb->dev=dev; |
3529 | skb_reset_mac_header(skb); | ||
3530 | skb->protocol=htons(ETH_P_IP); | 3530 | skb->protocol=htons(ETH_P_IP); |
3531 | skb->ip_summed=CHECKSUM_UNNECESSARY; | 3531 | skb->ip_summed=CHECKSUM_UNNECESSARY; |
3532 | privptr->stats.rx_packets++; | 3532 | privptr->stats.rx_packets++; |
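
The claw.c hunk belongs to the tree-wide sk_buff accessor conversion in this merge: direct pokes at skb internals become helper calls so the header fields can be kept as offsets rather than pointers on some configurations. The equivalences recurring across these s390 network drivers, roughly:

/* Old idiom                          ->  new helper (sketch)
 * skb->mac.raw = skb->data;          ->  skb_reset_mac_header(skb);
 * skb->tail = skb->data;             ->  skb_reset_tail_pointer(skb);
 * memcpy(to, skb->data, n);          ->  skb_copy_from_linear_data(skb, to, n);
 * skb->end, skb->tail as pointers    ->  skb_end_pointer(skb), skb_tail_pointer(skb)
 */
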
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 0d6d5fcc128b..b20fd0681733 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c | |||
@@ -455,7 +455,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
455 | return; | 455 | return; |
456 | } | 456 | } |
457 | skb_put(pskb, header->length); | 457 | skb_put(pskb, header->length); |
458 | pskb->mac.raw = pskb->data; | 458 | skb_reset_mac_header(pskb); |
459 | len -= header->length; | 459 | len -= header->length; |
460 | skb = dev_alloc_skb(pskb->len); | 460 | skb = dev_alloc_skb(pskb->len); |
461 | if (!skb) { | 461 | if (!skb) { |
@@ -472,8 +472,9 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
472 | privptr->stats.rx_dropped++; | 472 | privptr->stats.rx_dropped++; |
473 | return; | 473 | return; |
474 | } | 474 | } |
475 | memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len); | 475 | skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), |
476 | skb->mac.raw = skb->data; | 476 | pskb->len); |
477 | skb_reset_mac_header(skb); | ||
477 | skb->dev = pskb->dev; | 478 | skb->dev = pskb->dev; |
478 | skb->protocol = pskb->protocol; | 479 | skb->protocol = pskb->protocol; |
479 | pskb->ip_summed = CHECKSUM_UNNECESSARY; | 480 | pskb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -706,7 +707,8 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg) | |||
706 | spin_unlock(&ch->collect_lock); | 707 | spin_unlock(&ch->collect_lock); |
707 | return; | 708 | return; |
708 | } | 709 | } |
709 | ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data; | 710 | ch->trans_skb->data = ch->trans_skb_data; |
711 | skb_reset_tail_pointer(ch->trans_skb); | ||
710 | ch->trans_skb->len = 0; | 712 | ch->trans_skb->len = 0; |
711 | if (ch->prof.maxmulti < (ch->collect_len + 2)) | 713 | if (ch->prof.maxmulti < (ch->collect_len + 2)) |
712 | ch->prof.maxmulti = ch->collect_len + 2; | 714 | ch->prof.maxmulti = ch->collect_len + 2; |
@@ -715,8 +717,9 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg) | |||
715 | *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2; | 717 | *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2; |
716 | i = 0; | 718 | i = 0; |
717 | while ((skb = skb_dequeue(&ch->collect_queue))) { | 719 | while ((skb = skb_dequeue(&ch->collect_queue))) { |
718 | memcpy(skb_put(ch->trans_skb, skb->len), skb->data, | 720 | skb_copy_from_linear_data(skb, skb_put(ch->trans_skb, |
719 | skb->len); | 721 | skb->len), |
722 | skb->len); | ||
720 | privptr->stats.tx_packets++; | 723 | privptr->stats.tx_packets++; |
721 | privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; | 724 | privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; |
722 | atomic_dec(&skb->users); | 725 | atomic_dec(&skb->users); |
@@ -831,7 +834,8 @@ ch_action_rx(fsm_instance * fi, int event, void *arg) | |||
831 | ctc_unpack_skb(ch, skb); | 834 | ctc_unpack_skb(ch, skb); |
832 | } | 835 | } |
833 | again: | 836 | again: |
834 | skb->data = skb->tail = ch->trans_skb_data; | 837 | skb->data = ch->trans_skb_data; |
838 | skb_reset_tail_pointer(skb); | ||
835 | skb->len = 0; | 839 | skb->len = 0; |
836 | if (ctc_checkalloc_buffer(ch, 1)) | 840 | if (ctc_checkalloc_buffer(ch, 1)) |
837 | return; | 841 | return; |
@@ -1638,21 +1642,19 @@ add_channel(struct ccw_device *cdev, enum channel_types type) | |||
1638 | struct channel *ch; | 1642 | struct channel *ch; |
1639 | 1643 | ||
1640 | DBF_TEXT(trace, 2, __FUNCTION__); | 1644 | DBF_TEXT(trace, 2, __FUNCTION__); |
1641 | if ((ch = | 1645 | ch = kzalloc(sizeof(struct channel), GFP_KERNEL); |
1642 | (struct channel *) kmalloc(sizeof (struct channel), | 1646 | if (!ch) { |
1643 | GFP_KERNEL)) == NULL) { | ||
1644 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); | 1647 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); |
1645 | return -1; | 1648 | return -1; |
1646 | } | 1649 | } |
1647 | memset(ch, 0, sizeof (struct channel)); | 1650 | /* assure all flags and counters are reset */ |
1648 | if ((ch->ccw = kmalloc(8*sizeof(struct ccw1), | 1651 | ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); |
1649 | GFP_KERNEL | GFP_DMA)) == NULL) { | 1652 | if (!ch->ccw) { |
1650 | kfree(ch); | 1653 | kfree(ch); |
1651 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); | 1654 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); |
1652 | return -1; | 1655 | return -1; |
1653 | } | 1656 | } |
1654 | 1657 | ||
1655 | memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // assure all flags and counters are reset | ||
1656 | 1658 | ||
1657 | /** | 1659 | /** |
1658 | * "static" ccws are used in the following way: | 1660 | * "static" ccws are used in the following way: |
@@ -1692,15 +1694,14 @@ add_channel(struct ccw_device *cdev, enum channel_types type) | |||
1692 | return -1; | 1694 | return -1; |
1693 | } | 1695 | } |
1694 | fsm_newstate(ch->fsm, CH_STATE_IDLE); | 1696 | fsm_newstate(ch->fsm, CH_STATE_IDLE); |
1695 | if ((ch->irb = kmalloc(sizeof (struct irb), | 1697 | ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL); |
1696 | GFP_KERNEL)) == NULL) { | 1698 | if (!ch->irb) { |
1697 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); | 1699 | ctc_pr_warn("ctc: Out of memory in add_channel\n"); |
1698 | kfree_fsm(ch->fsm); | 1700 | kfree_fsm(ch->fsm); |
1699 | kfree(ch->ccw); | 1701 | kfree(ch->ccw); |
1700 | kfree(ch); | 1702 | kfree(ch); |
1701 | return -1; | 1703 | return -1; |
1702 | } | 1704 | } |
1703 | memset(ch->irb, 0, sizeof (struct irb)); | ||
1704 | while (*c && less_than((*c)->id, ch->id)) | 1705 | while (*c && less_than((*c)->id, ch->id)) |
1705 | c = &(*c)->next; | 1706 | c = &(*c)->next; |
1706 | if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) { | 1707 | if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) { |
@@ -2226,7 +2227,8 @@ transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
2226 | * IDAL support in CTC is broken, so we have to | 2227 | * IDAL support in CTC is broken, so we have to |
2227 | * care about skb's above 2G ourselves. | 2228 | * care about skb's above 2G ourselves. |
2228 | */ | 2229 | */ |
2229 | hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31; | 2230 | hi = ((unsigned long)skb_tail_pointer(skb) + |
2231 | LL_HEADER_LENGTH) >> 31; | ||
2230 | if (hi) { | 2232 | if (hi) { |
2231 | nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); | 2233 | nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); |
2232 | if (!nskb) { | 2234 | if (!nskb) { |
@@ -2262,11 +2264,12 @@ transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
2262 | return -EBUSY; | 2264 | return -EBUSY; |
2263 | } | 2265 | } |
2264 | 2266 | ||
2265 | ch->trans_skb->tail = ch->trans_skb->data; | 2267 | skb_reset_tail_pointer(ch->trans_skb); |
2266 | ch->trans_skb->len = 0; | 2268 | ch->trans_skb->len = 0; |
2267 | ch->ccw[1].count = skb->len; | 2269 | ch->ccw[1].count = skb->len; |
2268 | memcpy(skb_put(ch->trans_skb, skb->len), skb->data, | 2270 | skb_copy_from_linear_data(skb, skb_put(ch->trans_skb, |
2269 | skb->len); | 2271 | skb->len), |
2272 | skb->len); | ||
2270 | atomic_dec(&skb->users); | 2273 | atomic_dec(&skb->users); |
2271 | dev_kfree_skb_irq(skb); | 2274 | dev_kfree_skb_irq(skb); |
2272 | ccw_idx = 0; | 2275 | ccw_idx = 0; |
@@ -2745,14 +2748,13 @@ ctc_probe_device(struct ccwgroup_device *cgdev) | |||
2745 | if (!get_device(&cgdev->dev)) | 2748 | if (!get_device(&cgdev->dev)) |
2746 | return -ENODEV; | 2749 | return -ENODEV; |
2747 | 2750 | ||
2748 | priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL); | 2751 | priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL); |
2749 | if (!priv) { | 2752 | if (!priv) { |
2750 | ctc_pr_err("%s: Out of memory\n", __func__); | 2753 | ctc_pr_err("%s: Out of memory\n", __func__); |
2751 | put_device(&cgdev->dev); | 2754 | put_device(&cgdev->dev); |
2752 | return -ENOMEM; | 2755 | return -ENOMEM; |
2753 | } | 2756 | } |
2754 | 2757 | ||
2755 | memset(priv, 0, sizeof (struct ctc_priv)); | ||
2756 | rc = ctc_add_files(&cgdev->dev); | 2758 | rc = ctc_add_files(&cgdev->dev); |
2757 | if (rc) { | 2759 | if (rc) { |
2758 | kfree(priv); | 2760 | kfree(priv); |
@@ -2793,10 +2795,9 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device, | |||
2793 | DBF_TEXT(setup, 3, __FUNCTION__); | 2795 | DBF_TEXT(setup, 3, __FUNCTION__); |
2794 | 2796 | ||
2795 | if (alloc_device) { | 2797 | if (alloc_device) { |
2796 | dev = kmalloc(sizeof (struct net_device), GFP_KERNEL); | 2798 | dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); |
2797 | if (!dev) | 2799 | if (!dev) |
2798 | return NULL; | 2800 | return NULL; |
2799 | memset(dev, 0, sizeof (struct net_device)); | ||
2800 | } | 2801 | } |
2801 | 2802 | ||
2802 | dev->priv = privptr; | 2803 | dev->priv = privptr; |
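
Besides the skb helper conversion, ctcmain.c folds each kmalloc() + memset(0) pair into a single kzalloc(), which returns zeroed memory and shortens the error paths:

/* Shape of each conversion above: */
ch = kzalloc(sizeof(struct channel), GFP_KERNEL);	/* zeroed on success */
if (!ch)
	return -1;
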
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index ecca1046714e..08a994fdd1a4 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -1576,7 +1576,7 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, | |||
1576 | header->offset = card->tx_buffer->count; | 1576 | header->offset = card->tx_buffer->count; |
1577 | header->type = card->lan_type; | 1577 | header->type = card->lan_type; |
1578 | header->slot = card->portno; | 1578 | header->slot = card->portno; |
1579 | memcpy(header + 1, skb->data, skb->len); | 1579 | skb_copy_from_linear_data(skb, header + 1, skb->len); |
1580 | spin_unlock(&card->lock); | 1580 | spin_unlock(&card->lock); |
1581 | card->stats.tx_bytes += skb->len; | 1581 | card->stats.tx_bytes += skb->len; |
1582 | card->stats.tx_packets++; | 1582 | card->stats.tx_packets++; |
@@ -1784,7 +1784,6 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) | |||
1784 | card->stats.rx_dropped++; | 1784 | card->stats.rx_dropped++; |
1785 | return; | 1785 | return; |
1786 | } | 1786 | } |
1787 | skb->dev = card->dev; | ||
1788 | memcpy(skb_put(skb, skb_len), skb_data, skb_len); | 1787 | memcpy(skb_put(skb, skb_len), skb_data, skb_len); |
1789 | skb->protocol = card->lan_type_trans(skb, card->dev); | 1788 | skb->protocol = card->lan_type_trans(skb, card->dev); |
1790 | card->stats.rx_bytes += skb_len; | 1789 | card->stats.rx_bytes += skb_len; |
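
The removed skb->dev assignment in lcs_get_skb() relies on card->lan_type_trans() setting skb->dev itself; eth_type_trans() and the other *_type_trans helpers were changed to do that in this same release cycle, making the manual assignment redundant. The receive-path tail after the change, as a sketch:

/* Sketch: lan_type_trans() is assumed to set skb->dev from its
 * second argument, as eth_type_trans() does. */
memcpy(skb_put(skb, skb_len), skb_data, skb_len);
skb->protocol = card->lan_type_trans(skb, card->dev);
card->stats.rx_bytes += skb_len;
card->stats.rx_packets++;
netif_rx(skb);
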
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 594320ca1b7c..e10e85e85c84 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -635,7 +635,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, | |||
635 | return; | 635 | return; |
636 | } | 636 | } |
637 | skb_put(pskb, header->next); | 637 | skb_put(pskb, header->next); |
638 | pskb->mac.raw = pskb->data; | 638 | skb_reset_mac_header(pskb); |
639 | skb = dev_alloc_skb(pskb->len); | 639 | skb = dev_alloc_skb(pskb->len); |
640 | if (!skb) { | 640 | if (!skb) { |
641 | PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n", | 641 | PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n", |
@@ -645,8 +645,9 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, | |||
645 | privptr->stats.rx_dropped++; | 645 | privptr->stats.rx_dropped++; |
646 | return; | 646 | return; |
647 | } | 647 | } |
648 | memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len); | 648 | skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), |
649 | skb->mac.raw = skb->data; | 649 | pskb->len); |
650 | skb_reset_mac_header(skb); | ||
650 | skb->dev = pskb->dev; | 651 | skb->dev = pskb->dev; |
651 | skb->protocol = pskb->protocol; | 652 | skb->protocol = pskb->protocol; |
652 | pskb->ip_summed = CHECKSUM_UNNECESSARY; | 653 | pskb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -689,7 +690,8 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) | |||
689 | msg->length, conn->max_buffsize); | 690 | msg->length, conn->max_buffsize); |
690 | return; | 691 | return; |
691 | } | 692 | } |
692 | conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; | 693 | conn->rx_buff->data = conn->rx_buff->head; |
694 | skb_reset_tail_pointer(conn->rx_buff); | ||
693 | conn->rx_buff->len = 0; | 695 | conn->rx_buff->len = 0; |
694 | rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, | 696 | rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, |
695 | msg->length, NULL); | 697 | msg->length, NULL); |
@@ -735,14 +737,17 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) | |||
735 | } | 737 | } |
736 | } | 738 | } |
737 | } | 739 | } |
738 | conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head; | 740 | conn->tx_buff->data = conn->tx_buff->head; |
741 | skb_reset_tail_pointer(conn->tx_buff); | ||
739 | conn->tx_buff->len = 0; | 742 | conn->tx_buff->len = 0; |
740 | spin_lock_irqsave(&conn->collect_lock, saveflags); | 743 | spin_lock_irqsave(&conn->collect_lock, saveflags); |
741 | while ((skb = skb_dequeue(&conn->collect_queue))) { | 744 | while ((skb = skb_dequeue(&conn->collect_queue))) { |
742 | header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN; | 745 | header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN; |
743 | memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, | 746 | memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, |
744 | NETIUCV_HDRLEN); | 747 | NETIUCV_HDRLEN); |
745 | memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len); | 748 | skb_copy_from_linear_data(skb, |
749 | skb_put(conn->tx_buff, skb->len), | ||
750 | skb->len); | ||
746 | txbytes += skb->len; | 751 | txbytes += skb->len; |
747 | txpackets++; | 752 | txpackets++; |
748 | stat_maxcq++; | 753 | stat_maxcq++; |
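
conn_action_rx() and conn_action_txdone() above recycle their staging buffer by rewinding data to head and calling skb_reset_tail_pointer() instead of assigning skb->tail directly (the tail field can be stored as an offset rather than a pointer). A toy model of the reset, assuming the plain-pointer representation:

    /* Minimal sketch of an skb's fill-state fields. */
    struct toy_buf {
            unsigned char *head, *data, *tail;
            unsigned int len;
    };

    /* Rewind the buffer for reuse, mirroring the hunks above:
     * data = head; skb_reset_tail_pointer(); len = 0. */
    static void toy_buf_recycle(struct toy_buf *b)
    {
            b->data = b->head;
            b->tail = b->data;      /* skb_reset_tail_pointer() equivalent */
            b->len  = 0;
    }
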
@@ -1164,8 +1169,8 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, | |||
1164 | * Copy the skb to a new allocated skb in lowmem only if the | 1169 | * Copy the skb to a new allocated skb in lowmem only if the |
1165 | * data is located above 2G in memory or tailroom is < 2. | 1170 | * data is located above 2G in memory or tailroom is < 2. |
1166 | */ | 1171 | */ |
1167 | unsigned long hi = | 1172 | unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) + |
1168 | ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31; | 1173 | NETIUCV_HDRLEN)) >> 31; |
1169 | int copied = 0; | 1174 | int copied = 0; |
1170 | if (hi || (skb_tailroom(skb) < 2)) { | 1175 | if (hi || (skb_tailroom(skb) < 2)) { |
1171 | nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + | 1176 | nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + |
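
The "hi" computation above shifts the would-be end of the data right by 31 bits, so it is non-zero exactly when the buffer would extend to or beyond 2 GiB, the limit the comment refers to. A compilable demonstration of the arithmetic:

    #include <assert.h>

    int main(void)
    {
            unsigned long below = 0x7fffffffUL;  /* last address under 2 GiB */
            unsigned long above = 0x80000000UL;  /* first address at 2 GiB */

            assert((below >> 31) == 0);  /* usable in place */
            assert((above >> 31) != 0);  /* triggers the lowmem copy */
            return 0;
    }
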
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 7c735e1fe063..dd7034fbfff8 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c | |||
@@ -267,7 +267,8 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, | |||
267 | 267 | ||
268 | QETH_DBF_TEXT(trace, 5, "eddpcdtc"); | 268 | QETH_DBF_TEXT(trace, 5, "eddpcdtc"); |
269 | if (skb_shinfo(eddp->skb)->nr_frags == 0) { | 269 | if (skb_shinfo(eddp->skb)->nr_frags == 0) { |
270 | memcpy(dst, eddp->skb->data + eddp->skb_offset, len); | 270 | skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset, |
271 | dst, len); | ||
271 | *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len, | 272 | *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len, |
272 | *hcsum); | 273 | *hcsum); |
273 | eddp->skb_offset += len; | 274 | eddp->skb_offset += len; |
@@ -416,7 +417,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
416 | eddp->skb_offset += VLAN_HLEN; | 417 | eddp->skb_offset += VLAN_HLEN; |
417 | #endif /* CONFIG_QETH_VLAN */ | 418 | #endif /* CONFIG_QETH_VLAN */ |
418 | } | 419 | } |
419 | tcph = eddp->skb->h.th; | 420 | tcph = tcp_hdr(eddp->skb); |
420 | while (eddp->skb_offset < eddp->skb->len) { | 421 | while (eddp->skb_offset < eddp->skb->len) { |
421 | data_len = min((int)skb_shinfo(eddp->skb)->gso_size, | 422 | data_len = min((int)skb_shinfo(eddp->skb)->gso_size, |
422 | (int)(eddp->skb->len - eddp->skb_offset)); | 423 | (int)(eddp->skb->len - eddp->skb_offset)); |
@@ -473,20 +474,24 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
473 | QETH_DBF_TEXT(trace, 5, "eddpficx"); | 474 | QETH_DBF_TEXT(trace, 5, "eddpficx"); |
474 | /* create our segmentation headers and copy original headers */ | 475 | /* create our segmentation headers and copy original headers */ |
475 | if (skb->protocol == htons(ETH_P_IP)) | 476 | if (skb->protocol == htons(ETH_P_IP)) |
476 | eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph, | 477 | eddp = qeth_eddp_create_eddp_data(qhdr, |
477 | skb->nh.iph->ihl*4, | 478 | skb_network_header(skb), |
478 | (u8 *)skb->h.th, skb->h.th->doff*4); | 479 | ip_hdrlen(skb), |
480 | skb_transport_header(skb), | ||
481 | tcp_hdrlen(skb)); | ||
479 | else | 482 | else |
480 | eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h, | 483 | eddp = qeth_eddp_create_eddp_data(qhdr, |
481 | sizeof(struct ipv6hdr), | 484 | skb_network_header(skb), |
482 | (u8 *)skb->h.th, skb->h.th->doff*4); | 485 | sizeof(struct ipv6hdr), |
486 | skb_transport_header(skb), | ||
487 | tcp_hdrlen(skb)); | ||
483 | 488 | ||
484 | if (eddp == NULL) { | 489 | if (eddp == NULL) { |
485 | QETH_DBF_TEXT(trace, 2, "eddpfcnm"); | 490 | QETH_DBF_TEXT(trace, 2, "eddpfcnm"); |
486 | return -ENOMEM; | 491 | return -ENOMEM; |
487 | } | 492 | } |
488 | if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { | 493 | if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { |
489 | skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr); | 494 | skb_set_mac_header(skb, sizeof(struct qeth_hdr)); |
490 | memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); | 495 | memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); |
491 | #ifdef CONFIG_QETH_VLAN | 496 | #ifdef CONFIG_QETH_VLAN |
492 | if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { | 497 | if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { |
@@ -590,12 +595,13 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, | |||
590 | QETH_DBF_TEXT(trace, 5, "creddpct"); | 595 | QETH_DBF_TEXT(trace, 5, "creddpct"); |
591 | if (skb->protocol == htons(ETH_P_IP)) | 596 | if (skb->protocol == htons(ETH_P_IP)) |
592 | ctx = qeth_eddp_create_context_generic(card, skb, | 597 | ctx = qeth_eddp_create_context_generic(card, skb, |
593 | sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 + | 598 | (sizeof(struct qeth_hdr) + |
594 | skb->h.th->doff*4); | 599 | ip_hdrlen(skb) + |
600 | tcp_hdrlen(skb))); | ||
595 | else if (skb->protocol == htons(ETH_P_IPV6)) | 601 | else if (skb->protocol == htons(ETH_P_IPV6)) |
596 | ctx = qeth_eddp_create_context_generic(card, skb, | 602 | ctx = qeth_eddp_create_context_generic(card, skb, |
597 | sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + | 603 | sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + |
598 | skb->h.th->doff*4); | 604 | tcp_hdrlen(skb)); |
599 | else | 605 | else |
600 | QETH_DBF_TEXT(trace, 2, "cetcpinv"); | 606 | QETH_DBF_TEXT(trace, 2, "cetcpinv"); |
601 | 607 | ||
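
The qeth_eddp.c hunks replace open-coded skb->nh/skb->h dereferences with header accessors; the lengths rely on the identities ip_hdrlen(skb) == ip_hdr(skb)->ihl * 4 and tcp_hdrlen(skb) == tcp_hdr(skb)->doff * 4, since both fields count 32-bit words. A toy restatement:

    struct toy_iphdr  { unsigned int ihl;  };   /* IP header length field */
    struct toy_tcphdr { unsigned int doff; };   /* TCP data offset field */

    static unsigned int toy_ip_hdrlen(const struct toy_iphdr *iph)
    {
            return iph->ihl * 4;        /* IHL counts 32-bit words */
    }

    static unsigned int toy_tcp_hdrlen(const struct toy_tcphdr *th)
    {
            return th->doff * 4;        /* doff counts 32-bit words */
    }
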
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index d8a86f5af379..ad7792dc1a04 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c | |||
@@ -2278,7 +2278,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
2278 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR)) | 2278 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR)) |
2279 | return tr_type_trans(skb,dev); | 2279 | return tr_type_trans(skb,dev); |
2280 | #endif /* CONFIG_TR */ | 2280 | #endif /* CONFIG_TR */ |
2281 | skb->mac.raw = skb->data; | 2281 | skb_reset_mac_header(skb); |
2282 | skb_pull(skb, ETH_HLEN ); | 2282 | skb_pull(skb, ETH_HLEN ); |
2283 | eth = eth_hdr(skb); | 2283 | eth = eth_hdr(skb); |
2284 | 2284 | ||
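
qeth_main.c repeatedly converts skb->mac.raw assignments to skb_reset_mac_header()/skb_set_mac_header(). A toy model of what those helpers do, assuming the plain-pointer representation (mainline can also store the header as an offset from skb->head):

    struct toy_mac_skb { unsigned char *data, *mac_header; };

    static void toy_skb_reset_mac_header(struct toy_mac_skb *skb)
    {
            skb->mac_header = skb->data;
    }

    /* offset may be negative, as in the fake-LL hunks below. */
    static void toy_skb_set_mac_header(struct toy_mac_skb *skb, int offset)
    {
            skb->mac_header = skb->data + offset;
    }
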
@@ -2306,9 +2306,9 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, | |||
2306 | struct iphdr *ip_hdr; | 2306 | struct iphdr *ip_hdr; |
2307 | 2307 | ||
2308 | QETH_DBF_TEXT(trace,5,"skbfktr"); | 2308 | QETH_DBF_TEXT(trace,5,"skbfktr"); |
2309 | skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_TR; | 2309 | skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_TR); |
2310 | /* this is a fake ethernet header */ | 2310 | /* this is a fake ethernet header */ |
2311 | fake_hdr = (struct trh_hdr *) skb->mac.raw; | 2311 | fake_hdr = tr_hdr(skb); |
2312 | 2312 | ||
2313 | /* the destination MAC address */ | 2313 | /* the destination MAC address */ |
2314 | switch (skb->pkt_type){ | 2314 | switch (skb->pkt_type){ |
@@ -2359,9 +2359,9 @@ qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, | |||
2359 | struct iphdr *ip_hdr; | 2359 | struct iphdr *ip_hdr; |
2360 | 2360 | ||
2361 | QETH_DBF_TEXT(trace,5,"skbfketh"); | 2361 | QETH_DBF_TEXT(trace,5,"skbfketh"); |
2362 | skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_ETH; | 2362 | skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_ETH); |
2363 | /* this is a fake ethernet header */ | 2363 | /* this is a fake ethernet header */ |
2364 | fake_hdr = (struct ethhdr *) skb->mac.raw; | 2364 | fake_hdr = eth_hdr(skb); |
2365 | 2365 | ||
2366 | /* the destination MAC address */ | 2366 | /* the destination MAC address */ |
2367 | switch (skb->pkt_type){ | 2367 | switch (skb->pkt_type){ |
@@ -2461,7 +2461,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | |||
2461 | if (card->options.fake_ll) | 2461 | if (card->options.fake_ll) |
2462 | qeth_rebuild_skb_fake_ll(card, skb, hdr); | 2462 | qeth_rebuild_skb_fake_ll(card, skb, hdr); |
2463 | else | 2463 | else |
2464 | skb->mac.raw = skb->data; | 2464 | skb_reset_mac_header(skb); |
2465 | skb->ip_summed = card->options.checksum_type; | 2465 | skb->ip_summed = card->options.checksum_type; |
2466 | if (card->options.checksum_type == HW_CHECKSUMMING){ | 2466 | if (card->options.checksum_type == HW_CHECKSUMMING){ |
2467 | if ( (hdr->hdr.l3.ext_flags & | 2467 | if ( (hdr->hdr.l3.ext_flags & |
@@ -2501,7 +2501,8 @@ qeth_process_inbound_buffer(struct qeth_card *card, | |||
2501 | vlan_tag = qeth_rebuild_skb(card, skb, hdr); | 2501 | vlan_tag = qeth_rebuild_skb(card, skb, hdr); |
2502 | else { /*in case of OSN*/ | 2502 | else { /*in case of OSN*/ |
2503 | skb_push(skb, sizeof(struct qeth_hdr)); | 2503 | skb_push(skb, sizeof(struct qeth_hdr)); |
2504 | memcpy(skb->data, hdr, sizeof(struct qeth_hdr)); | 2504 | skb_copy_to_linear_data(skb, hdr, |
2505 | sizeof(struct qeth_hdr)); | ||
2505 | } | 2506 | } |
2506 | /* is device UP ? */ | 2507 | /* is device UP ? */ |
2507 | if (!(card->dev->flags & IFF_UP)){ | 2508 | if (!(card->dev->flags & IFF_UP)){ |
@@ -3778,9 +3779,11 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) | |||
3778 | } | 3779 | } |
3779 | /* try something else */ | 3780 | /* try something else */ |
3780 | if (skb->protocol == ETH_P_IPV6) | 3781 | if (skb->protocol == ETH_P_IPV6) |
3781 | return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0; | 3782 | return (skb_network_header(skb)[24] == 0xff) ? |
3783 | RTN_MULTICAST : 0; | ||
3782 | else if (skb->protocol == ETH_P_IP) | 3784 | else if (skb->protocol == ETH_P_IP) |
3783 | return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0; | 3785 | return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ? |
3786 | RTN_MULTICAST : 0; | ||
3784 | /* ... */ | 3787 | /* ... */ |
3785 | if (!memcmp(skb->data, skb->dev->broadcast, 6)) | 3788 | if (!memcmp(skb->data, skb->dev->broadcast, 6)) |
3786 | return RTN_BROADCAST; | 3789 | return RTN_BROADCAST; |
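
The multicast tests above read fixed offsets within the network header: byte 24 of an IPv6 header is the first byte of the destination address (ff00::/8 is multicast), and byte 16 of an IPv4 header is the first byte of the destination address (224.0.0.0/4 is multicast, i.e. top nibble 0xe). A toy restatement:

    /* nh points at the start of the network header. */
    static int toy_ipv6_is_mcast(const unsigned char *nh)
    {
            return nh[24] == 0xff;              /* daddr begins at byte 24 */
    }

    static int toy_ipv4_is_mcast(const unsigned char *nh)
    {
            return (nh[16] & 0xf0) == 0xe0;     /* daddr begins at byte 16 */
    }
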
@@ -3818,18 +3821,20 @@ qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, | |||
3818 | return card->info.is_multicast_different & | 3821 | return card->info.is_multicast_different & |
3819 | (card->qdio.no_out_queues - 1); | 3822 | (card->qdio.no_out_queues - 1); |
3820 | if (card->qdio.do_prio_queueing && (ipv == 4)) { | 3823 | if (card->qdio.do_prio_queueing && (ipv == 4)) { |
3824 | const u8 tos = ip_hdr(skb)->tos; | ||
3825 | |||
3821 | if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){ | 3826 | if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){ |
3822 | if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT) | 3827 | if (tos & IP_TOS_NOTIMPORTANT) |
3823 | return 3; | 3828 | return 3; |
3824 | if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY) | 3829 | if (tos & IP_TOS_HIGHRELIABILITY) |
3825 | return 2; | 3830 | return 2; |
3826 | if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT) | 3831 | if (tos & IP_TOS_HIGHTHROUGHPUT) |
3827 | return 1; | 3832 | return 1; |
3828 | if (skb->nh.iph->tos & IP_TOS_LOWDELAY) | 3833 | if (tos & IP_TOS_LOWDELAY) |
3829 | return 0; | 3834 | return 0; |
3830 | } | 3835 | } |
3831 | if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC) | 3836 | if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC) |
3832 | return 3 - (skb->nh.iph->tos >> 6); | 3837 | return 3 - (tos >> 6); |
3833 | } else if (card->qdio.do_prio_queueing && (ipv == 6)) { | 3838 | } else if (card->qdio.do_prio_queueing && (ipv == 6)) { |
3834 | /* TODO: IPv6!!! */ | 3839 | /* TODO: IPv6!!! */ |
3835 | } | 3840 | } |
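
Caching ip_hdr(skb)->tos in a local avoids re-deriving the header pointer for every test; the precedence branch then maps the top two bits of the TOS byte onto the four queues, with higher precedence landing on a lower queue number. A toy restatement of that mapping:

    static int toy_prec_queue(unsigned char tos)
    {
            return 3 - (tos >> 6);      /* tos >> 6 is 0..3 */
    }
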
@@ -3866,9 +3871,9 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) | |||
3866 | * memcpys instead of one memmove to save cycles. | 3871 | * memcpys instead of one memmove to save cycles. |
3867 | */ | 3872 | */ |
3868 | skb_push(skb, VLAN_HLEN); | 3873 | skb_push(skb, VLAN_HLEN); |
3869 | memcpy(skb->data, skb->data + 4, 4); | 3874 | skb_copy_to_linear_data(skb, skb->data + 4, 4); |
3870 | memcpy(skb->data + 4, skb->data + 8, 4); | 3875 | skb_copy_to_linear_data_offset(skb, 4, skb->data + 8, 4); |
3871 | memcpy(skb->data + 8, skb->data + 12, 4); | 3876 | skb_copy_to_linear_data_offset(skb, 8, skb->data + 12, 4); |
3872 | tag = (u16 *)(skb->data + 12); | 3877 | tag = (u16 *)(skb->data + 12); |
3873 | /* | 3878 | /* |
3874 | * first two bytes = ETH_P_8021Q (0x8100) | 3879 | * first two bytes = ETH_P_8021Q (0x8100) |
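
The three 4-byte copies above implement an overlapping 12-byte move safely: after skb_push(skb, VLAN_HLEN) opens 4 bytes of headroom, each chunk's source sits 4 bytes past its destination, so copying low to high never reads a byte already overwritten, and bytes 12..15 are freed for the 802.1Q tag. A toy model:

    #include <string.h>

    /* data points at the pushed (new) start of the frame. */
    static void toy_make_vlan_gap(unsigned char *data)
    {
            memcpy(data,     data + 4,  4);   /* first 4 MAC-address bytes */
            memcpy(data + 4, data + 8,  4);   /* middle 4 bytes */
            memcpy(data + 8, data + 12, 4);   /* last 4 bytes */
            /* bytes 12..15 now receive the TPID/TCI written afterwards */
    }
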
@@ -4039,7 +4044,8 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | |||
4039 | *((u32 *) skb->dst->neighbour->primary_key); | 4044 | *((u32 *) skb->dst->neighbour->primary_key); |
4040 | } else { | 4045 | } else { |
4041 | /* fill in destination address used in ip header */ | 4046 | /* fill in destination address used in ip header */ |
4042 | *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr; | 4047 | *((u32 *)(&hdr->hdr.l3.dest_addr[12])) = |
4048 | ip_hdr(skb)->daddr; | ||
4043 | } | 4049 | } |
4044 | } else if (ipv == 6) { /* IPv6 or passthru */ | 4050 | } else if (ipv == 6) { /* IPv6 or passthru */ |
4045 | hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type); | 4051 | hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type); |
@@ -4048,7 +4054,8 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | |||
4048 | skb->dst->neighbour->primary_key, 16); | 4054 | skb->dst->neighbour->primary_key, 16); |
4049 | } else { | 4055 | } else { |
4050 | /* fill in destination address used in ip header */ | 4056 | /* fill in destination address used in ip header */ |
4051 | memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16); | 4057 | memcpy(hdr->hdr.l3.dest_addr, |
4058 | &ipv6_hdr(skb)->daddr, 16); | ||
4052 | } | 4059 | } |
4053 | } else { /* passthrough */ | 4060 | } else { /* passthrough */ |
4054 | if((skb->dev->type == ARPHRD_IEEE802_TR) && | 4061 | if((skb->dev->type == ARPHRD_IEEE802_TR) && |
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h index 14504afb044e..c20e923cf9ad 100644 --- a/drivers/s390/net/qeth_tso.h +++ b/drivers/s390/net/qeth_tso.h | |||
@@ -40,8 +40,8 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb) | |||
40 | QETH_DBF_TEXT(trace, 5, "tsofhdr"); | 40 | QETH_DBF_TEXT(trace, 5, "tsofhdr"); |
41 | 41 | ||
42 | hdr = (struct qeth_hdr_tso *) skb->data; | 42 | hdr = (struct qeth_hdr_tso *) skb->data; |
43 | iph = skb->nh.iph; | 43 | iph = ip_hdr(skb); |
44 | tcph = skb->h.th; | 44 | tcph = tcp_hdr(skb); |
45 | /*fix header to TSO values ...*/ | 45 | /*fix header to TSO values ...*/ |
46 | hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; | 46 | hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; |
47 | /*set values which are fix for the first approach ...*/ | 47 | /*set values which are fix for the first approach ...*/ |
@@ -63,13 +63,9 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb) | |||
63 | static inline void | 63 | static inline void |
64 | qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb) | 64 | qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb) |
65 | { | 65 | { |
66 | struct iphdr *iph; | 66 | struct iphdr *iph = ip_hdr(skb); |
67 | struct ipv6hdr *ip6h; | 67 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
68 | struct tcphdr *tcph; | 68 | struct tcphdr *tcph = tcp_hdr(skb); |
69 | |||
70 | iph = skb->nh.iph; | ||
71 | ip6h = skb->nh.ipv6h; | ||
72 | tcph = skb->h.th; | ||
73 | 69 | ||
74 | tcph->check = 0; | 70 | tcph->check = 0; |
75 | if (skb->protocol == ETH_P_IPV6) { | 71 | if (skb->protocol == ETH_P_IPV6) { |
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 806bb1a921eb..644a06eba828 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include "cio/cio.h" | 21 | #include "cio/cio.h" |
22 | #include "cio/chsc.h" | 22 | #include "cio/chsc.h" |
23 | #include "cio/css.h" | 23 | #include "cio/css.h" |
24 | #include "cio/chp.h" | ||
24 | #include "s390mach.h" | 25 | #include "s390mach.h" |
25 | 26 | ||
26 | static struct semaphore m_sem; | 27 | static struct semaphore m_sem; |
@@ -44,14 +45,13 @@ static int | |||
44 | s390_collect_crw_info(void *param) | 45 | s390_collect_crw_info(void *param) |
45 | { | 46 | { |
46 | struct crw crw[2]; | 47 | struct crw crw[2]; |
47 | int ccode, ret, slow; | 48 | int ccode; |
48 | struct semaphore *sem; | 49 | struct semaphore *sem; |
49 | unsigned int chain; | 50 | unsigned int chain; |
50 | 51 | ||
51 | sem = (struct semaphore *)param; | 52 | sem = (struct semaphore *)param; |
52 | repeat: | 53 | repeat: |
53 | down_interruptible(sem); | 54 | down_interruptible(sem); |
54 | slow = 0; | ||
55 | chain = 0; | 55 | chain = 0; |
56 | while (1) { | 56 | while (1) { |
57 | if (unlikely(chain > 1)) { | 57 | if (unlikely(chain > 1)) { |
@@ -84,9 +84,8 @@ repeat: | |||
84 | /* Check for overflows. */ | 84 | /* Check for overflows. */ |
85 | if (crw[chain].oflw) { | 85 | if (crw[chain].oflw) { |
86 | pr_debug("%s: crw overflow detected!\n", __FUNCTION__); | 86 | pr_debug("%s: crw overflow detected!\n", __FUNCTION__); |
87 | css_reiterate_subchannels(); | 87 | css_schedule_eval_all(); |
88 | chain = 0; | 88 | chain = 0; |
89 | slow = 1; | ||
90 | continue; | 89 | continue; |
91 | } | 90 | } |
92 | switch (crw[chain].rsc) { | 91 | switch (crw[chain].rsc) { |
@@ -94,10 +93,7 @@ repeat: | |||
94 | if (crw[0].chn && !chain) | 93 | if (crw[0].chn && !chain) |
95 | break; | 94 | break; |
96 | pr_debug("source is subchannel %04X\n", crw[0].rsid); | 95 | pr_debug("source is subchannel %04X\n", crw[0].rsid); |
97 | ret = css_process_crw (crw[0].rsid, | 96 | css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0); |
98 | chain ? crw[1].rsid : 0); | ||
99 | if (ret == -EAGAIN) | ||
100 | slow = 1; | ||
101 | break; | 97 | break; |
102 | case CRW_RSC_MONITOR: | 98 | case CRW_RSC_MONITOR: |
103 | pr_debug("source is monitoring facility\n"); | 99 | pr_debug("source is monitoring facility\n"); |
@@ -116,28 +112,23 @@ repeat: | |||
116 | } | 112 | } |
117 | switch (crw[0].erc) { | 113 | switch (crw[0].erc) { |
118 | case CRW_ERC_IPARM: /* Path has come. */ | 114 | case CRW_ERC_IPARM: /* Path has come. */ |
119 | ret = chp_process_crw(crw[0].rsid, 1); | 115 | chp_process_crw(crw[0].rsid, 1); |
120 | break; | 116 | break; |
121 | case CRW_ERC_PERRI: /* Path has gone. */ | 117 | case CRW_ERC_PERRI: /* Path has gone. */ |
122 | case CRW_ERC_PERRN: | 118 | case CRW_ERC_PERRN: |
123 | ret = chp_process_crw(crw[0].rsid, 0); | 119 | chp_process_crw(crw[0].rsid, 0); |
124 | break; | 120 | break; |
125 | default: | 121 | default: |
126 | pr_debug("Don't know how to handle erc=%x\n", | 122 | pr_debug("Don't know how to handle erc=%x\n", |
127 | crw[0].erc); | 123 | crw[0].erc); |
128 | ret = 0; | ||
129 | } | 124 | } |
130 | if (ret == -EAGAIN) | ||
131 | slow = 1; | ||
132 | break; | 125 | break; |
133 | case CRW_RSC_CONFIG: | 126 | case CRW_RSC_CONFIG: |
134 | pr_debug("source is configuration-alert facility\n"); | 127 | pr_debug("source is configuration-alert facility\n"); |
135 | break; | 128 | break; |
136 | case CRW_RSC_CSS: | 129 | case CRW_RSC_CSS: |
137 | pr_debug("source is channel subsystem\n"); | 130 | pr_debug("source is channel subsystem\n"); |
138 | ret = chsc_process_crw(); | 131 | chsc_process_crw(); |
139 | if (ret == -EAGAIN) | ||
140 | slow = 1; | ||
141 | break; | 132 | break; |
142 | default: | 133 | default: |
143 | pr_debug("unknown source\n"); | 134 | pr_debug("unknown source\n"); |
@@ -146,8 +137,6 @@ repeat: | |||
146 | /* chain is always 0 or 1 here. */ | 137 | /* chain is always 0 or 1 here. */ |
147 | chain = crw[chain].chn ? chain + 1 : 0; | 138 | chain = crw[chain].chn ? chain + 1 : 0; |
148 | } | 139 | } |
149 | if (slow) | ||
150 | queue_work(slow_path_wq, &slow_path_work); | ||
151 | goto repeat; | 140 | goto repeat; |
152 | return 0; | 141 | return 0; |
153 | } | 142 | } |
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index 090743d2f914..19343f9675c3 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c | |||
@@ -357,6 +357,24 @@ static __init int create_proc_sysinfo(void) | |||
357 | 357 | ||
358 | __initcall(create_proc_sysinfo); | 358 | __initcall(create_proc_sysinfo); |
359 | 359 | ||
360 | int get_cpu_capability(unsigned int *capability) | ||
361 | { | ||
362 | struct sysinfo_1_2_2 *info; | ||
363 | int rc; | ||
364 | |||
365 | info = (void *) get_zeroed_page(GFP_KERNEL); | ||
366 | if (!info) | ||
367 | return -ENOMEM; | ||
368 | rc = stsi(info, 1, 2, 2); | ||
369 | if (rc == -ENOSYS) | ||
370 | goto out; | ||
371 | rc = 0; | ||
372 | *capability = info->capability; | ||
373 | out: | ||
374 | free_page((unsigned long) info); | ||
375 | return rc; | ||
376 | } | ||
377 | |||
360 | /* | 378 | /* |
361 | * CPU capability might have changed. Therefore recalculate loops_per_jiffy. | 379 | * CPU capability might have changed. Therefore recalculate loops_per_jiffy. |
362 | */ | 380 | */ |
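
get_cpu_capability() above fills *capability from the SYSIB 1.2.2 data and returns 0, or a negative value when the page allocation fails or stsi() is unavailable. A hypothetical caller (the name and fallback policy are illustrative, not part of the patch):

    extern int get_cpu_capability(unsigned int *capability);

    static unsigned int toy_capability_or(unsigned int fallback)
    {
            unsigned int cap;

            if (get_cpu_capability(&cap) != 0)
                    cap = fallback;     /* -ENOMEM or stsi() -ENOSYS */
            return cap;
    }
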
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 2cea4f5d2084..f2be2ead8742 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c | |||
@@ -726,7 +726,7 @@ static struct miscdevice envctrl_dev = { | |||
726 | * Return: None. | 726 | * Return: None. |
727 | */ | 727 | */ |
728 | static void envctrl_set_mon(struct i2c_child_t *pchild, | 728 | static void envctrl_set_mon(struct i2c_child_t *pchild, |
729 | char *chnl_desc, | 729 | const char *chnl_desc, |
730 | int chnl_no) | 730 | int chnl_no) |
731 | { | 731 | { |
732 | /* Firmware only has temperature type. It does not distinguish | 732 | /* Firmware only has temperature type. It does not distinguish |
@@ -763,8 +763,8 @@ static void envctrl_set_mon(struct i2c_child_t *pchild, | |||
763 | static void envctrl_init_adc(struct i2c_child_t *pchild, struct device_node *dp) | 763 | static void envctrl_init_adc(struct i2c_child_t *pchild, struct device_node *dp) |
764 | { | 764 | { |
765 | int i = 0, len; | 765 | int i = 0, len; |
766 | char *pos; | 766 | const char *pos; |
767 | unsigned int *pval; | 767 | const unsigned int *pval; |
768 | 768 | ||
769 | /* Firmware describes channels as a stream separated by '\0'. */ | 769 | /* Firmware describes channels as a stream separated by '\0'. */ |
770 | pos = of_get_property(dp, "channels-description", &len); | 770 | pos = of_get_property(dp, "channels-description", &len); |
@@ -859,7 +859,7 @@ static void envctrl_init_i2c_child(struct linux_ebus_child *edev_child, | |||
859 | { | 859 | { |
860 | int len, i, tbls_size = 0; | 860 | int len, i, tbls_size = 0; |
861 | struct device_node *dp = edev_child->prom_node; | 861 | struct device_node *dp = edev_child->prom_node; |
862 | void *pval; | 862 | const void *pval; |
863 | 863 | ||
864 | /* Get device address. */ | 864 | /* Get device address. */ |
865 | pval = of_get_property(dp, "reg", &len); | 865 | pval = of_get_property(dp, "reg", &len); |
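
The const changes in envctrl.c (and in flash.c, openprom.c, and sbus.c below) encode the of_get_property() contract: the returned pointer aims into firmware-provided property data and must not be written through. A kernel-context sketch of a conforming caller (header location is an assumption; on sparc of this era the declaration lived in <asm/prom.h>):

    #include <asm/prom.h>   /* struct device_node, of_get_property() */

    static const char *toy_channels_desc(struct device_node *dp, int *lenp)
    {
            /* Result may be NULL; it is read-only property data. */
            return of_get_property(dp, "channels-description", lenp);
    }
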
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c index 6e99507aeb12..262f01e68592 100644 --- a/drivers/sbus/char/flash.c +++ b/drivers/sbus/char/flash.c | |||
@@ -190,7 +190,7 @@ static int __init flash_init(void) | |||
190 | } | 190 | } |
191 | if (!sdev) { | 191 | if (!sdev) { |
192 | #ifdef CONFIG_PCI | 192 | #ifdef CONFIG_PCI |
193 | struct linux_prom_registers *ebus_regs; | 193 | const struct linux_prom_registers *ebus_regs; |
194 | 194 | ||
195 | for_each_ebus(ebus) { | 195 | for_each_ebus(ebus) { |
196 | for_each_ebusdev(edev, ebus) { | 196 | for_each_ebusdev(edev, ebus) { |
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index 5041c9dfbe3b..fbfeb89a6f32 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <asm/openpromio.h> | 44 | #include <asm/openpromio.h> |
45 | #ifdef CONFIG_PCI | 45 | #ifdef CONFIG_PCI |
46 | #include <linux/pci.h> | 46 | #include <linux/pci.h> |
47 | #include <asm/pbm.h> | ||
48 | #endif | 47 | #endif |
49 | 48 | ||
50 | MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)"); | 49 | MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)"); |
@@ -141,7 +140,7 @@ static int copyout(void __user *info, struct openpromio *opp, int len) | |||
141 | 140 | ||
142 | static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize) | 141 | static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize) |
143 | { | 142 | { |
144 | void *pval; | 143 | const void *pval; |
145 | int len; | 144 | int len; |
146 | 145 | ||
147 | if (!dp || | 146 | if (!dp || |
@@ -248,18 +247,17 @@ static int oprompci2node(void __user *argp, struct device_node *dp, struct openp | |||
248 | if (bufsize >= 2*sizeof(int)) { | 247 | if (bufsize >= 2*sizeof(int)) { |
249 | #ifdef CONFIG_PCI | 248 | #ifdef CONFIG_PCI |
250 | struct pci_dev *pdev; | 249 | struct pci_dev *pdev; |
251 | struct pcidev_cookie *pcp; | 250 | struct device_node *dp; |
251 | |||
252 | pdev = pci_get_bus_and_slot (((int *) op->oprom_array)[0], | 252 | pdev = pci_get_bus_and_slot (((int *) op->oprom_array)[0], |
253 | ((int *) op->oprom_array)[1]); | 253 | ((int *) op->oprom_array)[1]); |
254 | 254 | ||
255 | pcp = pdev->sysdata; | 255 | dp = pci_device_to_OF_node(pdev); |
256 | if (pcp != NULL) { | 256 | data->current_node = dp; |
257 | dp = pcp->prom_node; | 257 | *((int *)op->oprom_array) = dp->node; |
258 | data->current_node = dp; | 258 | op->oprom_size = sizeof(int); |
259 | *((int *)op->oprom_array) = dp->node; | 259 | err = copyout(argp, op, bufsize + sizeof(int)); |
260 | op->oprom_size = sizeof(int); | 260 | |
261 | err = copyout(argp, op, bufsize + sizeof(int)); | ||
262 | } | ||
263 | pci_dev_put(pdev); | 261 | pci_dev_put(pdev); |
264 | #endif | 262 | #endif |
265 | } | 263 | } |
@@ -410,7 +408,7 @@ static int opiocget(void __user *argp, DATA *data) | |||
410 | struct opiocdesc op; | 408 | struct opiocdesc op; |
411 | struct device_node *dp; | 409 | struct device_node *dp; |
412 | char *str; | 410 | char *str; |
413 | void *pval; | 411 | const void *pval; |
414 | int err, len; | 412 | int err, len; |
415 | 413 | ||
416 | if (copy_from_user(&op, argp, sizeof(op))) | 414 | if (copy_from_user(&op, argp, sizeof(op))) |
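
oprompci2node() now resolves the firmware node with pci_device_to_OF_node() instead of digging through the pcidev_cookie in pdev->sysdata. A kernel-context sketch of the lookup (the NULL check is an added precaution here, not in the patch; header placement is an assumption):

    #include <linux/pci.h>
    #include <asm/prom.h>   /* pci_device_to_OF_node() on sparc */

    static int toy_prom_handle(struct pci_dev *pdev)
    {
            struct device_node *dp = pci_device_to_OF_node(pdev);

            return dp ? dp->node : 0;   /* dp->node: OBP node handle */
    }
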
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c index 6349dd617f85..eee590a51d8a 100644 --- a/drivers/sbus/sbus.c +++ b/drivers/sbus/sbus.c | |||
@@ -35,7 +35,7 @@ struct sbus_bus *sbus_root; | |||
35 | static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sdev) | 35 | static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sdev) |
36 | { | 36 | { |
37 | unsigned long base; | 37 | unsigned long base; |
38 | void *pval; | 38 | const void *pval; |
39 | int len, err; | 39 | int len, err; |
40 | 40 | ||
41 | sdev->prom_node = dp->node; | 41 | sdev->prom_node = dp->node; |
@@ -86,7 +86,7 @@ static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sde | |||
86 | 86 | ||
87 | static void __init sbus_bus_ranges_init(struct device_node *dp, struct sbus_bus *sbus) | 87 | static void __init sbus_bus_ranges_init(struct device_node *dp, struct sbus_bus *sbus) |
88 | { | 88 | { |
89 | void *pval; | 89 | const void *pval; |
90 | int len; | 90 | int len; |
91 | 91 | ||
92 | pval = of_get_property(dp, "ranges", &len); | 92 | pval = of_get_property(dp, "ranges", &len); |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 4cd280e86966..fcc4cb6c7f46 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -1763,9 +1763,15 @@ config SUN3X_ESP | |||
1763 | The ESP was an on-board SCSI controller used on Sun 3/80 | 1763 | The ESP was an on-board SCSI controller used on Sun 3/80 |
1764 | machines. Say Y here to compile in support for it. | 1764 | machines. Say Y here to compile in support for it. |
1765 | 1765 | ||
1766 | config SCSI_ESP_CORE | ||
1767 | tristate "ESP Scsi Driver Core" | ||
1768 | depends on SCSI | ||
1769 | select SCSI_SPI_ATTRS | ||
1770 | |||
1766 | config SCSI_SUNESP | 1771 | config SCSI_SUNESP |
1767 | tristate "Sparc ESP Scsi Driver" | 1772 | tristate "Sparc ESP Scsi Driver" |
1768 | depends on SBUS && SCSI | 1773 | depends on SBUS && SCSI |
1774 | select SCSI_ESP_CORE | ||
1769 | help | 1775 | help |
1770 | This is the driver for the Sun ESP SCSI host adapter. The ESP | 1776 | This is the driver for the Sun ESP SCSI host adapter. The ESP |
1771 | chipset is present in most SPARC SBUS-based computers. | 1777 | chipset is present in most SPARC SBUS-based computers. |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 79ecf4ebe6eb..70cff4c599d7 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -106,7 +106,8 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o | |||
106 | obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ | 106 | obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ |
107 | obj-$(CONFIG_MEGARAID_SAS) += megaraid/ | 107 | obj-$(CONFIG_MEGARAID_SAS) += megaraid/ |
108 | obj-$(CONFIG_SCSI_ACARD) += atp870u.o | 108 | obj-$(CONFIG_SCSI_ACARD) += atp870u.o |
109 | obj-$(CONFIG_SCSI_SUNESP) += esp.o | 109 | obj-$(CONFIG_SCSI_ESP_CORE) += esp_scsi.o |
110 | obj-$(CONFIG_SCSI_SUNESP) += sun_esp.o | ||
110 | obj-$(CONFIG_SCSI_GDTH) += gdth.o | 111 | obj-$(CONFIG_SCSI_GDTH) += gdth.o |
111 | obj-$(CONFIG_SCSI_INITIO) += initio.o | 112 | obj-$(CONFIG_SCSI_INITIO) += initio.o |
112 | obj-$(CONFIG_SCSI_INIA100) += a100u2w.o | 113 | obj-$(CONFIG_SCSI_INIA100) += a100u2w.o |
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c deleted file mode 100644 index 2c2fe80bc42a..000000000000 --- a/drivers/scsi/esp.c +++ /dev/null | |||
@@ -1,4394 +0,0 @@ | |||
1 | /* esp.c: ESP Sun SCSI driver. | ||
2 | * | ||
3 | * Copyright (C) 1995, 1998, 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | /* TODO: | ||
7 | * | ||
8 | * 1) Maybe disable parity checking in config register one for SCSI1 | ||
9 | * targets. (Gilmore says parity error on the SBus can lock up | ||
10 | * old sun4c's) | ||
11 | * 2) Add support for DMA2 pipelining. | ||
12 | * 3) Add tagged queueing. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/blkdev.h> | ||
21 | #include <linux/proc_fs.h> | ||
22 | #include <linux/stat.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/module.h> | ||
27 | |||
28 | #include "esp.h" | ||
29 | |||
30 | #include <asm/sbus.h> | ||
31 | #include <asm/dma.h> | ||
32 | #include <asm/system.h> | ||
33 | #include <asm/ptrace.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/oplib.h> | ||
36 | #include <asm/io.h> | ||
37 | #include <asm/irq.h> | ||
38 | #ifndef __sparc_v9__ | ||
39 | #include <asm/machines.h> | ||
40 | #include <asm/idprom.h> | ||
41 | #endif | ||
42 | |||
43 | #include <scsi/scsi.h> | ||
44 | #include <scsi/scsi_cmnd.h> | ||
45 | #include <scsi/scsi_device.h> | ||
46 | #include <scsi/scsi_eh.h> | ||
47 | #include <scsi/scsi_host.h> | ||
48 | #include <scsi/scsi_tcq.h> | ||
49 | |||
50 | #define DRV_VERSION "1.101" | ||
51 | |||
52 | #define DEBUG_ESP | ||
53 | /* #define DEBUG_ESP_HME */ | ||
54 | /* #define DEBUG_ESP_DATA */ | ||
55 | /* #define DEBUG_ESP_QUEUE */ | ||
56 | /* #define DEBUG_ESP_DISCONNECT */ | ||
57 | /* #define DEBUG_ESP_STATUS */ | ||
58 | /* #define DEBUG_ESP_PHASES */ | ||
59 | /* #define DEBUG_ESP_WORKBUS */ | ||
60 | /* #define DEBUG_STATE_MACHINE */ | ||
61 | /* #define DEBUG_ESP_CMDS */ | ||
62 | /* #define DEBUG_ESP_IRQS */ | ||
63 | /* #define DEBUG_SDTR */ | ||
64 | /* #define DEBUG_ESP_SG */ | ||
65 | |||
66 | /* Use the following to sprinkle debugging messages in a way which | ||
67 | * suits you if combinations of the above become too verbose when | ||
68 | * trying to track down a specific problem. | ||
69 | */ | ||
70 | /* #define DEBUG_ESP_MISC */ | ||
71 | |||
72 | #if defined(DEBUG_ESP) | ||
73 | #define ESPLOG(foo) printk foo | ||
74 | #else | ||
75 | #define ESPLOG(foo) | ||
76 | #endif /* (DEBUG_ESP) */ | ||
77 | |||
78 | #if defined(DEBUG_ESP_HME) | ||
79 | #define ESPHME(foo) printk foo | ||
80 | #else | ||
81 | #define ESPHME(foo) | ||
82 | #endif | ||
83 | |||
84 | #if defined(DEBUG_ESP_DATA) | ||
85 | #define ESPDATA(foo) printk foo | ||
86 | #else | ||
87 | #define ESPDATA(foo) | ||
88 | #endif | ||
89 | |||
90 | #if defined(DEBUG_ESP_QUEUE) | ||
91 | #define ESPQUEUE(foo) printk foo | ||
92 | #else | ||
93 | #define ESPQUEUE(foo) | ||
94 | #endif | ||
95 | |||
96 | #if defined(DEBUG_ESP_DISCONNECT) | ||
97 | #define ESPDISC(foo) printk foo | ||
98 | #else | ||
99 | #define ESPDISC(foo) | ||
100 | #endif | ||
101 | |||
102 | #if defined(DEBUG_ESP_STATUS) | ||
103 | #define ESPSTAT(foo) printk foo | ||
104 | #else | ||
105 | #define ESPSTAT(foo) | ||
106 | #endif | ||
107 | |||
108 | #if defined(DEBUG_ESP_PHASES) | ||
109 | #define ESPPHASE(foo) printk foo | ||
110 | #else | ||
111 | #define ESPPHASE(foo) | ||
112 | #endif | ||
113 | |||
114 | #if defined(DEBUG_ESP_WORKBUS) | ||
115 | #define ESPBUS(foo) printk foo | ||
116 | #else | ||
117 | #define ESPBUS(foo) | ||
118 | #endif | ||
119 | |||
120 | #if defined(DEBUG_ESP_IRQS) | ||
121 | #define ESPIRQ(foo) printk foo | ||
122 | #else | ||
123 | #define ESPIRQ(foo) | ||
124 | #endif | ||
125 | |||
126 | #if defined(DEBUG_SDTR) | ||
127 | #define ESPSDTR(foo) printk foo | ||
128 | #else | ||
129 | #define ESPSDTR(foo) | ||
130 | #endif | ||
131 | |||
132 | #if defined(DEBUG_ESP_MISC) | ||
133 | #define ESPMISC(foo) printk foo | ||
134 | #else | ||
135 | #define ESPMISC(foo) | ||
136 | #endif | ||
137 | |||
138 | /* Command phase enumeration. */ | ||
139 | enum { | ||
140 | not_issued = 0x00, /* Still in the issue_SC queue. */ | ||
141 | |||
142 | /* Various forms of selecting a target. */ | ||
143 | #define in_slct_mask 0x10 | ||
144 | in_slct_norm = 0x10, /* ESP is arbitrating, normal selection */ | ||
145 | in_slct_stop = 0x11, /* ESP will select, then stop with IRQ */ | ||
146 | in_slct_msg = 0x12, /* select, then send a message */ | ||
147 | in_slct_tag = 0x13, /* select and send tagged queue msg */ | ||
148 | in_slct_sneg = 0x14, /* select and acquire sync capabilities */ | ||
149 | |||
150 | /* Any post selection activity. */ | ||
151 | #define in_phases_mask 0x20 | ||
152 | in_datain = 0x20, /* Data is transferring from the bus */ | ||
153 | in_dataout = 0x21, /* Data is transferring to the bus */ | ||
154 | in_data_done = 0x22, /* Last DMA data operation done (maybe) */ | ||
155 | in_msgin = 0x23, /* Eating message from target */ | ||
156 | in_msgincont = 0x24, /* Eating more msg bytes from target */ | ||
157 | in_msgindone = 0x25, /* Decide what to do with what we got */ | ||
158 | in_msgout = 0x26, /* Sending message to target */ | ||
159 | in_msgoutdone = 0x27, /* Done sending msg out */ | ||
160 | in_cmdbegin = 0x28, /* Sending cmd after abnormal selection */ | ||
161 | in_cmdend = 0x29, /* Done sending slow cmd */ | ||
162 | in_status = 0x2a, /* Was in status phase, finishing cmd */ | ||
163 | in_freeing = 0x2b, /* freeing the bus for cmd cmplt or disc */ | ||
164 | in_the_dark = 0x2c, /* Don't know what bus phase we are in */ | ||
165 | |||
166 | /* Special states, ie. not normal bus transitions... */ | ||
167 | #define in_spec_mask 0x80 | ||
168 | in_abortone = 0x80, /* Aborting one command currently */ | ||
169 | in_abortall = 0x81, /* Blowing away all commands we have */ | ||
170 | in_resetdev = 0x82, /* SCSI target reset in progress */ | ||
171 | in_resetbus = 0x83, /* SCSI bus reset in progress */ | ||
172 | in_tgterror = 0x84, /* Target did something stupid */ | ||
173 | }; | ||
174 | |||
175 | enum { | ||
176 | /* Zero has special meaning, see skipahead[12]. */ | ||
177 | /*0*/ do_never, | ||
178 | |||
179 | /*1*/ do_phase_determine, | ||
180 | /*2*/ do_reset_bus, | ||
181 | /*3*/ do_reset_complete, | ||
182 | /*4*/ do_work_bus, | ||
183 | /*5*/ do_intr_end | ||
184 | }; | ||
185 | |||
186 | /* Forward declarations. */ | ||
187 | static irqreturn_t esp_intr(int irq, void *dev_id); | ||
188 | |||
189 | /* Debugging routines */ | ||
190 | struct esp_cmdstrings { | ||
191 | u8 cmdchar; | ||
192 | char *text; | ||
193 | } esp_cmd_strings[] = { | ||
194 | /* Miscellaneous */ | ||
195 | { ESP_CMD_NULL, "ESP_NOP", }, | ||
196 | { ESP_CMD_FLUSH, "FIFO_FLUSH", }, | ||
197 | { ESP_CMD_RC, "RSTESP", }, | ||
198 | { ESP_CMD_RS, "RSTSCSI", }, | ||
199 | /* Disconnected State Group */ | ||
200 | { ESP_CMD_RSEL, "RESLCTSEQ", }, | ||
201 | { ESP_CMD_SEL, "SLCTNATN", }, | ||
202 | { ESP_CMD_SELA, "SLCTATN", }, | ||
203 | { ESP_CMD_SELAS, "SLCTATNSTOP", }, | ||
204 | { ESP_CMD_ESEL, "ENSLCTRESEL", }, | ||
205 | { ESP_CMD_DSEL, "DISSELRESEL", }, | ||
206 | { ESP_CMD_SA3, "SLCTATN3", }, | ||
207 | { ESP_CMD_RSEL3, "RESLCTSEQ", }, | ||
208 | /* Target State Group */ | ||
209 | { ESP_CMD_SMSG, "SNDMSG", }, | ||
210 | { ESP_CMD_SSTAT, "SNDSTATUS", }, | ||
211 | { ESP_CMD_SDATA, "SNDDATA", }, | ||
212 | { ESP_CMD_DSEQ, "DISCSEQ", }, | ||
213 | { ESP_CMD_TSEQ, "TERMSEQ", }, | ||
214 | { ESP_CMD_TCCSEQ, "TRGTCMDCOMPSEQ", }, | ||
215 | { ESP_CMD_DCNCT, "DISC", }, | ||
216 | { ESP_CMD_RMSG, "RCVMSG", }, | ||
217 | { ESP_CMD_RCMD, "RCVCMD", }, | ||
218 | { ESP_CMD_RDATA, "RCVDATA", }, | ||
219 | { ESP_CMD_RCSEQ, "RCVCMDSEQ", }, | ||
220 | /* Initiator State Group */ | ||
221 | { ESP_CMD_TI, "TRANSINFO", }, | ||
222 | { ESP_CMD_ICCSEQ, "INICMDSEQCOMP", }, | ||
223 | { ESP_CMD_MOK, "MSGACCEPTED", }, | ||
224 | { ESP_CMD_TPAD, "TPAD", }, | ||
225 | { ESP_CMD_SATN, "SATN", }, | ||
226 | { ESP_CMD_RATN, "RATN", }, | ||
227 | }; | ||
228 | #define NUM_ESP_COMMANDS ((sizeof(esp_cmd_strings)) / (sizeof(struct esp_cmdstrings))) | ||
229 | |||
230 | /* Print textual representation of an ESP command */ | ||
231 | static inline void esp_print_cmd(u8 espcmd) | ||
232 | { | ||
233 | u8 dma_bit = espcmd & ESP_CMD_DMA; | ||
234 | int i; | ||
235 | |||
236 | espcmd &= ~dma_bit; | ||
237 | for (i = 0; i < NUM_ESP_COMMANDS; i++) | ||
238 | if (esp_cmd_strings[i].cmdchar == espcmd) | ||
239 | break; | ||
240 | if (i == NUM_ESP_COMMANDS) | ||
241 | printk("ESP_Unknown"); | ||
242 | else | ||
243 | printk("%s%s", esp_cmd_strings[i].text, | ||
244 | ((dma_bit) ? "+DMA" : "")); | ||
245 | } | ||
246 | |||
247 | /* Print the status register's value */ | ||
248 | static inline void esp_print_statreg(u8 statreg) | ||
249 | { | ||
250 | u8 phase; | ||
251 | |||
252 | printk("STATUS<"); | ||
253 | phase = statreg & ESP_STAT_PMASK; | ||
254 | printk("%s,", (phase == ESP_DOP ? "DATA-OUT" : | ||
255 | (phase == ESP_DIP ? "DATA-IN" : | ||
256 | (phase == ESP_CMDP ? "COMMAND" : | ||
257 | (phase == ESP_STATP ? "STATUS" : | ||
258 | (phase == ESP_MOP ? "MSG-OUT" : | ||
259 | (phase == ESP_MIP ? "MSG_IN" : | ||
260 | "unknown"))))))); | ||
261 | if (statreg & ESP_STAT_TDONE) | ||
262 | printk("TRANS_DONE,"); | ||
263 | if (statreg & ESP_STAT_TCNT) | ||
264 | printk("TCOUNT_ZERO,"); | ||
265 | if (statreg & ESP_STAT_PERR) | ||
266 | printk("P_ERROR,"); | ||
267 | if (statreg & ESP_STAT_SPAM) | ||
268 | printk("SPAM,"); | ||
269 | if (statreg & ESP_STAT_INTR) | ||
270 | printk("IRQ,"); | ||
271 | printk(">"); | ||
272 | } | ||
273 | |||
274 | /* Print the interrupt register's value */ | ||
275 | static inline void esp_print_ireg(u8 intreg) | ||
276 | { | ||
277 | printk("INTREG< "); | ||
278 | if (intreg & ESP_INTR_S) | ||
279 | printk("SLCT_NATN "); | ||
280 | if (intreg & ESP_INTR_SATN) | ||
281 | printk("SLCT_ATN "); | ||
282 | if (intreg & ESP_INTR_RSEL) | ||
283 | printk("RSLCT "); | ||
284 | if (intreg & ESP_INTR_FDONE) | ||
285 | printk("FDONE "); | ||
286 | if (intreg & ESP_INTR_BSERV) | ||
287 | printk("BSERV "); | ||
288 | if (intreg & ESP_INTR_DC) | ||
289 | printk("DISCNCT "); | ||
290 | if (intreg & ESP_INTR_IC) | ||
291 | printk("ILL_CMD "); | ||
292 | if (intreg & ESP_INTR_SR) | ||
293 | printk("SCSI_BUS_RESET "); | ||
294 | printk(">"); | ||
295 | } | ||
296 | |||
297 | /* Print the sequence step registers contents */ | ||
298 | static inline void esp_print_seqreg(u8 stepreg) | ||
299 | { | ||
300 | stepreg &= ESP_STEP_VBITS; | ||
301 | printk("STEP<%s>", | ||
302 | (stepreg == ESP_STEP_ASEL ? "SLCT_ARB_CMPLT" : | ||
303 | (stepreg == ESP_STEP_SID ? "1BYTE_MSG_SENT" : | ||
304 | (stepreg == ESP_STEP_NCMD ? "NOT_IN_CMD_PHASE" : | ||
305 | (stepreg == ESP_STEP_PPC ? "CMD_BYTES_LOST" : | ||
306 | (stepreg == ESP_STEP_FINI4 ? "CMD_SENT_OK" : | ||
307 | "UNKNOWN")))))); | ||
308 | } | ||
309 | |||
310 | static char *phase_string(int phase) | ||
311 | { | ||
312 | switch (phase) { | ||
313 | case not_issued: | ||
314 | return "UNISSUED"; | ||
315 | case in_slct_norm: | ||
316 | return "SLCTNORM"; | ||
317 | case in_slct_stop: | ||
318 | return "SLCTSTOP"; | ||
319 | case in_slct_msg: | ||
320 | return "SLCTMSG"; | ||
321 | case in_slct_tag: | ||
322 | return "SLCTTAG"; | ||
323 | case in_slct_sneg: | ||
324 | return "SLCTSNEG"; | ||
325 | case in_datain: | ||
326 | return "DATAIN"; | ||
327 | case in_dataout: | ||
328 | return "DATAOUT"; | ||
329 | case in_data_done: | ||
330 | return "DATADONE"; | ||
331 | case in_msgin: | ||
332 | return "MSGIN"; | ||
333 | case in_msgincont: | ||
334 | return "MSGINCONT"; | ||
335 | case in_msgindone: | ||
336 | return "MSGINDONE"; | ||
337 | case in_msgout: | ||
338 | return "MSGOUT"; | ||
339 | case in_msgoutdone: | ||
340 | return "MSGOUTDONE"; | ||
341 | case in_cmdbegin: | ||
342 | return "CMDBEGIN"; | ||
343 | case in_cmdend: | ||
344 | return "CMDEND"; | ||
345 | case in_status: | ||
346 | return "STATUS"; | ||
347 | case in_freeing: | ||
348 | return "FREEING"; | ||
349 | case in_the_dark: | ||
350 | return "CLUELESS"; | ||
351 | case in_abortone: | ||
352 | return "ABORTONE"; | ||
353 | case in_abortall: | ||
354 | return "ABORTALL"; | ||
355 | case in_resetdev: | ||
356 | return "RESETDEV"; | ||
357 | case in_resetbus: | ||
358 | return "RESETBUS"; | ||
359 | case in_tgterror: | ||
360 | return "TGTERROR"; | ||
361 | default: | ||
362 | return "UNKNOWN"; | ||
363 | }; | ||
364 | } | ||
365 | |||
366 | #ifdef DEBUG_STATE_MACHINE | ||
367 | static inline void esp_advance_phase(struct scsi_cmnd *s, int newphase) | ||
368 | { | ||
369 | ESPLOG(("<%s>", phase_string(newphase))); | ||
370 | s->SCp.sent_command = s->SCp.phase; | ||
371 | s->SCp.phase = newphase; | ||
372 | } | ||
373 | #else | ||
374 | #define esp_advance_phase(__s, __newphase) \ | ||
375 | (__s)->SCp.sent_command = (__s)->SCp.phase; \ | ||
376 | (__s)->SCp.phase = (__newphase); | ||
377 | #endif | ||
378 | |||
379 | #ifdef DEBUG_ESP_CMDS | ||
380 | static inline void esp_cmd(struct esp *esp, u8 cmd) | ||
381 | { | ||
382 | esp->espcmdlog[esp->espcmdent] = cmd; | ||
383 | esp->espcmdent = (esp->espcmdent + 1) & 31; | ||
384 | sbus_writeb(cmd, esp->eregs + ESP_CMD); | ||
385 | } | ||
386 | #else | ||
387 | #define esp_cmd(__esp, __cmd) \ | ||
388 | sbus_writeb((__cmd), ((__esp)->eregs) + ESP_CMD) | ||
389 | #endif | ||
390 | |||
391 | #define ESP_INTSOFF(__dregs) \ | ||
392 | sbus_writel(sbus_readl((__dregs)+DMA_CSR)&~(DMA_INT_ENAB), (__dregs)+DMA_CSR) | ||
393 | #define ESP_INTSON(__dregs) \ | ||
394 | sbus_writel(sbus_readl((__dregs)+DMA_CSR)|DMA_INT_ENAB, (__dregs)+DMA_CSR) | ||
395 | #define ESP_IRQ_P(__dregs) \ | ||
396 | (sbus_readl((__dregs)+DMA_CSR) & (DMA_HNDL_INTR|DMA_HNDL_ERROR)) | ||
397 | |||
398 | /* How we use the various Linux SCSI data structures for operation. | ||
399 | * | ||
400 | * struct scsi_cmnd: | ||
401 | * | ||
402 | * We keep track of the synchronous capabilities of a target | ||
403 | * in the device member, using sync_min_period and | ||
404 | * sync_max_offset. These are the values we directly write | ||
405 | * into the ESP registers while running a command. If offset | ||
406 | * is zero the ESP will use asynchronous transfers. | ||
407 | * If the borken flag is set we assume we shouldn't even bother | ||
408 | * trying to negotiate for synchronous transfer as this target | ||
409 | * is really stupid. If we notice the target is dropping the | ||
410 | * bus, and we have been allowing it to disconnect, we clear | ||
411 | * the disconnect flag. | ||
412 | */ | ||
413 | |||
414 | |||
415 | /* Manipulation of the ESP command queues. Thanks to the aha152x driver | ||
416 | * and its author, Juergen E. Fischer, for the methods used here. | ||
417 | * Note that these are per-ESP queues, not global queues like | ||
418 | * the aha152x driver uses. | ||
419 | */ | ||
420 | static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC) | ||
421 | { | ||
422 | struct scsi_cmnd *end; | ||
423 | |||
424 | new_SC->host_scribble = (unsigned char *) NULL; | ||
425 | if (!*SC) | ||
426 | *SC = new_SC; | ||
427 | else { | ||
428 | for (end=*SC;end->host_scribble;end=(struct scsi_cmnd *)end->host_scribble) | ||
429 | ; | ||
430 | end->host_scribble = (unsigned char *) new_SC; | ||
431 | } | ||
432 | } | ||
433 | |||
434 | static inline void prepend_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC) | ||
435 | { | ||
436 | new_SC->host_scribble = (unsigned char *) *SC; | ||
437 | *SC = new_SC; | ||
438 | } | ||
439 | |||
440 | static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd **SC) | ||
441 | { | ||
442 | struct scsi_cmnd *ptr; | ||
443 | ptr = *SC; | ||
444 | if (ptr) | ||
445 | *SC = (struct scsi_cmnd *) (*SC)->host_scribble; | ||
446 | return ptr; | ||
447 | } | ||
448 | |||
449 | static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC, int target, int lun) | ||
450 | { | ||
451 | struct scsi_cmnd *ptr, *prev; | ||
452 | |||
453 | for (ptr = *SC, prev = NULL; | ||
454 | ptr && ((ptr->device->id != target) || (ptr->device->lun != lun)); | ||
455 | prev = ptr, ptr = (struct scsi_cmnd *) ptr->host_scribble) | ||
456 | ; | ||
457 | if (ptr) { | ||
458 | if (prev) | ||
459 | prev->host_scribble=ptr->host_scribble; | ||
460 | else | ||
461 | *SC=(struct scsi_cmnd *)ptr->host_scribble; | ||
462 | } | ||
463 | return ptr; | ||
464 | } | ||
465 | |||
466 | /* Resetting various pieces of the ESP scsi driver chipset/buses. */ | ||
467 | static void esp_reset_dma(struct esp *esp) | ||
468 | { | ||
469 | int can_do_burst16, can_do_burst32, can_do_burst64; | ||
470 | int can_do_sbus64; | ||
471 | u32 tmp; | ||
472 | |||
473 | can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; | ||
474 | can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; | ||
475 | can_do_burst64 = 0; | ||
476 | can_do_sbus64 = 0; | ||
477 | if (sbus_can_dma_64bit(esp->sdev)) | ||
478 | can_do_sbus64 = 1; | ||
479 | if (sbus_can_burst64(esp->sdev)) | ||
480 | can_do_burst64 = (esp->bursts & DMA_BURST64) != 0; | ||
481 | |||
482 | /* Punt the DVMA into a known state. */ | ||
483 | if (esp->dma->revision != dvmahme) { | ||
484 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
485 | sbus_writel(tmp | DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
486 | sbus_writel(tmp & ~DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
487 | } | ||
488 | switch (esp->dma->revision) { | ||
489 | case dvmahme: | ||
490 | /* This is the HME DVMA gate array. */ | ||
491 | |||
492 | sbus_writel(DMA_RESET_FAS366, esp->dregs + DMA_CSR); | ||
493 | sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
494 | |||
495 | esp->prev_hme_dmacsr = (DMA_PARITY_OFF|DMA_2CLKS|DMA_SCSI_DISAB|DMA_INT_ENAB); | ||
496 | esp->prev_hme_dmacsr &= ~(DMA_ENABLE|DMA_ST_WRITE|DMA_BRST_SZ); | ||
497 | |||
498 | if (can_do_burst64) | ||
499 | esp->prev_hme_dmacsr |= DMA_BRST64; | ||
500 | else if (can_do_burst32) | ||
501 | esp->prev_hme_dmacsr |= DMA_BRST32; | ||
502 | |||
503 | if (can_do_sbus64) { | ||
504 | esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64; | ||
505 | sbus_set_sbus64(esp->sdev, esp->bursts); | ||
506 | } | ||
507 | |||
508 | /* This chip is horrible. */ | ||
509 | while (sbus_readl(esp->dregs + DMA_CSR) & DMA_PEND_READ) | ||
510 | udelay(1); | ||
511 | |||
512 | sbus_writel(0, esp->dregs + DMA_CSR); | ||
513 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
514 | |||
515 | /* This is necessary to avoid having the SCSI channel | ||
516 | * engine lock up on us. | ||
517 | */ | ||
518 | sbus_writel(0, esp->dregs + DMA_ADDR); | ||
519 | |||
520 | break; | ||
521 | case dvmarev2: | ||
522 | /* This is the gate array found in the sun4m | ||
523 | * NCR SBUS I/O subsystem. | ||
524 | */ | ||
525 | if (esp->erev != esp100) { | ||
526 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
527 | sbus_writel(tmp | DMA_3CLKS, esp->dregs + DMA_CSR); | ||
528 | } | ||
529 | break; | ||
530 | case dvmarev3: | ||
531 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
532 | tmp &= ~DMA_3CLKS; | ||
533 | tmp |= DMA_2CLKS; | ||
534 | if (can_do_burst32) { | ||
535 | tmp &= ~DMA_BRST_SZ; | ||
536 | tmp |= DMA_BRST32; | ||
537 | } | ||
538 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
539 | break; | ||
540 | case dvmaesc1: | ||
541 | /* This is the DMA unit found on SCSI/Ether cards. */ | ||
542 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
543 | tmp |= DMA_ADD_ENABLE; | ||
544 | tmp &= ~DMA_BCNT_ENAB; | ||
545 | if (!can_do_burst32 && can_do_burst16) { | ||
546 | tmp |= DMA_ESC_BURST; | ||
547 | } else { | ||
548 | tmp &= ~(DMA_ESC_BURST); | ||
549 | } | ||
550 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
551 | break; | ||
552 | default: | ||
553 | break; | ||
554 | }; | ||
555 | ESP_INTSON(esp->dregs); | ||
556 | } | ||
557 | |||
558 | /* Reset the ESP chip, _not_ the SCSI bus. */ | ||
559 | static void __init esp_reset_esp(struct esp *esp) | ||
560 | { | ||
561 | u8 family_code, version; | ||
562 | int i; | ||
563 | |||
564 | /* Now reset the ESP chip */ | ||
565 | esp_cmd(esp, ESP_CMD_RC); | ||
566 | esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
567 | esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
568 | |||
569 | /* Reload the configuration registers */ | ||
570 | sbus_writeb(esp->cfact, esp->eregs + ESP_CFACT); | ||
571 | esp->prev_stp = 0; | ||
572 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
573 | esp->prev_soff = 0; | ||
574 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
575 | sbus_writeb(esp->neg_defp, esp->eregs + ESP_TIMEO); | ||
576 | |||
577 | /* This is the only point at which it is reliable to read | ||
578 | * the ID-code for the fast ESP chip variants. | ||
579 | */ | ||
580 | esp->max_period = ((35 * esp->ccycle) / 1000); | ||
581 | if (esp->erev == fast) { | ||
582 | version = sbus_readb(esp->eregs + ESP_UID); | ||
583 | family_code = (version & 0xf8) >> 3; | ||
584 | if (family_code == 0x02) | ||
585 | esp->erev = fas236; | ||
586 | else if (family_code == 0x0a) | ||
587 | esp->erev = fashme; /* Version is usually '5'. */ | ||
588 | else | ||
589 | esp->erev = fas100a; | ||
590 | ESPMISC(("esp%d: FAST chip is %s (family=%d, version=%d)\n", | ||
591 | esp->esp_id, | ||
592 | (esp->erev == fas236) ? "fas236" : | ||
593 | ((esp->erev == fas100a) ? "fas100a" : | ||
594 | "fasHME"), family_code, (version & 7))); | ||
595 | |||
596 | esp->min_period = ((4 * esp->ccycle) / 1000); | ||
597 | } else { | ||
598 | esp->min_period = ((5 * esp->ccycle) / 1000); | ||
599 | } | ||
600 | esp->max_period = (esp->max_period + 3)>>2; | ||
601 | esp->min_period = (esp->min_period + 3)>>2; | ||
602 | |||
603 | sbus_writeb(esp->config1, esp->eregs + ESP_CFG1); | ||
604 | switch (esp->erev) { | ||
605 | case esp100: | ||
606 | /* nothing to do */ | ||
607 | break; | ||
608 | case esp100a: | ||
609 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
610 | break; | ||
611 | case esp236: | ||
612 | /* Slow 236 */ | ||
613 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
614 | esp->prev_cfg3 = esp->config3[0]; | ||
615 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
616 | break; | ||
617 | case fashme: | ||
618 | esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); | ||
619 | /* fallthrough... */ | ||
620 | case fas236: | ||
621 | /* Fast 236 or HME */ | ||
622 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
623 | for (i = 0; i < 16; i++) { | ||
624 | if (esp->erev == fashme) { | ||
625 | u8 cfg3; | ||
626 | |||
627 | cfg3 = ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH; | ||
628 | if (esp->scsi_id >= 8) | ||
629 | cfg3 |= ESP_CONFIG3_IDBIT3; | ||
630 | esp->config3[i] |= cfg3; | ||
631 | } else { | ||
632 | esp->config3[i] |= ESP_CONFIG3_FCLK; | ||
633 | } | ||
634 | } | ||
635 | esp->prev_cfg3 = esp->config3[0]; | ||
636 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
637 | if (esp->erev == fashme) { | ||
638 | esp->radelay = 80; | ||
639 | } else { | ||
640 | if (esp->diff) | ||
641 | esp->radelay = 0; | ||
642 | else | ||
643 | esp->radelay = 96; | ||
644 | } | ||
645 | break; | ||
646 | case fas100a: | ||
647 | /* Fast 100a */ | ||
648 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
649 | for (i = 0; i < 16; i++) | ||
650 | esp->config3[i] |= ESP_CONFIG3_FCLOCK; | ||
651 | esp->prev_cfg3 = esp->config3[0]; | ||
652 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
653 | esp->radelay = 32; | ||
654 | break; | ||
655 | default: | ||
656 | panic("esp: what could it be... I wonder..."); | ||
657 | break; | ||
658 | }; | ||
659 | |||
660 | /* Eat any bitrot in the chip */ | ||
661 | sbus_readb(esp->eregs + ESP_INTRPT); | ||
662 | udelay(100); | ||
663 | } | ||
664 | |||
665 | /* This places the ESP into a known state at boot time. */ | ||
666 | static void __init esp_bootup_reset(struct esp *esp) | ||
667 | { | ||
668 | u8 tmp; | ||
669 | |||
670 | /* Reset the DMA */ | ||
671 | esp_reset_dma(esp); | ||
672 | |||
673 | /* Reset the ESP */ | ||
674 | esp_reset_esp(esp); | ||
675 | |||
676 | /* Reset the SCSI bus, but tell ESP not to generate an irq */ | ||
677 | tmp = sbus_readb(esp->eregs + ESP_CFG1); | ||
678 | tmp |= ESP_CONFIG1_SRRDISAB; | ||
679 | sbus_writeb(tmp, esp->eregs + ESP_CFG1); | ||
680 | |||
681 | esp_cmd(esp, ESP_CMD_RS); | ||
682 | udelay(400); | ||
683 | |||
684 | sbus_writeb(esp->config1, esp->eregs + ESP_CFG1); | ||
685 | |||
686 | /* Eat any bitrot in the chip and we are done... */ | ||
687 | sbus_readb(esp->eregs + ESP_INTRPT); | ||
688 | } | ||
689 | |||
690 | static int __init esp_find_dvma(struct esp *esp, struct sbus_dev *dma_sdev) | ||
691 | { | ||
692 | struct sbus_dev *sdev = esp->sdev; | ||
693 | struct sbus_dma *dma; | ||
694 | |||
695 | if (dma_sdev != NULL) { | ||
696 | for_each_dvma(dma) { | ||
697 | if (dma->sdev == dma_sdev) | ||
698 | break; | ||
699 | } | ||
700 | } else { | ||
701 | for_each_dvma(dma) { | ||
702 | /* If allocated already, can't use it. */ | ||
703 | if (dma->allocated) | ||
704 | continue; | ||
705 | |||
706 | if (dma->sdev == NULL) | ||
707 | break; | ||
708 | |||
709 | /* If bus + slot are the same and it has the | ||
710 | * correct OBP name, it's ours. | ||
711 | */ | ||
712 | if (sdev->bus == dma->sdev->bus && | ||
713 | sdev->slot == dma->sdev->slot && | ||
714 | (!strcmp(dma->sdev->prom_name, "dma") || | ||
715 | !strcmp(dma->sdev->prom_name, "espdma"))) | ||
716 | break; | ||
717 | } | ||
718 | } | ||
719 | |||
720 | /* If we don't know how to handle the dvma, | ||
721 | * do not use this device. | ||
722 | */ | ||
723 | if (dma == NULL) { | ||
724 | printk("Cannot find dvma for ESP%d's SCSI\n", esp->esp_id); | ||
725 | return -1; | ||
726 | } | ||
727 | if (dma->allocated) { | ||
728 | printk("esp%d: can't use my espdma\n", esp->esp_id); | ||
729 | return -1; | ||
730 | } | ||
731 | dma->allocated = 1; | ||
732 | esp->dma = dma; | ||
733 | esp->dregs = dma->regs; | ||
734 | |||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | static int __init esp_map_regs(struct esp *esp, int hme) | ||
739 | { | ||
740 | struct sbus_dev *sdev = esp->sdev; | ||
741 | struct resource *res; | ||
742 | |||
743 | /* On HME, two reg sets exist, first is DVMA, | ||
744 | * second is ESP registers. | ||
745 | */ | ||
746 | if (hme) | ||
747 | res = &sdev->resource[1]; | ||
748 | else | ||
749 | res = &sdev->resource[0]; | ||
750 | |||
751 | esp->eregs = sbus_ioremap(res, 0, ESP_REG_SIZE, "ESP Registers"); | ||
752 | |||
753 | if (!esp->eregs) | ||
754 | return -1; | ||
755 | return 0; | ||
756 | } | ||
757 | |||
758 | static int __init esp_map_cmdarea(struct esp *esp) | ||
759 | { | ||
760 | struct sbus_dev *sdev = esp->sdev; | ||
761 | |||
762 | esp->esp_command = sbus_alloc_consistent(sdev, 16, | ||
763 | &esp->esp_command_dvma); | ||
764 | if (esp->esp_command == NULL || | ||
765 | esp->esp_command_dvma == 0) | ||
766 | return -1; | ||
767 | return 0; | ||
768 | } | ||
769 | |||
770 | static int __init esp_register_irq(struct esp *esp) | ||
771 | { | ||
772 | esp->ehost->irq = esp->irq = esp->sdev->irqs[0]; | ||
773 | |||
774 | /* We used to try various overly-clever things to | ||
775 | * reduce the interrupt processing overhead on | ||
776 | * sun4c/sun4m when multiple ESP's shared the | ||
777 | * same IRQ. It was too complex and messy to | ||
778 | * sanely maintain. | ||
779 | */ | ||
780 | if (request_irq(esp->ehost->irq, esp_intr, | ||
781 | IRQF_SHARED, "ESP SCSI", esp)) { | ||
782 | printk("esp%d: Cannot acquire irq line\n", | ||
783 | esp->esp_id); | ||
784 | return -1; | ||
785 | } | ||
786 | |||
787 | printk("esp%d: IRQ %d ", esp->esp_id, | ||
788 | esp->ehost->irq); | ||
789 | |||
790 | return 0; | ||
791 | } | ||
792 | |||
793 | static void __init esp_get_scsi_id(struct esp *esp) | ||
794 | { | ||
795 | struct sbus_dev *sdev = esp->sdev; | ||
796 | struct device_node *dp = sdev->ofdev.node; | ||
797 | |||
798 | esp->scsi_id = of_getintprop_default(dp, | ||
799 | "initiator-id", | ||
800 | -1); | ||
801 | if (esp->scsi_id == -1) | ||
802 | esp->scsi_id = of_getintprop_default(dp, | ||
803 | "scsi-initiator-id", | ||
804 | -1); | ||
805 | if (esp->scsi_id == -1) | ||
806 | esp->scsi_id = (sdev->bus == NULL) ? 7 : | ||
807 | of_getintprop_default(sdev->bus->ofdev.node, | ||
808 | "scsi-initiator-id", | ||
809 | 7); | ||
810 | esp->ehost->this_id = esp->scsi_id; | ||
811 | esp->scsi_id_mask = (1 << esp->scsi_id); | ||
812 | |||
813 | } | ||
814 | |||
815 | static void __init esp_get_clock_params(struct esp *esp) | ||
816 | { | ||
817 | struct sbus_dev *sdev = esp->sdev; | ||
818 | int prom_node = esp->prom_node; | ||
819 | int sbus_prom_node; | ||
820 | unsigned int fmhz; | ||
821 | u8 ccf; | ||
822 | |||
823 | if (sdev != NULL && sdev->bus != NULL) | ||
824 | sbus_prom_node = sdev->bus->prom_node; | ||
825 | else | ||
826 | sbus_prom_node = 0; | ||
827 | |||
828 | /* This is getting messy but it has to be done | ||
829 | * correctly or else you get weird behavior all | ||
830 | * over the place. We are trying to basically | ||
831 | * figure out three pieces of information. | ||
832 | * | ||
833 | * a) Clock Conversion Factor | ||
834 | * | ||
835 | * This is a representation of the input | ||
836 | * crystal clock frequency going into the | ||
837 | * ESP on this machine. Any operation whose | ||
838 | * timing is longer than 400ns depends on this | ||
839 | * value being correct. For example, you'll | ||
840 | * get blips for arbitration/selection during | ||
841 | * high load or with multiple targets if this | ||
842 | * is not set correctly. | ||
843 | * | ||
844 | * b) Selection Time-Out | ||
845 | * | ||
846 | * The ESP isn't very bright and will arbitrate | ||
847 | * for the bus and try to select a target | ||
848 | * forever if you let it. This value tells | ||
849 | * the ESP when it has taken too long to | ||
850 | * negotiate and that it should interrupt | ||
851 | * the CPU so we can see what happened. | ||
852 | * The value is computed as follows (from | ||
853 | * NCR/Symbios chip docs). | ||
854 | * | ||
855 | * (Time Out Period) * (Input Clock) | ||
856 | * STO = ---------------------------------- | ||
857 | * (8192) * (Clock Conversion Factor) | ||
858 | * | ||
859 | * You usually want the time out period to be | ||
860 | * around 250ms, I think we'll set it a little | ||
861 | * bit higher to account for fully loaded SCSI | ||
862 | * buses and slow devices that don't respond so | ||
863 | * quickly to selection attempts. (yeah, I know | ||
864 | * this is out of spec, but there are a lot of | ||
865 | * buggy pieces of firmware out there so bite me) | ||
866 | * | ||
867 | * c) Empirical constants for synchronous offset | ||
868 | * and transfer period register values | ||
869 | * | ||
870 | * This entails the smallest and largest sync | ||
871 | * period we could ever handle on this ESP. | ||
872 | */ | ||
873 | |||
874 | fmhz = prom_getintdefault(prom_node, "clock-frequency", -1); | ||
875 | if (fmhz == -1) | ||
876 | fmhz = (!sbus_prom_node) ? 0 : | ||
877 | prom_getintdefault(sbus_prom_node, "clock-frequency", -1); | ||
878 | |||
879 | if (fmhz <= (5000000)) | ||
880 | ccf = 0; | ||
881 | else | ||
882 | ccf = (((5000000 - 1) + (fmhz))/(5000000)); | ||
883 | |||
884 | if (!ccf || ccf > 8) { | ||
885 | /* If we can't find anything reasonable, | ||
886 | * just assume 20MHZ. This is the clock | ||
887 | * frequency of the older sun4c's where I've | ||
888 | * been unable to find the clock-frequency | ||
889 | * PROM property. All other machines provide | ||
890 | * useful values it seems. | ||
891 | */ | ||
892 | ccf = ESP_CCF_F4; | ||
893 | fmhz = (20000000); | ||
894 | } | ||
895 | |||
896 | if (ccf == (ESP_CCF_F7 + 1)) | ||
897 | esp->cfact = ESP_CCF_F0; | ||
898 | else if (ccf == ESP_CCF_NEVER) | ||
899 | esp->cfact = ESP_CCF_F2; | ||
900 | else | ||
901 | esp->cfact = ccf; | ||
902 | esp->raw_cfact = ccf; | ||
903 | |||
904 | esp->cfreq = fmhz; | ||
905 | esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz); | ||
906 | esp->ctick = ESP_TICK(ccf, esp->ccycle); | ||
907 | esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf); | ||
908 | esp->sync_defp = SYNC_DEFP_SLOW; | ||
909 | |||
910 | printk("SCSI ID %d Clk %dMHz CCYC=%d CCF=%d TOut %d ", | ||
911 | esp->scsi_id, (fmhz / 1000000), | ||
912 | (int)esp->ccycle, (int)ccf, (int) esp->neg_defp); | ||
913 | } | ||
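To make the selection time-out arithmetic above concrete, here is a minimal standalone sketch. It evaluates only the STO formula quoted in the comment; the helper name and the fixed 250ms figure are illustrative, not part of the driver, which derives neg_defp through its own macros.

/* Sketch: the NCR/Symbios selection time-out formula from the comment
 * in esp_get_clock_params().  Hypothetical helper, illustration only.
 */
static unsigned int esp_example_sto(unsigned int clock_hz, unsigned int ccf)
{
	unsigned long long num = 250ULL * clock_hz;	/* 250ms time-out */

	/* STO = (time-out * input clock) / (8192 * CCF) */
	return (unsigned int)(num / (8192ULL * ccf * 1000));
}

/* Example: a 20MHz crystal gives ccf = (5000000 - 1 + 20000000) / 5000000
 * = 4, and esp_example_sto(20000000, 4) = 5000000000 / 32768000 = 152
 * (0x98), which fits comfortably in the chip's 8-bit time-out register.
 */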
914 | |||
915 | static void __init esp_get_bursts(struct esp *esp, struct sbus_dev *dma) | ||
916 | { | ||
917 | struct sbus_dev *sdev = esp->sdev; | ||
918 | u8 bursts; | ||
919 | |||
920 | bursts = prom_getintdefault(esp->prom_node, "burst-sizes", 0xff); | ||
921 | |||
922 | if (dma) { | ||
923 | u8 tmp = prom_getintdefault(dma->prom_node, | ||
924 | "burst-sizes", 0xff); | ||
925 | if (tmp != 0xff) | ||
926 | bursts &= tmp; | ||
927 | } | ||
928 | |||
929 | if (sdev->bus) { | ||
930 | u8 tmp = prom_getintdefault(sdev->bus->prom_node, | ||
931 | "burst-sizes", 0xff); | ||
932 | if (tmp != 0xff) | ||
933 | bursts &= tmp; | ||
934 | } | ||
935 | |||
936 | if (bursts == 0xff || | ||
937 | (bursts & DMA_BURST16) == 0 || | ||
938 | (bursts & DMA_BURST32) == 0) | ||
939 | bursts = (DMA_BURST32 - 1); | ||
940 | |||
941 | esp->bursts = bursts; | ||
942 | } | ||
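Since "burst-sizes" is a bitmask of supported burst lengths, the function above simply intersects the masks from the ESP node, its DMA companion, and the parent SBUS. A worked instance with made-up property values, assuming the usual sbus encoding where DMA_BURSTn is the bit for an n-byte burst:

/* Hypothetical example:
 *   ESP node  "burst-sizes" = 0x3f  (1/2/4/8/16/32-byte bursts)
 *   DMA node  "burst-sizes" = 0x37  (no 8-byte bursts)
 *   SBUS node property absent       (0xff, i.e. no constraint)
 *   => bursts = 0x3f & 0x37 = 0x37
 * Had the intersection lost DMA_BURST16 or DMA_BURST32 (or stayed
 * 0xff throughout), the fallback DMA_BURST32 - 1 = 0x1f would select
 * every burst size below 32 bytes instead.
 */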
943 | |||
944 | static void __init esp_get_revision(struct esp *esp) | ||
945 | { | ||
946 | u8 tmp; | ||
947 | |||
948 | esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); | ||
949 | esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); | ||
950 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
951 | |||
952 | tmp = sbus_readb(esp->eregs + ESP_CFG2); | ||
953 | tmp &= ~ESP_CONFIG2_MAGIC; | ||
954 | if (tmp != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { | ||
955 | /* If what we write to cfg2 does not come back, cfg2 | ||
956 | * is not implemented, therefore this must be a plain | ||
957 | * esp100. | ||
958 | */ | ||
959 | esp->erev = esp100; | ||
960 | printk("NCR53C90(esp100)\n"); | ||
961 | } else { | ||
962 | esp->config2 = 0; | ||
963 | esp->prev_cfg3 = esp->config3[0] = 5; | ||
964 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
965 | sbus_writeb(0, esp->eregs + ESP_CFG3); | ||
966 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
967 | |||
968 | tmp = sbus_readb(esp->eregs + ESP_CFG3); | ||
969 | if (tmp != 5) { | ||
970 | /* The cfg2 register is implemented, however | ||
971 | * cfg3 is not, must be esp100a. | ||
972 | */ | ||
973 | esp->erev = esp100a; | ||
974 | printk("NCR53C90A(esp100a)\n"); | ||
975 | } else { | ||
976 | int target; | ||
977 | |||
978 | for (target = 0; target < 16; target++) | ||
979 | esp->config3[target] = 0; | ||
980 | esp->prev_cfg3 = 0; | ||
981 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
982 | |||
983 | /* All of cfg{1,2,3} implemented, must be one of | ||
984 | * the fas variants, figure out which one. | ||
985 | */ | ||
986 | if (esp->raw_cfact > ESP_CCF_F5) { | ||
987 | esp->erev = fast; | ||
988 | esp->sync_defp = SYNC_DEFP_FAST; | ||
989 | printk("NCR53C9XF(espfast)\n"); | ||
990 | } else { | ||
991 | esp->erev = esp236; | ||
992 | printk("NCR53C9x(esp236)\n"); | ||
993 | } | ||
994 | esp->config2 = 0; | ||
995 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
996 | } | ||
997 | } | ||
998 | } | ||
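Summarizing the write-and-read-back ladder above, as read from the code (the HME variant is recognized separately, from its "SUNW,fas" OBP name at probe time, not by this ladder):

/* cfg2 readback mismatches ................ esp100  (NCR53C90)
 * cfg2 ok, cfg3 readback fails ............ esp100a (NCR53C90A)
 * cfg2 + cfg3 ok, raw_cfact > ESP_CCF_F5 .. fast    (NCR53C9XF)
 * cfg2 + cfg3 ok otherwise ................ esp236
 */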
999 | |||
1000 | static void __init esp_init_swstate(struct esp *esp) | ||
1001 | { | ||
1002 | int i; | ||
1003 | |||
1004 | /* Command queues... */ | ||
1005 | esp->current_SC = NULL; | ||
1006 | esp->disconnected_SC = NULL; | ||
1007 | esp->issue_SC = NULL; | ||
1008 | |||
1009 | /* Target and current command state... */ | ||
1010 | esp->targets_present = 0; | ||
1011 | esp->resetting_bus = 0; | ||
1012 | esp->snip = 0; | ||
1013 | |||
1014 | init_waitqueue_head(&esp->reset_queue); | ||
1015 | |||
1016 | /* Debugging... */ | ||
1017 | for (i = 0; i < 32; i++) | ||
1018 | esp->espcmdlog[i] = 0; | ||
1019 | esp->espcmdent = 0; | ||
1020 | |||
1021 | /* MSG phase state... */ | ||
1022 | for (i = 0; i < 16; i++) { | ||
1023 | esp->cur_msgout[i] = 0; | ||
1024 | esp->cur_msgin[i] = 0; | ||
1025 | } | ||
1026 | esp->prevmsgout = esp->prevmsgin = 0; | ||
1027 | esp->msgout_len = esp->msgin_len = 0; | ||
1028 | |||
1029 | /* Clear the one behind caches to hold unmatchable values. */ | ||
1030 | esp->prev_soff = esp->prev_stp = esp->prev_cfg3 = 0xff; | ||
1031 | esp->prev_hme_dmacsr = 0xffffffff; | ||
1032 | } | ||
1033 | |||
1034 | static int __init detect_one_esp(struct scsi_host_template *tpnt, | ||
1035 | struct device *dev, | ||
1036 | struct sbus_dev *esp_dev, | ||
1037 | struct sbus_dev *espdma, | ||
1038 | struct sbus_bus *sbus, | ||
1039 | int hme) | ||
1040 | { | ||
1041 | static int instance; | ||
1042 | struct Scsi_Host *esp_host = scsi_host_alloc(tpnt, sizeof(struct esp)); | ||
1043 | struct esp *esp; | ||
1044 | |||
1045 | if (!esp_host) | ||
1046 | return -ENOMEM; | ||
1047 | |||
1048 | if (hme) | ||
1049 | esp_host->max_id = 16; | ||
1050 | esp = (struct esp *) esp_host->hostdata; | ||
1051 | esp->ehost = esp_host; | ||
1052 | esp->sdev = esp_dev; | ||
1053 | esp->esp_id = instance; | ||
1054 | esp->prom_node = esp_dev->prom_node; | ||
1055 | prom_getstring(esp->prom_node, "name", esp->prom_name, | ||
1056 | sizeof(esp->prom_name)); | ||
1057 | |||
1058 | if (esp_find_dvma(esp, espdma) < 0) | ||
1059 | goto fail_unlink; | ||
1060 | if (esp_map_regs(esp, hme) < 0) { | ||
1061 | printk("ESP registers unmappable"); | ||
1062 | goto fail_dvma_release; | ||
1063 | } | ||
1064 | if (esp_map_cmdarea(esp) < 0) { | ||
1065 | printk("ESP DVMA transport area unmappable"); | ||
1066 | goto fail_unmap_regs; | ||
1067 | } | ||
1068 | if (esp_register_irq(esp) < 0) | ||
1069 | goto fail_unmap_cmdarea; | ||
1070 | |||
1071 | esp_get_scsi_id(esp); | ||
1072 | |||
1073 | esp->diff = prom_getbool(esp->prom_node, "differential"); | ||
1074 | if (esp->diff) | ||
1075 | printk("Differential "); | ||
1076 | |||
1077 | esp_get_clock_params(esp); | ||
1078 | esp_get_bursts(esp, espdma); | ||
1079 | esp_get_revision(esp); | ||
1080 | esp_init_swstate(esp); | ||
1081 | |||
1082 | esp_bootup_reset(esp); | ||
1083 | |||
1084 | if (scsi_add_host(esp_host, dev)) | ||
1085 | goto fail_free_irq; | ||
1086 | |||
1087 | dev_set_drvdata(&esp_dev->ofdev.dev, esp); | ||
1088 | |||
1089 | scsi_scan_host(esp_host); | ||
1090 | instance++; | ||
1091 | |||
1092 | return 0; | ||
1093 | |||
1094 | fail_free_irq: | ||
1095 | free_irq(esp->ehost->irq, esp); | ||
1096 | |||
1097 | fail_unmap_cmdarea: | ||
1098 | sbus_free_consistent(esp->sdev, 16, | ||
1099 | (void *) esp->esp_command, | ||
1100 | esp->esp_command_dvma); | ||
1101 | |||
1102 | fail_unmap_regs: | ||
1103 | sbus_iounmap(esp->eregs, ESP_REG_SIZE); | ||
1104 | |||
1105 | fail_dvma_release: | ||
1106 | esp->dma->allocated = 0; | ||
1107 | |||
1108 | fail_unlink: | ||
1109 | scsi_host_put(esp_host); | ||
1110 | return -1; | ||
1111 | } | ||
1112 | |||
1113 | /* Tear down one previously probed ESP instance, detaching it | ||
1114 | * from the SCSI mid-layer and releasing everything acquired | ||
1115 | * at probe time. | ||
1116 | static int __devexit esp_remove_common(struct esp *esp) | ||
1117 | { | ||
1118 | unsigned int irq = esp->ehost->irq; | ||
1119 | |||
1120 | scsi_remove_host(esp->ehost); | ||
1121 | |||
1122 | ESP_INTSOFF(esp->dregs); | ||
1123 | #if 0 | ||
1124 | esp_reset_dma(esp); | ||
1125 | esp_reset_esp(esp); | ||
1126 | #endif | ||
1127 | |||
1128 | free_irq(irq, esp); | ||
1129 | sbus_free_consistent(esp->sdev, 16, | ||
1130 | (void *) esp->esp_command, esp->esp_command_dvma); | ||
1131 | sbus_iounmap(esp->eregs, ESP_REG_SIZE); | ||
1132 | esp->dma->allocated = 0; | ||
1133 | |||
1134 | scsi_host_put(esp->ehost); | ||
1135 | |||
1136 | return 0; | ||
1137 | } | ||
1138 | |||
1139 | |||
1140 | #ifdef CONFIG_SUN4 | ||
1141 | |||
1142 | #include <asm/sun4paddr.h> | ||
1143 | |||
1144 | static struct sbus_dev sun4_esp_dev; | ||
1145 | |||
1146 | static int __init esp_sun4_probe(struct scsi_host_template *tpnt) | ||
1147 | { | ||
1148 | if (sun4_esp_physaddr) { | ||
1149 | memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev)); | ||
1150 | sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; | ||
1151 | sun4_esp_dev.irqs[0] = 4; | ||
1152 | sun4_esp_dev.resource[0].start = sun4_esp_physaddr; | ||
1153 | sun4_esp_dev.resource[0].end = | ||
1154 | sun4_esp_physaddr + ESP_REG_SIZE - 1; | ||
1155 | sun4_esp_dev.resource[0].flags = IORESOURCE_IO; | ||
1156 | |||
1157 | return detect_one_esp(tpnt, NULL, | ||
1158 | &sun4_esp_dev, NULL, NULL, 0); | ||
1159 | } | ||
1160 | return 0; | ||
1161 | } | ||
1162 | |||
1163 | static int __devexit esp_sun4_remove(void) | ||
1164 | { | ||
1165 | struct of_device *dev = &sun4_esp_dev.ofdev; | ||
1166 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
1167 | |||
1168 | return esp_remove_common(esp); | ||
1169 | } | ||
1170 | |||
1171 | #else /* !CONFIG_SUN4 */ | ||
1172 | |||
1173 | static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match) | ||
1174 | { | ||
1175 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); | ||
1176 | struct device_node *dp = dev->node; | ||
1177 | struct sbus_dev *dma_sdev = NULL; | ||
1178 | int hme = 0; | ||
1179 | |||
1180 | if (dp->parent && | ||
1181 | (!strcmp(dp->parent->name, "espdma") || | ||
1182 | !strcmp(dp->parent->name, "dma"))) | ||
1183 | dma_sdev = sdev->parent; | ||
1184 | else if (!strcmp(dp->name, "SUNW,fas")) { | ||
1185 | dma_sdev = sdev; | ||
1186 | hme = 1; | ||
1187 | } | ||
1188 | |||
1189 | return detect_one_esp(match->data, &dev->dev, | ||
1190 | sdev, dma_sdev, sdev->bus, hme); | ||
1191 | } | ||
1192 | |||
1193 | static int __devexit esp_sbus_remove(struct of_device *dev) | ||
1194 | { | ||
1195 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
1196 | |||
1197 | return esp_remove_common(esp); | ||
1198 | } | ||
1199 | |||
1200 | #endif /* !CONFIG_SUN4 */ | ||
1201 | |||
1202 | /* The info function will return whatever useful | ||
1203 | * information the developer sees fit. If not provided, then | ||
1204 | * the name field will be used instead. | ||
1205 | */ | ||
1206 | static const char *esp_info(struct Scsi_Host *host) | ||
1207 | { | ||
1208 | struct esp *esp; | ||
1209 | |||
1210 | esp = (struct esp *) host->hostdata; | ||
1211 | switch (esp->erev) { | ||
1212 | case esp100: | ||
1213 | return "Sparc ESP100 (NCR53C90)"; | ||
1214 | case esp100a: | ||
1215 | return "Sparc ESP100A (NCR53C90A)"; | ||
1216 | case esp236: | ||
1217 | return "Sparc ESP236"; | ||
1218 | case fas236: | ||
1219 | return "Sparc ESP236-FAST"; | ||
1220 | case fashme: | ||
1221 | return "Sparc ESP366-HME"; | ||
1222 | case fas100a: | ||
1223 | return "Sparc ESP100A-FAST"; | ||
1224 | default: | ||
1225 | return "Bogon ESP revision"; | ||
1226 | } | ||
1227 | } | ||
1228 | |||
1229 | /* From Wolfgang Stanglmeier's NCR scsi driver. */ | ||
1230 | struct info_str | ||
1231 | { | ||
1232 | char *buffer; | ||
1233 | int length; | ||
1234 | int offset; | ||
1235 | int pos; | ||
1236 | }; | ||
1237 | |||
1238 | static void copy_mem_info(struct info_str *info, char *data, int len) | ||
1239 | { | ||
1240 | if (info->pos + len > info->length) | ||
1241 | len = info->length - info->pos; | ||
1242 | |||
1243 | if (info->pos + len < info->offset) { | ||
1244 | info->pos += len; | ||
1245 | return; | ||
1246 | } | ||
1247 | if (info->pos < info->offset) { | ||
1248 | data += (info->offset - info->pos); | ||
1249 | len -= (info->offset - info->pos); | ||
1250 | } | ||
1251 | |||
1252 | if (len > 0) { | ||
1253 | memcpy(info->buffer + info->pos, data, len); | ||
1254 | info->pos += len; | ||
1255 | } | ||
1256 | } | ||
1257 | |||
1258 | static int copy_info(struct info_str *info, char *fmt, ...) | ||
1259 | { | ||
1260 | va_list args; | ||
1261 | char buf[81]; | ||
1262 | int len; | ||
1263 | |||
1264 | va_start(args, fmt); | ||
1265 | len = vscnprintf(buf, sizeof(buf), fmt, args); | ||
1266 | va_end(args); | ||
1267 | |||
1268 | copy_mem_info(info, buf, len); | ||
1269 | return len; | ||
1270 | } | ||
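These helpers implement the old-style proc_info read window borrowed from the NCR driver: pos counts all output generated so far, output wholly before offset is skipped, output past the length cap is dropped, and whatever falls in between is memcpy'd into the caller's buffer. A small trace with hypothetical numbers:

/* Hypothetical trace: offset = 10, length = 80, pos = 0.
 *   copy_info(...25 bytes...)  -> first 10 bytes skipped, the
 *                                 remaining 15 copied, pos = 15
 *   copy_info(...25 bytes...)  -> pos >= offset, all 25 copied,
 *                                 pos = 40
 * Copying stops once pos reaches length (80 here).
 */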
1271 | |||
1272 | static int esp_host_info(struct esp *esp, char *ptr, off_t offset, int len) | ||
1273 | { | ||
1274 | struct scsi_device *sdev; | ||
1275 | struct info_str info; | ||
1276 | int i; | ||
1277 | |||
1278 | info.buffer = ptr; | ||
1279 | info.length = len; | ||
1280 | info.offset = offset; | ||
1281 | info.pos = 0; | ||
1282 | |||
1283 | copy_info(&info, "Sparc ESP Host Adapter:\n"); | ||
1284 | copy_info(&info, "\tPROM node\t\t%08x\n", (unsigned int) esp->prom_node); | ||
1285 | copy_info(&info, "\tPROM name\t\t%s\n", esp->prom_name); | ||
1286 | copy_info(&info, "\tESP Model\t\t"); | ||
1287 | switch (esp->erev) { | ||
1288 | case esp100: | ||
1289 | copy_info(&info, "ESP100\n"); | ||
1290 | break; | ||
1291 | case esp100a: | ||
1292 | copy_info(&info, "ESP100A\n"); | ||
1293 | break; | ||
1294 | case esp236: | ||
1295 | copy_info(&info, "ESP236\n"); | ||
1296 | break; | ||
1297 | case fas236: | ||
1298 | copy_info(&info, "FAS236\n"); | ||
1299 | break; | ||
1300 | case fas100a: | ||
1301 | copy_info(&info, "FAS100A\n"); | ||
1302 | break; | ||
1303 | case fast: | ||
1304 | copy_info(&info, "FAST\n"); | ||
1305 | break; | ||
1306 | case fashme: | ||
1307 | copy_info(&info, "Happy Meal FAS\n"); | ||
1308 | break; | ||
1309 | case espunknown: | ||
1310 | default: | ||
1311 | copy_info(&info, "Unknown!\n"); | ||
1312 | break; | ||
1313 | } | ||
1314 | copy_info(&info, "\tDMA Revision\t\t"); | ||
1315 | switch (esp->dma->revision) { | ||
1316 | case dvmarev0: | ||
1317 | copy_info(&info, "Rev 0\n"); | ||
1318 | break; | ||
1319 | case dvmaesc1: | ||
1320 | copy_info(&info, "ESC Rev 1\n"); | ||
1321 | break; | ||
1322 | case dvmarev1: | ||
1323 | copy_info(&info, "Rev 1\n"); | ||
1324 | break; | ||
1325 | case dvmarev2: | ||
1326 | copy_info(&info, "Rev 2\n"); | ||
1327 | break; | ||
1328 | case dvmarev3: | ||
1329 | copy_info(&info, "Rev 3\n"); | ||
1330 | break; | ||
1331 | case dvmarevplus: | ||
1332 | copy_info(&info, "Rev 1+\n"); | ||
1333 | break; | ||
1334 | case dvmahme: | ||
1335 | copy_info(&info, "Rev HME/FAS\n"); | ||
1336 | break; | ||
1337 | default: | ||
1338 | copy_info(&info, "Unknown!\n"); | ||
1339 | break; | ||
1340 | } | ||
1341 | copy_info(&info, "\tLive Targets\t\t[ "); | ||
1342 | for (i = 0; i < 16; i++) { | ||
1343 | if (esp->targets_present & (1 << i)) | ||
1344 | copy_info(&info, "%d ", i); | ||
1345 | } | ||
1346 | copy_info(&info, "]\n\n"); | ||
1347 | |||
1348 | /* Now describe the state of each existing target. */ | ||
1349 | copy_info(&info, "Target #\tconfig3\t\tSync Capabilities\tDisconnect\tWide\n"); | ||
1350 | |||
1351 | shost_for_each_device(sdev, esp->ehost) { | ||
1352 | struct esp_device *esp_dev = sdev->hostdata; | ||
1353 | uint id = sdev->id; | ||
1354 | |||
1355 | if (!(esp->targets_present & (1 << id))) | ||
1356 | continue; | ||
1357 | |||
1358 | copy_info(&info, "%d\t\t", id); | ||
1359 | copy_info(&info, "%08lx\t", esp->config3[id]); | ||
1360 | copy_info(&info, "[%02lx,%02lx]\t\t\t", | ||
1361 | esp_dev->sync_max_offset, | ||
1362 | esp_dev->sync_min_period); | ||
1363 | copy_info(&info, "%s\t\t", | ||
1364 | esp_dev->disconnect ? "yes" : "no"); | ||
1365 | copy_info(&info, "%s\n", | ||
1366 | (esp->config3[id] & ESP_CONFIG3_EWIDE) ? "yes" : "no"); | ||
1367 | } | ||
1368 | return info.pos > info.offset? info.pos - info.offset : 0; | ||
1369 | } | ||
1370 | |||
1371 | /* ESP proc filesystem code. */ | ||
1372 | static int esp_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, | ||
1373 | int length, int inout) | ||
1374 | { | ||
1375 | struct esp *esp = (struct esp *) host->hostdata; | ||
1376 | |||
1377 | if (inout) | ||
1378 | return -EINVAL; /* not yet */ | ||
1379 | |||
1380 | if (start) | ||
1381 | *start = buffer; | ||
1382 | |||
1383 | return esp_host_info(esp, buffer, offset, length); | ||
1384 | } | ||
1385 | |||
1386 | static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp) | ||
1387 | { | ||
1388 | if (sp->use_sg == 0) { | ||
1389 | sp->SCp.this_residual = sp->request_bufflen; | ||
1390 | sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; | ||
1391 | sp->SCp.buffers_residual = 0; | ||
1392 | if (sp->request_bufflen) { | ||
1393 | sp->SCp.have_data_in = sbus_map_single(esp->sdev, sp->SCp.buffer, | ||
1394 | sp->SCp.this_residual, | ||
1395 | sp->sc_data_direction); | ||
1396 | sp->SCp.ptr = (char *) ((unsigned long)sp->SCp.have_data_in); | ||
1397 | } else { | ||
1398 | sp->SCp.ptr = NULL; | ||
1399 | } | ||
1400 | } else { | ||
1401 | sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; | ||
1402 | sp->SCp.buffers_residual = sbus_map_sg(esp->sdev, | ||
1403 | sp->SCp.buffer, | ||
1404 | sp->use_sg, | ||
1405 | sp->sc_data_direction); | ||
1406 | sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer); | ||
1407 | sp->SCp.ptr = (char *) ((unsigned long)sg_dma_address(sp->SCp.buffer)); | ||
1408 | } | ||
1409 | } | ||
1410 | |||
1411 | static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp) | ||
1412 | { | ||
1413 | if (sp->use_sg) { | ||
1414 | sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg, | ||
1415 | sp->sc_data_direction); | ||
1416 | } else if (sp->request_bufflen) { | ||
1417 | sbus_unmap_single(esp->sdev, | ||
1418 | sp->SCp.have_data_in, | ||
1419 | sp->request_bufflen, | ||
1420 | sp->sc_data_direction); | ||
1421 | } | ||
1422 | } | ||
1423 | |||
1424 | static void esp_restore_pointers(struct esp *esp, struct scsi_cmnd *sp) | ||
1425 | { | ||
1426 | struct esp_pointers *ep = &esp->data_pointers[sp->device->id]; | ||
1427 | |||
1428 | sp->SCp.ptr = ep->saved_ptr; | ||
1429 | sp->SCp.buffer = ep->saved_buffer; | ||
1430 | sp->SCp.this_residual = ep->saved_this_residual; | ||
1431 | sp->SCp.buffers_residual = ep->saved_buffers_residual; | ||
1432 | } | ||
1433 | |||
1434 | static void esp_save_pointers(struct esp *esp, struct scsi_cmnd *sp) | ||
1435 | { | ||
1436 | struct esp_pointers *ep = &esp->data_pointers[sp->device->id]; | ||
1437 | |||
1438 | ep->saved_ptr = sp->SCp.ptr; | ||
1439 | ep->saved_buffer = sp->SCp.buffer; | ||
1440 | ep->saved_this_residual = sp->SCp.this_residual; | ||
1441 | ep->saved_buffers_residual = sp->SCp.buffers_residual; | ||
1442 | } | ||
1443 | |||
1444 | /* Some rules: | ||
1445 | * | ||
1446 | * 1) Never ever panic while something is live on the bus. | ||
1447 | * If there is to be any chance of syncing the disks this | ||
1448 | * rule is to be obeyed. | ||
1449 | * | ||
1450 | * 2) Any target that causes a foul condition will no longer | ||
1451 | * have synchronous transfers done to it, no questions | ||
1452 | * asked. | ||
1453 | * | ||
1454 | * 3) Keep register accesses to a minimum. Think about some | ||
1455 | * day when we have Xbus machines this is running on and | ||
1456 | * the ESP chip is on the other end of the machine on a | ||
1457 | * different board from the cpu where this is running. | ||
1458 | */ | ||
1459 | |||
1460 | /* Fire off a command. We assume the bus is free and that the only | ||
1461 | * case where we could see an interrupt is where we have disconnected | ||
1462 | * commands active and they are trying to reselect us. | ||
1463 | */ | ||
1464 | static inline void esp_check_cmd(struct esp *esp, struct scsi_cmnd *sp) | ||
1465 | { | ||
1466 | switch (sp->cmd_len) { | ||
1467 | case 6: | ||
1468 | case 10: | ||
1469 | case 12: | ||
1470 | esp->esp_slowcmd = 0; | ||
1471 | break; | ||
1472 | |||
1473 | default: | ||
1474 | esp->esp_slowcmd = 1; | ||
1475 | esp->esp_scmdleft = sp->cmd_len; | ||
1476 | esp->esp_scmdp = &sp->cmnd[0]; | ||
1477 | break; | ||
1478 | } | ||
1479 | } | ||
1480 | |||
1481 | static inline void build_sync_nego_msg(struct esp *esp, int period, int offset) | ||
1482 | { | ||
1483 | esp->cur_msgout[0] = EXTENDED_MESSAGE; | ||
1484 | esp->cur_msgout[1] = 3; | ||
1485 | esp->cur_msgout[2] = EXTENDED_SDTR; | ||
1486 | esp->cur_msgout[3] = period; | ||
1487 | esp->cur_msgout[4] = offset; | ||
1488 | esp->msgout_len = 5; | ||
1489 | } | ||
1490 | |||
1491 | /* SIZE is in bits, currently HME only supports 16 bit wide transfers. */ | ||
1492 | static inline void build_wide_nego_msg(struct esp *esp, int size) | ||
1493 | { | ||
1494 | esp->cur_msgout[0] = EXTENDED_MESSAGE; | ||
1495 | esp->cur_msgout[1] = 2; | ||
1496 | esp->cur_msgout[2] = EXTENDED_WDTR; | ||
1497 | switch (size) { | ||
1498 | case 32: | ||
1499 | esp->cur_msgout[3] = 2; | ||
1500 | break; | ||
1501 | case 16: | ||
1502 | esp->cur_msgout[3] = 1; | ||
1503 | break; | ||
1504 | case 8: | ||
1505 | default: | ||
1506 | esp->cur_msgout[3] = 0; | ||
1507 | break; | ||
1508 | } | ||
1509 | |||
1510 | esp->msgout_len = 4; | ||
1511 | } | ||
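Both builders emit standard SCSI-2 extended messages. Assuming the usual message codes (EXTENDED_MESSAGE = 0x01, EXTENDED_SDTR = 0x01, EXTENDED_WDTR = 0x03) and the conventional 4ns period-factor units, the byte streams look like this; the values shown are illustrative:

/* build_sync_nego_msg(esp, 0x32, 15) lays down:
 *   01 03 01 32 0f
 *   |  |  |  |  +-- max REQ/ACK offset 15
 *   |  |  |  +----- period factor 0x32 = 50 * 4ns = 200ns (5MB/s)
 *   |  |  +-------- EXTENDED_SDTR
 *   |  +----------- extended message length (3)
 *   +-------------- EXTENDED_MESSAGE
 *
 * build_wide_nego_msg(esp, 16) lays down:
 *   01 02 03 01     (EXTENDED_WDTR, width exponent 1 => 16-bit)
 */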
1512 | |||
1513 | static void esp_exec_cmd(struct esp *esp) | ||
1514 | { | ||
1515 | struct scsi_cmnd *SCptr; | ||
1516 | struct scsi_device *SDptr; | ||
1517 | struct esp_device *esp_dev; | ||
1518 | volatile u8 *cmdp = esp->esp_command; | ||
1519 | u8 the_esp_command; | ||
1520 | int lun, target; | ||
1521 | int i; | ||
1522 | |||
1523 | /* Hold off if we have disconnected commands and | ||
1524 | * an IRQ is showing... | ||
1525 | */ | ||
1526 | if (esp->disconnected_SC && ESP_IRQ_P(esp->dregs)) | ||
1527 | return; | ||
1528 | |||
1529 | /* Grab first member of the issue queue. */ | ||
1530 | SCptr = esp->current_SC = remove_first_SC(&esp->issue_SC); | ||
1531 | |||
1532 | /* Safe to panic here because current_SC is null. */ | ||
1533 | if (!SCptr) | ||
1534 | panic("esp: esp_exec_cmd and issue queue is NULL"); | ||
1535 | |||
1536 | SDptr = SCptr->device; | ||
1537 | esp_dev = SDptr->hostdata; | ||
1538 | lun = SCptr->device->lun; | ||
1539 | target = SCptr->device->id; | ||
1540 | |||
1541 | esp->snip = 0; | ||
1542 | esp->msgout_len = 0; | ||
1543 | |||
1544 | /* Send it out whole, or piece by piece? The ESP | ||
1545 | * only knows how to automatically send out 6, 10, | ||
1546 | * and 12 byte commands. I used to think that the | ||
1547 | * Linux SCSI code would never throw anything other | ||
1548 | * than that to us, but then again there is the | ||
1549 | * SCSI generic driver which can send us anything. | ||
1550 | */ | ||
1551 | esp_check_cmd(esp, SCptr); | ||
1552 | |||
1553 | /* If arbitration/selection is successful, the ESP will leave | ||
1554 | * ATN asserted, causing the target to go into message out | ||
1555 | * phase. The ESP will feed the target the identify and then | ||
1556 | * the target can only legally go to one of command, | ||
1557 | * datain/out, status, or message in phase, or stay in message | ||
1558 | * out phase (should we be trying to send a sync negotiation | ||
1559 | * message after the identify). It is not allowed to drop | ||
1560 | * BSY, but some buggy targets do and we check for this | ||
1561 | * condition in the selection complete code. Most of the time | ||
1562 | * we'll make the command bytes available to the ESP and it | ||
1563 | * will not interrupt us until it finishes command phase, we | ||
1564 | * cannot do this for command sizes the ESP does not | ||
1565 | * understand and in this case we'll get interrupted right | ||
1566 | * when the target goes into command phase. | ||
1567 | * | ||
1568 | * It is absolutely _illegal_ in the presence of SCSI-2 devices | ||
1569 | * to use the ESP select w/o ATN command. When SCSI-2 devices are | ||
1570 | * present on the bus we _must_ always go straight to message out | ||
1571 | * phase with an identify message for the target. Since | ||
1572 | * selection w/o ATN was merely an option in SCSI-1, doing SCSI-2 | ||
1573 | * selections should not confuse SCSI-1 devices, we hope. | ||
1574 | */ | ||
1575 | |||
1576 | if (esp_dev->sync) { | ||
1577 | /* this target's sync is known */ | ||
1578 | #ifndef __sparc_v9__ | ||
1579 | do_sync_known: | ||
1580 | #endif | ||
1581 | if (esp_dev->disconnect) | ||
1582 | *cmdp++ = IDENTIFY(1, lun); | ||
1583 | else | ||
1584 | *cmdp++ = IDENTIFY(0, lun); | ||
1585 | |||
1586 | if (esp->esp_slowcmd) { | ||
1587 | the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA); | ||
1588 | esp_advance_phase(SCptr, in_slct_stop); | ||
1589 | } else { | ||
1590 | the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA); | ||
1591 | esp_advance_phase(SCptr, in_slct_norm); | ||
1592 | } | ||
1593 | } else if (!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) { | ||
1594 | /* After the bootup SCSI code sends both the | ||
1595 | * TEST_UNIT_READY and INQUIRY commands we want | ||
1596 | * to at least attempt allowing the device to | ||
1597 | * disconnect. | ||
1598 | */ | ||
1599 | ESPMISC(("esp: Selecting device for first time. target=%d " | ||
1600 | "lun=%d\n", target, SCptr->device->lun)); | ||
1601 | if (!SDptr->borken && !esp_dev->disconnect) | ||
1602 | esp_dev->disconnect = 1; | ||
1603 | |||
1604 | *cmdp++ = IDENTIFY(0, lun); | ||
1605 | esp->prevmsgout = NOP; | ||
1606 | esp_advance_phase(SCptr, in_slct_norm); | ||
1607 | the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA); | ||
1608 | |||
1609 | /* Take no chances... */ | ||
1610 | esp_dev->sync_max_offset = 0; | ||
1611 | esp_dev->sync_min_period = 0; | ||
1612 | } else { | ||
1613 | /* Sorry, I have had way too many problems with | ||
1614 | * various CDROM devices on ESP. -DaveM | ||
1615 | */ | ||
1616 | int cdrom_hwbug_wkaround = 0; | ||
1617 | |||
1618 | #ifndef __sparc_v9__ | ||
1619 | /* Never allow disconnects or synchronous transfers on | ||
1620 | * SparcStation1 and SparcStation1+. Allowing those | ||
1621 | * to be enabled seems to lockup the machine completely. | ||
1622 | */ | ||
1623 | if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || | ||
1624 | (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { | ||
1625 | /* But we are nice and allow tapes and removable | ||
1626 | * disks (but not CDROMs) to disconnect. | ||
1627 | */ | ||
1628 | if (SDptr->type == TYPE_TAPE || | ||
1629 | (SDptr->type != TYPE_ROM && SDptr->removable)) | ||
1630 | esp_dev->disconnect = 1; | ||
1631 | else | ||
1632 | esp_dev->disconnect = 0; | ||
1633 | esp_dev->sync_max_offset = 0; | ||
1634 | esp_dev->sync_min_period = 0; | ||
1635 | esp_dev->sync = 1; | ||
1636 | esp->snip = 0; | ||
1637 | goto do_sync_known; | ||
1638 | } | ||
1639 | #endif /* !(__sparc_v9__) */ | ||
1640 | |||
1641 | /* We've talked to this guy before, | ||
1642 | * but never negotiated. Let's try, | ||
1643 | * need to attempt WIDE first, before | ||
1644 | * sync nego, as per SCSI 2 standard. | ||
1645 | */ | ||
1646 | if (esp->erev == fashme && !esp_dev->wide) { | ||
1647 | if (!SDptr->borken && | ||
1648 | SDptr->type != TYPE_ROM && | ||
1649 | SDptr->removable == 0) { | ||
1650 | build_wide_nego_msg(esp, 16); | ||
1651 | esp_dev->wide = 1; | ||
1652 | esp->wnip = 1; | ||
1653 | goto after_nego_msg_built; | ||
1654 | } else { | ||
1655 | esp_dev->wide = 1; | ||
1656 | /* Fall through and try sync. */ | ||
1657 | } | ||
1658 | } | ||
1659 | |||
1660 | if (!SDptr->borken) { | ||
1661 | if (SDptr->type == TYPE_ROM) { | ||
1662 | /* Nice try sucker... */ | ||
1663 | ESPMISC(("esp%d: Disabling sync for buggy " | ||
1664 | "CDROM.\n", esp->esp_id)); | ||
1665 | cdrom_hwbug_wkaround = 1; | ||
1666 | build_sync_nego_msg(esp, 0, 0); | ||
1667 | } else if (SDptr->removable != 0) { | ||
1668 | ESPMISC(("esp%d: Not negotiating sync/wide but " | ||
1669 | "allowing disconnect for removable media.\n", | ||
1670 | esp->esp_id)); | ||
1671 | build_sync_nego_msg(esp, 0, 0); | ||
1672 | } else { | ||
1673 | build_sync_nego_msg(esp, esp->sync_defp, 15); | ||
1674 | } | ||
1675 | } else { | ||
1676 | build_sync_nego_msg(esp, 0, 0); | ||
1677 | } | ||
1678 | esp_dev->sync = 1; | ||
1679 | esp->snip = 1; | ||
1680 | |||
1681 | after_nego_msg_built: | ||
1682 | /* A fix for broken SCSI1 targets, when they disconnect | ||
1683 | * they lock up the bus and confuse ESP. So disallow | ||
1684 | * disconnects for SCSI1 targets for now until we | ||
1685 | * find a better fix. | ||
1686 | * | ||
1687 | * Addendum: This is funny, I figured out what was going | ||
1688 | * on. The blotzed SCSI1 target would disconnect, | ||
1689 | * one of the other SCSI2 targets or both would be | ||
1690 | * disconnected as well. The SCSI1 target would | ||
1691 | * stay disconnected long enough that we start | ||
1692 | * up a command on one of the SCSI2 targets. As | ||
1693 | * the ESP is arbitrating for the bus the SCSI1 | ||
1694 | * target begins to arbitrate as well to reselect | ||
1695 | * the ESP. The SCSI1 target refuses to drop its | ||
1696 | * ID bit on the data bus even though the ESP is | ||
1697 | * at ID 7 and is the obvious winner for any | ||
1698 | * arbitration. The ESP is a poor sport and refuses | ||
1699 | * to lose arbitration, it will continue indefinitely | ||
1700 | * trying to arbitrate for the bus and can only be | ||
1701 | * stopped via a chip reset or SCSI bus reset. | ||
1702 | * Therefore _no_ disconnects for SCSI1 targets | ||
1703 | * thank you very much. ;-) | ||
1704 | */ | ||
1705 | if (((SDptr->scsi_level < 3) && | ||
1706 | (SDptr->type != TYPE_TAPE) && | ||
1707 | SDptr->removable == 0) || | ||
1708 | cdrom_hwbug_wkaround || SDptr->borken) { | ||
1709 | ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d " | ||
1710 | "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun)); | ||
1711 | esp_dev->disconnect = 0; | ||
1712 | *cmdp++ = IDENTIFY(0, lun); | ||
1713 | } else { | ||
1714 | *cmdp++ = IDENTIFY(1, lun); | ||
1715 | } | ||
1716 | |||
1717 | /* ESP fifo is only so big... | ||
1718 | * Make this look like a slow command. | ||
1719 | */ | ||
1720 | esp->esp_slowcmd = 1; | ||
1721 | esp->esp_scmdleft = SCptr->cmd_len; | ||
1722 | esp->esp_scmdp = &SCptr->cmnd[0]; | ||
1723 | |||
1724 | the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA); | ||
1725 | esp_advance_phase(SCptr, in_slct_msg); | ||
1726 | } | ||
1727 | |||
1728 | if (!esp->esp_slowcmd) | ||
1729 | for (i = 0; i < SCptr->cmd_len; i++) | ||
1730 | *cmdp++ = SCptr->cmnd[i]; | ||
1731 | |||
1732 | /* HME sucks... */ | ||
1733 | if (esp->erev == fashme) | ||
1734 | sbus_writeb((target & 0xf) | (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT), | ||
1735 | esp->eregs + ESP_BUSID); | ||
1736 | else | ||
1737 | sbus_writeb(target & 7, esp->eregs + ESP_BUSID); | ||
1738 | if (esp->prev_soff != esp_dev->sync_max_offset || | ||
1739 | esp->prev_stp != esp_dev->sync_min_period || | ||
1740 | (esp->erev > esp100a && | ||
1741 | esp->prev_cfg3 != esp->config3[target])) { | ||
1742 | esp->prev_soff = esp_dev->sync_max_offset; | ||
1743 | esp->prev_stp = esp_dev->sync_min_period; | ||
1744 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
1745 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
1746 | if (esp->erev > esp100a) { | ||
1747 | esp->prev_cfg3 = esp->config3[target]; | ||
1748 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
1749 | } | ||
1750 | } | ||
1751 | i = (cmdp - esp->esp_command); | ||
1752 | |||
1753 | if (esp->erev == fashme) { | ||
1754 | esp_cmd(esp, ESP_CMD_FLUSH); /* Grrr! */ | ||
1755 | |||
1756 | /* Set up the DMA and HME counters */ | ||
1757 | sbus_writeb(i, esp->eregs + ESP_TCLOW); | ||
1758 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
1759 | sbus_writeb(0, esp->eregs + FAS_RLO); | ||
1760 | sbus_writeb(0, esp->eregs + FAS_RHI); | ||
1761 | esp_cmd(esp, the_esp_command); | ||
1762 | |||
1763 | /* Talk about touchy hardware... */ | ||
1764 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
1765 | (DMA_SCSI_DISAB | DMA_ENABLE)) & | ||
1766 | ~(DMA_ST_WRITE)); | ||
1767 | sbus_writel(16, esp->dregs + DMA_COUNT); | ||
1768 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
1769 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
1770 | } else { | ||
1771 | u32 tmp; | ||
1772 | |||
1773 | /* Set up the DMA and ESP counters */ | ||
1774 | sbus_writeb(i, esp->eregs + ESP_TCLOW); | ||
1775 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
1776 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
1777 | tmp &= ~DMA_ST_WRITE; | ||
1778 | tmp |= DMA_ENABLE; | ||
1779 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
1780 | if (esp->dma->revision == dvmaesc1) { | ||
1781 | if (i) /* Workaround ESC gate array SBUS rerun bug. */ | ||
1782 | sbus_writel(PAGE_SIZE, esp->dregs + DMA_COUNT); | ||
1783 | } | ||
1784 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
1785 | |||
1786 | /* Tell ESP to "go". */ | ||
1787 | esp_cmd(esp, the_esp_command); | ||
1788 | } | ||
1789 | } | ||
1790 | |||
1791 | /* Queue a SCSI command delivered from the mid-level Linux SCSI code. */ | ||
1792 | static int esp_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | ||
1793 | { | ||
1794 | struct esp *esp; | ||
1795 | |||
1796 | /* Set up func ptr and initial driver cmd-phase. */ | ||
1797 | SCpnt->scsi_done = done; | ||
1798 | SCpnt->SCp.phase = not_issued; | ||
1799 | |||
1800 | /* We use the scratch area. */ | ||
1801 | ESPQUEUE(("esp_queue: target=%d lun=%d ", SCpnt->device->id, SCpnt->device->lun)); | ||
1802 | ESPDISC(("N<%02x,%02x>", SCpnt->device->id, SCpnt->device->lun)); | ||
1803 | |||
1804 | esp = (struct esp *) SCpnt->device->host->hostdata; | ||
1805 | esp_get_dmabufs(esp, SCpnt); | ||
1806 | esp_save_pointers(esp, SCpnt); /* FIXME for tag queueing */ | ||
1807 | |||
1808 | SCpnt->SCp.Status = CHECK_CONDITION; | ||
1809 | SCpnt->SCp.Message = 0xff; | ||
1810 | SCpnt->SCp.sent_command = 0; | ||
1811 | |||
1812 | /* Place into our queue. */ | ||
1813 | if (SCpnt->cmnd[0] == REQUEST_SENSE) { | ||
1814 | ESPQUEUE(("RQSENSE\n")); | ||
1815 | prepend_SC(&esp->issue_SC, SCpnt); | ||
1816 | } else { | ||
1817 | ESPQUEUE(("\n")); | ||
1818 | append_SC(&esp->issue_SC, SCpnt); | ||
1819 | } | ||
1820 | |||
1821 | /* Run it now if we can. */ | ||
1822 | if (!esp->current_SC && !esp->resetting_bus) | ||
1823 | esp_exec_cmd(esp); | ||
1824 | |||
1825 | return 0; | ||
1826 | } | ||
1827 | |||
1828 | /* Dump driver state. */ | ||
1829 | static void esp_dump_cmd(struct scsi_cmnd *SCptr) | ||
1830 | { | ||
1831 | ESPLOG(("[tgt<%02x> lun<%02x> " | ||
1832 | "pphase<%s> cphase<%s>]", | ||
1833 | SCptr->device->id, SCptr->device->lun, | ||
1834 | phase_string(SCptr->SCp.sent_command), | ||
1835 | phase_string(SCptr->SCp.phase))); | ||
1836 | } | ||
1837 | |||
1838 | static void esp_dump_state(struct esp *esp) | ||
1839 | { | ||
1840 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
1841 | #ifdef DEBUG_ESP_CMDS | ||
1842 | int i; | ||
1843 | #endif | ||
1844 | |||
1845 | ESPLOG(("esp%d: dumping state\n", esp->esp_id)); | ||
1846 | ESPLOG(("esp%d: dma -- cond_reg<%08x> addr<%08x>\n", | ||
1847 | esp->esp_id, | ||
1848 | sbus_readl(esp->dregs + DMA_CSR), | ||
1849 | sbus_readl(esp->dregs + DMA_ADDR))); | ||
1850 | ESPLOG(("esp%d: SW [sreg<%02x> sstep<%02x> ireg<%02x>]\n", | ||
1851 | esp->esp_id, esp->sreg, esp->seqreg, esp->ireg)); | ||
1852 | ESPLOG(("esp%d: HW reread [sreg<%02x> sstep<%02x> ireg<%02x>]\n", | ||
1853 | esp->esp_id, | ||
1854 | sbus_readb(esp->eregs + ESP_STATUS), | ||
1855 | sbus_readb(esp->eregs + ESP_SSTEP), | ||
1856 | sbus_readb(esp->eregs + ESP_INTRPT))); | ||
1857 | #ifdef DEBUG_ESP_CMDS | ||
1858 | printk("esp%d: last ESP cmds [", esp->esp_id); | ||
1859 | i = (esp->espcmdent - 1) & 31; | ||
1860 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1861 | i = (i - 1) & 31; | ||
1862 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1863 | i = (i - 1) & 31; | ||
1864 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1865 | i = (i - 1) & 31; | ||
1866 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1867 | printk("]\n"); | ||
1868 | #endif /* (DEBUG_ESP_CMDS) */ | ||
1869 | |||
1870 | if (SCptr) { | ||
1871 | ESPLOG(("esp%d: current command ", esp->esp_id)); | ||
1872 | esp_dump_cmd(SCptr); | ||
1873 | } | ||
1874 | ESPLOG(("\n")); | ||
1875 | SCptr = esp->disconnected_SC; | ||
1876 | ESPLOG(("esp%d: disconnected ", esp->esp_id)); | ||
1877 | while (SCptr) { | ||
1878 | esp_dump_cmd(SCptr); | ||
1879 | SCptr = (struct scsi_cmnd *) SCptr->host_scribble; | ||
1880 | } | ||
1881 | ESPLOG(("\n")); | ||
1882 | } | ||
1883 | |||
1884 | /* Abort a command. The host_lock is acquired by caller. */ | ||
1885 | static int esp_abort(struct scsi_cmnd *SCptr) | ||
1886 | { | ||
1887 | struct esp *esp = (struct esp *) SCptr->device->host->hostdata; | ||
1888 | int don; | ||
1889 | |||
1890 | ESPLOG(("esp%d: Aborting command\n", esp->esp_id)); | ||
1891 | esp_dump_state(esp); | ||
1892 | |||
1893 | /* Wheee, if this is the current command on the bus, the | ||
1894 | * best we can do is assert ATN and wait for msgout phase. | ||
1895 | * This should even fix a hung SCSI bus when we lose state | ||
1896 | * in the driver and timeout because the eventual phase change | ||
1897 | * will cause the ESP to (eventually) give an interrupt. | ||
1898 | */ | ||
1899 | if (esp->current_SC == SCptr) { | ||
1900 | esp->cur_msgout[0] = ABORT; | ||
1901 | esp->msgout_len = 1; | ||
1902 | esp->msgout_ctr = 0; | ||
1903 | esp_cmd(esp, ESP_CMD_SATN); | ||
1904 | return SUCCESS; | ||
1905 | } | ||
1906 | |||
1907 | /* If it is still in the issue queue then we can safely | ||
1908 | * call the completion routine and report abort success. | ||
1909 | */ | ||
1910 | don = (sbus_readl(esp->dregs + DMA_CSR) & DMA_INT_ENAB); | ||
1911 | if (don) { | ||
1912 | ESP_INTSOFF(esp->dregs); | ||
1913 | } | ||
1914 | if (esp->issue_SC) { | ||
1915 | struct scsi_cmnd **prev, *this; | ||
1916 | for (prev = (&esp->issue_SC), this = esp->issue_SC; | ||
1917 | this != NULL; | ||
1918 | prev = (struct scsi_cmnd **) &(this->host_scribble), | ||
1919 | this = (struct scsi_cmnd *) this->host_scribble) { | ||
1920 | |||
1921 | if (this == SCptr) { | ||
1922 | *prev = (struct scsi_cmnd *) this->host_scribble; | ||
1923 | this->host_scribble = NULL; | ||
1924 | |||
1925 | esp_release_dmabufs(esp, this); | ||
1926 | this->result = DID_ABORT << 16; | ||
1927 | this->scsi_done(this); | ||
1928 | |||
1929 | if (don) | ||
1930 | ESP_INTSON(esp->dregs); | ||
1931 | |||
1932 | return SUCCESS; | ||
1933 | } | ||
1934 | } | ||
1935 | } | ||
1936 | |||
1937 | /* Yuck, the command to abort is disconnected, it is not | ||
1938 | * worth trying to abort it now if something else is live | ||
1939 | * on the bus at this time. So, we let the SCSI code wait | ||
1940 | * a little bit and try again later. | ||
1941 | */ | ||
1942 | if (esp->current_SC) { | ||
1943 | if (don) | ||
1944 | ESP_INTSON(esp->dregs); | ||
1945 | return FAILED; | ||
1946 | } | ||
1947 | |||
1948 | /* It's disconnected, we have to reconnect to re-establish | ||
1949 | * the nexus and tell the device to abort. However, we really | ||
1950 | * cannot 'reconnect' per se. Don't try to be fancy, just | ||
1951 | * indicate failure, which causes our caller to reset the whole | ||
1952 | * bus. | ||
1953 | */ | ||
1954 | |||
1955 | if (don) | ||
1956 | ESP_INTSON(esp->dregs); | ||
1957 | |||
1958 | return FAILED; | ||
1959 | } | ||
1960 | |||
1961 | /* We've sent ESP_CMD_RS to the ESP and the interrupt has just | ||
1962 | * arrived, indicating the end of the SCSI bus reset. Our job | ||
1963 | * is to clean out the command queues and begin re-execution | ||
1964 | * of SCSI commands once more. | ||
1965 | */ | ||
1966 | static int esp_finish_reset(struct esp *esp) | ||
1967 | { | ||
1968 | struct scsi_cmnd *sp = esp->current_SC; | ||
1969 | |||
1970 | /* Clean up currently executing command, if any. */ | ||
1971 | if (sp != NULL) { | ||
1972 | esp->current_SC = NULL; | ||
1973 | |||
1974 | esp_release_dmabufs(esp, sp); | ||
1975 | sp->result = (DID_RESET << 16); | ||
1976 | |||
1977 | sp->scsi_done(sp); | ||
1978 | } | ||
1979 | |||
1980 | /* Clean up disconnected queue, they have been invalidated | ||
1981 | * by the bus reset. | ||
1982 | */ | ||
1983 | if (esp->disconnected_SC) { | ||
1984 | while ((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) { | ||
1985 | esp_release_dmabufs(esp, sp); | ||
1986 | sp->result = (DID_RESET << 16); | ||
1987 | |||
1988 | sp->scsi_done(sp); | ||
1989 | } | ||
1990 | } | ||
1991 | |||
1992 | /* SCSI bus reset is complete. */ | ||
1993 | esp->resetting_bus = 0; | ||
1994 | wake_up(&esp->reset_queue); | ||
1995 | |||
1996 | /* Ok, now it is safe to get commands going once more. */ | ||
1997 | if (esp->issue_SC) | ||
1998 | esp_exec_cmd(esp); | ||
1999 | |||
2000 | return do_intr_end; | ||
2001 | } | ||
2002 | |||
2003 | static int esp_do_resetbus(struct esp *esp) | ||
2004 | { | ||
2005 | ESPLOG(("esp%d: Resetting scsi bus\n", esp->esp_id)); | ||
2006 | esp->resetting_bus = 1; | ||
2007 | esp_cmd(esp, ESP_CMD_RS); | ||
2008 | |||
2009 | return do_intr_end; | ||
2010 | } | ||
2011 | |||
2012 | /* Reset ESP chip, reset hanging bus, then kill active and | ||
2013 | * disconnected commands for targets without soft reset. | ||
2014 | * | ||
2015 | * The host_lock is acquired by caller. | ||
2016 | */ | ||
2017 | static int esp_reset(struct scsi_cmnd *SCptr) | ||
2018 | { | ||
2019 | struct esp *esp = (struct esp *) SCptr->device->host->hostdata; | ||
2020 | |||
2021 | spin_lock_irq(esp->ehost->host_lock); | ||
2022 | (void) esp_do_resetbus(esp); | ||
2023 | spin_unlock_irq(esp->ehost->host_lock); | ||
2024 | |||
2025 | wait_event(esp->reset_queue, (esp->resetting_bus == 0)); | ||
2026 | |||
2027 | return SUCCESS; | ||
2028 | } | ||
2029 | |||
2030 | /* Internal ESP done function. */ | ||
2031 | static void esp_done(struct esp *esp, int error) | ||
2032 | { | ||
2033 | struct scsi_cmnd *done_SC = esp->current_SC; | ||
2034 | |||
2035 | esp->current_SC = NULL; | ||
2036 | |||
2037 | esp_release_dmabufs(esp, done_SC); | ||
2038 | done_SC->result = error; | ||
2039 | |||
2040 | done_SC->scsi_done(done_SC); | ||
2041 | |||
2042 | /* Bus is free, issue any commands in the queue. */ | ||
2043 | if (esp->issue_SC && !esp->current_SC) | ||
2044 | esp_exec_cmd(esp); | ||
2045 | |||
2046 | } | ||
2047 | |||
2048 | /* Wheee, ESP interrupt engine. */ | ||
2049 | |||
2050 | /* Forward declarations. */ | ||
2051 | static int esp_do_phase_determine(struct esp *esp); | ||
2052 | static int esp_do_data_finale(struct esp *esp); | ||
2053 | static int esp_select_complete(struct esp *esp); | ||
2054 | static int esp_do_status(struct esp *esp); | ||
2055 | static int esp_do_msgin(struct esp *esp); | ||
2056 | static int esp_do_msgindone(struct esp *esp); | ||
2057 | static int esp_do_msgout(struct esp *esp); | ||
2058 | static int esp_do_cmdbegin(struct esp *esp); | ||
2059 | |||
2060 | #define sreg_datainp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DIP) | ||
2061 | #define sreg_dataoutp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DOP) | ||
2062 | |||
2063 | /* Read any bytes found in the FAS366 fifo, storing them into | ||
2064 | * the ESP driver software state structure. | ||
2065 | */ | ||
2066 | static void hme_fifo_read(struct esp *esp) | ||
2067 | { | ||
2068 | u8 count = 0; | ||
2069 | u8 status = esp->sreg; | ||
2070 | |||
2071 | /* Cannot safely frob the fifo in the following cases, but | ||
2072 | * we must always read the fifo when the reselect interrupt | ||
2073 | * is pending. | ||
2074 | */ | ||
2075 | if (((esp->ireg & ESP_INTR_RSEL) == 0) && | ||
2076 | (sreg_datainp(status) || | ||
2077 | sreg_dataoutp(status) || | ||
2078 | (esp->current_SC && | ||
2079 | esp->current_SC->SCp.phase == in_data_done))) { | ||
2080 | ESPHME(("<wkaround_skipped>")); | ||
2081 | } else { | ||
2082 | unsigned long fcnt = sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES; | ||
2083 | |||
2084 | /* The HME stores bytes in multiples of 2 in the fifo. */ | ||
2085 | ESPHME(("hme_fifo[fcnt=%d", (int)fcnt)); | ||
2086 | while (fcnt) { | ||
2087 | esp->hme_fifo_workaround_buffer[count++] = | ||
2088 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2089 | esp->hme_fifo_workaround_buffer[count++] = | ||
2090 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2091 | ESPHME(("<%02x,%02x>", esp->hme_fifo_workaround_buffer[count-2], esp->hme_fifo_workaround_buffer[count-1])); | ||
2092 | fcnt--; | ||
2093 | } | ||
2094 | if (sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_F1BYTE) { | ||
2095 | ESPHME(("<poke_byte>")); | ||
2096 | sbus_writeb(0, esp->eregs + ESP_FDATA); | ||
2097 | esp->hme_fifo_workaround_buffer[count++] = | ||
2098 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2099 | ESPHME(("<%02x,0x00>", esp->hme_fifo_workaround_buffer[count-1])); | ||
2100 | ESPHME(("CMD_FLUSH")); | ||
2101 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2102 | } else { | ||
2103 | ESPHME(("no_xtra_byte")); | ||
2104 | } | ||
2105 | } | ||
2106 | ESPHME(("wkarnd_cnt=%d]", (int)count)); | ||
2107 | esp->hme_fifo_workaround_count = count; | ||
2108 | } | ||
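A concrete trace of the model this workaround assumes, with hypothetical data: suppose the target latched five message bytes.

/* FFLAGS then reports fcnt = 2 full pairs, so the loop above drains
 * four bytes; ESP_STAT2_F1BYTE flags the odd remainder, the zero
 * poked into ESP_FDATA completes the half pair so the fifth real
 * byte can be read back, and the fifo is flushed.  Result:
 * hme_fifo_workaround_count = 5.
 */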
2109 | |||
2110 | static inline void hme_fifo_push(struct esp *esp, u8 *bytes, u8 count) | ||
2111 | { | ||
2112 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2113 | while (count) { | ||
2114 | u8 tmp = *bytes++; | ||
2115 | sbus_writeb(tmp, esp->eregs + ESP_FDATA); | ||
2116 | sbus_writeb(0, esp->eregs + ESP_FDATA); | ||
2117 | count--; | ||
2118 | } | ||
2119 | } | ||
2120 | |||
2121 | /* We try to avoid some interrupts by jumping ahead to see if the ESP | ||
2122 | * has gotten far enough yet. Hence the following. | ||
2123 | */ | ||
2124 | static inline int skipahead1(struct esp *esp, struct scsi_cmnd *scp, | ||
2125 | int prev_phase, int new_phase) | ||
2126 | { | ||
2127 | if (scp->SCp.sent_command != prev_phase) | ||
2128 | return 0; | ||
2129 | if (ESP_IRQ_P(esp->dregs)) { | ||
2130 | /* Yes, we are able to save an interrupt. */ | ||
2131 | if (esp->erev == fashme) | ||
2132 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
2133 | esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR)); | ||
2134 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2135 | if (esp->erev == fashme) { | ||
2136 | /* This chip is really losing. */ | ||
2137 | ESPHME(("HME[")); | ||
2138 | /* Must latch fifo before reading the interrupt | ||
2139 | * register else garbage ends up in the FIFO | ||
2140 | * which confuses the driver utterly. | ||
2141 | * Happy Meal indeed.... | ||
2142 | */ | ||
2143 | ESPHME(("fifo_workaround]")); | ||
2144 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2145 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2146 | hme_fifo_read(esp); | ||
2147 | } | ||
2148 | if (!(esp->ireg & ESP_INTR_SR)) | ||
2149 | return 0; | ||
2150 | else | ||
2151 | return do_reset_complete; | ||
2152 | } | ||
2153 | /* Ho hum, target is taking forever... */ | ||
2154 | scp->SCp.sent_command = new_phase; /* so we don't recurse... */ | ||
2155 | return do_intr_end; | ||
2156 | } | ||
2157 | |||
2158 | static inline int skipahead2(struct esp *esp, struct scsi_cmnd *scp, | ||
2159 | int prev_phase1, int prev_phase2, int new_phase) | ||
2160 | { | ||
2161 | if (scp->SCp.sent_command != prev_phase1 && | ||
2162 | scp->SCp.sent_command != prev_phase2) | ||
2163 | return 0; | ||
2164 | if (ESP_IRQ_P(esp->dregs)) { | ||
2165 | /* Yes, we are able to save an interrupt. */ | ||
2166 | if (esp->erev == fashme) | ||
2167 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
2168 | esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR)); | ||
2169 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2170 | if (esp->erev == fashme) { | ||
2171 | /* This chip is really losing. */ | ||
2172 | ESPHME(("HME[")); | ||
2173 | |||
2174 | /* Must latch fifo before reading the interrupt | ||
2175 | * register else garbage ends up in the FIFO | ||
2176 | * which confuses the driver utterly. | ||
2177 | * Happy Meal indeed.... | ||
2178 | */ | ||
2179 | ESPHME(("fifo_workaround]")); | ||
2180 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2181 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2182 | hme_fifo_read(esp); | ||
2183 | } | ||
2184 | if (!(esp->ireg & ESP_INTR_SR)) | ||
2185 | return 0; | ||
2186 | else | ||
2187 | return do_reset_complete; | ||
2188 | } | ||
2189 | /* Ho hum, target is taking forever... */ | ||
2190 | scp->SCp.sent_command = new_phase; /* so we don't recurse... */ | ||
2191 | return do_intr_end; | ||
2192 | } | ||
2193 | |||
2194 | /* Now some dma helpers. */ | ||
2195 | static void dma_setup(struct esp *esp, __u32 addr, int count, int write) | ||
2196 | { | ||
2197 | u32 nreg = sbus_readl(esp->dregs + DMA_CSR); | ||
2198 | |||
2199 | if (write) | ||
2200 | nreg |= DMA_ST_WRITE; | ||
2201 | else | ||
2202 | nreg &= ~(DMA_ST_WRITE); | ||
2203 | nreg |= DMA_ENABLE; | ||
2204 | sbus_writel(nreg, esp->dregs + DMA_CSR); | ||
2205 | if (esp->dma->revision == dvmaesc1) { | ||
2206 | /* This ESC gate array sucks! */ | ||
2207 | __u32 src = addr; | ||
2208 | __u32 dest = src + count; | ||
2209 | |||
2210 | if (dest & (PAGE_SIZE - 1)) | ||
2211 | count = PAGE_ALIGN(count); | ||
2212 | sbus_writel(count, esp->dregs + DMA_COUNT); | ||
2213 | } | ||
2214 | sbus_writel(addr, esp->dregs + DMA_ADDR); | ||
2215 | } | ||
2216 | |||
2217 | static void dma_drain(struct esp *esp) | ||
2218 | { | ||
2219 | u32 tmp; | ||
2220 | |||
2221 | if (esp->dma->revision == dvmahme) | ||
2222 | return; | ||
2223 | if ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_FIFO_ISDRAIN) { | ||
2224 | switch (esp->dma->revision) { | ||
2225 | default: | ||
2226 | tmp |= DMA_FIFO_STDRAIN; | ||
2227 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2228 | |||
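/* fall through to the drain-completion wait shared with rev3/ESC */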
2229 | case dvmarev3: | ||
2230 | case dvmaesc1: | ||
2231 | while (sbus_readl(esp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN) | ||
2232 | udelay(1); | ||
2233 | } | ||
2234 | } | ||
2235 | } | ||
2236 | |||
2237 | static void dma_invalidate(struct esp *esp) | ||
2238 | { | ||
2239 | u32 tmp; | ||
2240 | |||
2241 | if (esp->dma->revision == dvmahme) { | ||
2242 | sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
2243 | |||
2244 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
2245 | (DMA_PARITY_OFF | DMA_2CLKS | | ||
2246 | DMA_SCSI_DISAB | DMA_INT_ENAB)) & | ||
2247 | ~(DMA_ST_WRITE | DMA_ENABLE)); | ||
2248 | |||
2249 | sbus_writel(0, esp->dregs + DMA_CSR); | ||
2250 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
2251 | |||
2252 | /* This is necessary to avoid having the SCSI channel | ||
2253 | * engine lock up on us. | ||
2254 | */ | ||
2255 | sbus_writel(0, esp->dregs + DMA_ADDR); | ||
2256 | } else { | ||
2257 | while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ) | ||
2258 | udelay(1); | ||
2259 | |||
2260 | tmp &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); | ||
2261 | tmp |= DMA_FIFO_INV; | ||
2262 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2263 | tmp &= ~DMA_FIFO_INV; | ||
2264 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2265 | } | ||
2266 | } | ||
2267 | |||
2268 | static inline void dma_flashclear(struct esp *esp) | ||
2269 | { | ||
2270 | dma_drain(esp); | ||
2271 | dma_invalidate(esp); | ||
2272 | } | ||
2273 | |||
2274 | static int dma_can_transfer(struct esp *esp, struct scsi_cmnd *sp) | ||
2275 | { | ||
2276 | __u32 base, end, sz; | ||
2277 | |||
2278 | if (esp->dma->revision == dvmarev3) { | ||
2279 | sz = sp->SCp.this_residual; | ||
2280 | if (sz > 0x1000000) | ||
2281 | sz = 0x1000000; | ||
2282 | } else { | ||
2283 | base = ((__u32)((unsigned long)sp->SCp.ptr)); | ||
2284 | base &= (0x1000000 - 1); | ||
2285 | end = (base + sp->SCp.this_residual); | ||
2286 | if (end > 0x1000000) | ||
2287 | end = 0x1000000; | ||
2288 | sz = (end - base); | ||
2289 | } | ||
2290 | return sz; | ||
2291 | } | ||
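| ||| 
| /* A minimal, self-contained sketch of the non-dvmarev3 branch above, | ||
| * assuming the same 16MB (0x1000000) DVMA window; clamp_to_dvma_window() | ||
| * is an illustrative name, not part of this driver. | ||
| */ | ||
| static unsigned int clamp_to_dvma_window(unsigned int base, unsigned int residual) | ||
| { | ||
| unsigned int start = base & (0x1000000 - 1); | ||
| unsigned int end = start + residual; | ||
| ||| 
| if (end > 0x1000000) | ||
| end = 0x1000000; | ||
| /* e.g. start 0xffff00, residual 0x400: only 0x100 bytes fit */ | ||
| return end - start; | ||
| } | ||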
2292 | |||
2293 | /* Misc. esp helper macros. */ | ||
2294 | #define esp_setcount(__eregs, __cnt, __hme) \ | ||
| do { \ | ||
2295 | sbus_writeb(((__cnt)&0xff), (__eregs) + ESP_TCLOW); \ | ||
2296 | sbus_writeb((((__cnt)>>8)&0xff), (__eregs) + ESP_TCMED); \ | ||
2297 | if (__hme) { \ | ||
2298 | sbus_writeb((((__cnt)>>16)&0xff), (__eregs) + FAS_RLO); \ | ||
2299 | sbus_writeb(0, (__eregs) + FAS_RHI); \ | ||
2300 | } \ | ||
| } while (0) | ||
2301 | |||
2302 | #define esp_getcount(__eregs, __hme) \ | ||
2303 | ((sbus_readb((__eregs) + ESP_TCLOW)&0xff) | \ | ||
2304 | ((sbus_readb((__eregs) + ESP_TCMED)&0xff) << 8) | \ | ||
2305 | ((__hme) ? sbus_readb((__eregs) + FAS_RLO) << 16 : 0)) | ||
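| ||| 
| /* A worked example of the 24-bit count layout behind esp_setcount() | ||
| * and esp_getcount(); split_count() is illustrative only. With | ||
| * cnt == 0x012345: ESP_TCLOW gets 0x45, ESP_TCMED 0x23, and on HME | ||
| * FAS_RLO gets 0x01. | ||
| */ | ||
| static unsigned int split_count(unsigned int cnt) | ||
| { | ||
| unsigned char tclow = cnt & 0xff; /* -> ESP_TCLOW */ | ||
| unsigned char tcmed = (cnt >> 8) & 0xff; /* -> ESP_TCMED */ | ||
| unsigned char rlo = (cnt >> 16) & 0xff; /* -> FAS_RLO, HME only */ | ||
| ||| 
| /* esp_getcount() reassembles the registers the same way. */ | ||
| return tclow | (tcmed << 8) | (rlo << 16); | ||
| } | ||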
2306 | |||
2307 | #define fcount(__esp) \ | ||
2308 | (((__esp)->erev == fashme) ? \ | ||
2309 | (__esp)->hme_fifo_workaround_count : \ | ||
2310 | sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_FBYTES) | ||
2311 | |||
2312 | #define fnzero(__esp) \ | ||
2313 | (((__esp)->erev == fashme) ? 0 : \ | ||
2314 | sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_ONOTZERO) | ||
2315 | |||
2316 | /* XXX speculative nops unnecessary when continuing amidst a data phase | ||
2317 | * XXX even on esp100!!! another case of flooding the bus with I/O reg | ||
2318 | * XXX writes... | ||
2319 | */ | ||
2320 | #define esp_maybe_nop(__esp) \ | ||
| do { \ | ||
2321 | if ((__esp)->erev == esp100) \ | ||
2322 | esp_cmd((__esp), ESP_CMD_NULL); \ | ||
| } while (0) | ||
2323 | |||
2324 | #define sreg_to_dataphase(__sreg) \ | ||
2325 | ((((__sreg) & ESP_STAT_PMASK) == ESP_DOP) ? in_dataout : in_datain) | ||
2326 | |||
2327 | /* The ESP100, when in synchronous data phase, can mistake a long final | ||
2328 | * REQ pulse from the target as an extra byte: it places whatever is on | ||
2329 | * the data lines into the fifo. For now, we will assume when this | ||
2330 | * happens that the target is a bit quirky and we don't want to | ||
2331 | * be talking synchronously to it anyway. Regardless, we need to | ||
2332 | * tell the ESP to eat the extraneous byte so that we can proceed | ||
2333 | * to the next phase. | ||
2334 | */ | ||
2335 | static int esp100_sync_hwbug(struct esp *esp, struct scsi_cmnd *sp, int fifocnt) | ||
2336 | { | ||
2337 | /* Do not touch this piece of code. */ | ||
2338 | if ((!(esp->erev == esp100)) || | ||
2339 | (!(sreg_datainp((esp->sreg = sbus_readb(esp->eregs + ESP_STATUS))) && | ||
2340 | !fifocnt) && | ||
2341 | !(sreg_dataoutp(esp->sreg) && !fnzero(esp)))) { | ||
2342 | if (sp->SCp.phase == in_dataout) | ||
2343 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2344 | return 0; | ||
2345 | } else { | ||
2346 | /* Async mode for this guy. */ | ||
2347 | build_sync_nego_msg(esp, 0, 0); | ||
2348 | |||
2349 | /* Ack the bogus byte, but set ATN first. */ | ||
2350 | esp_cmd(esp, ESP_CMD_SATN); | ||
2351 | esp_cmd(esp, ESP_CMD_MOK); | ||
2352 | return 1; | ||
2353 | } | ||
2354 | } | ||
2355 | |||
2356 | /* This closes the window during a selection with a reselect pending. Because | ||
2357 | * we use DMA for the selection process, the FIFO should hold the correct | ||
2358 | * contents if we get reselected during this process, so we just need to | ||
2359 | * ack the possible illegal cmd interrupt pending on the esp100. | ||
2360 | */ | ||
2361 | static inline int esp100_reconnect_hwbug(struct esp *esp) | ||
2362 | { | ||
2363 | u8 tmp; | ||
2364 | |||
2365 | if (esp->erev != esp100) | ||
2366 | return 0; | ||
2367 | tmp = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2368 | if (tmp & ESP_INTR_SR) | ||
2369 | return 1; | ||
2370 | return 0; | ||
2371 | } | ||
2372 | |||
2373 | /* This verifies the BUSID bits during a reselection so that we know which | ||
2374 | * target is talking to us. | ||
2375 | */ | ||
2376 | static inline int reconnect_target(struct esp *esp) | ||
2377 | { | ||
2378 | int it, me = esp->scsi_id_mask, targ = 0; | ||
2379 | |||
2380 | if (2 != fcount(esp)) | ||
2381 | return -1; | ||
2382 | if (esp->erev == fashme) { | ||
2383 | /* HME does not latch its own BUS ID bits during | ||
2384 | * a reselection. Also the target number is given | ||
2385 | * as an unsigned char, not as a sole bit number | ||
2386 | * like the other ESPs do. | ||
2387 | * Happy Meal indeed.... | ||
2388 | */ | ||
2389 | targ = esp->hme_fifo_workaround_buffer[0]; | ||
2390 | } else { | ||
2391 | it = sbus_readb(esp->eregs + ESP_FDATA); | ||
2392 | if (!(it & me)) | ||
2393 | return -1; | ||
2394 | it &= ~me; | ||
2395 | if (!it || (it & (it - 1))) | ||
2396 | return -1; /* need exactly one bit besides our own */ | ||
2397 | while (!(it & 1)) | ||
2398 | targ++, it >>= 1; | ||
2399 | } | ||
2400 | return targ; | ||
2401 | } | ||
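| ||| 
| /* The BUSID decode above, restated as a self-contained sketch; | ||
| * decode_reselect_id() is an illustrative name. fifo_byte is the | ||
| * raw reselection ID byte, my_mask the initiator's own ID bit. | ||
| */ | ||
| static int decode_reselect_id(unsigned char fifo_byte, unsigned char my_mask) | ||
| { | ||
| int targ = 0; | ||
| ||| 
| if (!(fifo_byte & my_mask)) | ||
| return -1; /* our own ID bit must be present */ | ||
| fifo_byte &= ~my_mask; | ||
| if (!fifo_byte || (fifo_byte & (fifo_byte - 1))) | ||
| return -1; /* exactly one target bit allowed */ | ||
| while (!(fifo_byte & 1)) { | ||
| targ++; | ||
| fifo_byte >>= 1; | ||
| } | ||
| return targ; /* e.g. remaining bit 0x08 -> target 3 */ | ||
| } | ||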
2402 | |||
2403 | /* This verifies the identify from the target so that we know which lun is | ||
2404 | * being reconnected. | ||
2405 | */ | ||
2406 | static inline int reconnect_lun(struct esp *esp) | ||
2407 | { | ||
2408 | int lun; | ||
2409 | |||
2410 | if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) | ||
2411 | return -1; | ||
2412 | if (esp->erev == fashme) | ||
2413 | lun = esp->hme_fifo_workaround_buffer[1]; | ||
2414 | else | ||
2415 | lun = sbus_readb(esp->eregs + ESP_FDATA); | ||
2416 | |||
2417 | /* Yes, you read this correctly. We report a lun of zero | ||
2418 | * if we see a parity error. The ESP reports parity error for | ||
2419 | * the lun byte, and this is the only way we can hope to recover | ||
2420 | * because the target is still connected. | ||
2421 | */ | ||
2422 | if (esp->sreg & ESP_STAT_PERR) | ||
2423 | return 0; | ||
2424 | |||
2425 | /* Check for illegal bits being set in the lun. */ | ||
2426 | if ((lun & 0x40) || !(lun & 0x80)) | ||
2427 | return -1; | ||
2428 | |||
2429 | return lun & 7; | ||
2430 | } | ||
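| ||| 
| /* The SCSI-2 IDENTIFY layout relied upon above: bit 7 must be set, | ||
| * bit 6 (LUNTAR) must be clear, bits 2:0 carry the lun. A sketch, | ||
| * with decode_identify() as an illustrative name: | ||
| */ | ||
| static int decode_identify(unsigned char msg) | ||
| { | ||
| if (!(msg & 0x80) || (msg & 0x40)) | ||
| return -1; /* not an identify we can handle */ | ||
| return msg & 7; /* e.g. 0x81 -> lun 1 */ | ||
| } | ||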
2431 | |||
2432 | /* This puts the driver in a state where it can revitalize a command that | ||
2433 | * is being continued due to reselection. | ||
2434 | */ | ||
2435 | static inline void esp_connect(struct esp *esp, struct scsi_cmnd *sp) | ||
2436 | { | ||
2437 | struct esp_device *esp_dev = sp->device->hostdata; | ||
2438 | |||
2439 | if (esp->prev_soff != esp_dev->sync_max_offset || | ||
2440 | esp->prev_stp != esp_dev->sync_min_period || | ||
2441 | (esp->erev > esp100a && | ||
2442 | esp->prev_cfg3 != esp->config3[sp->device->id])) { | ||
2443 | esp->prev_soff = esp_dev->sync_max_offset; | ||
2444 | esp->prev_stp = esp_dev->sync_min_period; | ||
2445 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
2446 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
2447 | if (esp->erev > esp100a) { | ||
2448 | esp->prev_cfg3 = esp->config3[sp->device->id]; | ||
2449 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
2450 | } | ||
2451 | } | ||
2452 | esp->current_SC = sp; | ||
2453 | } | ||
2454 | |||
2455 | /* This places the current working command back into the issue queue | ||
2456 | * when we receive a reselection amidst a selection attempt. | ||
2457 | */ | ||
2458 | static inline void esp_reconnect(struct esp *esp, struct scsi_cmnd *sp) | ||
2459 | { | ||
2460 | if (!esp->disconnected_SC) | ||
2461 | ESPLOG(("esp%d: Weird, being reselected but disconnected " | ||
2462 | "command queue is empty.\n", esp->esp_id)); | ||
2463 | esp->snip = 0; | ||
2464 | esp->current_SC = NULL; | ||
2465 | sp->SCp.phase = not_issued; | ||
2466 | append_SC(&esp->issue_SC, sp); | ||
2467 | } | ||
2468 | |||
2469 | /* Begin message in phase. */ | ||
2470 | static int esp_do_msgin(struct esp *esp) | ||
2471 | { | ||
2472 | /* Must be very careful with the fifo on the HME */ | ||
2473 | if ((esp->erev != fashme) || | ||
2474 | !(sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_FEMPTY)) | ||
2475 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2476 | esp_maybe_nop(esp); | ||
2477 | esp_cmd(esp, ESP_CMD_TI); | ||
2478 | esp->msgin_len = 1; | ||
2479 | esp->msgin_ctr = 0; | ||
2480 | esp_advance_phase(esp->current_SC, in_msgindone); | ||
2481 | return do_work_bus; | ||
2482 | } | ||
2483 | |||
2484 | /* This uses various DMA csr fields and the fifo flags count value to | ||
2485 | * determine how many bytes were successfully sent/received by the ESP. | ||
2486 | */ | ||
2487 | static inline int esp_bytes_sent(struct esp *esp, int fifo_count) | ||
2488 | { | ||
2489 | int rval = sbus_readl(esp->dregs + DMA_ADDR) - esp->esp_command_dvma; | ||
2490 | |||
2491 | if (esp->dma->revision == dvmarev1) | ||
2492 | rval -= (4 - ((sbus_readl(esp->dregs + DMA_CSR) & DMA_READ_AHEAD)>>11)); | ||
2493 | return rval - fifo_count; | ||
2494 | } | ||
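| ||| 
| /* A worked example with hypothetical numbers: if esp_command_dvma | ||
| * is 0x1000, DMA_ADDR reads back 0x100c and fifo_count is 3, then | ||
| * 0x100c - 0x1000 - 3 = 9 command bytes actually reached the target | ||
| * (less the dvmarev1 read-ahead correction when that applies). | ||
| */ | ||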
2495 | |||
2496 | static inline void advance_sg(struct scsi_cmnd *sp) | ||
2497 | { | ||
2498 | ++sp->SCp.buffer; | ||
2499 | --sp->SCp.buffers_residual; | ||
2500 | sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer); | ||
2501 | sp->SCp.ptr = (char *)((unsigned long)sg_dma_address(sp->SCp.buffer)); | ||
2502 | } | ||
2503 | |||
2504 | /* Please note that the way I've coded these routines is that I _always_ | ||
2505 | * check for a disconnect during any and all information transfer | ||
2506 | * phases. The SCSI standard states that the target _can_ cause a BUS | ||
2507 | * FREE condition by dropping all MSG/CD/IO/BSY signals. Also note | ||
2508 | * that during information transfer phases the target controls every | ||
2509 | * change in phase; the only thing the initiator can do is "ask" for | ||
2510 | * a message out phase by driving ATN true. The target can, and sometimes | ||
2511 | * will, completely ignore this request so we cannot assume anything when | ||
2512 | * we try to force a message out phase to abort/reset a target. Most of | ||
2513 | * the time the target will eventually be nice and go to message out, so | ||
2514 | * we may have to hold on to our state about what we want to tell the target | ||
2515 | * for some period of time. | ||
2516 | */ | ||
2517 | |||
2518 | /* I think I have things working here correctly. Even partial transfers | ||
2519 | * within a buffer or sub-buffer should not upset us at all no matter | ||
2520 | * how badly the target and/or ESP fucks things up. | ||
2521 | */ | ||
2522 | static int esp_do_data(struct esp *esp) | ||
2523 | { | ||
2524 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2525 | int thisphase, hmuch; | ||
2526 | |||
2527 | ESPDATA(("esp_do_data: ")); | ||
2528 | esp_maybe_nop(esp); | ||
2529 | thisphase = sreg_to_dataphase(esp->sreg); | ||
2530 | esp_advance_phase(SCptr, thisphase); | ||
2531 | ESPDATA(("newphase<%s> ", (thisphase == in_datain) ? "DATAIN" : "DATAOUT")); | ||
2532 | hmuch = dma_can_transfer(esp, SCptr); | ||
2533 | if (hmuch > (64 * 1024) && (esp->erev != fashme)) | ||
2534 | hmuch = (64 * 1024); | ||
2535 | ESPDATA(("hmuch<%d> ", hmuch)); | ||
2536 | esp->current_transfer_size = hmuch; | ||
2537 | |||
2538 | if (esp->erev == fashme) { | ||
2539 | u32 tmp = esp->prev_hme_dmacsr; | ||
2540 | |||
2541 | /* Always set the ESP count registers first. */ | ||
2542 | esp_setcount(esp->eregs, hmuch, 1); | ||
2543 | |||
2544 | /* Get the DMA csr computed. */ | ||
2545 | tmp |= (DMA_SCSI_DISAB | DMA_ENABLE); | ||
2546 | if (thisphase == in_datain) | ||
2547 | tmp |= DMA_ST_WRITE; | ||
2548 | else | ||
2549 | tmp &= ~(DMA_ST_WRITE); | ||
2550 | esp->prev_hme_dmacsr = tmp; | ||
2551 | |||
2552 | ESPDATA(("DMA|TI --> do_intr_end\n")); | ||
2553 | if (thisphase == in_datain) { | ||
2554 | sbus_writel(hmuch, esp->dregs + DMA_COUNT); | ||
2555 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2556 | } else { | ||
2557 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2558 | sbus_writel(hmuch, esp->dregs + DMA_COUNT); | ||
2559 | } | ||
2560 | sbus_writel((__u32)((unsigned long)SCptr->SCp.ptr), esp->dregs+DMA_ADDR); | ||
2561 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
2562 | } else { | ||
2563 | esp_setcount(esp->eregs, hmuch, 0); | ||
2564 | dma_setup(esp, ((__u32)((unsigned long)SCptr->SCp.ptr)), | ||
2565 | hmuch, (thisphase == in_datain)); | ||
2566 | ESPDATA(("DMA|TI --> do_intr_end\n")); | ||
2567 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2568 | } | ||
2569 | return do_intr_end; | ||
2570 | } | ||
2571 | |||
2572 | /* See how successful the data transfer was. */ | ||
2573 | static int esp_do_data_finale(struct esp *esp) | ||
2574 | { | ||
2575 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2576 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
2577 | int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0; | ||
2578 | |||
2579 | ESPDATA(("esp_do_data_finale: ")); | ||
2580 | |||
2581 | if (SCptr->SCp.phase == in_datain) { | ||
2582 | if (esp->sreg & ESP_STAT_PERR) { | ||
2583 | /* Yuck, parity error. The ESP asserts ATN | ||
2584 | * so that we can go to message out phase | ||
2585 | * immediately and inform the target that | ||
2586 | * something bad happened. | ||
2587 | */ | ||
2588 | ESPLOG(("esp%d: data bad parity detected.\n", | ||
2589 | esp->esp_id)); | ||
2590 | esp->cur_msgout[0] = INITIATOR_ERROR; | ||
2591 | esp->msgout_len = 1; | ||
2592 | } | ||
2593 | dma_drain(esp); | ||
2594 | } | ||
2595 | dma_invalidate(esp); | ||
2596 | |||
2597 | /* This could happen for the above parity error case. */ | ||
2598 | if (esp->ireg != ESP_INTR_BSERV) { | ||
2599 | /* Please go to msgout phase, please please please... */ | ||
2600 | ESPLOG(("esp%d: !BSERV after data, probably to msgout\n", | ||
2601 | esp->esp_id)); | ||
2602 | return esp_do_phase_determine(esp); | ||
2603 | } | ||
2604 | |||
2605 | /* Check for partial transfers and other horrible events. | ||
2606 | * Note, here we read the real fifo flags register even | ||
2607 | * on HME broken adapters because we skip the HME fifo | ||
2608 | * workaround code in esp_handle() if we are doing data | ||
2609 | * phase things. We don't want to fuck directly with | ||
2610 | * the fifo like that, especially if doing synchronous | ||
2611 | * transfers! Also, we will need to double the count on | ||
2612 | * HME if we are doing wide transfers, as the HME fifo | ||
2613 | * will move and count 16-bit quantities during wide data. | ||
2614 | * SMCC _and_ Qlogic can both bite me. | ||
2615 | */ | ||
2616 | fifocnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES); | ||
2617 | if (esp->erev != fashme) | ||
2618 | ecount = esp_getcount(esp->eregs, 0); | ||
2619 | bytes_sent = esp->current_transfer_size; | ||
2620 | |||
2621 | ESPDATA(("trans_sz(%d), ", bytes_sent)); | ||
2622 | if (esp->erev == fashme) { | ||
2623 | if (!(esp->sreg & ESP_STAT_TCNT)) { | ||
2624 | ecount = esp_getcount(esp->eregs, 1); | ||
2625 | bytes_sent -= ecount; | ||
2626 | } | ||
2627 | |||
2628 | /* Always subtract any cruft remaining in the FIFO. */ | ||
2629 | if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) | ||
2630 | fifocnt <<= 1; | ||
2631 | if (SCptr->SCp.phase == in_dataout) | ||
2632 | bytes_sent -= fifocnt; | ||
2633 | |||
2634 | /* I have an IBM disk which exhibits the following | ||
2635 | * behavior during writes to it. It disconnects in | ||
2636 | * the middle of a partial transfer, the current sglist | ||
2637 | * buffer is 1024 bytes, the disk stops data transfer | ||
2638 | * at 512 bytes. | ||
2639 | * | ||
2640 | * However the FAS366 reports that 32 more bytes were | ||
2641 | * transferred than really were. This is precisely | ||
2642 | * the size of a fully loaded FIFO in wide scsi mode. | ||
2643 | * The FIFO state recorded indicates that it is empty. | ||
2644 | * | ||
2645 | * I have no idea if this is a bug in the FAS366 chip | ||
2646 | * or a bug in the firmware on this IBM disk. In any | ||
2647 | * event the following seems to be a good workaround. -DaveM | ||
2648 | */ | ||
2649 | if (bytes_sent != esp->current_transfer_size && | ||
2650 | SCptr->SCp.phase == in_dataout) { | ||
2651 | int mask = (64 - 1); | ||
2652 | |||
2653 | if ((esp->prev_cfg3 & ESP_CONFIG3_EWIDE) == 0) | ||
2654 | mask >>= 1; | ||
2655 | |||
2656 | if (bytes_sent & mask) | ||
2657 | bytes_sent -= (bytes_sent & mask); | ||
2658 | } | ||
2659 | } else { | ||
2660 | if (!(esp->sreg & ESP_STAT_TCNT)) | ||
2661 | bytes_sent -= ecount; | ||
2662 | if (SCptr->SCp.phase == in_dataout) | ||
2663 | bytes_sent -= fifocnt; | ||
2664 | } | ||
2665 | |||
2666 | ESPDATA(("bytes_sent(%d), ", bytes_sent)); | ||
2667 | |||
2668 | /* If we were in synchronous mode, check for peculiarities. */ | ||
2669 | if (esp->erev == fashme) { | ||
2670 | if (esp_dev->sync_max_offset) { | ||
2671 | if (SCptr->SCp.phase == in_dataout) | ||
2672 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2673 | } else { | ||
2674 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2675 | } | ||
2676 | } else { | ||
2677 | if (esp_dev->sync_max_offset) | ||
2678 | bogus_data = esp100_sync_hwbug(esp, SCptr, fifocnt); | ||
2679 | else | ||
2680 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2681 | } | ||
2682 | |||
2683 | /* Until we are sure of what has happened, we are certainly | ||
2684 | * in the dark. | ||
2685 | */ | ||
2686 | esp_advance_phase(SCptr, in_the_dark); | ||
2687 | |||
2688 | if (bytes_sent < 0) { | ||
2689 | /* I've seen this happen due to lost state in this | ||
2690 | * driver. No idea why it happened, but allowing | ||
2691 | * this value to be negative caused things to | ||
2692 | * lock up. This allows greater chance of recovery. | ||
2693 | * In fact every time I've seen this, it has been | ||
2694 | * a driver bug without question. | ||
2695 | */ | ||
2696 | ESPLOG(("esp%d: yieee, bytes_sent < 0!\n", esp->esp_id)); | ||
2697 | ESPLOG(("esp%d: csz=%d fifocount=%d ecount=%d\n", | ||
2698 | esp->esp_id, | ||
2699 | esp->current_transfer_size, fifocnt, ecount)); | ||
2700 | ESPLOG(("esp%d: use_sg=%d ptr=%p this_residual=%d\n", | ||
2701 | esp->esp_id, | ||
2702 | SCptr->use_sg, SCptr->SCp.ptr, SCptr->SCp.this_residual)); | ||
2703 | ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id, | ||
2704 | SCptr->device->id)); | ||
2705 | SCptr->device->borken = 1; | ||
2706 | esp_dev->sync = 0; | ||
2707 | bytes_sent = 0; | ||
2708 | } | ||
2709 | |||
2710 | /* Update the state of our transfer. */ | ||
2711 | SCptr->SCp.ptr += bytes_sent; | ||
2712 | SCptr->SCp.this_residual -= bytes_sent; | ||
2713 | if (SCptr->SCp.this_residual < 0) { | ||
2714 | /* shit */ | ||
2715 | ESPLOG(("esp%d: Data transfer overrun.\n", esp->esp_id)); | ||
2716 | SCptr->SCp.this_residual = 0; | ||
2717 | } | ||
2718 | |||
2719 | /* Maybe continue. */ | ||
2720 | if (!bogus_data) { | ||
2721 | ESPDATA(("!bogus_data, ")); | ||
2722 | |||
2723 | /* NO MATTER WHAT, we advance the scatterlist; if the | ||
2724 | * target should decide to disconnect in between scatter | ||
2725 | * chunks (which is common) and we had not advanced here, | ||
2726 | * we could die horribly! I used to have the sg | ||
2727 | * advance occur only if we are going back into | ||
2728 | * (or are staying in) a data phase, you can | ||
2729 | * imagine the hell I went through trying to | ||
2730 | * figure this out. | ||
2731 | */ | ||
2732 | if (SCptr->use_sg && !SCptr->SCp.this_residual) | ||
2733 | advance_sg(SCptr); | ||
2734 | if (sreg_datainp(esp->sreg) || sreg_dataoutp(esp->sreg)) { | ||
2735 | ESPDATA(("to more data\n")); | ||
2736 | return esp_do_data(esp); | ||
2737 | } | ||
2738 | ESPDATA(("to new phase\n")); | ||
2739 | return esp_do_phase_determine(esp); | ||
2740 | } | ||
2741 | /* Bogus data, just wait for next interrupt. */ | ||
2742 | ESPLOG(("esp%d: bogus_data during end of data phase\n", | ||
2743 | esp->esp_id)); | ||
2744 | return do_intr_end; | ||
2745 | } | ||
2746 | |||
2747 | /* We received a non-good status return at the end of | ||
2748 | * running a SCSI command. This is used to decide if | ||
2749 | * we should clear our synchronous transfer state for | ||
2750 | * such a device when that happens. | ||
2751 | * | ||
2752 | * The idea is that when spinning up a disk or rewinding | ||
2753 | * a tape, we don't want to go into a loop re-negotiating | ||
2754 | * synchronous capabilities over and over. | ||
2755 | */ | ||
2756 | static int esp_should_clear_sync(struct scsi_cmnd *sp) | ||
2757 | { | ||
2758 | u8 cmd = sp->cmnd[0]; | ||
2759 | |||
2760 | /* These cases are for spinning up a disk and | ||
2761 | * waiting for that spinup to complete. | ||
2762 | */ | ||
2763 | if (cmd == START_STOP) | ||
2764 | return 0; | ||
2765 | |||
2766 | if (cmd == TEST_UNIT_READY) | ||
2767 | return 0; | ||
2768 | |||
2769 | /* One more special case for SCSI tape drives, | ||
2770 | * this is what is used to probe the device for | ||
2771 | * completion of a rewind or tape load operation. | ||
2772 | */ | ||
2773 | if (sp->device->type == TYPE_TAPE) { | ||
2774 | if (cmd == MODE_SENSE) | ||
2775 | return 0; | ||
2776 | } | ||
2777 | |||
2778 | return 1; | ||
2779 | } | ||
2780 | |||
2781 | /* Either a command is completing or a target is dropping off the bus | ||
2782 | * to continue the command in the background so we can do other work. | ||
2783 | */ | ||
2784 | static int esp_do_freebus(struct esp *esp) | ||
2785 | { | ||
2786 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2787 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
2788 | int rval; | ||
2789 | |||
2790 | rval = skipahead2(esp, SCptr, in_status, in_msgindone, in_freeing); | ||
2791 | if (rval) | ||
2792 | return rval; | ||
2793 | if (esp->ireg != ESP_INTR_DC) { | ||
2794 | ESPLOG(("esp%d: Target will not disconnect\n", esp->esp_id)); | ||
2795 | return do_reset_bus; /* target will not drop BSY... */ | ||
2796 | } | ||
2797 | esp->msgout_len = 0; | ||
2798 | esp->prevmsgout = NOP; | ||
2799 | if (esp->prevmsgin == COMMAND_COMPLETE) { | ||
2800 | /* Normal end of nexus. */ | ||
2801 | if (esp->disconnected_SC || (esp->erev == fashme)) | ||
2802 | esp_cmd(esp, ESP_CMD_ESEL); | ||
2803 | |||
2804 | if (SCptr->SCp.Status != GOOD && | ||
2805 | SCptr->SCp.Status != CONDITION_GOOD && | ||
2806 | ((1<<SCptr->device->id) & esp->targets_present) && | ||
2807 | esp_dev->sync && | ||
2808 | esp_dev->sync_max_offset) { | ||
2809 | /* SCSI standard says that the synchronous capabilities | ||
2810 | * should be renegotiated at this point. Most likely | ||
2811 | * we are about to request sense from this target | ||
2812 | * in which case we want to avoid using sync | ||
2813 | * transfers until we are sure of the current target | ||
2814 | * state. | ||
2815 | */ | ||
2816 | ESPMISC(("esp: Status <%d> for target %d lun %d\n", | ||
2817 | SCptr->SCp.Status, SCptr->device->id, SCptr->device->lun)); | ||
2818 | |||
2819 | /* But don't do this when spinning up a disk at | ||
2820 | * boot time while we poll for completion as it | ||
2821 | * fills up the console with messages. Also, tapes | ||
2822 | * can report not ready many times right after | ||
2823 | * loading up a tape. | ||
2824 | */ | ||
2825 | if (esp_should_clear_sync(SCptr) != 0) | ||
2826 | esp_dev->sync = 0; | ||
2827 | } | ||
2828 | ESPDISC(("F<%02x,%02x>", SCptr->device->id, SCptr->device->lun)); | ||
2829 | esp_done(esp, ((SCptr->SCp.Status & 0xff) | | ||
2830 | ((SCptr->SCp.Message & 0xff)<<8) | | ||
2831 | (DID_OK << 16))); | ||
2832 | } else if (esp->prevmsgin == DISCONNECT) { | ||
2833 | /* Normal disconnect. */ | ||
2834 | esp_cmd(esp, ESP_CMD_ESEL); | ||
2835 | ESPDISC(("D<%02x,%02x>", SCptr->device->id, SCptr->device->lun)); | ||
2836 | append_SC(&esp->disconnected_SC, SCptr); | ||
2837 | esp->current_SC = NULL; | ||
2838 | if (esp->issue_SC) | ||
2839 | esp_exec_cmd(esp); | ||
2840 | } else { | ||
2841 | /* Driver bug, we do not expect a disconnect here | ||
2842 | * and should not have advanced the state engine | ||
2843 | * to in_freeing. | ||
2844 | */ | ||
2845 | ESPLOG(("esp%d: last msg not disc and not cmd cmplt.\n", | ||
2846 | esp->esp_id)); | ||
2847 | return do_reset_bus; | ||
2848 | } | ||
2849 | return do_intr_end; | ||
2850 | } | ||
2851 | |||
2852 | /* When a reselect occurs, and we cannot find the command to | ||
2853 | * reconnect to in our queues, we do this. | ||
2854 | */ | ||
2855 | static int esp_bad_reconnect(struct esp *esp) | ||
2856 | { | ||
2857 | struct scsi_cmnd *sp; | ||
2858 | |||
2859 | ESPLOG(("esp%d: Eieeee, reconnecting unknown command!\n", | ||
2860 | esp->esp_id)); | ||
2861 | ESPLOG(("QUEUE DUMP\n")); | ||
2862 | sp = esp->issue_SC; | ||
2863 | ESPLOG(("esp%d: issue_SC[", esp->esp_id)); | ||
2864 | while (sp) { | ||
2865 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2866 | sp = (struct scsi_cmnd *) sp->host_scribble; | ||
2867 | } | ||
2868 | ESPLOG(("]\n")); | ||
2869 | sp = esp->current_SC; | ||
2870 | ESPLOG(("esp%d: current_SC[", esp->esp_id)); | ||
2871 | if (sp) | ||
2872 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2873 | else | ||
2874 | ESPLOG(("<NULL>")); | ||
2875 | ESPLOG(("]\n")); | ||
2876 | sp = esp->disconnected_SC; | ||
2877 | ESPLOG(("esp%d: disconnected_SC[", esp->esp_id)); | ||
2878 | while (sp) { | ||
2879 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2880 | sp = (struct scsi_cmnd *) sp->host_scribble; | ||
2881 | } | ||
2882 | ESPLOG(("]\n")); | ||
2883 | return do_reset_bus; | ||
2884 | } | ||
2885 | |||
2886 | /* Do the needy when a target tries to reconnect to us. */ | ||
2887 | static int esp_do_reconnect(struct esp *esp) | ||
2888 | { | ||
2889 | int lun, target; | ||
2890 | struct scsi_cmnd *SCptr; | ||
2891 | |||
2892 | /* Check for all bogus conditions first. */ | ||
2893 | target = reconnect_target(esp); | ||
2894 | if (target < 0) { | ||
2895 | ESPDISC(("bad bus bits\n")); | ||
2896 | return do_reset_bus; | ||
2897 | } | ||
2898 | lun = reconnect_lun(esp); | ||
2899 | if (lun < 0) { | ||
2900 | ESPDISC(("target=%2x, bad identify msg\n", target)); | ||
2901 | return do_reset_bus; | ||
2902 | } | ||
2903 | |||
2904 | /* Things look ok... */ | ||
2905 | ESPDISC(("R<%02x,%02x>", target, lun)); | ||
2906 | |||
2907 | /* Must not flush FIFO or DVMA on HME. */ | ||
2908 | if (esp->erev != fashme) { | ||
2909 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2910 | if (esp100_reconnect_hwbug(esp)) | ||
2911 | return do_reset_bus; | ||
2912 | esp_cmd(esp, ESP_CMD_NULL); | ||
2913 | } | ||
2914 | |||
2915 | SCptr = remove_SC(&esp->disconnected_SC, (u8) target, (u8) lun); | ||
2916 | if (!SCptr) | ||
2917 | return esp_bad_reconnect(esp); | ||
2918 | |||
2919 | esp_connect(esp, SCptr); | ||
2920 | esp_cmd(esp, ESP_CMD_MOK); | ||
2921 | |||
2922 | if (esp->erev == fashme) | ||
2923 | sbus_writeb(((SCptr->device->id & 0xf) | | ||
2924 | (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT)), | ||
2925 | esp->eregs + ESP_BUSID); | ||
2926 | |||
2927 | /* Reconnect implies a restore pointers operation. */ | ||
2928 | esp_restore_pointers(esp, SCptr); | ||
2929 | |||
2930 | esp->snip = 0; | ||
2931 | esp_advance_phase(SCptr, in_the_dark); | ||
2932 | return do_intr_end; | ||
2933 | } | ||
2934 | |||
2935 | /* End of NEXUS (hopefully), pick up status + message byte then leave if | ||
2936 | * all goes well. | ||
2937 | */ | ||
2938 | static int esp_do_status(struct esp *esp) | ||
2939 | { | ||
2940 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2941 | int intr, rval; | ||
2942 | |||
2943 | rval = skipahead1(esp, SCptr, in_the_dark, in_status); | ||
2944 | if (rval) | ||
2945 | return rval; | ||
2946 | intr = esp->ireg; | ||
2947 | ESPSTAT(("esp_do_status: ")); | ||
2948 | if (intr != ESP_INTR_DC) { | ||
2949 | int message_out = 0; /* for parity problems */ | ||
2950 | |||
2951 | /* Ack the message. */ | ||
2952 | ESPSTAT(("ack msg, ")); | ||
2953 | esp_cmd(esp, ESP_CMD_MOK); | ||
2954 | |||
2955 | if (esp->erev != fashme) { | ||
2956 | dma_flashclear(esp); | ||
2957 | |||
2958 | /* Wait till the first bits settle. */ | ||
2959 | while (esp->esp_command[0] == 0xff) | ||
2960 | udelay(1); | ||
2961 | } else { | ||
2962 | esp->esp_command[0] = esp->hme_fifo_workaround_buffer[0]; | ||
2963 | esp->esp_command[1] = esp->hme_fifo_workaround_buffer[1]; | ||
2964 | } | ||
2965 | |||
2966 | ESPSTAT(("got something, ")); | ||
2967 | /* ESP chimes in with one of | ||
2968 | * | ||
2969 | * 1) function done interrupt: | ||
2970 | * both status and message in bytes | ||
2971 | * are available | ||
2972 | * | ||
2973 | * 2) bus service interrupt: | ||
2974 | * only status byte was acquired | ||
2975 | * | ||
2976 | * 3) Anything else: | ||
2977 | * can't happen, but we test for it | ||
2978 | * anyway | ||
2979 | * | ||
2980 | * ALSO: If bad parity was detected on either | ||
2981 | * the status _or_ the message byte then | ||
2982 | * the ESP has asserted ATN on the bus | ||
2983 | * and we must therefore wait for the | ||
2984 | * next phase change. | ||
2985 | */ | ||
2986 | if (intr & ESP_INTR_FDONE) { | ||
2987 | /* We got it all, hallelujah. */ | ||
2988 | ESPSTAT(("got both, ")); | ||
2989 | SCptr->SCp.Status = esp->esp_command[0]; | ||
2990 | SCptr->SCp.Message = esp->esp_command[1]; | ||
2991 | esp->prevmsgin = SCptr->SCp.Message; | ||
2992 | esp->cur_msgin[0] = SCptr->SCp.Message; | ||
2993 | if (esp->sreg & ESP_STAT_PERR) { | ||
2994 | /* There was bad parity for the | ||
2995 | * message byte, the status byte | ||
2996 | * was ok. | ||
2997 | */ | ||
2998 | message_out = MSG_PARITY_ERROR; | ||
2999 | } | ||
3000 | } else if (intr == ESP_INTR_BSERV) { | ||
3001 | /* Only got status byte. */ | ||
3002 | ESPLOG(("esp%d: got status only, ", esp->esp_id)); | ||
3003 | if (!(esp->sreg & ESP_STAT_PERR)) { | ||
3004 | SCptr->SCp.Status = esp->esp_command[0]; | ||
3005 | SCptr->SCp.Message = 0xff; | ||
3006 | } else { | ||
3007 | /* The status byte had bad parity. | ||
3008 | * We leave the scsi_pointer Status | ||
3009 | * field alone as we set it to a default | ||
3010 | * of CHECK_CONDITION in esp_queue. | ||
3011 | */ | ||
3012 | message_out = INITIATOR_ERROR; | ||
3013 | } | ||
3014 | } else { | ||
3015 | /* This shouldn't happen ever. */ | ||
3016 | ESPSTAT(("got bolixed\n")); | ||
3017 | esp_advance_phase(SCptr, in_the_dark); | ||
3018 | return esp_do_phase_determine(esp); | ||
3019 | } | ||
3020 | |||
3021 | if (!message_out) { | ||
3022 | ESPSTAT(("status=%2x msg=%2x, ", SCptr->SCp.Status, | ||
3023 | SCptr->SCp.Message)); | ||
3024 | if (SCptr->SCp.Message == COMMAND_COMPLETE) { | ||
3025 | ESPSTAT(("and was COMMAND_COMPLETE\n")); | ||
3026 | esp_advance_phase(SCptr, in_freeing); | ||
3027 | return esp_do_freebus(esp); | ||
3028 | } else { | ||
3029 | ESPLOG(("esp%d: and _not_ COMMAND_COMPLETE\n", | ||
3030 | esp->esp_id)); | ||
3031 | esp->msgin_len = esp->msgin_ctr = 1; | ||
3032 | esp_advance_phase(SCptr, in_msgindone); | ||
3033 | return esp_do_msgindone(esp); | ||
3034 | } | ||
3035 | } else { | ||
3036 | /* With luck we'll be able to let the target | ||
3037 | * know that bad parity happened; it will know | ||
3038 | * which byte caused the problems and send it | ||
3039 | * again. For the case where the status byte | ||
3040 | * receives bad parity, I do not believe most | ||
3041 | * targets recover very well. We'll see. | ||
3042 | */ | ||
3043 | ESPLOG(("esp%d: bad parity somewhere mout=%2x\n", | ||
3044 | esp->esp_id, message_out)); | ||
3045 | esp->cur_msgout[0] = message_out; | ||
3046 | esp->msgout_len = esp->msgout_ctr = 1; | ||
3047 | esp_advance_phase(SCptr, in_the_dark); | ||
3048 | return esp_do_phase_determine(esp); | ||
3049 | } | ||
3050 | } else { | ||
3051 | /* If we disconnect now, all hell breaks loose. */ | ||
3052 | ESPLOG(("esp%d: whoops, disconnect\n", esp->esp_id)); | ||
3053 | esp_advance_phase(SCptr, in_the_dark); | ||
3054 | return esp_do_phase_determine(esp); | ||
3055 | } | ||
3056 | } | ||
3057 | |||
3058 | static int esp_enter_status(struct esp *esp) | ||
3059 | { | ||
3060 | u8 thecmd = ESP_CMD_ICCSEQ; | ||
3061 | |||
3062 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3063 | if (esp->erev != fashme) { | ||
3064 | u32 tmp; | ||
3065 | |||
3066 | esp->esp_command[0] = esp->esp_command[1] = 0xff; | ||
3067 | sbus_writeb(2, esp->eregs + ESP_TCLOW); | ||
3068 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
3069 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
3070 | tmp |= (DMA_ST_WRITE | DMA_ENABLE); | ||
3071 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3072 | if (esp->dma->revision == dvmaesc1) | ||
3073 | sbus_writel(0x100, esp->dregs + DMA_COUNT); | ||
3074 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
3075 | thecmd |= ESP_CMD_DMA; | ||
3076 | } | ||
3077 | esp_cmd(esp, thecmd); | ||
3078 | esp_advance_phase(esp->current_SC, in_status); | ||
3079 | |||
3080 | return esp_do_status(esp); | ||
3081 | } | ||
3082 | |||
3083 | static int esp_disconnect_amidst_phases(struct esp *esp) | ||
3084 | { | ||
3085 | struct scsi_cmnd *sp = esp->current_SC; | ||
3086 | struct esp_device *esp_dev = sp->device->hostdata; | ||
3087 | |||
3088 | /* This means real problems if we see this | ||
3089 | * here, unless we were actually trying | ||
3090 | * to force the device to abort/reset. | ||
3091 | */ | ||
3092 | ESPLOG(("esp%d Disconnect amidst phases, ", esp->esp_id)); | ||
3093 | ESPLOG(("pphase<%s> cphase<%s>, ", | ||
3094 | phase_string(sp->SCp.phase), | ||
3095 | phase_string(sp->SCp.sent_command))); | ||
3096 | |||
3097 | if (esp->disconnected_SC != NULL || (esp->erev == fashme)) | ||
3098 | esp_cmd(esp, ESP_CMD_ESEL); | ||
3099 | |||
3100 | switch (esp->cur_msgout[0]) { | ||
3101 | default: | ||
3102 | /* We didn't expect this to happen at all. */ | ||
3103 | ESPLOG(("device is bolixed\n")); | ||
3104 | esp_advance_phase(sp, in_tgterror); | ||
3105 | esp_done(esp, (DID_ERROR << 16)); | ||
3106 | break; | ||
3107 | |||
3108 | case BUS_DEVICE_RESET: | ||
3109 | ESPLOG(("device reset successful\n")); | ||
3110 | esp_dev->sync_max_offset = 0; | ||
3111 | esp_dev->sync_min_period = 0; | ||
3112 | esp_dev->sync = 0; | ||
3113 | esp_advance_phase(sp, in_resetdev); | ||
3114 | esp_done(esp, (DID_RESET << 16)); | ||
3115 | break; | ||
3116 | |||
3117 | case ABORT: | ||
3118 | ESPLOG(("device abort successful\n")); | ||
3119 | esp_advance_phase(sp, in_abortone); | ||
3120 | esp_done(esp, (DID_ABORT << 16)); | ||
3121 | break; | ||
3122 | |||
3123 | } | ||
3124 | return do_intr_end; | ||
3125 | } | ||
3126 | |||
3127 | static int esp_enter_msgout(struct esp *esp) | ||
3128 | { | ||
3129 | esp_advance_phase(esp->current_SC, in_msgout); | ||
3130 | return esp_do_msgout(esp); | ||
3131 | } | ||
3132 | |||
3133 | static int esp_enter_msgin(struct esp *esp) | ||
3134 | { | ||
3135 | esp_advance_phase(esp->current_SC, in_msgin); | ||
3136 | return esp_do_msgin(esp); | ||
3137 | } | ||
3138 | |||
3139 | static int esp_enter_cmd(struct esp *esp) | ||
3140 | { | ||
3141 | esp_advance_phase(esp->current_SC, in_cmdbegin); | ||
3142 | return esp_do_cmdbegin(esp); | ||
3143 | } | ||
3144 | |||
3145 | static int esp_enter_badphase(struct esp *esp) | ||
3146 | { | ||
3147 | ESPLOG(("esp%d: Bizarre bus phase %2x.\n", esp->esp_id, | ||
3148 | esp->sreg & ESP_STAT_PMASK)); | ||
3149 | return do_reset_bus; | ||
3150 | } | ||
3151 | |||
3152 | typedef int (*espfunc_t)(struct esp *); | ||
3153 | |||
3154 | static espfunc_t phase_vector[] = { | ||
3155 | esp_do_data, /* ESP_DOP */ | ||
3156 | esp_do_data, /* ESP_DIP */ | ||
3157 | esp_enter_cmd, /* ESP_CMDP */ | ||
3158 | esp_enter_status, /* ESP_STATP */ | ||
3159 | esp_enter_badphase, /* ESP_STAT_PMSG */ | ||
3160 | esp_enter_badphase, /* ESP_STAT_PMSG | ESP_STAT_PIO */ | ||
3161 | esp_enter_msgout, /* ESP_MOP */ | ||
3162 | esp_enter_msgin, /* ESP_MIP */ | ||
3163 | }; | ||
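| ||| 
| /* For reference, the index is the three SCSI phase bits from the | ||
| * status register, (MSG << 2) | (CD << 1) | IO, assuming the usual | ||
| * ESP encoding: 0 data-out, 1 data-in, 2 command, 3 status, 6 | ||
| * message-out, 7 message-in; 4 and 5 do not name real phases. | ||
| */ | ||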
3164 | |||
3165 | /* The target has control of the bus and we have to see where it has | ||
3166 | * taken us. | ||
3167 | */ | ||
3168 | static int esp_do_phase_determine(struct esp *esp) | ||
3169 | { | ||
3170 | if ((esp->ireg & ESP_INTR_DC) != 0) | ||
3171 | return esp_disconnect_amidst_phases(esp); | ||
3172 | return phase_vector[esp->sreg & ESP_STAT_PMASK](esp); | ||
3173 | } | ||
3174 | |||
3175 | /* First interrupt after exec'ing a cmd comes here. */ | ||
3176 | static int esp_select_complete(struct esp *esp) | ||
3177 | { | ||
3178 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3179 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
3180 | int cmd_bytes_sent, fcnt; | ||
3181 | |||
3182 | if (esp->erev != fashme) | ||
3183 | esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS); | ||
3184 | |||
3185 | if (esp->erev == fashme) | ||
3186 | fcnt = esp->hme_fifo_workaround_count; | ||
3187 | else | ||
3188 | fcnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES); | ||
3189 | |||
3190 | cmd_bytes_sent = esp_bytes_sent(esp, fcnt); | ||
3191 | dma_invalidate(esp); | ||
3192 | |||
3193 | /* Let's check to see if a reselect happened | ||
3194 | * while we were trying to select. This must | ||
3195 | * be checked first. | ||
3196 | */ | ||
3197 | if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { | ||
3198 | esp_reconnect(esp, SCptr); | ||
3199 | return esp_do_reconnect(esp); | ||
3200 | } | ||
3201 | |||
3202 | /* Looks like things worked, we should see a bus service & | ||
3203 | * a function complete interrupt at this point. Note we | ||
3204 | * are doing a direct comparison because we don't want to | ||
3205 | * be fooled into thinking selection was successful if | ||
3206 | * ESP_INTR_DC is set, see below. | ||
3207 | */ | ||
3208 | if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { | ||
3209 | /* target speaks... */ | ||
3210 | esp->targets_present |= (1<<SCptr->device->id); | ||
3211 | |||
3212 | /* What if the target ignores the sdtr? */ | ||
3213 | if (esp->snip) | ||
3214 | esp_dev->sync = 1; | ||
3215 | |||
3216 | /* See how far, if at all, we got in getting | ||
3217 | * the information out to the target. | ||
3218 | */ | ||
3219 | switch (esp->seqreg) { | ||
3220 | default: | ||
3221 | /* fall through */ | ||
3222 | case ESP_STEP_ASEL: | ||
3223 | /* Arbitration won, target selected, but | ||
3224 | * we are in some phase which is not command | ||
3225 | * phase nor is it message out phase. | ||
3226 | * | ||
3227 | * XXX We've confused the target, obviously. | ||
3228 | * XXX So clear its state, but we also end | ||
3229 | * XXX up clearing everyone else's. That isn't | ||
3230 | * XXX so nice. I'd like to just reset this | ||
3231 | * XXX target, but if I cannot even get its | ||
3232 | * XXX attention and finish selection to talk | ||
3233 | * XXX to it, there is not much more I can do. | ||
3234 | * XXX If we have a loaded bus we're going to | ||
3235 | * XXX spend the next second or so renegotiating | ||
3236 | * XXX for synchronous transfers. | ||
3237 | */ | ||
3238 | ESPLOG(("esp%d: STEP_ASEL for tgt %d\n", | ||
3239 | esp->esp_id, SCptr->device->id)); | ||
3240 | /* fall through */ | ||
3241 | case ESP_STEP_SID: | ||
3242 | /* Arbitration won, target selected, went | ||
3243 | * to message out phase, sent one message | ||
3244 | * byte, then we stopped. ATN is asserted | ||
3245 | * on the SCSI bus and the target is still | ||
3246 | * there hanging on. This is a legal | ||
3247 | * sequence step if we gave the ESP a select | ||
3248 | * and stop command. | ||
3249 | * | ||
3250 | * XXX See above, I could set the borken flag | ||
3251 | * XXX in the device struct and retry the | ||
3252 | * XXX command. But would that help for | ||
3253 | * XXX tagged capable targets? | ||
3254 | */ | ||
3255 | /* fall through */ | ||
3256 | case ESP_STEP_NCMD: | ||
3257 | /* Arbitration won, target selected, maybe | ||
3258 | * sent the one message byte in message out | ||
3259 | * phase, but we did not go to command phase | ||
3260 | * in the end. Actually, we could have sent | ||
3261 | * only some of the message bytes if we tried | ||
3262 | * to send out the entire identify and tag | ||
3263 | * message using ESP_CMD_SA3. | ||
3264 | */ | ||
3265 | cmd_bytes_sent = 0; | ||
3266 | break; | ||
3267 | |||
3268 | case ESP_STEP_PPC: | ||
3269 | /* No, not the PowerPC pinhead. Arbitration | ||
3270 | * won, all message bytes sent if we went to | ||
3271 | * message out phase, went to command phase | ||
3272 | * but only part of the command was sent. | ||
3273 | * | ||
3274 | * XXX I've seen this, but usually in conjunction | ||
3275 | * XXX with a gross error which appears to have | ||
3276 | * XXX occurred between the time I told the | ||
3277 | * XXX ESP to arbitrate and when I got the | ||
3278 | * XXX interrupt. Could I have misloaded the | ||
3279 | * XXX command bytes into the fifo? Actually, | ||
3280 | * XXX I most likely missed a phase, and therefore | ||
3281 | * XXX went into never never land and didn't even | ||
3282 | * XXX know it. That was the old driver though. | ||
3283 | * XXX What is even more peculiar is that the ESP | ||
3284 | * XXX showed the proper function complete and | ||
3285 | * XXX bus service bits in the interrupt register. | ||
3286 | */ | ||
3287 | /* fall through */ | ||
3288 | case ESP_STEP_FINI4: | ||
3289 | case ESP_STEP_FINI5: | ||
3290 | case ESP_STEP_FINI6: | ||
3291 | case ESP_STEP_FINI7: | ||
3292 | /* Account for the identify message */ | ||
3293 | if (SCptr->SCp.phase == in_slct_norm) | ||
3294 | cmd_bytes_sent -= 1; | ||
3295 | } | ||
3296 | |||
3297 | if (esp->erev != fashme) | ||
3298 | esp_cmd(esp, ESP_CMD_NULL); | ||
3299 | |||
3300 | /* Be careful, we could really get fucked during synchronous | ||
3301 | * data transfers if we try to flush the fifo now. | ||
3302 | */ | ||
3303 | if ((esp->erev != fashme) && /* not a Happy Meal and... */ | ||
3304 | !fcnt && /* Fifo is empty and... */ | ||
3305 | /* either we are not doing synchronous transfers or... */ | ||
3306 | (!esp_dev->sync_max_offset || | ||
3307 | /* We are not going into data in phase. */ | ||
3308 | ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) | ||
3309 | esp_cmd(esp, ESP_CMD_FLUSH); /* flush is safe */ | ||
3310 | |||
3311 | /* See how far we got if this is not a slow command. */ | ||
3312 | if (!esp->esp_slowcmd) { | ||
3313 | if (cmd_bytes_sent < 0) | ||
3314 | cmd_bytes_sent = 0; | ||
3315 | if (cmd_bytes_sent != SCptr->cmd_len) { | ||
3316 | /* Crapola, mark it as a slowcmd | ||
3317 | * so that we have some chance of | ||
3318 | * keeping the command alive, | ||
3319 | * with luck. | ||
3320 | * | ||
3321 | * XXX Actually, if we didn't send it all | ||
3322 | * XXX this means either we didn't set things | ||
3323 | * XXX up properly (driver bug) or the target | ||
3324 | * XXX or the ESP detected parity on one of | ||
3325 | * XXX the command bytes. This makes much | ||
3326 | * XXX more sense, and therefore this code | ||
3327 | * XXX should be changed to send out a | ||
3328 | * XXX parity error message or if the status | ||
3329 | * XXX register shows no parity error then | ||
3330 | * XXX just expect the target to bring the | ||
3331 | * XXX bus into message in phase so that it | ||
3332 | * XXX can send us the parity error message. | ||
3333 | * XXX SCSI sucks... | ||
3334 | */ | ||
3335 | esp->esp_slowcmd = 1; | ||
3336 | esp->esp_scmdp = &(SCptr->cmnd[cmd_bytes_sent]); | ||
3337 | esp->esp_scmdleft = (SCptr->cmd_len - cmd_bytes_sent); | ||
3338 | } | ||
3339 | } | ||
3340 | |||
3341 | /* Now figure out where we went. */ | ||
3342 | esp_advance_phase(SCptr, in_the_dark); | ||
3343 | return esp_do_phase_determine(esp); | ||
3344 | } | ||
3345 | |||
3346 | /* Did the target even make it? */ | ||
3347 | if (esp->ireg == ESP_INTR_DC) { | ||
3348 | /* wheee... nobody there, or they didn't like | ||
3349 | * what we told them to do; clean up. | ||
3350 | */ | ||
3351 | |||
3352 | /* If anyone is off the bus, but working on | ||
3353 | * a command in the background for us, tell | ||
3354 | * the ESP to listen for them. | ||
3355 | */ | ||
3356 | if (esp->disconnected_SC) | ||
3357 | esp_cmd(esp, ESP_CMD_ESEL); | ||
3358 | |||
3359 | if (((1<<SCptr->device->id) & esp->targets_present) && | ||
3360 | esp->seqreg != 0 && | ||
3361 | (esp->cur_msgout[0] == EXTENDED_MESSAGE) && | ||
3362 | (SCptr->SCp.phase == in_slct_msg || | ||
3363 | SCptr->SCp.phase == in_slct_stop)) { | ||
3364 | /* shit */ | ||
3365 | esp->snip = 0; | ||
3366 | ESPLOG(("esp%d: Failed synchronous negotiation for target %d " | ||
3367 | "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun)); | ||
3368 | esp_dev->sync_max_offset = 0; | ||
3369 | esp_dev->sync_min_period = 0; | ||
3370 | esp_dev->sync = 1; /* so we don't negotiate again */ | ||
3371 | |||
3372 | /* Run the command again, this time though we | ||
3373 | * won't try to negotiate for synchronous transfers. | ||
3374 | * | ||
3375 | * XXX I'd like to do something like send an | ||
3376 | * XXX INITIATOR_ERROR or ABORT message to the | ||
3377 | * XXX target to tell it, "Sorry I confused you, | ||
3378 | * XXX please come back and I will be nicer next | ||
3379 | * XXX time". But that requires having the target | ||
3380 | * XXX on the bus, and it has dropped BSY on us. | ||
3381 | */ | ||
3382 | esp->current_SC = NULL; | ||
3383 | esp_advance_phase(SCptr, not_issued); | ||
3384 | prepend_SC(&esp->issue_SC, SCptr); | ||
3385 | esp_exec_cmd(esp); | ||
3386 | return do_intr_end; | ||
3387 | } | ||
3388 | |||
3389 | /* Ok, this is normal; this is what we see during boot | ||
3390 | * or whenever we are scanning the bus for targets. | ||
3391 | * But first make sure that is really what is happening. | ||
3392 | */ | ||
3393 | if (((1<<SCptr->device->id) & esp->targets_present)) { | ||
3394 | ESPLOG(("esp%d: Warning, live target %d not responding to " | ||
3395 | "selection.\n", esp->esp_id, SCptr->device->id)); | ||
3396 | |||
3397 | /* This _CAN_ happen. The SCSI standard states that | ||
3398 | * the target is to _not_ respond to selection if | ||
3399 | * _it_ detects bad parity on the bus for any reason. | ||
3400 | * Therefore, we assume that if we've talked successfully | ||
3401 | * to this target before, bad parity is the problem. | ||
3402 | */ | ||
3403 | esp_done(esp, (DID_PARITY << 16)); | ||
3404 | } else { | ||
3405 | /* Else, there really isn't anyone there. */ | ||
3406 | ESPMISC(("esp: selection failure, maybe nobody there?\n")); | ||
3407 | ESPMISC(("esp: target %d lun %d\n", | ||
3408 | SCptr->device->id, SCptr->device->lun)); | ||
3409 | esp_done(esp, (DID_BAD_TARGET << 16)); | ||
3410 | } | ||
3411 | return do_intr_end; | ||
3412 | } | ||
3413 | |||
3414 | ESPLOG(("esp%d: Selection failure.\n", esp->esp_id)); | ||
3415 | printk("esp%d: Currently -- ", esp->esp_id); | ||
3416 | esp_print_ireg(esp->ireg); printk(" "); | ||
3417 | esp_print_statreg(esp->sreg); printk(" "); | ||
3418 | esp_print_seqreg(esp->seqreg); printk("\n"); | ||
3419 | printk("esp%d: New -- ", esp->esp_id); | ||
3420 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
3421 | esp->seqreg = sbus_readb(esp->eregs + ESP_SSTEP); | ||
3422 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
3423 | esp_print_ireg(esp->ireg); printk(" "); | ||
3424 | esp_print_statreg(esp->sreg); printk(" "); | ||
3425 | esp_print_seqreg(esp->seqreg); printk("\n"); | ||
3426 | ESPLOG(("esp%d: resetting bus\n", esp->esp_id)); | ||
3427 | return do_reset_bus; /* ugh... */ | ||
3428 | } | ||
3429 | |||
3430 | /* Continue reading bytes for msgin phase. */ | ||
3431 | static int esp_do_msgincont(struct esp *esp) | ||
3432 | { | ||
3433 | if (esp->ireg & ESP_INTR_BSERV) { | ||
3434 | /* in the right phase too? */ | ||
3435 | if ((esp->sreg & ESP_STAT_PMASK) == ESP_MIP) { | ||
3436 | /* phew... */ | ||
3437 | esp_cmd(esp, ESP_CMD_TI); | ||
3438 | esp_advance_phase(esp->current_SC, in_msgindone); | ||
3439 | return do_intr_end; | ||
3440 | } | ||
3441 | |||
3442 | /* We changed phase but ESP shows bus service; | ||
3443 | * in this case it is most likely that we, the | ||
3444 | * hacker who has been up for 20hrs straight | ||
3445 | * staring at the screen, drowned in coffee, | ||
3446 | * smelling like wretched cigarette ashes, | ||
3447 | * has miscoded something..... so, try to | ||
3448 | * recover as best we can. | ||
3449 | */ | ||
3450 | ESPLOG(("esp%d: message in mis-carriage.\n", esp->esp_id)); | ||
3451 | } | ||
3452 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3453 | return do_phase_determine; | ||
3454 | } | ||
3455 | |||
3456 | static int check_singlebyte_msg(struct esp *esp) | ||
3457 | { | ||
3458 | esp->prevmsgin = esp->cur_msgin[0]; | ||
3459 | if (esp->cur_msgin[0] & 0x80) { | ||
3460 | /* wheee... */ | ||
3461 | ESPLOG(("esp%d: target sends identify amidst phases\n", | ||
3462 | esp->esp_id)); | ||
3463 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3464 | return 0; | ||
3465 | } else if (((esp->cur_msgin[0] & 0xf0) == 0x20) || | ||
3466 | (esp->cur_msgin[0] == EXTENDED_MESSAGE)) { | ||
3467 | esp->msgin_len = 2; | ||
3468 | esp_advance_phase(esp->current_SC, in_msgincont); | ||
3469 | return 0; | ||
3470 | } | ||
3471 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3472 | switch (esp->cur_msgin[0]) { | ||
3473 | default: | ||
3474 | /* We don't want to hear about it. */ | ||
3475 | ESPLOG(("esp%d: msg %02x which we don't know about\n", esp->esp_id, | ||
3476 | esp->cur_msgin[0])); | ||
3477 | return MESSAGE_REJECT; | ||
3478 | |||
3479 | case NOP: | ||
3480 | ESPLOG(("esp%d: target %d sends a nop\n", esp->esp_id, | ||
3481 | esp->current_SC->device->id)); | ||
3482 | return 0; | ||
3483 | |||
3484 | case RESTORE_POINTERS: | ||
3485 | /* In this case we might also have to backup the | ||
3486 | * "slow command" pointer. It is rare to get such | ||
3487 | * a save/restore pointer sequence so early in the | ||
3488 | * bus transition sequences, but cover it. | ||
3489 | */ | ||
3490 | if (esp->esp_slowcmd) { | ||
3491 | esp->esp_scmdleft = esp->current_SC->cmd_len; | ||
3492 | esp->esp_scmdp = &esp->current_SC->cmnd[0]; | ||
3493 | } | ||
3494 | esp_restore_pointers(esp, esp->current_SC); | ||
3495 | return 0; | ||
3496 | |||
3497 | case SAVE_POINTERS: | ||
3498 | esp_save_pointers(esp, esp->current_SC); | ||
3499 | return 0; | ||
3500 | |||
3501 | case COMMAND_COMPLETE: | ||
3502 | case DISCONNECT: | ||
3503 | /* Freeing the bus, let it go. */ | ||
3504 | esp->current_SC->SCp.phase = in_freeing; | ||
3505 | return 0; | ||
3506 | |||
3507 | case MESSAGE_REJECT: | ||
3508 | ESPMISC(("msg reject, ")); | ||
3509 | if (esp->prevmsgout == EXTENDED_MESSAGE) { | ||
3510 | struct esp_device *esp_dev = esp->current_SC->device->hostdata; | ||
3511 | |||
3512 | /* Doesn't look like this target can | ||
3513 | * do synchronous or WIDE transfers. | ||
3514 | */ | ||
3515 | ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n")); | ||
3516 | esp_dev->sync = 1; | ||
3517 | esp_dev->wide = 1; | ||
3518 | esp_dev->sync_min_period = 0; | ||
3519 | esp_dev->sync_max_offset = 0; | ||
3520 | return 0; | ||
3521 | } else { | ||
3522 | ESPMISC(("not sync nego, sending ABORT\n")); | ||
3523 | return ABORT; | ||
3524 | } | ||
3525 | } | ||
3526 | } | ||
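| ||| 
| /* The message-length rules applied above, as a SCSI-2 sketch; | ||
| * first_byte_len() is an illustrative helper, not part of this | ||
| * driver: 0x80 and up is an IDENTIFY, 0x20-0x2f are two-byte | ||
| * messages, and EXTENDED_MESSAGE (0x01) needs at least the length | ||
| * byte that follows it. | ||
| */ | ||
| static int first_byte_len(unsigned char msg) | ||
| { | ||
| if (msg & 0x80) | ||
| return 1; /* identify */ | ||
| if ((msg & 0xf0) == 0x20 || msg == 0x01) | ||
| return 2; /* two-byte message, or extended header */ | ||
| return 1; /* plain single-byte message */ | ||
| } | ||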
3527 | |||
3528 | /* Target negotiates for synchronous transfers before we do; this | ||
3529 | * is legal although very strange. What is even funnier is that | ||
3530 | * the SCSI2 standard specifically recommends against targets doing | ||
3531 | * this because so many initiators cannot cope with this occurring. | ||
3532 | */ | ||
3533 | static int target_with_ants_in_pants(struct esp *esp, | ||
3534 | struct scsi_cmnd *SCptr, | ||
3535 | struct esp_device *esp_dev) | ||
3536 | { | ||
3537 | if (esp_dev->sync || SCptr->device->borken) { | ||
3538 | /* sorry, no can do */ | ||
3539 | ESPSDTR(("forcing to async, ")); | ||
3540 | build_sync_nego_msg(esp, 0, 0); | ||
3541 | esp_dev->sync = 1; | ||
3542 | esp->snip = 1; | ||
3543 | ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id)); | ||
3544 | esp_advance_phase(SCptr, in_the_dark); | ||
3545 | return EXTENDED_MESSAGE; | ||
3546 | } | ||
3547 | |||
3548 | /* Ok, we'll check them out... */ | ||
3549 | return 0; | ||
3550 | } | ||
3551 | |||
3552 | static void sync_report(struct esp *esp) | ||
3553 | { | ||
3554 | int msg3, msg4; | ||
3555 | char *type; | ||
3556 | |||
3557 | msg3 = esp->cur_msgin[3]; | ||
3558 | msg4 = esp->cur_msgin[4]; | ||
3559 | if (msg4) { | ||
3560 | int hz = 1000000000 / (msg3 * 4); | ||
3561 | int integer = hz / 1000000; | ||
3562 | int fraction = (hz - (integer * 1000000)) / 10000; | ||
3563 | if ((esp->erev == fashme) && | ||
3564 | (esp->config3[esp->current_SC->device->id] & ESP_CONFIG3_EWIDE)) { | ||
3565 | type = "FAST-WIDE"; | ||
3566 | integer <<= 1; | ||
3567 | fraction <<= 1; | ||
| if (fraction >= 100) { | ||
| /* carry hundredths into the integer part */ | ||
| integer++; | ||
| fraction -= 100; | ||
| } | ||
3568 | } else if ((msg3 * 4) < 200) { | ||
3569 | type = "FAST"; | ||
3570 | } else { | ||
3571 | type = "synchronous"; | ||
3572 | } | ||
3573 | |||
3574 | /* Do not transform this back into one big printk | ||
3575 | * again, it triggers a bug in our sparc64-gcc272 | ||
3576 | * sibling call optimization. -DaveM | ||
3577 | */ | ||
3578 | ESPLOG((KERN_INFO "esp%d: target %d ", | ||
3579 | esp->esp_id, esp->current_SC->device->id)); | ||
3580 | ESPLOG(("[period %dns offset %d %d.%02dMHz ", | ||
3581 | (int) msg3 * 4, (int) msg4, | ||
3582 | integer, fraction)); | ||
3583 | ESPLOG(("%s SCSI%s]\n", type, | ||
3584 | (((msg3 * 4) < 200) ? "-II" : ""))); | ||
3585 | } else { | ||
3586 | ESPLOG((KERN_INFO "esp%d: target %d asynchronous\n", | ||
3587 | esp->esp_id, esp->current_SC->device->id)); | ||
3588 | } | ||
3589 | } | ||
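| ||| 
| /* The rate math above, worked through: an SDTR period byte of 25 | ||
| * means 25 * 4 = 100ns per cycle, so 1000000000 / 100 = 10000000Hz, | ||
| * printed as "10.00MHz". period_to_mhz() is illustrative only. | ||
| */ | ||
| static void period_to_mhz(int msg3, int *integer, int *fraction) | ||
| { | ||
| int hz = 1000000000 / (msg3 * 4); | ||
| ||| 
| *integer = hz / 1000000; | ||
| *fraction = (hz - (*integer * 1000000)) / 10000; | ||
| } | ||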
3590 | |||
3591 | static int check_multibyte_msg(struct esp *esp) | ||
3592 | { | ||
3593 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3594 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
3595 | u8 regval = 0; | ||
3596 | int message_out = 0; | ||
3597 | |||
3598 | ESPSDTR(("chk multibyte msg: ")); | ||
3599 | if (esp->cur_msgin[2] == EXTENDED_SDTR) { | ||
3600 | int period = esp->cur_msgin[3]; | ||
3601 | int offset = esp->cur_msgin[4]; | ||
3602 | |||
3603 | ESPSDTR(("is sync nego response, ")); | ||
3604 | if (!esp->snip) { | ||
3605 | int rval; | ||
3606 | |||
3607 | /* Target negotiates first! */ | ||
3608 | ESPSDTR(("target jumps the gun, ")); | ||
3609 | message_out = EXTENDED_MESSAGE; /* we must respond */ | ||
3610 | rval = target_with_ants_in_pants(esp, SCptr, esp_dev); | ||
3611 | if (rval) | ||
3612 | return rval; | ||
3613 | } | ||
3614 | |||
3615 | ESPSDTR(("examining sdtr, ")); | ||
3616 | |||
3617 | /* Offset cannot be larger than ESP fifo size. */ | ||
3618 | if (offset > 15) { | ||
3619 | ESPSDTR(("offset too big %2x, ", offset)); | ||
3620 | offset = 15; | ||
3621 | ESPSDTR(("sending back new offset\n")); | ||
3622 | build_sync_nego_msg(esp, period, offset); | ||
3623 | return EXTENDED_MESSAGE; | ||
3624 | } | ||
3625 | |||
3626 | if (offset && period > esp->max_period) { | ||
3627 | /* Yeee, async for this slow device. */ | ||
3628 | ESPSDTR(("period too long %2x, ", period)); | ||
3629 | build_sync_nego_msg(esp, 0, 0); | ||
3630 | ESPSDTR(("hoping for msgout\n")); | ||
3631 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3632 | return EXTENDED_MESSAGE; | ||
3633 | } else if (offset && period < esp->min_period) { | ||
3634 | ESPSDTR(("period too short %2x, ", period)); | ||
3635 | period = esp->min_period; | ||
3636 | if (esp->erev > esp236) | ||
3637 | regval = 4; | ||
3638 | else | ||
3639 | regval = 5; | ||
3640 | } else if (offset) { | ||
3641 | int tmp; | ||
3642 | |||
3643 | ESPSDTR(("period is ok, ")); | ||
3644 | tmp = esp->ccycle / 1000; | ||
3645 | regval = (((period << 2) + tmp - 1) / tmp); | ||
3646 | if (regval && ((esp->erev == fas100a || | ||
3647 | esp->erev == fas236 || | ||
3648 | esp->erev == fashme))) { | ||
3649 | if (period >= 50) | ||
3650 | regval--; | ||
3651 | } | ||
3652 | } | ||
3653 | |||
3654 | if (offset) { | ||
3655 | u8 bit; | ||
3656 | |||
3657 | esp_dev->sync_min_period = (regval & 0x1f); | ||
3658 | esp_dev->sync_max_offset = (offset | esp->radelay); | ||
3659 | if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) { | ||
3660 | if ((esp->erev == fas100a) || (esp->erev == fashme)) | ||
3661 | bit = ESP_CONFIG3_FAST; | ||
3662 | else | ||
3663 | bit = ESP_CONFIG3_FSCSI; | ||
3664 | if (period < 50) { | ||
3665 | /* On FAS366, if using fast-20 synchronous transfers | ||
3666 | * we need to make sure the REQ/ACK assert/deassert | ||
3667 | * control bits are clear. | ||
3668 | */ | ||
3669 | if (esp->erev == fashme) | ||
3670 | esp_dev->sync_max_offset &= ~esp->radelay; | ||
3671 | esp->config3[SCptr->device->id] |= bit; | ||
3672 | } else { | ||
3673 | esp->config3[SCptr->device->id] &= ~bit; | ||
3674 | } | ||
3675 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3676 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3677 | } | ||
3678 | esp->prev_soff = esp_dev->sync_max_offset; | ||
3679 | esp->prev_stp = esp_dev->sync_min_period; | ||
3680 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
3681 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
3682 | ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n", | ||
3683 | esp_dev->sync_max_offset, | ||
3684 | esp_dev->sync_min_period, | ||
3685 | esp->config3[SCptr->device->id])); | ||
3686 | |||
3687 | esp->snip = 0; | ||
3688 | } else if (esp_dev->sync_max_offset) { | ||
3689 | u8 bit; | ||
3690 | |||
3691 | /* back to async mode */ | ||
3692 | ESPSDTR(("unacceptable sync nego, forcing async\n")); | ||
3693 | esp_dev->sync_max_offset = 0; | ||
3694 | esp_dev->sync_min_period = 0; | ||
3695 | esp->prev_soff = 0; | ||
3696 | esp->prev_stp = 0; | ||
3697 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
3698 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
3699 | if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) { | ||
3700 | if ((esp->erev == fas100a) || (esp->erev == fashme)) | ||
3701 | bit = ESP_CONFIG3_FAST; | ||
3702 | else | ||
3703 | bit = ESP_CONFIG3_FSCSI; | ||
3704 | esp->config3[SCptr->device->id] &= ~bit; | ||
3705 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3706 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3707 | } | ||
3708 | } | ||
3709 | |||
3710 | sync_report(esp); | ||
3711 | |||
3712 | ESPSDTR(("chk multibyte msg: sync is known, ")); | ||
3713 | esp_dev->sync = 1; | ||
3714 | |||
3715 | if (message_out) { | ||
3716 | ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n", | ||
3717 | esp->esp_id)); | ||
3718 | build_sync_nego_msg(esp, period, offset); | ||
3719 | esp_advance_phase(SCptr, in_the_dark); | ||
3720 | return EXTENDED_MESSAGE; | ||
3721 | } | ||
3722 | |||
3723 | ESPSDTR(("returning zero\n")); | ||
3724 | esp_advance_phase(SCptr, in_the_dark); /* ...or else! */ | ||
3725 | return 0; | ||
3726 | } else if (esp->cur_msgin[2] == EXTENDED_WDTR) { | ||
3727 | int size = 8 << esp->cur_msgin[3]; | ||
3728 | |||
3729 | esp->wnip = 0; | ||
3730 | if (esp->erev != fashme) { | ||
3731 | ESPLOG(("esp%d: AIEEE wide msg received and not HME.\n", | ||
3732 | esp->esp_id)); | ||
3733 | message_out = MESSAGE_REJECT; | ||
3734 | } else if (size > 16) { | ||
3735 | ESPLOG(("esp%d: AIEEE wide transfer for %d size " | ||
3736 | "not supported.\n", esp->esp_id, size)); | ||
3737 | message_out = MESSAGE_REJECT; | ||
3738 | } else { | ||
3739 | /* Things look good; let's see what we got. */ | ||
3740 | if (size == 16) { | ||
3741 | /* Set config 3 register for this target. */ | ||
3742 | esp->config3[SCptr->device->id] |= ESP_CONFIG3_EWIDE; | ||
3743 | } else { | ||
3744 | /* Just make sure it was one byte sized. */ | ||
3745 | if (size != 8) { | ||
3746 | ESPLOG(("esp%d: Aieee, wide nego of %d size.\n", | ||
3747 | esp->esp_id, size)); | ||
3748 | message_out = MESSAGE_REJECT; | ||
3749 | goto finish; | ||
3750 | } | ||
3751 | /* Pure paranoia. */ | ||
3752 | esp->config3[SCptr->device->id] &= ~(ESP_CONFIG3_EWIDE); | ||
3753 | } | ||
3754 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3755 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3756 | |||
3757 | /* Regardless, next try for sync transfers. */ | ||
3758 | build_sync_nego_msg(esp, esp->sync_defp, 15); | ||
3759 | esp_dev->sync = 1; | ||
3760 | esp->snip = 1; | ||
3761 | message_out = EXTENDED_MESSAGE; | ||
3762 | } | ||
3763 | } else if (esp->cur_msgin[2] == EXTENDED_MODIFY_DATA_POINTER) { | ||
3764 | ESPLOG(("esp%d: rejecting modify data ptr msg\n", esp->esp_id)); | ||
3765 | message_out = MESSAGE_REJECT; | ||
3766 | } | ||
3767 | finish: | ||
3768 | esp_advance_phase(SCptr, in_the_dark); | ||
3769 | return message_out; | ||
3770 | } | ||
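The regval computation in check_multibyte_msg above is a ceiling division: the SDTR period byte counts 4ns units (hence the period << 2), and esp->ccycle holds one chip clock cycle in 1/1000 ns units, so the quotient is the number of clock ticks per transfer period, rounded up. A minimal sketch of the conversion, assuming those units (the helper name is illustrative, not driver API):

	/* Sketch only: period_4ns is the SDTR period byte (4ns units),
	 * ccycle_ps one chip clock cycle in picoseconds, per this
	 * driver's conventions.
	 */
	static unsigned int sdtr_period_to_stp(unsigned int period_4ns,
	                                       unsigned int ccycle_ps)
	{
		unsigned int tick_ns = ccycle_ps / 1000;   /* one clock tick in ns */
		unsigned int period_ns = period_4ns << 2;  /* SDTR units -> ns */

		/* Round up: a partial tick still costs a whole tick. */
		return (period_ns + tick_ns - 1) / tick_ns;
	}

For a 25MHz chip (40ns tick) and the 10MB/s period of 0x19 (100ns) this gives 3, which then lands in the 5-bit sync_min_period field via the & 0x1f mask above.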
3771 | |||
3772 | static int esp_do_msgindone(struct esp *esp) | ||
3773 | { | ||
3774 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3775 | int message_out = 0, it = 0, rval; | ||
3776 | |||
3777 | rval = skipahead1(esp, SCptr, in_msgin, in_msgindone); | ||
3778 | if (rval) | ||
3779 | return rval; | ||
3780 | if (SCptr->SCp.sent_command != in_status) { | ||
3781 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
3782 | if (esp->msgin_len && (esp->sreg & ESP_STAT_PERR)) { | ||
3783 | message_out = MSG_PARITY_ERROR; | ||
3784 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3785 | } else if (esp->erev != fashme && | ||
3786 | (it = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES)) != 1) { | ||
3787 | /* We certainly dropped the ball somewhere. */ | ||
3788 | message_out = INITIATOR_ERROR; | ||
3789 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3790 | } else if (!esp->msgin_len) { | ||
3791 | if (esp->erev == fashme) | ||
3792 | it = esp->hme_fifo_workaround_buffer[0]; | ||
3793 | else | ||
3794 | it = sbus_readb(esp->eregs + ESP_FDATA); | ||
3795 | esp_advance_phase(SCptr, in_msgincont); | ||
3796 | } else { | ||
3797 | /* it is ok and we want it */ | ||
3798 | if (esp->erev == fashme) | ||
3799 | it = esp->cur_msgin[esp->msgin_ctr] = | ||
3800 | esp->hme_fifo_workaround_buffer[0]; | ||
3801 | else | ||
3802 | it = esp->cur_msgin[esp->msgin_ctr] = | ||
3803 | sbus_readb(esp->eregs + ESP_FDATA); | ||
3804 | esp->msgin_ctr++; | ||
3805 | } | ||
3806 | } else { | ||
3807 | esp_advance_phase(SCptr, in_the_dark); | ||
3808 | return do_work_bus; | ||
3809 | } | ||
3810 | } else { | ||
3811 | it = esp->cur_msgin[0]; | ||
3812 | } | ||
3813 | if (!message_out && esp->msgin_len) { | ||
3814 | if (esp->msgin_ctr < esp->msgin_len) { | ||
3815 | esp_advance_phase(SCptr, in_msgincont); | ||
3816 | } else if (esp->msgin_len == 1) { | ||
3817 | message_out = check_singlebyte_msg(esp); | ||
3818 | } else if (esp->msgin_len == 2) { | ||
3819 | if (esp->cur_msgin[0] == EXTENDED_MESSAGE) { | ||
3820 | if ((it + 2) >= 15) { | ||
3821 | message_out = MESSAGE_REJECT; | ||
3822 | } else { | ||
3823 | esp->msgin_len = (it + 2); | ||
3824 | esp_advance_phase(SCptr, in_msgincont); | ||
3825 | } | ||
3826 | } else { | ||
3827 | message_out = MESSAGE_REJECT; /* foo on you */ | ||
3828 | } | ||
3829 | } else { | ||
3830 | message_out = check_multibyte_msg(esp); | ||
3831 | } | ||
3832 | } | ||
3833 | if (message_out < 0) { | ||
3834 | return -message_out; | ||
3835 | } else if (message_out) { | ||
3836 | if (((message_out != 1) && | ||
3837 | ((message_out < 0x20) || (message_out & 0x80)))) | ||
3838 | esp->msgout_len = 1; | ||
3839 | esp->cur_msgout[0] = message_out; | ||
3840 | esp_cmd(esp, ESP_CMD_SATN); | ||
3841 | esp_advance_phase(SCptr, in_the_dark); | ||
3842 | esp->msgin_len = 0; | ||
3843 | } | ||
3844 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
3845 | esp->sreg &= ~(ESP_STAT_INTR); | ||
3846 | if ((esp->sreg & (ESP_STAT_PMSG|ESP_STAT_PCD)) == (ESP_STAT_PMSG|ESP_STAT_PCD)) | ||
3847 | esp_cmd(esp, ESP_CMD_MOK); | ||
3848 | if ((SCptr->SCp.sent_command == in_msgindone) && | ||
3849 | (SCptr->SCp.phase == in_freeing)) | ||
3850 | return esp_do_freebus(esp); | ||
3851 | return do_intr_end; | ||
3852 | } | ||
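The msgin_len = (it + 2) step above relies on standard SCSI extended-message framing: one EXTENDED_MESSAGE marker byte, one length byte, then that many payload bytes, so the full message is the length byte plus two. Laid out as a struct for illustration only (the driver itself just indexes cur_msgin):

	/* A 5-byte SDTR on the wire: 0x01, 0x03, 0x01, period, offset.
	 * Total length = length byte value (3) + 2 header bytes.
	 */
	struct ext_msg_sdtr {
		u8 marker;  /* EXTENDED_MESSAGE (0x01) */
		u8 length;  /* payload bytes to follow; 3 for SDTR */
		u8 code;    /* EXTENDED_SDTR, EXTENDED_WDTR, ... */
		u8 period;  /* transfer period in 4ns units */
		u8 offset;  /* REQ/ACK offset */
	};

The (it + 2) >= 15 rejection above keeps the total safely within the 16-byte cur_msgin buffer.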
3853 | |||
3854 | static int esp_do_cmdbegin(struct esp *esp) | ||
3855 | { | ||
3856 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3857 | |||
3858 | esp_advance_phase(SCptr, in_cmdend); | ||
3859 | if (esp->erev == fashme) { | ||
3860 | u32 tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
3861 | int i; | ||
3862 | |||
3863 | for (i = 0; i < esp->esp_scmdleft; i++) | ||
3864 | esp->esp_command[i] = *esp->esp_scmdp++; | ||
3865 | esp->esp_scmdleft = 0; | ||
3866 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3867 | esp_setcount(esp->eregs, i, 1); | ||
3868 | esp_cmd(esp, (ESP_CMD_DMA | ESP_CMD_TI)); | ||
3869 | tmp |= (DMA_SCSI_DISAB | DMA_ENABLE); | ||
3870 | tmp &= ~(DMA_ST_WRITE); | ||
3871 | sbus_writel(i, esp->dregs + DMA_COUNT); | ||
3872 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
3873 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3874 | } else { | ||
3875 | u8 tmp; | ||
3876 | |||
3877 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3878 | tmp = *esp->esp_scmdp++; | ||
3879 | esp->esp_scmdleft--; | ||
3880 | sbus_writeb(tmp, esp->eregs + ESP_FDATA); | ||
3881 | esp_cmd(esp, ESP_CMD_TI); | ||
3882 | } | ||
3883 | return do_intr_end; | ||
3884 | } | ||
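On fashme the remaining command bytes go out via DVMA rather than byte-at-a-time programmed I/O: flush the fifo, load the chip transfer count, issue a DMA'd transfer-information command, then point the gate array at the buffer and enable it. The register dance, condensed into an illustrative helper (not a driver API):

	static void dvma_kick(struct esp *esp, u32 dvma_addr, int len)
	{
		u32 csr = sbus_readl(esp->dregs + DMA_CSR);

		esp_cmd(esp, ESP_CMD_FLUSH);              /* clean fifo first */
		esp_setcount(esp->eregs, len, 1);         /* chip-side count */
		esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);   /* DMA'd transfer info */

		csr |= (DMA_SCSI_DISAB | DMA_ENABLE);
		csr &= ~DMA_ST_WRITE;                     /* memory -> SCSI */
		sbus_writel(len, esp->dregs + DMA_COUNT);
		sbus_writel(dvma_addr, esp->dregs + DMA_ADDR);
		sbus_writel(csr, esp->dregs + DMA_CSR);   /* go */
	}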
3885 | |||
3886 | static int esp_do_cmddone(struct esp *esp) | ||
3887 | { | ||
3888 | if (esp->erev == fashme) | ||
3889 | dma_invalidate(esp); | ||
3890 | else | ||
3891 | esp_cmd(esp, ESP_CMD_NULL); | ||
3892 | |||
3893 | if (esp->ireg & ESP_INTR_BSERV) { | ||
3894 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3895 | return esp_do_phase_determine(esp); | ||
3896 | } | ||
3897 | |||
3898 | ESPLOG(("esp%d: in do_cmddone() but didn't get BSERV interrupt.\n", | ||
3899 | esp->esp_id)); | ||
3900 | return do_reset_bus; | ||
3901 | } | ||
3902 | |||
3903 | static int esp_do_msgout(struct esp *esp) | ||
3904 | { | ||
3905 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3906 | switch (esp->msgout_len) { | ||
3907 | case 1: | ||
3908 | if (esp->erev == fashme) | ||
3909 | hme_fifo_push(esp, &esp->cur_msgout[0], 1); | ||
3910 | else | ||
3911 | sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA); | ||
3912 | |||
3913 | esp_cmd(esp, ESP_CMD_TI); | ||
3914 | break; | ||
3915 | |||
3916 | case 2: | ||
3917 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3918 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3919 | |||
3920 | if (esp->erev == fashme) { | ||
3921 | hme_fifo_push(esp, &esp->cur_msgout[0], 2); | ||
3922 | esp_cmd(esp, ESP_CMD_TI); | ||
3923 | } else { | ||
3924 | dma_setup(esp, esp->esp_command_dvma, 2, 0); | ||
3925 | esp_setcount(esp->eregs, 2, 0); | ||
3926 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3927 | } | ||
3928 | break; | ||
3929 | |||
3930 | case 4: | ||
3931 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3932 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3933 | esp->esp_command[2] = esp->cur_msgout[2]; | ||
3934 | esp->esp_command[3] = esp->cur_msgout[3]; | ||
3935 | esp->snip = 1; | ||
3936 | |||
3937 | if (esp->erev == fashme) { | ||
3938 | hme_fifo_push(esp, &esp->cur_msgout[0], 4); | ||
3939 | esp_cmd(esp, ESP_CMD_TI); | ||
3940 | } else { | ||
3941 | dma_setup(esp, esp->esp_command_dvma, 4, 0); | ||
3942 | esp_setcount(esp->eregs, 4, 0); | ||
3943 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3944 | } | ||
3945 | break; | ||
3946 | |||
3947 | case 5: | ||
3948 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3949 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3950 | esp->esp_command[2] = esp->cur_msgout[2]; | ||
3951 | esp->esp_command[3] = esp->cur_msgout[3]; | ||
3952 | esp->esp_command[4] = esp->cur_msgout[4]; | ||
3953 | esp->snip = 1; | ||
3954 | |||
3955 | if (esp->erev == fashme) { | ||
3956 | hme_fifo_push(esp, &esp->cur_msgout[0], 5); | ||
3957 | esp_cmd(esp, ESP_CMD_TI); | ||
3958 | } else { | ||
3959 | dma_setup(esp, esp->esp_command_dvma, 5, 0); | ||
3960 | esp_setcount(esp->eregs, 5, 0); | ||
3961 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3962 | } | ||
3963 | break; | ||
3964 | |||
3965 | default: | ||
3966 | /* whoops */ | ||
3967 | ESPMISC(("bogus msgout, sending NOP\n")); | ||
3968 | esp->cur_msgout[0] = NOP; | ||
3969 | |||
3970 | if (esp->erev == fashme) { | ||
3971 | hme_fifo_push(esp, &esp->cur_msgout[0], 1); | ||
3972 | } else { | ||
3973 | sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA); | ||
3974 | } | ||
3975 | |||
3976 | esp->msgout_len = 1; | ||
3977 | esp_cmd(esp, ESP_CMD_TI); | ||
3978 | break; | ||
3979 | } | ||
3980 | |||
3981 | esp_advance_phase(esp->current_SC, in_msgoutdone); | ||
3982 | return do_intr_end; | ||
3983 | } | ||
3984 | |||
3985 | static int esp_do_msgoutdone(struct esp *esp) | ||
3986 | { | ||
3987 | if (esp->msgout_len > 1) { | ||
3988 | /* XXX HME/FAS ATN deassert workaround required, | ||
3989 | * XXX no DMA flushing, only possible ESP_CMD_FLUSH | ||
3990 | * XXX to kill the fifo. | ||
3991 | */ | ||
3992 | if (esp->erev != fashme) { | ||
3993 | u32 tmp; | ||
3994 | |||
3995 | while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ) | ||
3996 | udelay(1); | ||
3997 | tmp &= ~DMA_ENABLE; | ||
3998 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3999 | dma_invalidate(esp); | ||
4000 | } else { | ||
4001 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
4002 | } | ||
4003 | } | ||
4004 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
4005 | if (esp->erev != fashme) | ||
4006 | esp_cmd(esp, ESP_CMD_NULL); | ||
4007 | switch (esp->sreg & ESP_STAT_PMASK) { | ||
4008 | case ESP_MOP: | ||
4009 | /* whoops, parity error */ | ||
4010 | ESPLOG(("esp%d: still in msgout, parity error assumed\n", | ||
4011 | esp->esp_id)); | ||
4012 | if (esp->msgout_len > 1) | ||
4013 | esp_cmd(esp, ESP_CMD_SATN); | ||
4014 | esp_advance_phase(esp->current_SC, in_msgout); | ||
4015 | return do_work_bus; | ||
4016 | |||
4017 | case ESP_DIP: | ||
4018 | break; | ||
4019 | |||
4020 | default: | ||
4021 | /* Happy Meal fifo is touchy... */ | ||
4022 | if ((esp->erev != fashme) && | ||
4023 | !fcount(esp) && | ||
4024 | !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset)) | ||
4025 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
4026 | break; | ||
4027 | |||
4028 | } | ||
4029 | } else { | ||
4030 | ESPLOG(("esp%d: disconnect, resetting bus\n", esp->esp_id)); | ||
4031 | return do_reset_bus; | ||
4032 | } | ||
4033 | |||
4034 | /* If we sent out a synchronous negotiation message, update | ||
4035 | * our state. | ||
4036 | */ | ||
4037 | if (esp->cur_msgout[2] == EXTENDED_MESSAGE && | ||
4038 | esp->cur_msgout[4] == EXTENDED_SDTR) { | ||
4039 | esp->snip = 1; /* anal retentiveness... */ | ||
4040 | } | ||
4041 | |||
4042 | esp->prevmsgout = esp->cur_msgout[0]; | ||
4043 | esp->msgout_len = 0; | ||
4044 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
4045 | return esp_do_phase_determine(esp); | ||
4046 | } | ||
4047 | |||
4048 | static int esp_bus_unexpected(struct esp *esp) | ||
4049 | { | ||
4050 | ESPLOG(("esp%d: command in weird state %2x\n", | ||
4051 | esp->esp_id, esp->current_SC->SCp.phase)); | ||
4052 | return do_reset_bus; | ||
4053 | } | ||
4054 | |||
4055 | static espfunc_t bus_vector[] = { | ||
4056 | esp_do_data_finale, | ||
4057 | esp_do_data_finale, | ||
4058 | esp_bus_unexpected, | ||
4059 | esp_do_msgin, | ||
4060 | esp_do_msgincont, | ||
4061 | esp_do_msgindone, | ||
4062 | esp_do_msgout, | ||
4063 | esp_do_msgoutdone, | ||
4064 | esp_do_cmdbegin, | ||
4065 | esp_do_cmddone, | ||
4066 | esp_do_status, | ||
4067 | esp_do_freebus, | ||
4068 | esp_do_phase_determine, | ||
4069 | esp_bus_unexpected, | ||
4070 | esp_bus_unexpected, | ||
4071 | esp_bus_unexpected, | ||
4072 | }; | ||
4073 | |||
4074 | /* This is the second tier in our dual-level SCSI state machine. */ | ||
4075 | static int esp_work_bus(struct esp *esp) | ||
4076 | { | ||
4077 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
4078 | unsigned int phase; | ||
4079 | |||
4080 | ESPBUS(("esp_work_bus: ")); | ||
4081 | if (!SCptr) { | ||
4082 | ESPBUS(("reconnect\n")); | ||
4083 | return esp_do_reconnect(esp); | ||
4084 | } | ||
4085 | phase = SCptr->SCp.phase; | ||
4086 | if ((phase & 0xf0) == in_phases_mask) | ||
4087 | return bus_vector[(phase & 0x0f)](esp); | ||
4088 | else if ((phase & 0xf0) == in_slct_mask) | ||
4089 | return esp_select_complete(esp); | ||
4090 | else | ||
4091 | return esp_bus_unexpected(esp); | ||
4092 | } | ||
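The encoding that makes this work: the high nibble of SCp.phase names a state family (in_phases_mask, in_slct_mask) and the low nibble indexes that family's handler table, so dispatch is a mask and an array lookup. In miniature (names illustrative):

	typedef int (*handler_t)(struct esp *);

	/* One mask picks the family's table, the other picks the slot. */
	static int dispatch(struct esp *esp, unsigned int phase,
	                    handler_t table[16])
	{
		return table[phase & 0x0f](esp);
	}

Every slot of bus_vector is populated, with esp_bus_unexpected padding the unused encodings, so a stray low nibble still lands on a defined handler instead of a wild jump.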
4093 | |||
4094 | static espfunc_t isvc_vector[] = { | ||
4095 | NULL, | ||
4096 | esp_do_phase_determine, | ||
4097 | esp_do_resetbus, | ||
4098 | esp_finish_reset, | ||
4099 | esp_work_bus | ||
4100 | }; | ||
4101 | |||
4102 | /* Main interrupt handler for an esp adapter. */ | ||
4103 | static void esp_handle(struct esp *esp) | ||
4104 | { | ||
4105 | struct scsi_cmnd *SCptr; | ||
4106 | int what_next = do_intr_end; | ||
4107 | |||
4108 | SCptr = esp->current_SC; | ||
4109 | |||
4110 | /* Check for errors. */ | ||
4111 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
4112 | esp->sreg &= (~ESP_STAT_INTR); | ||
4113 | if (esp->erev == fashme) { | ||
4114 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
4115 | esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS); | ||
4116 | } | ||
4117 | |||
4118 | if (esp->sreg & (ESP_STAT_SPAM)) { | ||
4119 | /* Gross error, could be due to one of: | ||
4120 | * | ||
4121 | * - top of fifo overwritten, could be because | ||
4122 | * we tried to do a synchronous transfer with | ||
4123 | * an offset greater than ESP fifo size | ||
4124 | * | ||
4125 | * - top of command register overwritten | ||
4126 | * | ||
4127 | * - DMA setup to go in one direction, SCSI | ||
4128 | * bus points in the other, whoops | ||
4129 | * | ||
4130 | * - weird phase change during asynchronous | ||
4131 | * data phase while we are initiator | ||
4132 | */ | ||
4133 | ESPLOG(("esp%d: Gross error sreg=%2x\n", esp->esp_id, esp->sreg)); | ||
4134 | |||
4135 | /* If a command is live on the bus we cannot safely | ||
4136 | * reset the bus, so we'll just let the pieces fall | ||
4137 | * where they may. Here we are hoping that the | ||
4138 | * target will be able to cleanly go away soon | ||
4139 | * so we can safely reset things. | ||
4140 | */ | ||
4141 | if (!SCptr) { | ||
4142 | ESPLOG(("esp%d: No current cmd during gross error, " | ||
4143 | "resetting bus\n", esp->esp_id)); | ||
4144 | what_next = do_reset_bus; | ||
4145 | goto state_machine; | ||
4146 | } | ||
4147 | } | ||
4148 | |||
4149 | if (sbus_readl(esp->dregs + DMA_CSR) & DMA_HNDL_ERROR) { | ||
4150 | /* A DMA gate array error. Here we must | ||
4151 | * be seeing one of two things: either the | ||
4152 | * virtual to physical address translation | ||
4153 | * on the SBUS could not occur, or the | ||
4154 | * translation it did get pointed to a bogus | ||
4155 | * page. Ho hum... | ||
4156 | */ | ||
4157 | ESPLOG(("esp%d: DMA error %08x\n", esp->esp_id, | ||
4158 | sbus_readl(esp->dregs + DMA_CSR))); | ||
4159 | |||
4160 | /* DMA gate array itself must be reset to clear the | ||
4161 | * error condition. | ||
4162 | */ | ||
4163 | esp_reset_dma(esp); | ||
4164 | |||
4165 | what_next = do_reset_bus; | ||
4166 | goto state_machine; | ||
4167 | } | ||
4168 | |||
4169 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); /* Unlatch intr reg */ | ||
4170 | |||
4171 | if (esp->erev == fashme) { | ||
4172 | /* This chip is really losing. */ | ||
4173 | ESPHME(("HME[")); | ||
4174 | |||
4175 | ESPHME(("sreg2=%02x,", esp->sreg2)); | ||
4176 | /* Must latch fifo before reading the interrupt | ||
4177 | * register else garbage ends up in the FIFO | ||
4178 | * which confuses the driver utterly. | ||
4179 | */ | ||
4180 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
4181 | (esp->sreg2 & ESP_STAT2_F1BYTE)) { | ||
4182 | ESPHME(("fifo_workaround]")); | ||
4183 | hme_fifo_read(esp); | ||
4184 | } else { | ||
4185 | ESPHME(("no_fifo_workaround]")); | ||
4186 | } | ||
4187 | } | ||
4188 | |||
4189 | /* No current cmd is only valid at this point when there are | ||
4190 | * commands off the bus or we are trying a reset. | ||
4191 | */ | ||
4192 | if (!SCptr && !esp->disconnected_SC && !(esp->ireg & ESP_INTR_SR)) { | ||
4193 | /* Panic is safe, since current_SC is null. */ | ||
4194 | ESPLOG(("esp%d: no command in esp_handle()\n", esp->esp_id)); | ||
4195 | panic("esp_handle: current_SC == penguin within interrupt!"); | ||
4196 | } | ||
4197 | |||
4198 | if (esp->ireg & (ESP_INTR_IC)) { | ||
4199 | /* Illegal command fed to ESP. Outside of obvious | ||
4200 | * software bugs that could cause this, there is | ||
4201 | * a condition with esp100 where we can confuse the | ||
4202 | * ESP into an erroneous illegal command interrupt | ||
4203 | * because it does not scrape the FIFO properly | ||
4204 | * for reselection. See esp100_reconnect_hwbug() | ||
4205 | * to see how we try very hard to avoid this. | ||
4206 | */ | ||
4207 | ESPLOG(("esp%d: invalid command\n", esp->esp_id)); | ||
4208 | |||
4209 | esp_dump_state(esp); | ||
4210 | |||
4211 | if (SCptr != NULL) { | ||
4212 | /* Devices with very buggy firmware can drop BSY | ||
4213 | * during a scatter list interrupt when using sync | ||
4214 | * mode transfers. We continue the transfer as | ||
4215 | * expected, the target drops the bus, the ESP | ||
4216 | gets confused, and we get an illegal command | ||
4217 | * interrupt because the bus is in the disconnected | ||
4218 | * state now and ESP_CMD_TI is only allowed when | ||
4219 | * a nexus is alive on the bus. | ||
4220 | */ | ||
4221 | ESPLOG(("esp%d: Forcing async and disabling disconnect for " | ||
4222 | "target %d\n", esp->esp_id, SCptr->device->id)); | ||
4223 | SCptr->device->borken = 1; /* foo on you */ | ||
4224 | } | ||
4225 | |||
4226 | what_next = do_reset_bus; | ||
4227 | } else if (!(esp->ireg & ~(ESP_INTR_FDONE | ESP_INTR_BSERV | ESP_INTR_DC))) { | ||
4228 | if (SCptr) { | ||
4229 | unsigned int phase = SCptr->SCp.phase; | ||
4230 | |||
4231 | if (phase & in_phases_mask) { | ||
4232 | what_next = esp_work_bus(esp); | ||
4233 | } else if (phase & in_slct_mask) { | ||
4234 | what_next = esp_select_complete(esp); | ||
4235 | } else { | ||
4236 | ESPLOG(("esp%d: interrupt for no good reason...\n", | ||
4237 | esp->esp_id)); | ||
4238 | what_next = do_intr_end; | ||
4239 | } | ||
4240 | } else { | ||
4241 | ESPLOG(("esp%d: BSERV or FDONE or DC while SCptr==NULL\n", | ||
4242 | esp->esp_id)); | ||
4243 | what_next = do_reset_bus; | ||
4244 | } | ||
4245 | } else if (esp->ireg & ESP_INTR_SR) { | ||
4246 | ESPLOG(("esp%d: SCSI bus reset interrupt\n", esp->esp_id)); | ||
4247 | what_next = do_reset_complete; | ||
4248 | } else if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN)) { | ||
4249 | ESPLOG(("esp%d: AIEEE we have been selected by another initiator!\n", | ||
4250 | esp->esp_id)); | ||
4251 | what_next = do_reset_bus; | ||
4252 | } else if (esp->ireg & ESP_INTR_RSEL) { | ||
4253 | if (SCptr == NULL) { | ||
4254 | /* This is ok. */ | ||
4255 | what_next = esp_do_reconnect(esp); | ||
4256 | } else if (SCptr->SCp.phase & in_slct_mask) { | ||
4257 | /* Only selection code knows how to clean | ||
4258 | * up properly. | ||
4259 | */ | ||
4260 | ESPDISC(("Reselected during selection attempt\n")); | ||
4261 | what_next = esp_select_complete(esp); | ||
4262 | } else { | ||
4263 | ESPLOG(("esp%d: Reselected while bus is busy\n", | ||
4264 | esp->esp_id)); | ||
4265 | what_next = do_reset_bus; | ||
4266 | } | ||
4267 | } | ||
4268 | |||
4269 | /* This is tier-one in our dual level SCSI state machine. */ | ||
4270 | state_machine: | ||
4271 | while (what_next != do_intr_end) { | ||
4272 | if (what_next >= do_phase_determine && | ||
4273 | what_next < do_intr_end) { | ||
4274 | what_next = isvc_vector[what_next](esp); | ||
4275 | } else { | ||
4276 | /* state is completely lost ;-( */ | ||
4277 | ESPLOG(("esp%d: interrupt engine loses state, resetting bus\n", | ||
4278 | esp->esp_id)); | ||
4279 | what_next = do_reset_bus; | ||
4280 | } | ||
4281 | } | ||
4282 | } | ||
4283 | |||
4284 | /* Service only the ESP described by dev_id. */ | ||
4285 | static irqreturn_t esp_intr(int irq, void *dev_id) | ||
4286 | { | ||
4287 | struct esp *esp = dev_id; | ||
4288 | unsigned long flags; | ||
4289 | |||
4290 | spin_lock_irqsave(esp->ehost->host_lock, flags); | ||
4291 | if (ESP_IRQ_P(esp->dregs)) { | ||
4292 | ESP_INTSOFF(esp->dregs); | ||
4293 | |||
4294 | ESPIRQ(("I[%d:%d](", smp_processor_id(), esp->esp_id)); | ||
4295 | esp_handle(esp); | ||
4296 | ESPIRQ((")")); | ||
4297 | |||
4298 | ESP_INTSON(esp->dregs); | ||
4299 | } | ||
4300 | spin_unlock_irqrestore(esp->ehost->host_lock, flags); | ||
4301 | |||
4302 | return IRQ_HANDLED; | ||
4303 | } | ||
4304 | |||
4305 | static int esp_slave_alloc(struct scsi_device *SDptr) | ||
4306 | { | ||
4307 | struct esp_device *esp_dev = | ||
4308 | kmalloc(sizeof(struct esp_device), GFP_ATOMIC); | ||
4309 | |||
4310 | if (!esp_dev) | ||
4311 | return -ENOMEM; | ||
4312 | memset(esp_dev, 0, sizeof(struct esp_device)); | ||
4313 | SDptr->hostdata = esp_dev; | ||
4314 | return 0; | ||
4315 | } | ||
4316 | |||
4317 | static void esp_slave_destroy(struct scsi_device *SDptr) | ||
4318 | { | ||
4319 | struct esp *esp = (struct esp *) SDptr->host->hostdata; | ||
4320 | |||
4321 | esp->targets_present &= ~(1 << SDptr->id); | ||
4322 | kfree(SDptr->hostdata); | ||
4323 | SDptr->hostdata = NULL; | ||
4324 | } | ||
4325 | |||
4326 | static struct scsi_host_template esp_template = { | ||
4327 | .module = THIS_MODULE, | ||
4328 | .name = "esp", | ||
4329 | .info = esp_info, | ||
4330 | .slave_alloc = esp_slave_alloc, | ||
4331 | .slave_destroy = esp_slave_destroy, | ||
4332 | .queuecommand = esp_queue, | ||
4333 | .eh_abort_handler = esp_abort, | ||
4334 | .eh_bus_reset_handler = esp_reset, | ||
4335 | .can_queue = 7, | ||
4336 | .this_id = 7, | ||
4337 | .sg_tablesize = SG_ALL, | ||
4338 | .cmd_per_lun = 1, | ||
4339 | .use_clustering = ENABLE_CLUSTERING, | ||
4340 | .proc_name = "esp", | ||
4341 | .proc_info = esp_proc_info, | ||
4342 | }; | ||
4343 | |||
4344 | #ifndef CONFIG_SUN4 | ||
4345 | static struct of_device_id esp_match[] = { | ||
4346 | { | ||
4347 | .name = "SUNW,esp", | ||
4348 | .data = &esp_template, | ||
4349 | }, | ||
4350 | { | ||
4351 | .name = "SUNW,fas", | ||
4352 | .data = &esp_template, | ||
4353 | }, | ||
4354 | { | ||
4355 | .name = "esp", | ||
4356 | .data = &esp_template, | ||
4357 | }, | ||
4358 | {}, | ||
4359 | }; | ||
4360 | MODULE_DEVICE_TABLE(of, esp_match); | ||
4361 | |||
4362 | static struct of_platform_driver esp_sbus_driver = { | ||
4363 | .name = "esp", | ||
4364 | .match_table = esp_match, | ||
4365 | .probe = esp_sbus_probe, | ||
4366 | .remove = __devexit_p(esp_sbus_remove), | ||
4367 | }; | ||
4368 | #endif | ||
4369 | |||
4370 | static int __init esp_init(void) | ||
4371 | { | ||
4372 | #ifdef CONFIG_SUN4 | ||
4373 | return esp_sun4_probe(&esp_template); | ||
4374 | #else | ||
4375 | return of_register_driver(&esp_sbus_driver, &sbus_bus_type); | ||
4376 | #endif | ||
4377 | } | ||
4378 | |||
4379 | static void __exit esp_exit(void) | ||
4380 | { | ||
4381 | #ifdef CONFIG_SUN4 | ||
4382 | esp_sun4_remove(); | ||
4383 | #else | ||
4384 | of_unregister_driver(&esp_sbus_driver); | ||
4385 | #endif | ||
4386 | } | ||
4387 | |||
4388 | MODULE_DESCRIPTION("ESP Sun SCSI driver"); | ||
4389 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
4390 | MODULE_LICENSE("GPL"); | ||
4391 | MODULE_VERSION(DRV_VERSION); | ||
4392 | |||
4393 | module_init(esp_init); | ||
4394 | module_exit(esp_exit); | ||
diff --git a/drivers/scsi/esp.h b/drivers/scsi/esp.h deleted file mode 100644 index a98cda9121fc..000000000000 --- a/drivers/scsi/esp.h +++ /dev/null | |||
@@ -1,406 +0,0 @@ | |||
1 | /* $Id: esp.h,v 1.29 2001/12/11 04:55:47 davem Exp $ | ||
2 | * esp.h: Defines and structures for the Sparc ESP (Enhanced SCSI | ||
3 | * Processor) driver under Linux. | ||
4 | * | ||
5 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
6 | */ | ||
7 | |||
8 | #ifndef _SPARC_ESP_H | ||
9 | #define _SPARC_ESP_H | ||
10 | |||
11 | /* For dvma controller register definitions. */ | ||
12 | #include <asm/dma.h> | ||
13 | |||
14 | /* The ESP SCSI controllers have their register sets in three | ||
15 | * "classes": | ||
16 | * | ||
17 | * 1) Registers which are both read and write. | ||
18 | * 2) Registers which are read only. | ||
19 | * 3) Registers which are write only. | ||
20 | * | ||
21 | * Yet, they all live within the same IO space. | ||
22 | */ | ||
23 | |||
24 | /* All the ESP registers are one byte each and are accessed longwords | ||
25 | * apart with a big-endian ordering to the bytes. | ||
26 | */ | ||
27 | /* Access Description Offset */ | ||
28 | #define ESP_TCLOW 0x00UL /* rw Low bits of the transfer count 0x00 */ | ||
29 | #define ESP_TCMED 0x04UL /* rw Mid bits of the transfer count 0x04 */ | ||
30 | #define ESP_FDATA 0x08UL /* rw FIFO data bits 0x08 */ | ||
31 | #define ESP_CMD 0x0cUL /* rw SCSI command bits 0x0c */ | ||
32 | #define ESP_STATUS 0x10UL /* ro ESP status register 0x10 */ | ||
33 | #define ESP_BUSID ESP_STATUS /* wo Bus ID for select/reselect 0x10 */ | ||
34 | #define ESP_INTRPT 0x14UL /* ro Kind of interrupt 0x14 */ | ||
35 | #define ESP_TIMEO ESP_INTRPT /* wo Timeout value for select/resel 0x14 */ | ||
36 | #define ESP_SSTEP 0x18UL /* ro Sequence step register 0x18 */ | ||
37 | #define ESP_STP ESP_SSTEP /* wo Transfer period per sync 0x18 */ | ||
38 | #define ESP_FFLAGS 0x1cUL /* ro Bits of current FIFO info 0x1c */ | ||
39 | #define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */ | ||
40 | #define ESP_CFG1 0x20UL /* rw First configuration register 0x20 */ | ||
41 | #define ESP_CFACT 0x24UL /* wo Clock conversion factor 0x24 */ | ||
42 | #define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */ | ||
43 | #define ESP_CTEST 0x28UL /* wo Chip test register 0x28 */ | ||
44 | #define ESP_CFG2 0x2cUL /* rw Second configuration register 0x2c */ | ||
45 | #define ESP_CFG3 0x30UL /* rw Third configuration register 0x30 */ | ||
46 | #define ESP_TCHI 0x38UL /* rw High bits of transfer count 0x38 */ | ||
47 | #define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ | ||
48 | #define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ | ||
49 | #define ESP_FGRND 0x3cUL /* rw Data base for fifo 0x3c */ | ||
50 | #define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */ | ||
51 | #define ESP_REG_SIZE 0x40UL | ||
52 | |||
53 | /* Various revisions of the ESP board. */ | ||
54 | enum esp_rev { | ||
55 | esp100 = 0x00, /* NCR53C90 - very broken */ | ||
56 | esp100a = 0x01, /* NCR53C90A */ | ||
57 | esp236 = 0x02, | ||
58 | fas236 = 0x03, | ||
59 | fas100a = 0x04, | ||
60 | fast = 0x05, | ||
61 | fashme = 0x06, | ||
62 | espunknown = 0x07 | ||
63 | }; | ||
64 | |||
65 | /* We allocate one of these for each scsi device and attach it to | ||
66 | * SDptr->hostdata for use in the driver | ||
67 | */ | ||
68 | struct esp_device { | ||
69 | unsigned char sync_min_period; | ||
70 | unsigned char sync_max_offset; | ||
71 | unsigned sync:1; | ||
72 | unsigned wide:1; | ||
73 | unsigned disconnect:1; | ||
74 | }; | ||
75 | |||
76 | struct scsi_cmnd; | ||
77 | |||
78 | /* We get one of these for each ESP probed. */ | ||
79 | struct esp { | ||
80 | void __iomem *eregs; /* ESP controller registers */ | ||
81 | void __iomem *dregs; /* DMA controller registers */ | ||
82 | struct sbus_dma *dma; /* DMA controller sw state */ | ||
83 | struct Scsi_Host *ehost; /* Backpointer to SCSI Host */ | ||
84 | struct sbus_dev *sdev; /* Pointer to SBus entry */ | ||
85 | |||
86 | /* ESP Configuration Registers */ | ||
87 | u8 config1; /* Copy of the 1st config register */ | ||
88 | u8 config2; /* Copy of the 2nd config register */ | ||
89 | u8 config3[16]; /* Copy of the 3rd config register */ | ||
90 | |||
91 | /* The current command we are sending to the ESP chip. This esp_command | ||
92 | * ptr needs to be mapped in DVMA area so we can send commands and read | ||
93 | * from the ESP fifo without burning precious CPU cycles. Programmed I/O | ||
94 | * sucks when we have the DVMA to do it for us. The ESP is stupid and will | ||
95 | * only send out 6, 10, and 12 byte SCSI commands, others we need to send | ||
96 | * one byte at a time. esp_slowcmd being set says that we are doing one | ||
97 | * of the command types ESP doesn't understand, esp_scmdp keeps track of | ||
98 | * which byte we are sending, esp_scmdleft says how many bytes to go. | ||
99 | */ | ||
100 | volatile u8 *esp_command; /* Location of command (CPU view) */ | ||
101 | __u32 esp_command_dvma;/* Location of command (DVMA view) */ | ||
102 | unsigned char esp_clen; /* Length of this command */ | ||
103 | unsigned char esp_slowcmd; | ||
104 | unsigned char *esp_scmdp; | ||
105 | unsigned char esp_scmdleft; | ||
106 | |||
107 | /* The following are used to determine the cause of an IRQ. Upon every | ||
108 | * IRQ entry we synchronize these with the hardware registers. | ||
109 | */ | ||
110 | u8 ireg; /* Copy of ESP interrupt register */ | ||
111 | u8 sreg; /* Copy of ESP status register */ | ||
112 | u8 seqreg; /* Copy of ESP sequence step register */ | ||
113 | u8 sreg2; /* Copy of HME status2 register */ | ||
114 | |||
115 | /* To save register writes to the ESP, which can be expensive, we | ||
116 | * keep track of the previous value that various registers had for | ||
117 | * the last target we connected to. If they are the same for the | ||
118 | * current target, we skip the register writes as they are not needed. | ||
119 | */ | ||
120 | u8 prev_soff, prev_stp; | ||
121 | u8 prev_cfg3, __cache_pad; | ||
122 | |||
123 | /* We also keep a cache of the previous FAS/HME DMA CSR register value. */ | ||
124 | u32 prev_hme_dmacsr; | ||
125 | |||
126 | /* The HME is the biggest piece of shit I have ever seen. */ | ||
127 | u8 hme_fifo_workaround_buffer[16 * 2]; | ||
128 | u8 hme_fifo_workaround_count; | ||
129 | |||
130 | /* For each target we keep track of save/restore data | ||
131 | * pointer information. This needs to be updated majorly | ||
132 | * when we add support for tagged queueing. -DaveM | ||
133 | */ | ||
134 | struct esp_pointers { | ||
135 | char *saved_ptr; | ||
136 | struct scatterlist *saved_buffer; | ||
137 | int saved_this_residual; | ||
138 | int saved_buffers_residual; | ||
139 | } data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/; | ||
140 | |||
141 | /* Clock periods, frequencies, synchronization, etc. */ | ||
142 | unsigned int cfreq; /* Clock frequency in HZ */ | ||
143 | unsigned int cfact; /* Clock conversion factor */ | ||
144 | unsigned int raw_cfact; /* Raw copy from probing */ | ||
145 | unsigned int ccycle; /* One ESP clock cycle */ | ||
146 | unsigned int ctick; /* One ESP clock time */ | ||
147 | unsigned int radelay; /* FAST chip req/ack delay */ | ||
148 | unsigned int neg_defp; /* Default negotiation period */ | ||
149 | unsigned int sync_defp; /* Default sync transfer period */ | ||
150 | unsigned int max_period; /* longest our period can be */ | ||
151 | unsigned int min_period; /* shortest period we can withstand */ | ||
152 | |||
153 | struct esp *next; /* Next ESP we probed or NULL */ | ||
154 | char prom_name[64]; /* Name of ESP device from prom */ | ||
155 | int prom_node; /* Prom node where ESP found */ | ||
156 | int esp_id; /* Unique per-ESP ID number */ | ||
157 | |||
158 | /* For slow to medium speed input clock rates we shoot for 5mb/s, | ||
159 | * but for high input clock rates we try to do 10mb/s although I | ||
160 | * don't think a transfer can even run that fast with an ESP even | ||
161 | * with DMA2 scatter gather pipelining. | ||
162 | */ | ||
163 | #define SYNC_DEFP_SLOW 0x32 /* 5mb/s */ | ||
164 | #define SYNC_DEFP_FAST 0x19 /* 10mb/s */ | ||
165 | |||
166 | unsigned int snip; /* Sync. negotiation in progress */ | ||
167 | unsigned int wnip; /* WIDE negotiation in progress */ | ||
168 | unsigned int targets_present;/* targets spoken to before */ | ||
169 | |||
170 | int current_transfer_size; /* Set at beginning of data dma */ | ||
171 | |||
172 | u8 espcmdlog[32]; /* Log of current esp cmds sent. */ | ||
173 | u8 espcmdent; /* Current entry in esp cmd log. */ | ||
174 | |||
175 | /* Misc. info about this ESP */ | ||
176 | enum esp_rev erev; /* ESP revision */ | ||
177 | int irq; /* SBus IRQ for this ESP */ | ||
178 | int scsi_id; /* Who am I as initiator? */ | ||
179 | int scsi_id_mask; /* Bitmask of 'me'. */ | ||
180 | int diff; /* Differential SCSI bus? */ | ||
181 | int bursts; /* Burst sizes our DVMA supports */ | ||
182 | |||
183 | /* Our command queues, only one cmd lives in the current_SC queue. */ | ||
184 | struct scsi_cmnd *issue_SC; /* Commands to be issued */ | ||
185 | struct scsi_cmnd *current_SC; /* Who is currently working the bus */ | ||
186 | struct scsi_cmnd *disconnected_SC;/* Commands disconnected from the bus */ | ||
187 | |||
188 | /* Message goo */ | ||
189 | u8 cur_msgout[16]; | ||
190 | u8 cur_msgin[16]; | ||
191 | u8 prevmsgout, prevmsgin; | ||
192 | u8 msgout_len, msgin_len; | ||
193 | u8 msgout_ctr, msgin_ctr; | ||
194 | |||
195 | /* States that we cannot keep in the per cmd structure because they | ||
196 | * cannot be associated with any specific command. | ||
197 | */ | ||
198 | u8 resetting_bus; | ||
199 | wait_queue_head_t reset_queue; | ||
200 | }; | ||
201 | |||
202 | /* Bitfield meanings for the above registers. */ | ||
203 | |||
204 | /* ESP config reg 1, read-write, found on all ESP chips */ | ||
205 | #define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */ | ||
206 | #define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */ | ||
207 | #define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */ | ||
208 | #define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */ | ||
209 | #define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */ | ||
210 | #define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */ | ||
211 | |||
212 | /* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */ | ||
213 | #define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */ | ||
214 | #define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */ | ||
215 | #define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */ | ||
216 | #define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tmode only) */ | ||
217 | #define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */ | ||
218 | #define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */ | ||
219 | #define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */ | ||
220 | #define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */ | ||
221 | #define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216) */ | ||
222 | #define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */ | ||
223 | #define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */ | ||
224 | #define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */ | ||
225 | #define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */ | ||
226 | |||
227 | /* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */ | ||
228 | #define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */ | ||
229 | #define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */ | ||
230 | #define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */ | ||
231 | #define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */ | ||
232 | #define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */ | ||
233 | #define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */ | ||
234 | #define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */ | ||
235 | #define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */ | ||
236 | #define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */ | ||
237 | #define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */ | ||
238 | #define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */ | ||
239 | #define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */ | ||
240 | #define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */ | ||
241 | #define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */ | ||
242 | #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ | ||
243 | #define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ | ||
244 | |||
245 | /* ESP command register read-write */ | ||
246 | /* Group 1 commands: These may be sent at any point in time to the ESP | ||
247 | * chip. None of them can generate interrupts 'cept | ||
248 | * the "SCSI bus reset" command if you have not disabled | ||
249 | * SCSI reset interrupts in the config1 ESP register. | ||
250 | */ | ||
251 | #define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */ | ||
252 | #define ESP_CMD_FLUSH 0x01 /* FIFO Flush */ | ||
253 | #define ESP_CMD_RC 0x02 /* Chip reset */ | ||
254 | #define ESP_CMD_RS 0x03 /* SCSI bus reset */ | ||
255 | |||
256 | /* Group 2 commands: ESP must be an initiator and connected to a target | ||
257 | * for these commands to work. | ||
258 | */ | ||
259 | #define ESP_CMD_TI 0x10 /* Transfer Information */ | ||
260 | #define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */ | ||
261 | #define ESP_CMD_MOK 0x12 /* Message okie-dokie */ | ||
262 | #define ESP_CMD_TPAD 0x18 /* Transfer Pad */ | ||
263 | #define ESP_CMD_SATN 0x1a /* Set ATN */ | ||
264 | #define ESP_CMD_RATN 0x1b /* De-assert ATN */ | ||
265 | |||
266 | /* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected | ||
267 | * to a target as the initiator for these commands to work. | ||
268 | */ | ||
269 | #define ESP_CMD_SMSG 0x20 /* Send message */ | ||
270 | #define ESP_CMD_SSTAT 0x21 /* Send status */ | ||
271 | #define ESP_CMD_SDATA 0x22 /* Send data */ | ||
272 | #define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */ | ||
273 | #define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */ | ||
274 | #define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */ | ||
275 | #define ESP_CMD_DCNCT 0x27 /* Disconnect */ | ||
276 | #define ESP_CMD_RMSG 0x28 /* Receive Message */ | ||
277 | #define ESP_CMD_RCMD 0x29 /* Receive Command */ | ||
278 | #define ESP_CMD_RDATA 0x2a /* Receive Data */ | ||
279 | #define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */ | ||
280 | |||
281 | /* Group 4 commands: The ESP must be in the disconnected state and must | ||
282 | * not be connected to any targets as initiator for | ||
283 | * these commands to work. | ||
284 | */ | ||
285 | #define ESP_CMD_RSEL 0x40 /* Reselect */ | ||
286 | #define ESP_CMD_SEL 0x41 /* Select w/o ATN */ | ||
287 | #define ESP_CMD_SELA 0x42 /* Select w/ATN */ | ||
288 | #define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */ | ||
289 | #define ESP_CMD_ESEL 0x44 /* Enable selection */ | ||
290 | #define ESP_CMD_DSEL 0x45 /* Disable selections */ | ||
291 | #define ESP_CMD_SA3 0x46 /* Select w/ATN3 */ | ||
292 | #define ESP_CMD_RSEL3 0x47 /* Reselect3 */ | ||
293 | |||
294 | /* This bit enables the ESP's DMA on the SBus */ | ||
295 | #define ESP_CMD_DMA 0x80 /* Do DMA? */ | ||
296 | |||
297 | |||
298 | /* ESP status register read-only */ | ||
299 | #define ESP_STAT_PIO 0x01 /* IO phase bit */ | ||
300 | #define ESP_STAT_PCD 0x02 /* CD phase bit */ | ||
301 | #define ESP_STAT_PMSG 0x04 /* MSG phase bit */ | ||
302 | #define ESP_STAT_PMASK 0x07 /* Mask of phase bits */ | ||
303 | #define ESP_STAT_TDONE 0x08 /* Transfer Completed */ | ||
304 | #define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */ | ||
305 | #define ESP_STAT_PERR 0x20 /* Parity error */ | ||
306 | #define ESP_STAT_SPAM 0x40 /* Real bad error */ | ||
307 | /* This indicates the 'interrupt pending' condition on esp236, it is a reserved | ||
308 | * bit on other revs of the ESP. | ||
309 | */ | ||
310 | #define ESP_STAT_INTR 0x80 /* Interrupt */ | ||
311 | |||
312 | /* HME only: status 2 register */ | ||
313 | #define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */ | ||
314 | #define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */ | ||
315 | #define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */ | ||
316 | #define ESP_STAT2_CREGA 0x08 /* The command reg is active now */ | ||
317 | #define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */ | ||
318 | #define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */ | ||
319 | #define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */ | ||
320 | #define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */ | ||
321 | |||
322 | /* The status register can be masked with ESP_STAT_PMASK and compared | ||
323 | * with the following values to determine the current phase the ESP | ||
324 | * (at least thinks it) is in. For our purposes we also add our own | ||
325 | * software 'done' bit for our phase management engine. | ||
326 | */ | ||
327 | #define ESP_DOP (0) /* Data Out */ | ||
328 | #define ESP_DIP (ESP_STAT_PIO) /* Data In */ | ||
329 | #define ESP_CMDP (ESP_STAT_PCD) /* Command */ | ||
330 | #define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */ | ||
331 | #define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */ | ||
332 | #define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */ | ||
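Decoding a saved status register against these values is a mask and a compare; a hypothetical helper makes the mapping explicit:

	/* Hypothetical decode helper; masked values 4 and 5 are not
	 * defined phase encodings, hence the default case.
	 */
	static const char *esp_phase_name(u8 sreg)
	{
		switch (sreg & ESP_STAT_PMASK) {
		case ESP_DOP:   return "data-out";
		case ESP_DIP:   return "data-in";
		case ESP_CMDP:  return "command";
		case ESP_STATP: return "status";
		case ESP_MOP:   return "message-out";
		case ESP_MIP:   return "message-in";
		default:        return "reserved";
		}
	}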
333 | |||
334 | /* ESP interrupt register read-only */ | ||
335 | #define ESP_INTR_S 0x01 /* Select w/o ATN */ | ||
336 | #define ESP_INTR_SATN 0x02 /* Select w/ATN */ | ||
337 | #define ESP_INTR_RSEL 0x04 /* Reselected */ | ||
338 | #define ESP_INTR_FDONE 0x08 /* Function done */ | ||
339 | #define ESP_INTR_BSERV 0x10 /* Bus service */ | ||
340 | #define ESP_INTR_DC 0x20 /* Disconnect */ | ||
341 | #define ESP_INTR_IC 0x40 /* Illegal command given */ | ||
342 | #define ESP_INTR_SR 0x80 /* SCSI bus reset detected */ | ||
343 | |||
344 | /* Interrupt status macros */ | ||
345 | #define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR)) | ||
346 | #define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC)) | ||
347 | #define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN)) | ||
348 | #define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S)) | ||
349 | #define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \ | ||
350 | (ESP_SELECT_WITHOUT_ATN_IRQ(esp))) | ||
351 | #define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL)) | ||
352 | |||
353 | /* ESP sequence step register read-only */ | ||
354 | #define ESP_STEP_VBITS 0x07 /* Valid bits */ | ||
355 | #define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */ | ||
356 | #define ESP_STEP_SID 0x01 /* One msg byte sent */ | ||
357 | #define ESP_STEP_NCMD 0x02 /* Was not in command phase */ | ||
358 | #define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd | ||
359 | * bytes to be lost | ||
360 | */ | ||
361 | #define ESP_STEP_FINI4 0x04 /* Command was sent ok */ | ||
362 | |||
363 | /* Ho hum, some ESPs set the step register to this as well... */ | ||
364 | #define ESP_STEP_FINI5 0x05 | ||
365 | #define ESP_STEP_FINI6 0x06 | ||
366 | #define ESP_STEP_FINI7 0x07 | ||
367 | |||
368 | /* ESP chip-test register read-write */ | ||
369 | #define ESP_TEST_TARG 0x01 /* Target test mode */ | ||
370 | #define ESP_TEST_INI 0x02 /* Initiator test mode */ | ||
371 | #define ESP_TEST_TS 0x04 /* Tristate test mode */ | ||
372 | |||
373 | /* ESP unique ID register read-only, found on fas236+fas100a only */ | ||
374 | #define ESP_UID_F100A 0x00 /* ESP FAS100A */ | ||
375 | #define ESP_UID_F236 0x02 /* ESP FAS236 */ | ||
376 | #define ESP_UID_REV 0x07 /* ESP revision */ | ||
377 | #define ESP_UID_FAM 0xf8 /* ESP family */ | ||
378 | |||
379 | /* ESP fifo flags register read-only */ | ||
380 | /* Note that the following implies a 16 byte FIFO on the ESP. */ | ||
381 | #define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */ | ||
382 | #define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */ | ||
383 | #define ESP_FF_SSTEP 0xe0 /* Sequence step */ | ||
384 | |||
385 | /* ESP clock conversion factor register write-only */ | ||
386 | #define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */ | ||
387 | #define ESP_CCF_NEVER 0x01 /* Set it to this and die */ | ||
388 | #define ESP_CCF_F2 0x02 /* 10MHz */ | ||
389 | #define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */ | ||
390 | #define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */ | ||
391 | #define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */ | ||
392 | #define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */ | ||
393 | #define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */ | ||
394 | |||
395 | /* HME only... */ | ||
396 | #define ESP_BUSID_RESELID 0x10 | ||
397 | #define ESP_BUSID_CTR32BIT 0x40 | ||
398 | |||
399 | #define ESP_BUS_TIMEOUT 275 /* In milliseconds */ | ||
400 | #define ESP_TIMEO_CONST 8192 | ||
401 | #define ESP_NEG_DEFP(mhz, cfact) \ | ||
402 | ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) | ||
403 | #define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000)) | ||
404 | #define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) | ||
405 | |||
406 | #endif /* !(_SPARC_ESP_H) */ | ||
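Worked through for a typical 25MHz input clock (conversion factor ESP_CCF_F5 = 5): ESP_MHZ_TO_CYCLE(25000000) = 1000000000 / 25000 = 40000, i.e. a 40ns tick expressed in 1/1000 ns units, and ESP_NEG_DEFP(25000000, 5) = (275 * 25000) / (8192 * 5) = 167 for the select/reselect timeout register. As a snippet, with the results hand-checked against the macros above:

	unsigned int hz     = 25000000;                       /* 25MHz input clock */
	unsigned int ccycle = ESP_MHZ_TO_CYCLE(hz);           /* 40000: 40ns tick */
	unsigned int defp   = ESP_NEG_DEFP(hz, ESP_CCF_F5);   /* (275*25000)/40960 = 167 */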
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c new file mode 100644 index 000000000000..3cd5bf723da4 --- /dev/null +++ b/drivers/scsi/esp_scsi.c | |||
@@ -0,0 +1,2710 @@ | |||
1 | /* esp_scsi.c: ESP SCSI driver. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <linux/list.h> | ||
11 | #include <linux/completion.h> | ||
12 | #include <linux/kallsyms.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/init.h> | ||
16 | |||
17 | #include <asm/irq.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <asm/dma.h> | ||
20 | |||
21 | #include <scsi/scsi.h> | ||
22 | #include <scsi/scsi_host.h> | ||
23 | #include <scsi/scsi_cmnd.h> | ||
24 | #include <scsi/scsi_device.h> | ||
25 | #include <scsi/scsi_tcq.h> | ||
26 | #include <scsi/scsi_dbg.h> | ||
27 | #include <scsi/scsi_transport_spi.h> | ||
28 | |||
29 | #include "esp_scsi.h" | ||
30 | |||
31 | #define DRV_MODULE_NAME "esp" | ||
32 | #define PFX DRV_MODULE_NAME ": " | ||
33 | #define DRV_VERSION "2.000" | ||
34 | #define DRV_MODULE_RELDATE "April 19, 2007" | ||
35 | |||
36 | /* SCSI bus reset settle time in seconds. */ | ||
37 | static int esp_bus_reset_settle = 3; | ||
38 | |||
39 | static u32 esp_debug; | ||
40 | #define ESP_DEBUG_INTR 0x00000001 | ||
41 | #define ESP_DEBUG_SCSICMD 0x00000002 | ||
42 | #define ESP_DEBUG_RESET 0x00000004 | ||
43 | #define ESP_DEBUG_MSGIN 0x00000008 | ||
44 | #define ESP_DEBUG_MSGOUT 0x00000010 | ||
45 | #define ESP_DEBUG_CMDDONE 0x00000020 | ||
46 | #define ESP_DEBUG_DISCONNECT 0x00000040 | ||
47 | #define ESP_DEBUG_DATASTART 0x00000080 | ||
48 | #define ESP_DEBUG_DATADONE 0x00000100 | ||
49 | #define ESP_DEBUG_RECONNECT 0x00000200 | ||
50 | #define ESP_DEBUG_AUTOSENSE 0x00000400 | ||
51 | |||
52 | #define esp_log_intr(f, a...) \ | ||
53 | do { if (esp_debug & ESP_DEBUG_INTR) \ | ||
54 | printk(f, ## a); \ | ||
55 | } while (0) | ||
56 | |||
57 | #define esp_log_reset(f, a...) \ | ||
58 | do { if (esp_debug & ESP_DEBUG_RESET) \ | ||
59 | printk(f, ## a); \ | ||
60 | } while (0) | ||
61 | |||
62 | #define esp_log_msgin(f, a...) \ | ||
63 | do { if (esp_debug & ESP_DEBUG_MSGIN) \ | ||
64 | printk(f, ## a); \ | ||
65 | } while (0) | ||
66 | |||
67 | #define esp_log_msgout(f, a...) \ | ||
68 | do { if (esp_debug & ESP_DEBUG_MSGOUT) \ | ||
69 | printk(f, ## a); \ | ||
70 | } while (0) | ||
71 | |||
72 | #define esp_log_cmddone(f, a...) \ | ||
73 | do { if (esp_debug & ESP_DEBUG_CMDDONE) \ | ||
74 | printk(f, ## a); \ | ||
75 | } while (0) | ||
76 | |||
77 | #define esp_log_disconnect(f, a...) \ | ||
78 | do { if (esp_debug & ESP_DEBUG_DISCONNECT) \ | ||
79 | printk(f, ## a); \ | ||
80 | } while (0) | ||
81 | |||
82 | #define esp_log_datastart(f, a...) \ | ||
83 | do { if (esp_debug & ESP_DEBUG_DATASTART) \ | ||
84 | printk(f, ## a); \ | ||
85 | } while (0) | ||
86 | |||
87 | #define esp_log_datadone(f, a...) \ | ||
88 | do { if (esp_debug & ESP_DEBUG_DATADONE) \ | ||
89 | printk(f, ## a); \ | ||
90 | } while (0) | ||
91 | |||
92 | #define esp_log_reconnect(f, a...) \ | ||
93 | do { if (esp_debug & ESP_DEBUG_RECONNECT) \ | ||
94 | printk(f, ## a); \ | ||
95 | } while (0) | ||
96 | |||
97 | #define esp_log_autosense(f, a...) \ | ||
98 | do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \ | ||
99 | printk(f, ## a); \ | ||
100 | } while (0) | ||
101 | |||
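Each of these logging macros wraps its body in do { ... } while (0) so the expansion behaves as a single statement: it can take a trailing semicolon and sit in an if/else branch without the stray ';' terminating the 'if'. Illustrative use (fault, sreg and handle_ok are placeholders):

	if (fault)
		esp_log_intr("esp: fault sreg[%02x]\n", sreg);  /* one statement, as intended */
	else
		handle_ok();  /* with bare { } braces, the ';' above would orphan this else */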
102 | #define esp_read8(REG) esp->ops->esp_read8(esp, REG) | ||
103 | #define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG) | ||
104 | |||
105 | static void esp_log_fill_regs(struct esp *esp, | ||
106 | struct esp_event_ent *p) | ||
107 | { | ||
108 | p->sreg = esp->sreg; | ||
109 | p->seqreg = esp->seqreg; | ||
110 | p->sreg2 = esp->sreg2; | ||
111 | p->ireg = esp->ireg; | ||
112 | p->select_state = esp->select_state; | ||
113 | p->event = esp->event; | ||
114 | } | ||
115 | |||
116 | void scsi_esp_cmd(struct esp *esp, u8 val) | ||
117 | { | ||
118 | struct esp_event_ent *p; | ||
119 | int idx = esp->esp_event_cur; | ||
120 | |||
121 | p = &esp->esp_event_log[idx]; | ||
122 | p->type = ESP_EVENT_TYPE_CMD; | ||
123 | p->val = val; | ||
124 | esp_log_fill_regs(esp, p); | ||
125 | |||
126 | esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
127 | |||
128 | esp_write8(val, ESP_CMD); | ||
129 | } | ||
130 | EXPORT_SYMBOL(scsi_esp_cmd); | ||
131 | |||
132 | static void esp_event(struct esp *esp, u8 val) | ||
133 | { | ||
134 | struct esp_event_ent *p; | ||
135 | int idx = esp->esp_event_cur; | ||
136 | |||
137 | p = &esp->esp_event_log[idx]; | ||
138 | p->type = ESP_EVENT_TYPE_EVENT; | ||
139 | p->val = val; | ||
140 | esp_log_fill_regs(esp, p); | ||
141 | |||
142 | esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
143 | |||
144 | esp->event = val; | ||
145 | } | ||
146 | |||
147 | static void esp_dump_cmd_log(struct esp *esp) | ||
148 | { | ||
149 | int idx = esp->esp_event_cur; | ||
150 | int stop = idx; | ||
151 | |||
152 | printk(KERN_INFO PFX "esp%d: Dumping command log\n", | ||
153 | esp->host->unique_id); | ||
154 | do { | ||
155 | struct esp_event_ent *p = &esp->esp_event_log[idx]; | ||
156 | |||
157 | printk(KERN_INFO PFX "esp%d: ent[%d] %s ", | ||
158 | esp->host->unique_id, idx, | ||
159 | p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT"); | ||
160 | |||
161 | printk("val[%02x] sreg[%02x] seqreg[%02x] " | ||
162 | "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n", | ||
163 | p->val, p->sreg, p->seqreg, | ||
164 | p->sreg2, p->ireg, p->select_state, p->event); | ||
165 | |||
166 | idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
167 | } while (idx != stop); | ||
168 | } | ||
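Both scsi_esp_cmd and esp_event advance the log with (idx + 1) & (ESP_EVENT_LOG_SZ - 1), modulo arithmetic done with a mask, which the index math assumes ESP_EVENT_LOG_SZ is a power of two. The dump above then starts at the current slot and walks the full circle, so entries print oldest first. The bare idiom:

	/* Power-of-two ring advance; sz must be a power of two for the
	 * mask to equal the modulo.
	 */
	#define RING_NEXT(idx, sz)	(((idx) + 1) & ((sz) - 1))

	int idx = cur, stop = cur;	/* cur: next slot to be written */
	do {
		/* consume slot idx, oldest entry first ... */
		idx = RING_NEXT(idx, ESP_EVENT_LOG_SZ);
	} while (idx != stop);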
169 | |||
170 | static void esp_flush_fifo(struct esp *esp) | ||
171 | { | ||
172 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
173 | if (esp->rev == ESP236) { | ||
174 | int lim = 1000; | ||
175 | |||
176 | while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) { | ||
177 | if (--lim == 0) { | ||
178 | printk(KERN_ALERT PFX "esp%d: ESP_FF_FBYTES " | ||
179 | "will not clear!\n", | ||
180 | esp->host->unique_id); | ||
181 | break; | ||
182 | } | ||
183 | udelay(1); | ||
184 | } | ||
185 | } | ||
186 | } | ||
187 | |||
188 | static void hme_read_fifo(struct esp *esp) | ||
189 | { | ||
190 | int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
191 | int idx = 0; | ||
192 | |||
193 | while (fcnt--) { | ||
194 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
195 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
196 | } | ||
197 | if (esp->sreg2 & ESP_STAT2_F1BYTE) { | ||
198 | esp_write8(0, ESP_FDATA); | ||
199 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
200 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
201 | } | ||
202 | esp->fifo_cnt = idx; | ||
203 | } | ||
204 | |||
205 | static void esp_set_all_config3(struct esp *esp, u8 val) | ||
206 | { | ||
207 | int i; | ||
208 | |||
209 | for (i = 0; i < ESP_MAX_TARGET; i++) | ||
210 | esp->target[i].esp_config3 = val; | ||
211 | } | ||
212 | |||
213 | /* Reset the ESP chip, _not_ the SCSI bus. */ | ||
214 | static void esp_reset_esp(struct esp *esp) | ||
215 | { | ||
216 | u8 family_code, version; | ||
217 | |||
218 | /* Now reset the ESP chip */ | ||
219 | scsi_esp_cmd(esp, ESP_CMD_RC); | ||
220 | scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
221 | scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
222 | |||
223 | /* Reload the configuration registers */ | ||
224 | esp_write8(esp->cfact, ESP_CFACT); | ||
225 | |||
226 | esp->prev_stp = 0; | ||
227 | esp_write8(esp->prev_stp, ESP_STP); | ||
228 | |||
229 | esp->prev_soff = 0; | ||
230 | esp_write8(esp->prev_soff, ESP_SOFF); | ||
231 | |||
232 | esp_write8(esp->neg_defp, ESP_TIMEO); | ||
233 | |||
234 | /* This is the only point at which it is reliable to read | ||
235 | * the ID code for the fast ESP chip variants. | ||
236 | */ | ||
237 | esp->max_period = ((35 * esp->ccycle) / 1000); | ||
238 | if (esp->rev == FAST) { | ||
239 | version = esp_read8(ESP_UID); | ||
240 | family_code = (version & 0xf8) >> 3; | ||
241 | if (family_code == 0x02) | ||
242 | esp->rev = FAS236; | ||
243 | else if (family_code == 0x0a) | ||
244 | esp->rev = FASHME; /* Version is usually '5'. */ | ||
245 | else | ||
246 | esp->rev = FAS100A; | ||
247 | esp->min_period = ((4 * esp->ccycle) / 1000); | ||
248 | } else { | ||
249 | esp->min_period = ((5 * esp->ccycle) / 1000); | ||
250 | } | ||
251 | esp->max_period = (esp->max_period + 3)>>2; | ||
252 | esp->min_period = (esp->min_period + 3)>>2; | ||
253 | |||
254 | esp_write8(esp->config1, ESP_CFG1); | ||
255 | switch (esp->rev) { | ||
256 | case ESP100: | ||
257 | /* nothing to do */ | ||
258 | break; | ||
259 | |||
260 | case ESP100A: | ||
261 | esp_write8(esp->config2, ESP_CFG2); | ||
262 | break; | ||
263 | |||
264 | case ESP236: | ||
265 | /* Slow 236 */ | ||
266 | esp_write8(esp->config2, ESP_CFG2); | ||
267 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
268 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
269 | break; | ||
270 | |||
271 | case FASHME: | ||
272 | esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); | ||
273 | /* fallthrough... */ | ||
274 | |||
275 | case FAS236: | ||
276 | /* Fast 236 or HME */ | ||
277 | esp_write8(esp->config2, ESP_CFG2); | ||
278 | if (esp->rev == FASHME) { | ||
279 | u8 cfg3 = esp->target[0].esp_config3; | ||
280 | |||
281 | cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH; | ||
282 | if (esp->scsi_id >= 8) | ||
283 | cfg3 |= ESP_CONFIG3_IDBIT3; | ||
284 | esp_set_all_config3(esp, cfg3); | ||
285 | } else { | ||
286 | u32 cfg3 = esp->target[0].esp_config3; | ||
287 | |||
288 | cfg3 |= ESP_CONFIG3_FCLK; | ||
289 | esp_set_all_config3(esp, cfg3); | ||
290 | } | ||
291 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
292 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
293 | if (esp->rev == FASHME) { | ||
294 | esp->radelay = 80; | ||
295 | } else { | ||
296 | if (esp->flags & ESP_FLAG_DIFFERENTIAL) | ||
297 | esp->radelay = 0; | ||
298 | else | ||
299 | esp->radelay = 96; | ||
300 | } | ||
301 | break; | ||
302 | |||
303 | case FAS100A: | ||
304 | /* Fast 100a */ | ||
305 | esp_write8(esp->config2, ESP_CFG2); | ||
306 | esp_set_all_config3(esp, | ||
307 | (esp->target[0].esp_config3 | | ||
308 | ESP_CONFIG3_FCLOCK)); | ||
309 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
310 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
311 | esp->radelay = 32; | ||
312 | break; | ||
313 | |||
314 | default: | ||
315 | break; | ||
316 | } | ||
317 | |||
318 | /* Eat any bitrot in the chip */ | ||
319 | esp_read8(ESP_INTRPT); | ||
320 | udelay(100); | ||
321 | } | ||
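
The min/max period math near the top of esp_reset_esp() converts clock counts into SCSI period factors: ccycle appears to hold the clock period in picoseconds (hence the /1000 to get nanoseconds), and (t + 3) >> 2 rounds up to the 4 ns units that SDTR uses. A worked example for a 40 MHz part (25 ns clock), under that assumption:

    unsigned int ccycle = 25000;                     /* 25 ns clock, in ps */
    unsigned int max_period = (35 * ccycle) / 1000;  /* 875 ns */
    unsigned int min_period = (4 * ccycle) / 1000;   /* 100 ns, fast chip */

    max_period = (max_period + 3) >> 2;              /* 219: slowest factor */
    min_period = (min_period + 3) >> 2;              /* 25: fastest factor */
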
322 | |||
323 | static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd) | ||
324 | { | ||
325 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
326 | struct scatterlist *sg = cmd->request_buffer; | ||
327 | int dir = cmd->sc_data_direction; | ||
328 | int total, i; | ||
329 | |||
330 | if (dir == DMA_NONE) | ||
331 | return; | ||
332 | |||
333 | BUG_ON(cmd->use_sg == 0); | ||
334 | |||
335 | spriv->u.num_sg = esp->ops->map_sg(esp, sg, | ||
336 | cmd->use_sg, dir); | ||
337 | spriv->cur_residue = sg_dma_len(sg); | ||
338 | spriv->cur_sg = sg; | ||
339 | |||
340 | total = 0; | ||
341 | for (i = 0; i < spriv->u.num_sg; i++) | ||
342 | total += sg_dma_len(&sg[i]); | ||
343 | spriv->tot_residue = total; | ||
344 | } | ||
345 | |||
346 | static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent, | ||
347 | struct scsi_cmnd *cmd) | ||
348 | { | ||
349 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
350 | |||
351 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
352 | return ent->sense_dma + | ||
353 | (ent->sense_ptr - cmd->sense_buffer); | ||
354 | } | ||
355 | |||
356 | return sg_dma_address(p->cur_sg) + | ||
357 | (sg_dma_len(p->cur_sg) - | ||
358 | p->cur_residue); | ||
359 | } | ||
360 | |||
361 | static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent, | ||
362 | struct scsi_cmnd *cmd) | ||
363 | { | ||
364 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
365 | |||
366 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
367 | return SCSI_SENSE_BUFFERSIZE - | ||
368 | (ent->sense_ptr - cmd->sense_buffer); | ||
369 | } | ||
370 | return p->cur_residue; | ||
371 | } | ||
372 | |||
373 | static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent, | ||
374 | struct scsi_cmnd *cmd, unsigned int len) | ||
375 | { | ||
376 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
377 | |||
378 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
379 | ent->sense_ptr += len; | ||
380 | return; | ||
381 | } | ||
382 | |||
383 | p->cur_residue -= len; | ||
384 | p->tot_residue -= len; | ||
385 | if (p->cur_residue < 0 || p->tot_residue < 0) { | ||
386 | printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n", | ||
387 | esp->host->unique_id); | ||
388 | printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] " | ||
389 | "len[%u]\n", | ||
390 | esp->host->unique_id, | ||
391 | p->cur_residue, p->tot_residue, len); | ||
392 | p->cur_residue = 0; | ||
393 | p->tot_residue = 0; | ||
394 | } | ||
395 | if (!p->cur_residue && p->tot_residue) { | ||
396 | p->cur_sg++; | ||
397 | p->cur_residue = sg_dma_len(p->cur_sg); | ||
398 | } | ||
399 | } | ||
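
Taken together, cur_sg, cur_residue and tot_residue form a cursor over the mapped scatter-gather list: cur_residue counts down inside the current segment, tot_residue across the whole transfer, and when a segment drains with data still outstanding the cursor hops to the next entry. A simplified model of the advance step (illustrative types, not the driver's):

    struct sg_cursor {
            unsigned int *seg_len;          /* DMA length of each segment */
            int cur;                        /* current segment index */
            int cur_residue;                /* bytes left in this segment */
            int tot_residue;                /* bytes left in the transfer */
    };

    static void cursor_advance(struct sg_cursor *c, unsigned int len)
    {
            c->cur_residue -= len;
            c->tot_residue -= len;
            if (!c->cur_residue && c->tot_residue)
                    c->cur_residue = c->seg_len[++c->cur];
    }
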
400 | |||
401 | static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd) | ||
402 | { | ||
403 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
404 | int dir = cmd->sc_data_direction; | ||
405 | |||
406 | if (dir == DMA_NONE) | ||
407 | return; | ||
408 | |||
409 | esp->ops->unmap_sg(esp, cmd->request_buffer, | ||
410 | spriv->u.num_sg, dir); | ||
411 | } | ||
412 | |||
413 | static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent) | ||
414 | { | ||
415 | struct scsi_cmnd *cmd = ent->cmd; | ||
416 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
417 | |||
418 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
419 | ent->saved_sense_ptr = ent->sense_ptr; | ||
420 | return; | ||
421 | } | ||
422 | ent->saved_cur_residue = spriv->cur_residue; | ||
423 | ent->saved_cur_sg = spriv->cur_sg; | ||
424 | ent->saved_tot_residue = spriv->tot_residue; | ||
425 | } | ||
426 | |||
427 | static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent) | ||
428 | { | ||
429 | struct scsi_cmnd *cmd = ent->cmd; | ||
430 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
431 | |||
432 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
433 | ent->sense_ptr = ent->saved_sense_ptr; | ||
434 | return; | ||
435 | } | ||
436 | spriv->cur_residue = ent->saved_cur_residue; | ||
437 | spriv->cur_sg = ent->saved_cur_sg; | ||
438 | spriv->tot_residue = ent->saved_tot_residue; | ||
439 | } | ||
440 | |||
441 | static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd) | ||
442 | { | ||
443 | if (cmd->cmd_len == 6 || | ||
444 | cmd->cmd_len == 10 || | ||
445 | cmd->cmd_len == 12) { | ||
446 | esp->flags &= ~ESP_FLAG_DOING_SLOWCMD; | ||
447 | } else { | ||
448 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
449 | } | ||
450 | } | ||
451 | |||
452 | static void esp_write_tgt_config3(struct esp *esp, int tgt) | ||
453 | { | ||
454 | if (esp->rev > ESP100A) { | ||
455 | u8 val = esp->target[tgt].esp_config3; | ||
456 | |||
457 | if (val != esp->prev_cfg3) { | ||
458 | esp->prev_cfg3 = val; | ||
459 | esp_write8(val, ESP_CFG3); | ||
460 | } | ||
461 | } | ||
462 | } | ||
463 | |||
464 | static void esp_write_tgt_sync(struct esp *esp, int tgt) | ||
465 | { | ||
466 | u8 off = esp->target[tgt].esp_offset; | ||
467 | u8 per = esp->target[tgt].esp_period; | ||
468 | |||
469 | if (off != esp->prev_soff) { | ||
470 | esp->prev_soff = off; | ||
471 | esp_write8(off, ESP_SOFF); | ||
472 | } | ||
473 | if (per != esp->prev_stp) { | ||
474 | esp->prev_stp = per; | ||
475 | esp_write8(per, ESP_STP); | ||
476 | } | ||
477 | } | ||
478 | |||
479 | static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) | ||
480 | { | ||
481 | if (esp->rev == FASHME) { | ||
482 | /* Arbitrary segment boundaries, 24-bit counts. */ | ||
483 | if (dma_len > (1U << 24)) | ||
484 | dma_len = (1U << 24); | ||
485 | } else { | ||
486 | u32 base, end; | ||
487 | |||
488 | /* All other ESP chip variants are limited to a 16-bit | ||
489 | * transfer count. On FAS100A and FAS236 we could actually | ||
490 | * get a 24-bit transfer count by enabling ESP_CONFIG2_FENAB | ||
491 | * in the ESP_CFG2 register, but that causes other unwanted | ||
492 | * changes so we don't use it currently. | ||
493 | */ | ||
494 | if (dma_len > (1U << 16)) | ||
495 | dma_len = (1U << 16); | ||
496 | |||
497 | /* All of the DMA variants hooked up to these chips | ||
498 | * cannot handle crossing a 24-bit address boundary. | ||
499 | */ | ||
500 | base = dma_addr & ((1U << 24) - 1U); | ||
501 | end = base + dma_len; | ||
502 | if (end > (1U << 24)) | ||
503 | end = (1U << 24); | ||
504 | dma_len = end - base; | ||
505 | } | ||
506 | return dma_len; | ||
507 | } | ||
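
A concrete case of the boundary clamp above: a 128 KB transfer starting 8 KB below a 16 MB line may only move 8 KB in one shot; the remainder is picked up on the next pass through the data phase. Standalone, with the same arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t dma_addr = 0x00ffe000;         /* 8 KB below 16 MB */
            uint32_t dma_len  = 128 * 1024;

            uint32_t base = dma_addr & ((1U << 24) - 1U);
            uint32_t end  = base + dma_len;

            if (end > (1U << 24))
                    end = (1U << 24);               /* clamp at boundary */

            printf("limited len = %u\n", end - base);       /* 8192 */
            return 0;
    }
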
508 | |||
509 | static int esp_need_to_nego_wide(struct esp_target_data *tp) | ||
510 | { | ||
511 | struct scsi_target *target = tp->starget; | ||
512 | |||
513 | return spi_width(target) != tp->nego_goal_width; | ||
514 | } | ||
515 | |||
516 | static int esp_need_to_nego_sync(struct esp_target_data *tp) | ||
517 | { | ||
518 | struct scsi_target *target = tp->starget; | ||
519 | |||
520 | /* When offset is zero, period is "don't care". */ | ||
521 | if (!spi_offset(target) && !tp->nego_goal_offset) | ||
522 | return 0; | ||
523 | |||
524 | if (spi_offset(target) == tp->nego_goal_offset && | ||
525 | spi_period(target) == tp->nego_goal_period) | ||
526 | return 0; | ||
527 | |||
528 | return 1; | ||
529 | } | ||
530 | |||
531 | static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, | ||
532 | struct esp_lun_data *lp) | ||
533 | { | ||
534 | if (!ent->tag[0]) { | ||
535 | /* Non-tagged, slot already taken? */ | ||
536 | if (lp->non_tagged_cmd) | ||
537 | return -EBUSY; | ||
538 | |||
539 | if (lp->hold) { | ||
540 | /* We are being held by active tagged | ||
541 | * commands. | ||
542 | */ | ||
543 | if (lp->num_tagged) | ||
544 | return -EBUSY; | ||
545 | |||
546 | /* Tagged commands completed, we can unplug | ||
547 | * the queue and run this untagged command. | ||
548 | */ | ||
549 | lp->hold = 0; | ||
550 | } else if (lp->num_tagged) { | ||
551 | /* Plug the queue until num_tagged decreases | ||
552 | * to zero in esp_free_lun_tag. | ||
553 | */ | ||
554 | lp->hold = 1; | ||
555 | return -EBUSY; | ||
556 | } | ||
557 | |||
558 | lp->non_tagged_cmd = ent; | ||
559 | return 0; | ||
560 | } else { | ||
561 | /* Tagged command, see if blocked by a | ||
562 | * non-tagged one. | ||
563 | */ | ||
564 | if (lp->non_tagged_cmd || lp->hold) | ||
565 | return -EBUSY; | ||
566 | } | ||
567 | |||
568 | BUG_ON(lp->tagged_cmds[ent->tag[1]]); | ||
569 | |||
570 | lp->tagged_cmds[ent->tag[1]] = ent; | ||
571 | lp->num_tagged++; | ||
572 | |||
573 | return 0; | ||
574 | } | ||
575 | |||
576 | static void esp_free_lun_tag(struct esp_cmd_entry *ent, | ||
577 | struct esp_lun_data *lp) | ||
578 | { | ||
579 | if (ent->tag[0]) { | ||
580 | BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent); | ||
581 | lp->tagged_cmds[ent->tag[1]] = NULL; | ||
582 | lp->num_tagged--; | ||
583 | } else { | ||
584 | BUG_ON(lp->non_tagged_cmd != ent); | ||
585 | lp->non_tagged_cmd = NULL; | ||
586 | } | ||
587 | } | ||
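
The allocator above is a small state machine that lets tagged and untagged commands share a LUN safely: an untagged command needs the LUN to itself, so lp->hold "plugs" the queue until the in-flight tagged commands drain, after which the untagged command runs alone. A condensed model of the decision (illustrative only; it folds the unplug case into the num_tagged test):

    /* 1 if the command may issue now, 0 if it must wait. */
    static int may_issue(int tagged, int num_tagged, int *hold,
                         int untagged_active)
    {
            if (!tagged) {
                    if (untagged_active)
                            return 0;       /* untagged slot occupied */
                    if (num_tagged) {
                            *hold = 1;      /* plug until tags drain */
                            return 0;
                    }
                    *hold = 0;              /* drained: unplug and run */
                    return 1;
            }
            /* Tagged: blocked while an untagged command owns the LUN. */
            return !(untagged_active || *hold);
    }
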
588 | |||
589 | /* When a contingent allegiance condition is created, we force-feed a | ||
590 | * REQUEST_SENSE command to the device to fetch the sense data. I | ||
591 | * tried many other schemes, relying on the scsi error handling layer | ||
592 | * to send out the REQUEST_SENSE automatically, but this was difficult | ||
593 | * to get right especially in the presence of applications like smartd | ||
594 | * which use SG_IO to send out their own REQUEST_SENSE commands. | ||
595 | */ | ||
596 | static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent) | ||
597 | { | ||
598 | struct scsi_cmnd *cmd = ent->cmd; | ||
599 | struct scsi_device *dev = cmd->device; | ||
600 | int tgt, lun; | ||
601 | u8 *p, val; | ||
602 | |||
603 | tgt = dev->id; | ||
604 | lun = dev->lun; | ||
605 | |||
606 | |||
607 | if (!ent->sense_ptr) { | ||
608 | esp_log_autosense("esp%d: Doing auto-sense for " | ||
609 | "tgt[%d] lun[%d]\n", | ||
610 | esp->host->unique_id, tgt, lun); | ||
611 | |||
612 | ent->sense_ptr = cmd->sense_buffer; | ||
613 | ent->sense_dma = esp->ops->map_single(esp, | ||
614 | ent->sense_ptr, | ||
615 | SCSI_SENSE_BUFFERSIZE, | ||
616 | DMA_FROM_DEVICE); | ||
617 | } | ||
618 | ent->saved_sense_ptr = ent->sense_ptr; | ||
619 | |||
620 | esp->active_cmd = ent; | ||
621 | |||
622 | p = esp->command_block; | ||
623 | esp->msg_out_len = 0; | ||
624 | |||
625 | *p++ = IDENTIFY(0, lun); | ||
626 | *p++ = REQUEST_SENSE; | ||
627 | *p++ = ((dev->scsi_level <= SCSI_2) ? | ||
628 | (lun << 5) : 0); | ||
629 | *p++ = 0; | ||
630 | *p++ = 0; | ||
631 | *p++ = SCSI_SENSE_BUFFERSIZE; | ||
632 | *p++ = 0; | ||
633 | |||
634 | esp->select_state = ESP_SELECT_BASIC; | ||
635 | |||
636 | val = tgt; | ||
637 | if (esp->rev == FASHME) | ||
638 | val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; | ||
639 | esp_write8(val, ESP_BUSID); | ||
640 | |||
641 | esp_write_tgt_sync(esp, tgt); | ||
642 | esp_write_tgt_config3(esp, tgt); | ||
643 | |||
644 | val = (p - esp->command_block); | ||
645 | |||
646 | if (esp->rev == FASHME) | ||
647 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
648 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
649 | val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA); | ||
650 | } | ||
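
The command block assembled above is an IDENTIFY byte followed by a plain 6-byte REQUEST_SENSE CDB; byte 1 carries the LUN in bits 5-7 only for pre-SCSI-3 targets, and byte 4 is the allocation length. Written out for a SCSI-2 target at lun 2 (a sketch; SCSI_SENSE_BUFFERSIZE is 96 in kernels of this era):

    static const unsigned char request_sense_cdb[6] = {
            0x03,           /* REQUEST_SENSE opcode */
            2 << 5,         /* LUN in bits 5-7 (scsi_level <= SCSI_2) */
            0x00,           /* reserved */
            0x00,           /* reserved */
            96,             /* allocation length: SCSI_SENSE_BUFFERSIZE */
            0x00,           /* control */
    };
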
651 | |||
652 | static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) | ||
653 | { | ||
654 | struct esp_cmd_entry *ent; | ||
655 | |||
656 | list_for_each_entry(ent, &esp->queued_cmds, list) { | ||
657 | struct scsi_cmnd *cmd = ent->cmd; | ||
658 | struct scsi_device *dev = cmd->device; | ||
659 | struct esp_lun_data *lp = dev->hostdata; | ||
660 | |||
661 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
662 | ent->tag[0] = 0; | ||
663 | ent->tag[1] = 0; | ||
664 | return ent; | ||
665 | } | ||
666 | |||
667 | if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) { | ||
668 | ent->tag[0] = 0; | ||
669 | ent->tag[1] = 0; | ||
670 | } | ||
671 | |||
672 | if (esp_alloc_lun_tag(ent, lp) < 0) | ||
673 | continue; | ||
674 | |||
675 | return ent; | ||
676 | } | ||
677 | |||
678 | return NULL; | ||
679 | } | ||
680 | |||
681 | static void esp_maybe_execute_command(struct esp *esp) | ||
682 | { | ||
683 | struct esp_target_data *tp; | ||
684 | struct esp_lun_data *lp; | ||
685 | struct scsi_device *dev; | ||
686 | struct scsi_cmnd *cmd; | ||
687 | struct esp_cmd_entry *ent; | ||
688 | int tgt, lun, i; | ||
689 | u32 val, start_cmd; | ||
690 | u8 *p; | ||
691 | |||
692 | if (esp->active_cmd || | ||
693 | (esp->flags & ESP_FLAG_RESETTING)) | ||
694 | return; | ||
695 | |||
696 | ent = find_and_prep_issuable_command(esp); | ||
697 | if (!ent) | ||
698 | return; | ||
699 | |||
700 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
701 | esp_autosense(esp, ent); | ||
702 | return; | ||
703 | } | ||
704 | |||
705 | cmd = ent->cmd; | ||
706 | dev = cmd->device; | ||
707 | tgt = dev->id; | ||
708 | lun = dev->lun; | ||
709 | tp = &esp->target[tgt]; | ||
710 | lp = dev->hostdata; | ||
711 | |||
712 | list_del(&ent->list); | ||
713 | list_add(&ent->list, &esp->active_cmds); | ||
714 | |||
715 | esp->active_cmd = ent; | ||
716 | |||
717 | esp_map_dma(esp, cmd); | ||
718 | esp_save_pointers(esp, ent); | ||
719 | |||
720 | esp_check_command_len(esp, cmd); | ||
721 | |||
722 | p = esp->command_block; | ||
723 | |||
724 | esp->msg_out_len = 0; | ||
725 | if (tp->flags & ESP_TGT_CHECK_NEGO) { | ||
726 | /* Need to negotiate. If the target is broken | ||
727 | * go for asynchronous transfers and non-wide. | ||
728 | */ | ||
729 | if (tp->flags & ESP_TGT_BROKEN) { | ||
730 | tp->flags &= ~ESP_TGT_DISCONNECT; | ||
731 | tp->nego_goal_period = 0; | ||
732 | tp->nego_goal_offset = 0; | ||
733 | tp->nego_goal_width = 0; | ||
734 | tp->nego_goal_tags = 0; | ||
735 | } | ||
736 | |||
737 | /* If the settings are not changing, skip this. */ | ||
738 | if (spi_width(tp->starget) == tp->nego_goal_width && | ||
739 | spi_period(tp->starget) == tp->nego_goal_period && | ||
740 | spi_offset(tp->starget) == tp->nego_goal_offset) { | ||
741 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
742 | goto build_identify; | ||
743 | } | ||
744 | |||
745 | if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) { | ||
746 | esp->msg_out_len = | ||
747 | spi_populate_width_msg(&esp->msg_out[0], | ||
748 | (tp->nego_goal_width ? | ||
749 | 1 : 0)); | ||
750 | tp->flags |= ESP_TGT_NEGO_WIDE; | ||
751 | } else if (esp_need_to_nego_sync(tp)) { | ||
752 | esp->msg_out_len = | ||
753 | spi_populate_sync_msg(&esp->msg_out[0], | ||
754 | tp->nego_goal_period, | ||
755 | tp->nego_goal_offset); | ||
756 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
757 | } else { | ||
758 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
759 | } | ||
760 | |||
761 | /* Process it like a slow command. */ | ||
762 | if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC)) | ||
763 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
764 | } | ||
765 | |||
766 | build_identify: | ||
767 | /* If we don't have a lun-data struct yet, we're probing | ||
768 | * so do not disconnect. Also, do not disconnect unless | ||
769 | * we have a tag on this command. | ||
770 | */ | ||
771 | if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0]) | ||
772 | *p++ = IDENTIFY(1, lun); | ||
773 | else | ||
774 | *p++ = IDENTIFY(0, lun); | ||
775 | |||
776 | if (ent->tag[0] && esp->rev == ESP100) { | ||
777 | /* ESP100 lacks select w/atn3 command, use select | ||
778 | * and stop instead. | ||
779 | */ | ||
780 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
781 | } | ||
782 | |||
783 | if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) { | ||
784 | start_cmd = ESP_CMD_DMA | ESP_CMD_SELA; | ||
785 | if (ent->tag[0]) { | ||
786 | *p++ = ent->tag[0]; | ||
787 | *p++ = ent->tag[1]; | ||
788 | |||
789 | start_cmd = ESP_CMD_DMA | ESP_CMD_SA3; | ||
790 | } | ||
791 | |||
792 | for (i = 0; i < cmd->cmd_len; i++) | ||
793 | *p++ = cmd->cmnd[i]; | ||
794 | |||
795 | esp->select_state = ESP_SELECT_BASIC; | ||
796 | } else { | ||
797 | esp->cmd_bytes_left = cmd->cmd_len; | ||
798 | esp->cmd_bytes_ptr = &cmd->cmnd[0]; | ||
799 | |||
800 | if (ent->tag[0]) { | ||
801 | for (i = esp->msg_out_len - 1; | ||
802 | i >= 0; i--) | ||
803 | esp->msg_out[i + 2] = esp->msg_out[i]; | ||
804 | esp->msg_out[0] = ent->tag[0]; | ||
805 | esp->msg_out[1] = ent->tag[1]; | ||
806 | esp->msg_out_len += 2; | ||
807 | } | ||
808 | |||
809 | start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS; | ||
810 | esp->select_state = ESP_SELECT_MSGOUT; | ||
811 | } | ||
812 | val = tgt; | ||
813 | if (esp->rev == FASHME) | ||
814 | val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; | ||
815 | esp_write8(val, ESP_BUSID); | ||
816 | |||
817 | esp_write_tgt_sync(esp, tgt); | ||
818 | esp_write_tgt_config3(esp, tgt); | ||
819 | |||
820 | val = (p - esp->command_block); | ||
821 | |||
822 | if (esp_debug & ESP_DEBUG_SCSICMD) { | ||
823 | printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun); | ||
824 | for (i = 0; i < cmd->cmd_len; i++) | ||
825 | printk("%02x ", cmd->cmnd[i]); | ||
826 | printk("]\n"); | ||
827 | } | ||
828 | |||
829 | if (esp->rev == FASHME) | ||
830 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
831 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
832 | val, 16, 0, start_cmd); | ||
833 | } | ||
834 | |||
835 | static struct esp_cmd_entry *esp_get_ent(struct esp *esp) | ||
836 | { | ||
837 | struct list_head *head = &esp->esp_cmd_pool; | ||
838 | struct esp_cmd_entry *ret; | ||
839 | |||
840 | if (list_empty(head)) { | ||
841 | ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC); | ||
842 | } else { | ||
843 | ret = list_entry(head->next, struct esp_cmd_entry, list); | ||
844 | list_del(&ret->list); | ||
845 | memset(ret, 0, sizeof(*ret)); | ||
846 | } | ||
847 | return ret; | ||
848 | } | ||
849 | |||
850 | static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent) | ||
851 | { | ||
852 | list_add(&ent->list, &esp->esp_cmd_pool); | ||
853 | } | ||
854 | |||
855 | static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent, | ||
856 | struct scsi_cmnd *cmd, unsigned int result) | ||
857 | { | ||
858 | struct scsi_device *dev = cmd->device; | ||
859 | int tgt = dev->id; | ||
860 | int lun = dev->lun; | ||
861 | |||
862 | esp->active_cmd = NULL; | ||
863 | esp_unmap_dma(esp, cmd); | ||
864 | esp_free_lun_tag(ent, dev->hostdata); | ||
865 | cmd->result = result; | ||
866 | |||
867 | if (ent->eh_done) { | ||
868 | complete(ent->eh_done); | ||
869 | ent->eh_done = NULL; | ||
870 | } | ||
871 | |||
872 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
873 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
874 | SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); | ||
875 | ent->sense_ptr = NULL; | ||
876 | |||
877 | /* Restore the message/status bytes to what we actually | ||
878 | * saw originally. Also, report that we are providing | ||
879 | * the sense data. | ||
880 | */ | ||
881 | cmd->result = ((DRIVER_SENSE << 24) | | ||
882 | (DID_OK << 16) | | ||
883 | (COMMAND_COMPLETE << 8) | | ||
884 | (SAM_STAT_CHECK_CONDITION << 0)); | ||
885 | |||
886 | ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE; | ||
887 | if (esp_debug & ESP_DEBUG_AUTOSENSE) { | ||
888 | int i; | ||
889 | |||
890 | printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ", | ||
891 | esp->host->unique_id, tgt, lun); | ||
892 | for (i = 0; i < 18; i++) | ||
893 | printk("%02x ", cmd->sense_buffer[i]); | ||
894 | printk("]\n"); | ||
895 | } | ||
896 | } | ||
897 | |||
898 | cmd->scsi_done(cmd); | ||
899 | |||
900 | list_del(&ent->list); | ||
901 | esp_put_ent(esp, ent); | ||
902 | |||
903 | esp_maybe_execute_command(esp); | ||
904 | } | ||
905 | |||
906 | static unsigned int compose_result(unsigned int status, unsigned int message, | ||
907 | unsigned int driver_code) | ||
908 | { | ||
909 | return (status | (message << 8) | (driver_code << 16)); | ||
910 | } | ||
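
compose_result() packs the midlayer's result word: SCSI status in byte 0, message byte in byte 1, host byte in byte 2, matching the (DID_OK << 16) style used elsewhere in this file. For instance:

    /* GOOD status, COMMAND_COMPLETE, DID_OK -> 0x000000 */
    unsigned int ok  = compose_result(0x00, 0x00, 0x00);
    /* CHECK_CONDITION (0x02), COMMAND_COMPLETE, DID_OK -> 0x000002 */
    unsigned int chk = compose_result(0x02, 0x00, 0x00);
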
911 | |||
912 | static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) | ||
913 | { | ||
914 | struct scsi_device *dev = ent->cmd->device; | ||
915 | struct esp_lun_data *lp = dev->hostdata; | ||
916 | |||
917 | scsi_track_queue_full(dev, lp->num_tagged - 1); | ||
918 | } | ||
919 | |||
920 | static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | ||
921 | { | ||
922 | struct scsi_device *dev = cmd->device; | ||
923 | struct esp *esp = host_to_esp(dev->host); | ||
924 | struct esp_cmd_priv *spriv; | ||
925 | struct esp_cmd_entry *ent; | ||
926 | |||
927 | ent = esp_get_ent(esp); | ||
928 | if (!ent) | ||
929 | return SCSI_MLQUEUE_HOST_BUSY; | ||
930 | |||
931 | ent->cmd = cmd; | ||
932 | |||
933 | cmd->scsi_done = done; | ||
934 | |||
935 | spriv = ESP_CMD_PRIV(cmd); | ||
936 | spriv->u.dma_addr = ~(dma_addr_t)0x0; | ||
937 | |||
938 | list_add_tail(&ent->list, &esp->queued_cmds); | ||
939 | |||
940 | esp_maybe_execute_command(esp); | ||
941 | |||
942 | return 0; | ||
943 | } | ||
944 | |||
945 | static int esp_check_gross_error(struct esp *esp) | ||
946 | { | ||
947 | if (esp->sreg & ESP_STAT_SPAM) { | ||
948 | /* Gross Error, could be one of: | ||
949 | * - top of fifo overwritten | ||
950 | * - top of command register overwritten | ||
951 | * - DMA programmed with wrong direction | ||
952 | * - improper phase change | ||
953 | */ | ||
954 | printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n", | ||
955 | esp->host->unique_id, esp->sreg); | ||
956 | /* XXX Reset the chip. XXX */ | ||
957 | return 1; | ||
958 | } | ||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | static int esp_check_spur_intr(struct esp *esp) | ||
963 | { | ||
964 | switch (esp->rev) { | ||
965 | case ESP100: | ||
966 | case ESP100A: | ||
967 | /* The interrupt pending bit of the status register cannot | ||
968 | * be trusted on these revisions. | ||
969 | */ | ||
970 | esp->sreg &= ~ESP_STAT_INTR; | ||
971 | break; | ||
972 | |||
973 | default: | ||
974 | if (!(esp->sreg & ESP_STAT_INTR)) { | ||
975 | esp->ireg = esp_read8(ESP_INTRPT); | ||
976 | if (esp->ireg & ESP_INTR_SR) | ||
977 | return 1; | ||
978 | |||
979 | /* If the DMA is indicating interrupt pending and the | ||
980 | * ESP is not, the only possibility is a DMA error. | ||
981 | */ | ||
982 | if (!esp->ops->dma_error(esp)) { | ||
983 | printk(KERN_ERR PFX "esp%d: Spurious irq, " | ||
984 | "sreg=%x.\n", | ||
985 | esp->host->unique_id, esp->sreg); | ||
986 | return -1; | ||
987 | } | ||
988 | |||
989 | printk(KERN_ERR PFX "esp%d: DMA error\n", | ||
990 | esp->host->unique_id); | ||
991 | |||
992 | /* XXX Reset the chip. XXX */ | ||
993 | return -1; | ||
994 | } | ||
995 | break; | ||
996 | } | ||
997 | |||
998 | return 0; | ||
999 | } | ||
1000 | |||
1001 | static void esp_schedule_reset(struct esp *esp) | ||
1002 | { | ||
1003 | esp_log_reset("ESP: esp_schedule_reset() from %p\n", | ||
1004 | __builtin_return_address(0)); | ||
1005 | esp->flags |= ESP_FLAG_RESETTING; | ||
1006 | esp_event(esp, ESP_EVENT_RESET); | ||
1007 | } | ||
1008 | |||
1009 | /* In order to avoid having to add a special half-reconnected state | ||
1010 | * into the driver we just sit here and poll through the rest of | ||
1011 | * the reselection process to get the tag message bytes. | ||
1012 | */ | ||
1013 | static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, | ||
1014 | struct esp_lun_data *lp) | ||
1015 | { | ||
1016 | struct esp_cmd_entry *ent; | ||
1017 | int i; | ||
1018 | |||
1019 | if (!lp->num_tagged) { | ||
1020 | printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n", | ||
1021 | esp->host->unique_id); | ||
1022 | return NULL; | ||
1023 | } | ||
1024 | |||
1025 | esp_log_reconnect("ESP: reconnect tag, "); | ||
1026 | |||
1027 | for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { | ||
1028 | if (esp->ops->irq_pending(esp)) | ||
1029 | break; | ||
1030 | } | ||
1031 | if (i == ESP_QUICKIRQ_LIMIT) { | ||
1032 | printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n", | ||
1033 | esp->host->unique_id); | ||
1034 | return NULL; | ||
1035 | } | ||
1036 | |||
1037 | esp->sreg = esp_read8(ESP_STATUS); | ||
1038 | esp->ireg = esp_read8(ESP_INTRPT); | ||
1039 | |||
1040 | esp_log_reconnect("IRQ(%d:%x:%x), ", | ||
1041 | i, esp->ireg, esp->sreg); | ||
1042 | |||
1043 | if (esp->ireg & ESP_INTR_DC) { | ||
1044 | printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n", | ||
1045 | esp->host->unique_id); | ||
1046 | return NULL; | ||
1047 | } | ||
1048 | |||
1049 | if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) { | ||
1050 | printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n", | ||
1051 | esp->host->unique_id, esp->sreg); | ||
1052 | return NULL; | ||
1053 | } | ||
1054 | |||
1055 | /* DMA in the tag bytes... */ | ||
1056 | esp->command_block[0] = 0xff; | ||
1057 | esp->command_block[1] = 0xff; | ||
1058 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
1059 | 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI); | ||
1060 | |||
1061 | /* ACK the message. */ | ||
1062 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1063 | |||
1064 | for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) { | ||
1065 | if (esp->ops->irq_pending(esp)) { | ||
1066 | esp->sreg = esp_read8(ESP_STATUS); | ||
1067 | esp->ireg = esp_read8(ESP_INTRPT); | ||
1068 | if (esp->ireg & ESP_INTR_FDONE) | ||
1069 | break; | ||
1070 | } | ||
1071 | udelay(1); | ||
1072 | } | ||
1073 | if (i == ESP_RESELECT_TAG_LIMIT) { | ||
1074 | printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n", | ||
1075 | esp->host->unique_id); | ||
1076 | return NULL; | ||
1077 | } | ||
1078 | esp->ops->dma_drain(esp); | ||
1079 | esp->ops->dma_invalidate(esp); | ||
1080 | |||
1081 | esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n", | ||
1082 | i, esp->ireg, esp->sreg, | ||
1083 | esp->command_block[0], | ||
1084 | esp->command_block[1]); | ||
1085 | |||
1086 | if (esp->command_block[0] < SIMPLE_QUEUE_TAG || | ||
1087 | esp->command_block[0] > ORDERED_QUEUE_TAG) { | ||
1088 | printk(KERN_ERR PFX "esp%d: Reconnect, bad tag " | ||
1089 | "type %02x.\n", | ||
1090 | esp->host->unique_id, esp->command_block[0]); | ||
1091 | return NULL; | ||
1092 | } | ||
1093 | |||
1094 | ent = lp->tagged_cmds[esp->command_block[1]]; | ||
1095 | if (!ent) { | ||
1096 | printk(KERN_ERR PFX "esp%d: Reconnect, no entry for " | ||
1097 | "tag %02x.\n", | ||
1098 | esp->host->unique_id, esp->command_block[1]); | ||
1099 | return NULL; | ||
1100 | } | ||
1101 | |||
1102 | return ent; | ||
1103 | } | ||
1104 | |||
1105 | static int esp_reconnect(struct esp *esp) | ||
1106 | { | ||
1107 | struct esp_cmd_entry *ent; | ||
1108 | struct esp_target_data *tp; | ||
1109 | struct esp_lun_data *lp; | ||
1110 | struct scsi_device *dev; | ||
1111 | int target, lun; | ||
1112 | |||
1113 | BUG_ON(esp->active_cmd); | ||
1114 | if (esp->rev == FASHME) { | ||
1115 | /* FASHME puts the target and lun numbers directly | ||
1116 | * into the fifo. | ||
1117 | */ | ||
1118 | target = esp->fifo[0]; | ||
1119 | lun = esp->fifo[1] & 0x7; | ||
1120 | } else { | ||
1121 | u8 bits = esp_read8(ESP_FDATA); | ||
1122 | |||
1123 | /* Older chips put the lun directly into the fifo, but | ||
1124 | * the target is given as a sample of the arbitration | ||
1125 | * lines on the bus at reselection time. So we should | ||
1126 | * see the ID of the ESP and the one reconnecting target | ||
1127 | * set in the bitmap. | ||
1128 | */ | ||
1129 | if (!(bits & esp->scsi_id_mask)) | ||
1130 | goto do_reset; | ||
1131 | bits &= ~esp->scsi_id_mask; | ||
1132 | if (!bits || (bits & (bits - 1))) | ||
1133 | goto do_reset; | ||
1134 | |||
1135 | target = ffs(bits) - 1; | ||
1136 | lun = (esp_read8(ESP_FDATA) & 0x7); | ||
1137 | |||
1138 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1139 | if (esp->rev == ESP100) { | ||
1140 | u8 ireg = esp_read8(ESP_INTRPT); | ||
1141 | /* This chip has a bug during reselection that can | ||
1142 | * cause a spurious illegal-command interrupt, which | ||
1143 | * we simply ACK here. Another possibility is a bus | ||
1144 | * reset, so we must check for that. | ||
1145 | */ | ||
1146 | if (ireg & ESP_INTR_SR) | ||
1147 | goto do_reset; | ||
1148 | } | ||
1149 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1150 | } | ||
1151 | |||
1152 | esp_write_tgt_sync(esp, target); | ||
1153 | esp_write_tgt_config3(esp, target); | ||
1154 | |||
1155 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1156 | |||
1157 | if (esp->rev == FASHME) | ||
1158 | esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT, | ||
1159 | ESP_BUSID); | ||
1160 | |||
1161 | tp = &esp->target[target]; | ||
1162 | dev = __scsi_device_lookup_by_target(tp->starget, lun); | ||
1163 | if (!dev) { | ||
1164 | printk(KERN_ERR PFX "esp%d: Reconnect, no lp " | ||
1165 | "tgt[%u] lun[%u]\n", | ||
1166 | esp->host->unique_id, target, lun); | ||
1167 | goto do_reset; | ||
1168 | } | ||
1169 | lp = dev->hostdata; | ||
1170 | |||
1171 | ent = lp->non_tagged_cmd; | ||
1172 | if (!ent) { | ||
1173 | ent = esp_reconnect_with_tag(esp, lp); | ||
1174 | if (!ent) | ||
1175 | goto do_reset; | ||
1176 | } | ||
1177 | |||
1178 | esp->active_cmd = ent; | ||
1179 | |||
1180 | if (ent->flags & ESP_CMD_FLAG_ABORT) { | ||
1181 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1182 | esp->msg_out_len = 1; | ||
1183 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1184 | } | ||
1185 | |||
1186 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1187 | esp_restore_pointers(esp, ent); | ||
1188 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1189 | return 1; | ||
1190 | |||
1191 | do_reset: | ||
1192 | esp_schedule_reset(esp); | ||
1193 | return 0; | ||
1194 | } | ||
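
The reselection ID check in esp_reconnect() leans on two classic bit tricks: bits & (bits - 1) is nonzero exactly when more than one bit is set, so "!bits || (bits & (bits - 1))" rejects anything but a single surviving ID, and ffs(bits) - 1 turns that bit back into a target number. Standalone:

    #include <stdio.h>
    #include <strings.h>            /* ffs() */

    int main(void)
    {
            unsigned int sample = 0x88;     /* our ID (bit 7) + target 3 */
            unsigned int self   = 0x80;     /* scsi_id_mask equivalent */
            unsigned int bits   = sample & ~self;

            if (!bits || (bits & (bits - 1)))
                    return 1;               /* zero or multiple IDs: bogus */

            printf("reconnecting target %d\n", ffs(bits) - 1);  /* 3 */
            return 0;
    }
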
1195 | |||
1196 | static int esp_finish_select(struct esp *esp) | ||
1197 | { | ||
1198 | struct esp_cmd_entry *ent; | ||
1199 | struct scsi_cmnd *cmd; | ||
1200 | u8 orig_select_state; | ||
1201 | |||
1202 | orig_select_state = esp->select_state; | ||
1203 | |||
1204 | /* No longer selecting. */ | ||
1205 | esp->select_state = ESP_SELECT_NONE; | ||
1206 | |||
1207 | esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS; | ||
1208 | ent = esp->active_cmd; | ||
1209 | cmd = ent->cmd; | ||
1210 | |||
1211 | if (esp->ops->dma_error(esp)) { | ||
1212 | /* If we see a DMA error during or as a result of selection, | ||
1213 | * all bets are off. | ||
1214 | */ | ||
1215 | esp_schedule_reset(esp); | ||
1216 | esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16)); | ||
1217 | return 0; | ||
1218 | } | ||
1219 | |||
1220 | esp->ops->dma_invalidate(esp); | ||
1221 | |||
1222 | if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { | ||
1223 | struct esp_target_data *tp = &esp->target[cmd->device->id]; | ||
1224 | |||
1225 | /* Carefully back out of the selection attempt. Release | ||
1226 | * resources (such as DMA mapping & TAG) and reset state (such | ||
1227 | * as message out and command delivery variables). | ||
1228 | */ | ||
1229 | if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { | ||
1230 | esp_unmap_dma(esp, cmd); | ||
1231 | esp_free_lun_tag(ent, cmd->device->hostdata); | ||
1232 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE); | ||
1233 | esp->flags &= ~ESP_FLAG_DOING_SLOWCMD; | ||
1234 | esp->cmd_bytes_ptr = NULL; | ||
1235 | esp->cmd_bytes_left = 0; | ||
1236 | } else { | ||
1237 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
1238 | SCSI_SENSE_BUFFERSIZE, | ||
1239 | DMA_FROM_DEVICE); | ||
1240 | ent->sense_ptr = NULL; | ||
1241 | } | ||
1242 | |||
1243 | /* Now that the state is unwound properly, put back onto | ||
1244 | * the issue queue. This command is no longer active. | ||
1245 | */ | ||
1246 | list_del(&ent->list); | ||
1247 | list_add(&ent->list, &esp->queued_cmds); | ||
1248 | esp->active_cmd = NULL; | ||
1249 | |||
1250 | /* Return value ignored by caller, it directly invokes | ||
1251 | * esp_reconnect(). | ||
1252 | */ | ||
1253 | return 0; | ||
1254 | } | ||
1255 | |||
1256 | if (esp->ireg == ESP_INTR_DC) { | ||
1257 | struct scsi_device *dev = cmd->device; | ||
1258 | |||
1259 | /* Disconnect. Make sure we re-negotiate sync and | ||
1260 | * wide parameters if this target starts responding | ||
1261 | * again in the future. | ||
1262 | */ | ||
1263 | esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO; | ||
1264 | |||
1265 | scsi_esp_cmd(esp, ESP_CMD_ESEL); | ||
1266 | esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16)); | ||
1267 | return 1; | ||
1268 | } | ||
1269 | |||
1270 | if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { | ||
1271 | /* Selection successful. On pre-FAST chips we have | ||
1272 | * to do a NOP and possibly clean out the FIFO. | ||
1273 | */ | ||
1274 | if (esp->rev <= ESP236) { | ||
1275 | int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
1276 | |||
1277 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1278 | |||
1279 | if (!fcnt && | ||
1280 | (!esp->prev_soff || | ||
1281 | ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) | ||
1282 | esp_flush_fifo(esp); | ||
1283 | } | ||
1284 | |||
1285 | /* If we are doing a slow command, negotiation, etc., | ||
1286 | * we'll do the right thing as we transition to the | ||
1287 | * next phase. | ||
1288 | */ | ||
1289 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1290 | return 0; | ||
1291 | } | ||
1292 | |||
1293 | printk("ESP: Unexpected selection completion ireg[%x].\n", | ||
1294 | esp->ireg); | ||
1295 | esp_schedule_reset(esp); | ||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
1299 | static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, | ||
1300 | struct scsi_cmnd *cmd) | ||
1301 | { | ||
1302 | int fifo_cnt, ecount, bytes_sent, flush_fifo; | ||
1303 | |||
1304 | fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
1305 | if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) | ||
1306 | fifo_cnt <<= 1; | ||
1307 | |||
1308 | ecount = 0; | ||
1309 | if (!(esp->sreg & ESP_STAT_TCNT)) { | ||
1310 | ecount = ((unsigned int)esp_read8(ESP_TCLOW) | | ||
1311 | (((unsigned int)esp_read8(ESP_TCMED)) << 8)); | ||
1312 | if (esp->rev == FASHME) | ||
1313 | ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16; | ||
1314 | } | ||
1315 | |||
1316 | bytes_sent = esp->data_dma_len; | ||
1317 | bytes_sent -= ecount; | ||
1318 | |||
1319 | if (!(ent->flags & ESP_CMD_FLAG_WRITE)) | ||
1320 | bytes_sent -= fifo_cnt; | ||
1321 | |||
1322 | flush_fifo = 0; | ||
1323 | if (!esp->prev_soff) { | ||
1324 | /* Synchronous data transfer, always flush fifo. */ | ||
1325 | flush_fifo = 1; | ||
1326 | } else { | ||
1327 | if (esp->rev == ESP100) { | ||
1328 | u32 fflags, phase; | ||
1329 | |||
1330 | /* ESP100 has a chip bug where in the synchronous data | ||
1331 | * phase it can mistake a final long REQ pulse from the | ||
1332 | * target as an extra data byte. Fun. | ||
1333 | * | ||
1334 | * To detect this case we resample the status register | ||
1335 | * and fifo flags. If we're still in a data phase and | ||
1336 | * we see spurious chunks in the fifo, we return error | ||
1337 | * to the caller which should reset and set things up | ||
1338 | * such that we only try future transfers to this | ||
1339 | * target in synchronous mode. | ||
1340 | */ | ||
1341 | esp->sreg = esp_read8(ESP_STATUS); | ||
1342 | phase = esp->sreg & ESP_STAT_PMASK; | ||
1343 | fflags = esp_read8(ESP_FFLAGS); | ||
1344 | |||
1345 | if ((phase == ESP_DOP && | ||
1346 | (fflags & ESP_FF_ONOTZERO)) || | ||
1347 | (phase == ESP_DIP && | ||
1348 | (fflags & ESP_FF_FBYTES))) | ||
1349 | return -1; | ||
1350 | } | ||
1351 | if (!(ent->flags & ESP_CMD_FLAG_WRITE)) | ||
1352 | flush_fifo = 1; | ||
1353 | } | ||
1354 | |||
1355 | if (flush_fifo) | ||
1356 | esp_flush_fifo(esp); | ||
1357 | |||
1358 | return bytes_sent; | ||
1359 | } | ||
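
The accounting above reduces to: bytes that actually reached the wire = programmed DMA length, minus the residue left in the chip's transfer counter, minus (for data-out) whatever is still parked in the FIFO. With made-up numbers:

    unsigned int dma_len  = 4096;   /* esp->data_dma_len */
    unsigned int ecount   = 512;    /* ESP_TCLOW/ESP_TCMED residue */
    unsigned int fifo_cnt = 3;      /* ESP_FFLAGS & ESP_FF_FBYTES */

    /* Data-out (ESP_CMD_FLAG_WRITE clear): FIFO bytes never made it. */
    unsigned int sent = dma_len - ecount - fifo_cnt;        /* 3581 */
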
1360 | |||
1361 | static void esp_setsync(struct esp *esp, struct esp_target_data *tp, | ||
1362 | u8 scsi_period, u8 scsi_offset, | ||
1363 | u8 esp_stp, u8 esp_soff) | ||
1364 | { | ||
1365 | spi_period(tp->starget) = scsi_period; | ||
1366 | spi_offset(tp->starget) = scsi_offset; | ||
1367 | spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0; | ||
1368 | |||
1369 | if (esp_soff) { | ||
1370 | esp_stp &= 0x1f; | ||
1371 | esp_soff |= esp->radelay; | ||
1372 | if (esp->rev >= FAS236) { | ||
1373 | u8 bit = ESP_CONFIG3_FSCSI; | ||
1374 | if (esp->rev >= FAS100A) | ||
1375 | bit = ESP_CONFIG3_FAST; | ||
1376 | |||
1377 | if (scsi_period < 50) { | ||
1378 | if (esp->rev == FASHME) | ||
1379 | esp_soff &= ~esp->radelay; | ||
1380 | tp->esp_config3 |= bit; | ||
1381 | } else { | ||
1382 | tp->esp_config3 &= ~bit; | ||
1383 | } | ||
1384 | esp->prev_cfg3 = tp->esp_config3; | ||
1385 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
1386 | } | ||
1387 | } | ||
1388 | |||
1389 | tp->esp_period = esp->prev_stp = esp_stp; | ||
1390 | tp->esp_offset = esp->prev_soff = esp_soff; | ||
1391 | |||
1392 | esp_write8(esp_soff, ESP_SOFF); | ||
1393 | esp_write8(esp_stp, ESP_STP); | ||
1394 | |||
1395 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); | ||
1396 | |||
1397 | spi_display_xfer_agreement(tp->starget); | ||
1398 | } | ||
1399 | |||
1400 | static void esp_msgin_reject(struct esp *esp) | ||
1401 | { | ||
1402 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1403 | struct scsi_cmnd *cmd = ent->cmd; | ||
1404 | struct esp_target_data *tp; | ||
1405 | int tgt; | ||
1406 | |||
1407 | tgt = cmd->device->id; | ||
1408 | tp = &esp->target[tgt]; | ||
1409 | |||
1410 | if (tp->flags & ESP_TGT_NEGO_WIDE) { | ||
1411 | tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE); | ||
1412 | |||
1413 | if (!esp_need_to_nego_sync(tp)) { | ||
1414 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
1415 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1416 | } else { | ||
1417 | esp->msg_out_len = | ||
1418 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1419 | tp->nego_goal_period, | ||
1420 | tp->nego_goal_offset); | ||
1421 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
1422 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1423 | } | ||
1424 | return; | ||
1425 | } | ||
1426 | |||
1427 | if (tp->flags & ESP_TGT_NEGO_SYNC) { | ||
1428 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); | ||
1429 | tp->esp_period = 0; | ||
1430 | tp->esp_offset = 0; | ||
1431 | esp_setsync(esp, tp, 0, 0, 0, 0); | ||
1432 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1433 | return; | ||
1434 | } | ||
1435 | |||
1436 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1437 | esp->msg_out_len = 1; | ||
1438 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1439 | } | ||
1440 | |||
1441 | static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp) | ||
1442 | { | ||
1443 | u8 period = esp->msg_in[3]; | ||
1444 | u8 offset = esp->msg_in[4]; | ||
1445 | u8 stp; | ||
1446 | |||
1447 | if (!(tp->flags & ESP_TGT_NEGO_SYNC)) | ||
1448 | goto do_reject; | ||
1449 | |||
1450 | if (offset > 15) | ||
1451 | goto do_reject; | ||
1452 | |||
1453 | if (offset) { | ||
1454 | int rounded_up, one_clock; | ||
1455 | |||
1456 | if (period > esp->max_period) { | ||
1457 | period = offset = 0; | ||
1458 | goto do_sdtr; | ||
1459 | } | ||
1460 | if (period < esp->min_period) | ||
1461 | goto do_reject; | ||
1462 | |||
1463 | one_clock = esp->ccycle / 1000; | ||
1464 | rounded_up = (period << 2); | ||
1465 | rounded_up = (rounded_up + one_clock - 1) / one_clock; | ||
1466 | stp = rounded_up; | ||
1467 | if (stp && esp->rev >= FAS236) { | ||
1468 | if (stp >= 50) | ||
1469 | stp--; | ||
1470 | } | ||
1471 | } else { | ||
1472 | stp = 0; | ||
1473 | } | ||
1474 | |||
1475 | esp_setsync(esp, tp, period, offset, stp, offset); | ||
1476 | return; | ||
1477 | |||
1478 | do_reject: | ||
1479 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1480 | esp->msg_out_len = 1; | ||
1481 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1482 | return; | ||
1483 | |||
1484 | do_sdtr: | ||
1485 | tp->nego_goal_period = period; | ||
1486 | tp->nego_goal_offset = offset; | ||
1487 | esp->msg_out_len = | ||
1488 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1489 | tp->nego_goal_period, | ||
1490 | tp->nego_goal_offset); | ||
1491 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1492 | } | ||
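
The offset branch above converts the SDTR period into the chip's clocks-per-byte value: the period byte is in 4 ns units, so period << 2 is a time in nanoseconds, and dividing by the clock period (ccycle / 1000, assuming ccycle is in picoseconds) with round-up gives the ESP_STP setting. For a 40 MHz chip offered a 100 ns period:

    unsigned int ccycle = 25000;            /* 25 ns clock, in ps */
    unsigned int period = 25;               /* SDTR factor: 25 * 4 = 100 ns */

    unsigned int one_clock  = ccycle / 1000;                     /* 25 ns */
    unsigned int rounded_up = period << 2;                       /* 100 ns */
    unsigned int stp = (rounded_up + one_clock - 1) / one_clock; /* 4 */
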
1493 | |||
1494 | static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp) | ||
1495 | { | ||
1496 | int size = 8 << esp->msg_in[3]; | ||
1497 | u8 cfg3; | ||
1498 | |||
1499 | if (esp->rev != FASHME) | ||
1500 | goto do_reject; | ||
1501 | |||
1502 | if (size != 8 && size != 16) | ||
1503 | goto do_reject; | ||
1504 | |||
1505 | if (!(tp->flags & ESP_TGT_NEGO_WIDE)) | ||
1506 | goto do_reject; | ||
1507 | |||
1508 | cfg3 = tp->esp_config3; | ||
1509 | if (size == 16) { | ||
1510 | tp->flags |= ESP_TGT_WIDE; | ||
1511 | cfg3 |= ESP_CONFIG3_EWIDE; | ||
1512 | } else { | ||
1513 | tp->flags &= ~ESP_TGT_WIDE; | ||
1514 | cfg3 &= ~ESP_CONFIG3_EWIDE; | ||
1515 | } | ||
1516 | tp->esp_config3 = cfg3; | ||
1517 | esp->prev_cfg3 = cfg3; | ||
1518 | esp_write8(cfg3, ESP_CFG3); | ||
1519 | |||
1520 | tp->flags &= ~ESP_TGT_NEGO_WIDE; | ||
1521 | |||
1522 | spi_period(tp->starget) = 0; | ||
1523 | spi_offset(tp->starget) = 0; | ||
1524 | if (!esp_need_to_nego_sync(tp)) { | ||
1525 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
1526 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1527 | } else { | ||
1528 | esp->msg_out_len = | ||
1529 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1530 | tp->nego_goal_period, | ||
1531 | tp->nego_goal_offset); | ||
1532 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
1533 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1534 | } | ||
1535 | return; | ||
1536 | |||
1537 | do_reject: | ||
1538 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1539 | esp->msg_out_len = 1; | ||
1540 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1541 | } | ||
1542 | |||
1543 | static void esp_msgin_extended(struct esp *esp) | ||
1544 | { | ||
1545 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1546 | struct scsi_cmnd *cmd = ent->cmd; | ||
1547 | struct esp_target_data *tp; | ||
1548 | int tgt = cmd->device->id; | ||
1549 | |||
1550 | tp = &esp->target[tgt]; | ||
1551 | if (esp->msg_in[2] == EXTENDED_SDTR) { | ||
1552 | esp_msgin_sdtr(esp, tp); | ||
1553 | return; | ||
1554 | } | ||
1555 | if (esp->msg_in[2] == EXTENDED_WDTR) { | ||
1556 | esp_msgin_wdtr(esp, tp); | ||
1557 | return; | ||
1558 | } | ||
1559 | |||
1560 | printk("ESP: Unexpected extended msg type %x\n", | ||
1561 | esp->msg_in[2]); | ||
1562 | |||
1563 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1564 | esp->msg_out_len = 1; | ||
1565 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1566 | } | ||
1567 | |||
1568 | /* Analyze msgin bytes received from target so far. Return non-zero | ||
1569 | * if there are more bytes needed to complete the message. | ||
1570 | */ | ||
1571 | static int esp_msgin_process(struct esp *esp) | ||
1572 | { | ||
1573 | u8 msg0 = esp->msg_in[0]; | ||
1574 | int len = esp->msg_in_len; | ||
1575 | |||
1576 | if (msg0 & 0x80) { | ||
1577 | /* Identify */ | ||
1578 | printk("ESP: Unexpected msgin identify\n"); | ||
1579 | return 0; | ||
1580 | } | ||
1581 | |||
1582 | switch (msg0) { | ||
1583 | case EXTENDED_MESSAGE: | ||
1584 | if (len == 1) | ||
1585 | return 1; | ||
1586 | if (len < esp->msg_in[1] + 2) | ||
1587 | return 1; | ||
1588 | esp_msgin_extended(esp); | ||
1589 | return 0; | ||
1590 | |||
1591 | case IGNORE_WIDE_RESIDUE: { | ||
1592 | struct esp_cmd_entry *ent; | ||
1593 | struct esp_cmd_priv *spriv; | ||
1594 | if (len == 1) | ||
1595 | return 1; | ||
1596 | |||
1597 | if (esp->msg_in[1] != 1) | ||
1598 | goto do_reject; | ||
1599 | |||
1600 | ent = esp->active_cmd; | ||
1601 | spriv = ESP_CMD_PRIV(ent->cmd); | ||
1602 | |||
1603 | if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) { | ||
1604 | spriv->cur_sg--; | ||
1605 | spriv->cur_residue = 1; | ||
1606 | } else | ||
1607 | spriv->cur_residue++; | ||
1608 | spriv->tot_residue++; | ||
1609 | return 0; | ||
1610 | } | ||
1611 | case NOP: | ||
1612 | return 0; | ||
1613 | case RESTORE_POINTERS: | ||
1614 | esp_restore_pointers(esp, esp->active_cmd); | ||
1615 | return 0; | ||
1616 | case SAVE_POINTERS: | ||
1617 | esp_save_pointers(esp, esp->active_cmd); | ||
1618 | return 0; | ||
1619 | |||
1620 | case COMMAND_COMPLETE: | ||
1621 | case DISCONNECT: { | ||
1622 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1623 | |||
1624 | ent->message = msg0; | ||
1625 | esp_event(esp, ESP_EVENT_FREE_BUS); | ||
1626 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1627 | return 0; | ||
1628 | } | ||
1629 | case MESSAGE_REJECT: | ||
1630 | esp_msgin_reject(esp); | ||
1631 | return 0; | ||
1632 | |||
1633 | default: | ||
1634 | do_reject: | ||
1635 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1636 | esp->msg_out_len = 1; | ||
1637 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1638 | return 0; | ||
1639 | } | ||
1640 | } | ||
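
The EXTENDED_MESSAGE arm above encodes the SCSI framing rule: byte 0 is 0x01, byte 1 counts the bytes that follow it, so a complete message is msg_in[1] + 2 bytes long and the function keeps returning 1 (more bytes needed) until that many have arrived. An SDTR, for example:

    /* Complete 5-byte SDTR extended message:
     *   [0] 0x01  EXTENDED_MESSAGE
     *   [1] 0x03  three bytes follow this one
     *   [2] 0x01  EXTENDED_SDTR
     *   [3] 25    transfer period factor (100 ns)
     *   [4] 15    REQ/ACK offset
     */
    static const unsigned char sdtr[] = { 0x01, 0x03, 0x01, 25, 15 };
    /* len < sdtr[1] + 2  ==>  still collecting */
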
1641 | |||
1642 | static int esp_process_event(struct esp *esp) | ||
1643 | { | ||
1644 | int write; | ||
1645 | |||
1646 | again: | ||
1647 | write = 0; | ||
1648 | switch (esp->event) { | ||
1649 | case ESP_EVENT_CHECK_PHASE: | ||
1650 | switch (esp->sreg & ESP_STAT_PMASK) { | ||
1651 | case ESP_DOP: | ||
1652 | esp_event(esp, ESP_EVENT_DATA_OUT); | ||
1653 | break; | ||
1654 | case ESP_DIP: | ||
1655 | esp_event(esp, ESP_EVENT_DATA_IN); | ||
1656 | break; | ||
1657 | case ESP_STATP: | ||
1658 | esp_flush_fifo(esp); | ||
1659 | scsi_esp_cmd(esp, ESP_CMD_ICCSEQ); | ||
1660 | esp_event(esp, ESP_EVENT_STATUS); | ||
1661 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1662 | return 1; | ||
1663 | |||
1664 | case ESP_MOP: | ||
1665 | esp_event(esp, ESP_EVENT_MSGOUT); | ||
1666 | break; | ||
1667 | |||
1668 | case ESP_MIP: | ||
1669 | esp_event(esp, ESP_EVENT_MSGIN); | ||
1670 | break; | ||
1671 | |||
1672 | case ESP_CMDP: | ||
1673 | esp_event(esp, ESP_EVENT_CMD_START); | ||
1674 | break; | ||
1675 | |||
1676 | default: | ||
1677 | printk("ESP: Unexpected phase, sreg=%02x\n", | ||
1678 | esp->sreg); | ||
1679 | esp_schedule_reset(esp); | ||
1680 | return 0; | ||
1681 | } | ||
1682 | goto again; | ||
1683 | break; | ||
1684 | |||
1685 | case ESP_EVENT_DATA_IN: | ||
1686 | write = 1; | ||
1687 | /* fallthru */ | ||
1688 | |||
1689 | case ESP_EVENT_DATA_OUT: { | ||
1690 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1691 | struct scsi_cmnd *cmd = ent->cmd; | ||
1692 | dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd); | ||
1693 | unsigned int dma_len = esp_cur_dma_len(ent, cmd); | ||
1694 | |||
1695 | if (esp->rev == ESP100) | ||
1696 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1697 | |||
1698 | if (write) | ||
1699 | ent->flags |= ESP_CMD_FLAG_WRITE; | ||
1700 | else | ||
1701 | ent->flags &= ~ESP_CMD_FLAG_WRITE; | ||
1702 | |||
1703 | dma_len = esp_dma_length_limit(esp, dma_addr, dma_len); | ||
1704 | esp->data_dma_len = dma_len; | ||
1705 | |||
1706 | if (!dma_len) { | ||
1707 | printk(KERN_ERR PFX "esp%d: DMA length is zero!\n", | ||
1708 | esp->host->unique_id); | ||
1709 | printk(KERN_ERR PFX "esp%d: cur adr[%08x] len[%08x]\n", | ||
1710 | esp->host->unique_id, | ||
1711 | esp_cur_dma_addr(ent, cmd), | ||
1712 | esp_cur_dma_len(ent, cmd)); | ||
1713 | esp_schedule_reset(esp); | ||
1714 | return 0; | ||
1715 | } | ||
1716 | |||
1717 | esp_log_datastart("ESP: start data addr[%08x] len[%u] " | ||
1718 | "write(%d)\n", | ||
1719 | dma_addr, dma_len, write); | ||
1720 | |||
1721 | esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len, | ||
1722 | write, ESP_CMD_DMA | ESP_CMD_TI); | ||
1723 | esp_event(esp, ESP_EVENT_DATA_DONE); | ||
1724 | break; | ||
1725 | } | ||
1726 | case ESP_EVENT_DATA_DONE: { | ||
1727 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1728 | struct scsi_cmnd *cmd = ent->cmd; | ||
1729 | int bytes_sent; | ||
1730 | |||
1731 | if (esp->ops->dma_error(esp)) { | ||
1732 | printk("ESP: data done, DMA error, resetting\n"); | ||
1733 | esp_schedule_reset(esp); | ||
1734 | return 0; | ||
1735 | } | ||
1736 | |||
1737 | if (ent->flags & ESP_CMD_FLAG_WRITE) { | ||
1738 | /* XXX parity errors, etc. XXX */ | ||
1739 | |||
1740 | esp->ops->dma_drain(esp); | ||
1741 | } | ||
1742 | esp->ops->dma_invalidate(esp); | ||
1743 | |||
1744 | if (esp->ireg != ESP_INTR_BSERV) { | ||
1745 | /* We should always see exactly a bus-service | ||
1746 | * interrupt at the end of a successful transfer. | ||
1747 | */ | ||
1748 | printk("ESP: data done, not BSERV, resetting\n"); | ||
1749 | esp_schedule_reset(esp); | ||
1750 | return 0; | ||
1751 | } | ||
1752 | |||
1753 | bytes_sent = esp_data_bytes_sent(esp, ent, cmd); | ||
1754 | |||
1755 | esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n", | ||
1756 | ent->flags, bytes_sent); | ||
1757 | |||
1758 | if (bytes_sent < 0) { | ||
1759 | /* XXX force sync mode for this target XXX */ | ||
1760 | esp_schedule_reset(esp); | ||
1761 | return 0; | ||
1762 | } | ||
1763 | |||
1764 | esp_advance_dma(esp, ent, cmd, bytes_sent); | ||
1765 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1766 | goto again; | ||
1767 | break; | ||
1768 | } | ||
1769 | |||
1770 | case ESP_EVENT_STATUS: { | ||
1771 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1772 | |||
1773 | if (esp->ireg & ESP_INTR_FDONE) { | ||
1774 | ent->status = esp_read8(ESP_FDATA); | ||
1775 | ent->message = esp_read8(ESP_FDATA); | ||
1776 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1777 | } else if (esp->ireg == ESP_INTR_BSERV) { | ||
1778 | ent->status = esp_read8(ESP_FDATA); | ||
1779 | ent->message = 0xff; | ||
1780 | esp_event(esp, ESP_EVENT_MSGIN); | ||
1781 | return 0; | ||
1782 | } | ||
1783 | |||
1784 | if (ent->message != COMMAND_COMPLETE) { | ||
1785 | printk("ESP: Unexpected message %x in status\n", | ||
1786 | ent->message); | ||
1787 | esp_schedule_reset(esp); | ||
1788 | return 0; | ||
1789 | } | ||
1790 | |||
1791 | esp_event(esp, ESP_EVENT_FREE_BUS); | ||
1792 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1793 | break; | ||
1794 | } | ||
1795 | case ESP_EVENT_FREE_BUS: { | ||
1796 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1797 | struct scsi_cmnd *cmd = ent->cmd; | ||
1798 | |||
1799 | if (ent->message == COMMAND_COMPLETE || | ||
1800 | ent->message == DISCONNECT) | ||
1801 | scsi_esp_cmd(esp, ESP_CMD_ESEL); | ||
1802 | |||
1803 | if (ent->message == COMMAND_COMPLETE) { | ||
1804 | esp_log_cmddone("ESP: Command done status[%x] " | ||
1805 | "message[%x]\n", | ||
1806 | ent->status, ent->message); | ||
1807 | if (ent->status == SAM_STAT_TASK_SET_FULL) | ||
1808 | esp_event_queue_full(esp, ent); | ||
1809 | |||
1810 | if (ent->status == SAM_STAT_CHECK_CONDITION && | ||
1811 | !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { | ||
1812 | ent->flags |= ESP_CMD_FLAG_AUTOSENSE; | ||
1813 | esp_autosense(esp, ent); | ||
1814 | } else { | ||
1815 | esp_cmd_is_done(esp, ent, cmd, | ||
1816 | compose_result(ent->status, | ||
1817 | ent->message, | ||
1818 | DID_OK)); | ||
1819 | } | ||
1820 | } else if (ent->message == DISCONNECT) { | ||
1821 | esp_log_disconnect("ESP: Disconnecting tgt[%d] " | ||
1822 | "tag[%x:%x]\n", | ||
1823 | cmd->device->id, | ||
1824 | ent->tag[0], ent->tag[1]); | ||
1825 | |||
1826 | esp->active_cmd = NULL; | ||
1827 | esp_maybe_execute_command(esp); | ||
1828 | } else { | ||
1829 | printk("ESP: Unexpected message %x in freebus\n", | ||
1830 | ent->message); | ||
1831 | esp_schedule_reset(esp); | ||
1832 | return 0; | ||
1833 | } | ||
1834 | if (esp->active_cmd) | ||
1835 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1836 | break; | ||
1837 | } | ||
1838 | case ESP_EVENT_MSGOUT: { | ||
1839 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1840 | |||
1841 | if (esp_debug & ESP_DEBUG_MSGOUT) { | ||
1842 | int i; | ||
1843 | printk("ESP: Sending message [ "); | ||
1844 | for (i = 0; i < esp->msg_out_len; i++) | ||
1845 | printk("%02x ", esp->msg_out[i]); | ||
1846 | printk("]\n"); | ||
1847 | } | ||
1848 | |||
1849 | if (esp->rev == FASHME) { | ||
1850 | int i; | ||
1851 | |||
1852 | /* Always use the fifo. */ | ||
1853 | for (i = 0; i < esp->msg_out_len; i++) { | ||
1854 | esp_write8(esp->msg_out[i], ESP_FDATA); | ||
1855 | esp_write8(0, ESP_FDATA); | ||
1856 | } | ||
1857 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1858 | } else { | ||
1859 | if (esp->msg_out_len == 1) { | ||
1860 | esp_write8(esp->msg_out[0], ESP_FDATA); | ||
1861 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1862 | } else { | ||
1863 | /* Use DMA. */ | ||
1864 | memcpy(esp->command_block, | ||
1865 | esp->msg_out, | ||
1866 | esp->msg_out_len); | ||
1867 | |||
1868 | esp->ops->send_dma_cmd(esp, | ||
1869 | esp->command_block_dma, | ||
1870 | esp->msg_out_len, | ||
1871 | esp->msg_out_len, | ||
1872 | 0, | ||
1873 | ESP_CMD_DMA|ESP_CMD_TI); | ||
1874 | } | ||
1875 | } | ||
1876 | esp_event(esp, ESP_EVENT_MSGOUT_DONE); | ||
1877 | break; | ||
1878 | } | ||
1879 | case ESP_EVENT_MSGOUT_DONE: | ||
1880 | if (esp->rev == FASHME) { | ||
1881 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1882 | } else { | ||
1883 | if (esp->msg_out_len > 1) | ||
1884 | esp->ops->dma_invalidate(esp); | ||
1885 | } | ||
1886 | |||
1887 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
1888 | if (esp->rev != FASHME) | ||
1889 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1890 | } | ||
1891 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1892 | goto again; | ||
1893 | case ESP_EVENT_MSGIN: | ||
1894 | if (esp->ireg & ESP_INTR_BSERV) { | ||
1895 | if (esp->rev == FASHME) { | ||
1896 | if (!(esp_read8(ESP_STATUS2) & | ||
1897 | ESP_STAT2_FEMPTY)) | ||
1898 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1899 | } else { | ||
1900 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1901 | if (esp->rev == ESP100) | ||
1902 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1903 | } | ||
1904 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1905 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1906 | return 1; | ||
1907 | } | ||
1908 | if (esp->ireg & ESP_INTR_FDONE) { | ||
1909 | u8 val; | ||
1910 | |||
1911 | if (esp->rev == FASHME) | ||
1912 | val = esp->fifo[0]; | ||
1913 | else | ||
1914 | val = esp_read8(ESP_FDATA); | ||
1915 | esp->msg_in[esp->msg_in_len++] = val; | ||
1916 | |||
1917 | esp_log_msgin("ESP: Got msgin byte %x\n", val); | ||
1918 | |||
1919 | if (!esp_msgin_process(esp)) | ||
1920 | esp->msg_in_len = 0; | ||
1921 | |||
1922 | if (esp->rev == FASHME) | ||
1923 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1924 | |||
1925 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1926 | |||
1927 | if (esp->event != ESP_EVENT_FREE_BUS) | ||
1928 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1929 | } else { | ||
1930 | printk("ESP: MSGIN neither BSERV not FDON, resetting"); | ||
1931 | esp_schedule_reset(esp); | ||
1932 | return 0; | ||
1933 | } | ||
1934 | break; | ||
1935 | case ESP_EVENT_CMD_START: | ||
1936 | memcpy(esp->command_block, esp->cmd_bytes_ptr, | ||
1937 | esp->cmd_bytes_left); | ||
1938 | if (esp->rev == FASHME) | ||
1939 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1940 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
1941 | esp->cmd_bytes_left, 16, 0, | ||
1942 | ESP_CMD_DMA | ESP_CMD_TI); | ||
1943 | esp_event(esp, ESP_EVENT_CMD_DONE); | ||
1944 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1945 | break; | ||
1946 | case ESP_EVENT_CMD_DONE: | ||
1947 | esp->ops->dma_invalidate(esp); | ||
1948 | if (esp->ireg & ESP_INTR_BSERV) { | ||
1949 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1950 | goto again; | ||
1951 | } | ||
1952 | esp_schedule_reset(esp); | ||
1953 | return 0; | ||
1954 | break; | ||
1955 | |||
1956 | case ESP_EVENT_RESET: | ||
1957 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
1958 | break; | ||
1959 | |||
1960 | default: | ||
1961 | printk("ESP: Unexpected event %x, resetting\n", | ||
1962 | esp->event); | ||
1963 | esp_schedule_reset(esp); | ||
1964 | return 0; | ||
1965 | break; | ||
1966 | } | ||
1967 | return 1; | ||
1968 | } | ||
1969 | |||
1970 | static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent) | ||
1971 | { | ||
1972 | struct scsi_cmnd *cmd = ent->cmd; | ||
1973 | |||
1974 | esp_unmap_dma(esp, cmd); | ||
1975 | esp_free_lun_tag(ent, cmd->device->hostdata); | ||
1976 | cmd->result = DID_RESET << 16; | ||
1977 | |||
1978 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
1979 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
1980 | SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); | ||
1981 | ent->sense_ptr = NULL; | ||
1982 | } | ||
1983 | |||
1984 | cmd->scsi_done(cmd); | ||
1985 | list_del(&ent->list); | ||
1986 | esp_put_ent(esp, ent); | ||
1987 | } | ||
1988 | |||
1989 | static void esp_clear_hold(struct scsi_device *dev, void *data) | ||
1990 | { | ||
1991 | struct esp_lun_data *lp = dev->hostdata; | ||
1992 | |||
1993 | BUG_ON(lp->num_tagged); | ||
1994 | lp->hold = 0; | ||
1995 | } | ||
1996 | |||
1997 | static void esp_reset_cleanup(struct esp *esp) | ||
1998 | { | ||
1999 | struct esp_cmd_entry *ent, *tmp; | ||
2000 | int i; | ||
2001 | |||
2002 | list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) { | ||
2003 | struct scsi_cmnd *cmd = ent->cmd; | ||
2004 | |||
2005 | list_del(&ent->list); | ||
2006 | cmd->result = DID_RESET << 16; | ||
2007 | cmd->scsi_done(cmd); | ||
2008 | esp_put_ent(esp, ent); | ||
2009 | } | ||
2010 | |||
2011 | list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) { | ||
2012 | if (ent == esp->active_cmd) | ||
2013 | esp->active_cmd = NULL; | ||
2014 | esp_reset_cleanup_one(esp, ent); | ||
2015 | } | ||
2016 | |||
2017 | BUG_ON(esp->active_cmd != NULL); | ||
2018 | |||
2019 | /* Force renegotiation of sync/wide transfers. */ | ||
2020 | for (i = 0; i < ESP_MAX_TARGET; i++) { | ||
2021 | struct esp_target_data *tp = &esp->target[i]; | ||
2022 | |||
2023 | tp->esp_period = 0; | ||
2024 | tp->esp_offset = 0; | ||
2025 | tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE | | ||
2026 | ESP_CONFIG3_FSCSI | | ||
2027 | ESP_CONFIG3_FAST); | ||
2028 | tp->flags &= ~ESP_TGT_WIDE; | ||
2029 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2030 | |||
2031 | if (tp->starget) | ||
2032 | starget_for_each_device(tp->starget, NULL, | ||
2033 | esp_clear_hold); | ||
2034 | } | ||
2035 | } | ||
2036 | |||
2037 | /* Runs under host->lock */ | ||
2038 | static void __esp_interrupt(struct esp *esp) | ||
2039 | { | ||
2040 | int finish_reset, intr_done; | ||
2041 | u8 phase; | ||
2042 | |||
2043 | esp->sreg = esp_read8(ESP_STATUS); | ||
2044 | |||
2045 | if (esp->flags & ESP_FLAG_RESETTING) { | ||
2046 | finish_reset = 1; | ||
2047 | } else { | ||
2048 | if (esp_check_gross_error(esp)) | ||
2049 | return; | ||
2050 | |||
2051 | finish_reset = esp_check_spur_intr(esp); | ||
2052 | if (finish_reset < 0) | ||
2053 | return; | ||
2054 | } | ||
2055 | |||
2056 | esp->ireg = esp_read8(ESP_INTRPT); | ||
2057 | |||
2058 | if (esp->ireg & ESP_INTR_SR) | ||
2059 | finish_reset = 1; | ||
2060 | |||
2061 | if (finish_reset) { | ||
2062 | esp_reset_cleanup(esp); | ||
2063 | if (esp->eh_reset) { | ||
2064 | complete(esp->eh_reset); | ||
2065 | esp->eh_reset = NULL; | ||
2066 | } | ||
2067 | return; | ||
2068 | } | ||
2069 | |||
2070 | phase = (esp->sreg & ESP_STAT_PMASK); | ||
2071 | if (esp->rev == FASHME) { | ||
2072 | if (((phase != ESP_DIP && phase != ESP_DOP) && | ||
2073 | esp->select_state == ESP_SELECT_NONE && | ||
2074 | esp->event != ESP_EVENT_STATUS && | ||
2075 | esp->event != ESP_EVENT_DATA_DONE) || | ||
2076 | (esp->ireg & ESP_INTR_RSEL)) { | ||
2077 | esp->sreg2 = esp_read8(ESP_STATUS2); | ||
2078 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2079 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2080 | hme_read_fifo(esp); | ||
2081 | } | ||
2082 | } | ||
2083 | |||
2084 | esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] " | ||
2085 | "sreg2[%02x] ireg[%02x]\n", | ||
2086 | esp->sreg, esp->seqreg, esp->sreg2, esp->ireg); | ||
2087 | |||
2088 | intr_done = 0; | ||
2089 | |||
2090 | if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) { | ||
2091 | printk("ESP: unexpected IREG %02x\n", esp->ireg); | ||
2092 | if (esp->ireg & ESP_INTR_IC) | ||
2093 | esp_dump_cmd_log(esp); | ||
2094 | |||
2095 | esp_schedule_reset(esp); | ||
2096 | } else { | ||
2097 | if (!(esp->ireg & ESP_INTR_RSEL)) { | ||
2098 | /* Some combination of FDONE, BSERV, DC. */ | ||
2099 | if (esp->select_state != ESP_SELECT_NONE) | ||
2100 | intr_done = esp_finish_select(esp); | ||
2101 | } else if (esp->ireg & ESP_INTR_RSEL) { | ||
2102 | if (esp->active_cmd) | ||
2103 | (void) esp_finish_select(esp); | ||
2104 | intr_done = esp_reconnect(esp); | ||
2105 | } | ||
2106 | } | ||
2107 | while (!intr_done) | ||
2108 | intr_done = esp_process_event(esp); | ||
2109 | } | ||
2110 | |||
2111 | irqreturn_t scsi_esp_intr(int irq, void *dev_id) | ||
2112 | { | ||
2113 | struct esp *esp = dev_id; | ||
2114 | unsigned long flags; | ||
2115 | irqreturn_t ret; | ||
2116 | |||
2117 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2118 | ret = IRQ_NONE; | ||
2119 | if (esp->ops->irq_pending(esp)) { | ||
2120 | ret = IRQ_HANDLED; | ||
2121 | for (;;) { | ||
2122 | int i; | ||
2123 | |||
2124 | __esp_interrupt(esp); | ||
2125 | if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK)) | ||
2126 | break; | ||
2127 | esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK; | ||
2128 | |||
2129 | for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { | ||
2130 | if (esp->ops->irq_pending(esp)) | ||
2131 | break; | ||
2132 | } | ||
2133 | if (i == ESP_QUICKIRQ_LIMIT) | ||
2134 | break; | ||
2135 | } | ||
2136 | } | ||
2137 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2138 | |||
2139 | return ret; | ||
2140 | } | ||
2141 | EXPORT_SYMBOL(scsi_esp_intr); | ||
2142 | |||
2143 | static void __devinit esp_get_revision(struct esp *esp) | ||
2144 | { | ||
2145 | u8 val; | ||
2146 | |||
2147 | esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); | ||
2148 | esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); | ||
2149 | esp_write8(esp->config2, ESP_CFG2); | ||
2150 | |||
2151 | val = esp_read8(ESP_CFG2); | ||
2152 | val &= ~ESP_CONFIG2_MAGIC; | ||
2153 | if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { | ||
2154 | /* If what we write to cfg2 does not come back, cfg2 is not | ||
2155 | * implemented, therefore this must be a plain esp100. | ||
2156 | */ | ||
2157 | esp->rev = ESP100; | ||
2158 | } else { | ||
2159 | esp->config2 = 0; | ||
2160 | esp_set_all_config3(esp, 5); | ||
2161 | esp->prev_cfg3 = 5; | ||
2162 | esp_write8(esp->config2, ESP_CFG2); | ||
2163 | esp_write8(0, ESP_CFG3); | ||
2164 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
2165 | |||
2166 | val = esp_read8(ESP_CFG3); | ||
2167 | if (val != 5) { | ||
2168 | /* The cfg2 register is implemented, however | ||
2169 | * cfg3 is not, must be esp100a. | ||
2170 | */ | ||
2171 | esp->rev = ESP100A; | ||
2172 | } else { | ||
2173 | esp_set_all_config3(esp, 0); | ||
2174 | esp->prev_cfg3 = 0; | ||
2175 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
2176 | |||
2177 | /* All of cfg{1,2,3} implemented, must be one of | ||
2178 | * the fas variants, figure out which one. | ||
2179 | */ | ||
2180 | if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) { | ||
2181 | esp->rev = FAST; | ||
2182 | esp->sync_defp = SYNC_DEFP_FAST; | ||
2183 | } else { | ||
2184 | esp->rev = ESP236; | ||
2185 | } | ||
2186 | esp->config2 = 0; | ||
2187 | esp_write8(esp->config2, ESP_CFG2); | ||
2188 | } | ||
2189 | } | ||
2190 | } | ||
2191 | |||
2192 | static void __devinit esp_init_swstate(struct esp *esp) | ||
2193 | { | ||
2194 | int i; | ||
2195 | |||
2196 | INIT_LIST_HEAD(&esp->queued_cmds); | ||
2197 | INIT_LIST_HEAD(&esp->active_cmds); | ||
2198 | INIT_LIST_HEAD(&esp->esp_cmd_pool); | ||
2199 | |||
2200 | /* Start with a clear state, domain validation (via ->slave_configure, | ||
2201 | * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged | ||
2202 | * commands. | ||
2203 | */ | ||
2204 | for (i = 0 ; i < ESP_MAX_TARGET; i++) { | ||
2205 | esp->target[i].flags = 0; | ||
2206 | esp->target[i].nego_goal_period = 0; | ||
2207 | esp->target[i].nego_goal_offset = 0; | ||
2208 | esp->target[i].nego_goal_width = 0; | ||
2209 | esp->target[i].nego_goal_tags = 0; | ||
2210 | } | ||
2211 | } | ||
2212 | |||
2213 | /* This places the ESP into a known state at boot time. */ | ||
2214 | static void __devinit esp_bootup_reset(struct esp *esp) | ||
2215 | { | ||
2216 | u8 val; | ||
2217 | |||
2218 | /* Reset the DMA */ | ||
2219 | esp->ops->reset_dma(esp); | ||
2220 | |||
2221 | /* Reset the ESP */ | ||
2222 | esp_reset_esp(esp); | ||
2223 | |||
2224 | /* Reset the SCSI bus, but tell ESP not to generate an irq */ | ||
2225 | val = esp_read8(ESP_CFG1); | ||
2226 | val |= ESP_CONFIG1_SRRDISAB; | ||
2227 | esp_write8(val, ESP_CFG1); | ||
2228 | |||
2229 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
2230 | udelay(400); | ||
2231 | |||
2232 | esp_write8(esp->config1, ESP_CFG1); | ||
2233 | |||
2234 | /* Eat any bitrot in the chip and we are done... */ | ||
2235 | esp_read8(ESP_INTRPT); | ||
2236 | } | ||
2237 | |||
2238 | static void __devinit esp_set_clock_params(struct esp *esp) | ||
2239 | { | ||
2240 | int fmhz; | ||
2241 | u8 ccf; | ||
2242 | |||
2243 | /* This is getting messy but it has to be done correctly or else | ||
2244 | * you get weird behavior all over the place. We are trying to | ||
2245 | * basically figure out three pieces of information. | ||
2246 | * | ||
2247 | * a) Clock Conversion Factor | ||
2248 | * | ||
2249 | * This is a representation of the input crystal clock frequency | ||
2250 | * going into the ESP on this machine. Any operation whose timing | ||
2251 | * is longer than 400ns depends on this value being correct. For | ||
2252 | * example, you'll get blips for arbitration/selection during high | ||
2253 | * load or with multiple targets if this is not set correctly. | ||
2254 | * | ||
2255 | * b) Selection Time-Out | ||
2256 | * | ||
2257 | * The ESP isn't very bright and will arbitrate for the bus and try | ||
2258 | * to select a target forever if you let it. This value tells the | ||
2259 | * ESP when it has taken too long to negotiate and that it should | ||
2260 | * interrupt the CPU so we can see what happened. The value is | ||
2261 | * computed as follows (from NCR/Symbios chip docs). | ||
2262 | * | ||
2263 | * (Time Out Period) * (Input Clock) | ||
2264 | * STO = ---------------------------------- | ||
2265 | * (8192) * (Clock Conversion Factor) | ||
2266 | * | ||
2267 | * We use a time out period of 250ms (ESP_BUS_TIMEOUT). | ||
2268 | * | ||
2269 | * c) Empirical constants for synchronous offset and transfer period | ||
2270 | * register values | ||
2271 | * | ||
2272 | * This entails the smallest and largest sync period we could ever | ||
2273 | * handle on this ESP. | ||
2274 | */ | ||
2275 | fmhz = esp->cfreq; | ||
2276 | |||
2277 | ccf = ((fmhz / 1000000) + 4) / 5; | ||
2278 | if (ccf == 1) | ||
2279 | ccf = 2; | ||
2280 | |||
2281 | /* If we can't find anything reasonable, just assume 20MHz. | ||
2282 | * This is the clock frequency of the older sun4c's where I've | ||
2283 | * been unable to find the clock-frequency PROM property. All | ||
2284 | * other machines provide useful values it seems. | ||
2285 | */ | ||
2286 | if (fmhz <= 5000000 || ccf < 1 || ccf > 8) { | ||
2287 | fmhz = 20000000; | ||
2288 | ccf = 4; | ||
2289 | } | ||
2290 | |||
2291 | esp->cfact = (ccf == 8 ? 0 : ccf); | ||
2292 | esp->cfreq = fmhz; | ||
2293 | esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz); | ||
2294 | esp->ctick = ESP_TICK(ccf, esp->ccycle); | ||
2295 | esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf); | ||
2296 | esp->sync_defp = SYNC_DEFP_SLOW; | ||
2297 | } | ||
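
To make the time-out arithmetic above concrete, a worked example with an assumed 40MHz crystal (illustrative values, not measured from any particular machine):

	/* Assume fmhz = 40000000 (a 40MHz crystal):
	 *
	 *	ccf = ((40000000 / 1000000) + 4) / 5 = 44 / 5 = 8
	 *
	 * and with ESP_BUS_TIMEOUT = 250ms the selection time-out becomes
	 *
	 *	STO = (0.250 * 40000000) / (8192 * 8) = 10000000 / 65536 ~= 152
	 *
	 * which is what ESP_NEG_DEFP(40000000, 8) in esp_scsi.h computes in
	 * integer math: (250 * (40000000 / 1000)) / (8192 * 8) = 152.  Note
	 * that esp->cfact stores the register encoding 0 for ccf == 8
	 * (ESP_CCF_F0), while the formula uses the real conversion factor.
	 */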
2298 | |||
2299 | static const char *esp_chip_names[] = { | ||
2300 | "ESP100", | ||
2301 | "ESP100A", | ||
2302 | "ESP236", | ||
2303 | "FAS236", | ||
2304 | "FAS100A", | ||
2305 | "FAST", | ||
2306 | "FASHME", | ||
2307 | }; | ||
2308 | |||
2309 | static struct scsi_transport_template *esp_transport_template; | ||
2310 | |||
2311 | int __devinit scsi_esp_register(struct esp *esp, struct device *dev) | ||
2312 | { | ||
2313 | static int instance; | ||
2314 | int err; | ||
2315 | |||
2316 | esp->host->transportt = esp_transport_template; | ||
2317 | esp->host->max_lun = ESP_MAX_LUN; | ||
2318 | esp->host->cmd_per_lun = 2; | ||
2319 | |||
2320 | esp_set_clock_params(esp); | ||
2321 | |||
2322 | esp_get_revision(esp); | ||
2323 | |||
2324 | esp_init_swstate(esp); | ||
2325 | |||
2326 | esp_bootup_reset(esp); | ||
2327 | |||
2328 | printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n", | ||
2329 | esp->host->unique_id, esp->regs, esp->dma_regs, | ||
2330 | esp->host->irq); | ||
2331 | printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n", | ||
2332 | esp->host->unique_id, esp_chip_names[esp->rev], | ||
2333 | esp->cfreq / 1000000, esp->cfact, esp->scsi_id); | ||
2334 | |||
2335 | /* Let the SCSI bus reset settle. */ | ||
2336 | ssleep(esp_bus_reset_settle); | ||
2337 | |||
2338 | err = scsi_add_host(esp->host, dev); | ||
2339 | if (err) | ||
2340 | return err; | ||
2341 | |||
2342 | esp->host->unique_id = instance++; | ||
2343 | |||
2344 | scsi_scan_host(esp->host); | ||
2345 | |||
2346 | return 0; | ||
2347 | } | ||
2348 | EXPORT_SYMBOL(scsi_esp_register); | ||
2349 | |||
2350 | void __devexit scsi_esp_unregister(struct esp *esp) | ||
2351 | { | ||
2352 | scsi_remove_host(esp->host); | ||
2353 | } | ||
2354 | EXPORT_SYMBOL(scsi_esp_unregister); | ||
2355 | |||
2356 | static int esp_slave_alloc(struct scsi_device *dev) | ||
2357 | { | ||
2358 | struct esp *esp = host_to_esp(dev->host); | ||
2359 | struct esp_target_data *tp = &esp->target[dev->id]; | ||
2360 | struct esp_lun_data *lp; | ||
2361 | |||
2362 | lp = kzalloc(sizeof(*lp), GFP_KERNEL); | ||
2363 | if (!lp) | ||
2364 | return -ENOMEM; | ||
2365 | dev->hostdata = lp; | ||
2366 | |||
2367 | tp->starget = dev->sdev_target; | ||
2368 | |||
2369 | spi_min_period(tp->starget) = esp->min_period; | ||
2370 | spi_max_offset(tp->starget) = 15; | ||
2371 | |||
2372 | if (esp->flags & ESP_FLAG_WIDE_CAPABLE) | ||
2373 | spi_max_width(tp->starget) = 1; | ||
2374 | else | ||
2375 | spi_max_width(tp->starget) = 0; | ||
2376 | |||
2377 | return 0; | ||
2378 | } | ||
2379 | |||
2380 | static int esp_slave_configure(struct scsi_device *dev) | ||
2381 | { | ||
2382 | struct esp *esp = host_to_esp(dev->host); | ||
2383 | struct esp_target_data *tp = &esp->target[dev->id]; | ||
2384 | int goal_tags, queue_depth; | ||
2385 | |||
2386 | goal_tags = 0; | ||
2387 | |||
2388 | if (dev->tagged_supported) { | ||
2389 | /* XXX make this configurable somehow XXX */ | ||
2390 | goal_tags = ESP_DEFAULT_TAGS; | ||
2391 | |||
2392 | if (goal_tags > ESP_MAX_TAG) | ||
2393 | goal_tags = ESP_MAX_TAG; | ||
2394 | } | ||
2395 | |||
2396 | queue_depth = goal_tags; | ||
2397 | if (queue_depth < dev->host->cmd_per_lun) | ||
2398 | queue_depth = dev->host->cmd_per_lun; | ||
2399 | |||
2400 | if (goal_tags) { | ||
2401 | scsi_set_tag_type(dev, MSG_ORDERED_TAG); | ||
2402 | scsi_activate_tcq(dev, queue_depth); | ||
2403 | } else { | ||
2404 | scsi_deactivate_tcq(dev, queue_depth); | ||
2405 | } | ||
2406 | tp->flags |= ESP_TGT_DISCONNECT; | ||
2407 | |||
2408 | if (!spi_initial_dv(dev->sdev_target)) | ||
2409 | spi_dv_device(dev); | ||
2410 | |||
2411 | return 0; | ||
2412 | } | ||
2413 | |||
2414 | static void esp_slave_destroy(struct scsi_device *dev) | ||
2415 | { | ||
2416 | struct esp_lun_data *lp = dev->hostdata; | ||
2417 | |||
2418 | kfree(lp); | ||
2419 | dev->hostdata = NULL; | ||
2420 | } | ||
2421 | |||
2422 | static int esp_eh_abort_handler(struct scsi_cmnd *cmd) | ||
2423 | { | ||
2424 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2425 | struct esp_cmd_entry *ent, *tmp; | ||
2426 | struct completion eh_done; | ||
2427 | unsigned long flags; | ||
2428 | |||
2429 | /* XXX This helps a lot with debugging but might be a bit | ||
2430 | * XXX much for the final driver. | ||
2431 | */ | ||
2432 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2433 | printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n", | ||
2434 | esp->host->unique_id, cmd, cmd->cmnd[0]); | ||
2435 | ent = esp->active_cmd; | ||
2436 | if (ent) | ||
2437 | printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n", | ||
2438 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2439 | list_for_each_entry(ent, &esp->queued_cmds, list) { | ||
2440 | printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n", | ||
2441 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2442 | } | ||
2443 | list_for_each_entry(ent, &esp->active_cmds, list) { | ||
2444 | printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n", | ||
2445 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2446 | } | ||
2447 | esp_dump_cmd_log(esp); | ||
2448 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2449 | |||
2450 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2451 | |||
2452 | ent = NULL; | ||
2453 | list_for_each_entry(tmp, &esp->queued_cmds, list) { | ||
2454 | if (tmp->cmd == cmd) { | ||
2455 | ent = tmp; | ||
2456 | break; | ||
2457 | } | ||
2458 | } | ||
2459 | |||
2460 | if (ent) { | ||
2461 | /* Easiest case, we didn't even issue the command | ||
2462 | * yet so it is trivial to abort. | ||
2463 | */ | ||
2464 | list_del(&ent->list); | ||
2465 | |||
2466 | cmd->result = DID_ABORT << 16; | ||
2467 | cmd->scsi_done(cmd); | ||
2468 | |||
2469 | esp_put_ent(esp, ent); | ||
2470 | |||
2471 | goto out_success; | ||
2472 | } | ||
2473 | |||
2474 | init_completion(&eh_done); | ||
2475 | |||
2476 | ent = esp->active_cmd; | ||
2477 | if (ent && ent->cmd == cmd) { | ||
2478 | /* Command is the currently active command on | ||
2479 | * the bus. If we already have an output message | ||
2480 | * pending, no dice. | ||
2481 | */ | ||
2482 | if (esp->msg_out_len) | ||
2483 | goto out_failure; | ||
2484 | |||
2485 | /* Send out an abort, encouraging the target to | ||
2486 | * go to MSGOUT phase by asserting ATN. | ||
2487 | */ | ||
2488 | esp->msg_out[0] = ABORT_TASK_SET; | ||
2489 | esp->msg_out_len = 1; | ||
2490 | ent->eh_done = &eh_done; | ||
2491 | |||
2492 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
2493 | } else { | ||
2494 | /* The command is disconnected. This is not easy to | ||
2495 | * abort. For now we fail and let the scsi error | ||
2496 | * handling layer go try a scsi bus reset or host | ||
2497 | * reset. | ||
2498 | * | ||
2499 | * What we could do is put together a scsi command | ||
2500 | * solely for the purpose of sending an abort message | ||
2501 | * to the target. Coming up with all the code to | ||
2502 | * cook up scsi commands, special case them everywhere, | ||
2503 | * etc. is for questionable gain and it would be better | ||
2504 | * if the generic scsi error handling layer could do at | ||
2505 | * least some of that for us. | ||
2506 | * | ||
2507 | * Anyways this is an area for potential future improvement | ||
2508 | * in this driver. | ||
2509 | */ | ||
2510 | goto out_failure; | ||
2511 | } | ||
2512 | |||
2513 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2514 | |||
2515 | if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) { | ||
2516 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2517 | ent->eh_done = NULL; | ||
2518 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2519 | |||
2520 | return FAILED; | ||
2521 | } | ||
2522 | |||
2523 | return SUCCESS; | ||
2524 | |||
2525 | out_success: | ||
2526 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2527 | return SUCCESS; | ||
2528 | |||
2529 | out_failure: | ||
2530 | /* XXX This might be a good location to set ESP_TGT_BROKEN | ||
2531 | * XXX since we know which target/lun in particular is | ||
2532 | * XXX causing trouble. | ||
2533 | */ | ||
2534 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2535 | return FAILED; | ||
2536 | } | ||
2537 | |||
2538 | static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd) | ||
2539 | { | ||
2540 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2541 | struct completion eh_reset; | ||
2542 | unsigned long flags; | ||
2543 | |||
2544 | init_completion(&eh_reset); | ||
2545 | |||
2546 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2547 | |||
2548 | esp->eh_reset = &eh_reset; | ||
2549 | |||
2550 | /* XXX This is too simple... We should add lots of | ||
2551 | * XXX checks here so that if we find that the chip is | ||
2552 | * XXX very wedged we return failure immediately so | ||
2553 | * XXX that we can perform a full chip reset. | ||
2554 | */ | ||
2555 | esp->flags |= ESP_FLAG_RESETTING; | ||
2556 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
2557 | |||
2558 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2559 | |||
2560 | ssleep(esp_bus_reset_settle); | ||
2561 | |||
2562 | if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) { | ||
2563 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2564 | esp->eh_reset = NULL; | ||
2565 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2566 | |||
2567 | return FAILED; | ||
2568 | } | ||
2569 | |||
2570 | return SUCCESS; | ||
2571 | } | ||
2572 | |||
2573 | /* All bets are off, reset the entire device. */ | ||
2574 | static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd) | ||
2575 | { | ||
2576 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2577 | unsigned long flags; | ||
2578 | |||
2579 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2580 | esp_bootup_reset(esp); | ||
2581 | esp_reset_cleanup(esp); | ||
2582 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2583 | |||
2584 | ssleep(esp_bus_reset_settle); | ||
2585 | |||
2586 | return SUCCESS; | ||
2587 | } | ||
2588 | |||
2589 | static const char *esp_info(struct Scsi_Host *host) | ||
2590 | { | ||
2591 | return "esp"; | ||
2592 | } | ||
2593 | |||
2594 | struct scsi_host_template scsi_esp_template = { | ||
2595 | .module = THIS_MODULE, | ||
2596 | .name = "esp", | ||
2597 | .info = esp_info, | ||
2598 | .queuecommand = esp_queuecommand, | ||
2599 | .slave_alloc = esp_slave_alloc, | ||
2600 | .slave_configure = esp_slave_configure, | ||
2601 | .slave_destroy = esp_slave_destroy, | ||
2602 | .eh_abort_handler = esp_eh_abort_handler, | ||
2603 | .eh_bus_reset_handler = esp_eh_bus_reset_handler, | ||
2604 | .eh_host_reset_handler = esp_eh_host_reset_handler, | ||
2605 | .can_queue = 7, | ||
2606 | .this_id = 7, | ||
2607 | .sg_tablesize = SG_ALL, | ||
2608 | .use_clustering = ENABLE_CLUSTERING, | ||
2609 | .max_sectors = 0xffff, | ||
2610 | .skip_settle_delay = 1, | ||
2611 | }; | ||
2612 | EXPORT_SYMBOL(scsi_esp_template); | ||
2613 | |||
2614 | static void esp_get_signalling(struct Scsi_Host *host) | ||
2615 | { | ||
2616 | struct esp *esp = host_to_esp(host); | ||
2617 | enum spi_signal_type type; | ||
2618 | |||
2619 | if (esp->flags & ESP_FLAG_DIFFERENTIAL) | ||
2620 | type = SPI_SIGNAL_HVD; | ||
2621 | else | ||
2622 | type = SPI_SIGNAL_SE; | ||
2623 | |||
2624 | spi_signalling(host) = type; | ||
2625 | } | ||
2626 | |||
2627 | static void esp_set_offset(struct scsi_target *target, int offset) | ||
2628 | { | ||
2629 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2630 | struct esp *esp = host_to_esp(host); | ||
2631 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2632 | |||
2633 | tp->nego_goal_offset = offset; | ||
2634 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2635 | } | ||
2636 | |||
2637 | static void esp_set_period(struct scsi_target *target, int period) | ||
2638 | { | ||
2639 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2640 | struct esp *esp = host_to_esp(host); | ||
2641 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2642 | |||
2643 | tp->nego_goal_period = period; | ||
2644 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2645 | } | ||
2646 | |||
2647 | static void esp_set_width(struct scsi_target *target, int width) | ||
2648 | { | ||
2649 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2650 | struct esp *esp = host_to_esp(host); | ||
2651 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2652 | |||
2653 | tp->nego_goal_width = (width ? 1 : 0); | ||
2654 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2655 | } | ||
2656 | |||
2657 | static struct spi_function_template esp_transport_ops = { | ||
2658 | .set_offset = esp_set_offset, | ||
2659 | .show_offset = 1, | ||
2660 | .set_period = esp_set_period, | ||
2661 | .show_period = 1, | ||
2662 | .set_width = esp_set_width, | ||
2663 | .show_width = 1, | ||
2664 | .get_signalling = esp_get_signalling, | ||
2665 | }; | ||
2666 | |||
2667 | static int __init esp_init(void) | ||
2668 | { | ||
2669 | BUILD_BUG_ON(sizeof(struct scsi_pointer) < | ||
2670 | sizeof(struct esp_cmd_priv)); | ||
2671 | |||
2672 | esp_transport_template = spi_attach_transport(&esp_transport_ops); | ||
2673 | if (!esp_transport_template) | ||
2674 | return -ENODEV; | ||
2675 | |||
2676 | return 0; | ||
2677 | } | ||
2678 | |||
2679 | static void __exit esp_exit(void) | ||
2680 | { | ||
2681 | spi_release_transport(esp_transport_template); | ||
2682 | } | ||
2683 | |||
2684 | MODULE_DESCRIPTION("ESP SCSI driver core"); | ||
2685 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
2686 | MODULE_LICENSE("GPL"); | ||
2687 | MODULE_VERSION(DRV_VERSION); | ||
2688 | |||
2689 | module_param(esp_bus_reset_settle, int, 0); | ||
2690 | MODULE_PARM_DESC(esp_bus_reset_settle, | ||
2691 | "ESP scsi bus reset delay in seconds"); | ||
2692 | |||
2693 | module_param(esp_debug, int, 0); | ||
2694 | MODULE_PARM_DESC(esp_debug, | ||
2695 | "ESP bitmapped debugging message enable value:\n" | ||
2696 | " 0x00000001 Log interrupt events\n" | ||
2697 | " 0x00000002 Log scsi commands\n" | ||
2698 | " 0x00000004 Log resets\n" | ||
2699 | " 0x00000008 Log message in events\n" | ||
2700 | " 0x00000010 Log message out events\n" | ||
2701 | " 0x00000020 Log command completion\n" | ||
2702 | " 0x00000040 Log disconnects\n" | ||
2703 | " 0x00000080 Log data start\n" | ||
2704 | " 0x00000100 Log data done\n" | ||
2705 | " 0x00000200 Log reconnects\n" | ||
2706 | " 0x00000400 Log auto-sense data\n" | ||
2707 | ); | ||
2708 | |||
2709 | module_init(esp_init); | ||
2710 | module_exit(esp_exit); | ||
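
As a load-time usage illustration, the debug bits above can be OR'd together; assuming the core builds as a module named esp_scsi (an assumption based on the source file name), enabling interrupt and scsi command logging would look like:

	# Hypothetical invocation; module name assumed from the file name.
	modprobe esp_scsi esp_debug=0x0003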
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h new file mode 100644 index 000000000000..8d4a6690401f --- /dev/null +++ b/drivers/scsi/esp_scsi.h | |||
@@ -0,0 +1,560 @@ | |||
1 | /* esp_scsi.h: Defines and structures for the ESP driver. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #ifndef _ESP_SCSI_H | ||
7 | #define _ESP_SCSI_H | ||
8 | |||
9 | /* Access Description Offset */ | ||
10 | #define ESP_TCLOW 0x00UL /* rw Low bits transfer count 0x00 */ | ||
11 | #define ESP_TCMED 0x01UL /* rw Mid bits transfer count 0x04 */ | ||
12 | #define ESP_FDATA 0x02UL /* rw FIFO data bits 0x08 */ | ||
13 | #define ESP_CMD 0x03UL /* rw SCSI command bits 0x0c */ | ||
14 | #define ESP_STATUS 0x04UL /* ro ESP status register 0x10 */ | ||
15 | #define ESP_BUSID ESP_STATUS /* wo BusID for sel/resel 0x10 */ | ||
16 | #define ESP_INTRPT 0x05UL /* ro Kind of interrupt 0x14 */ | ||
17 | #define ESP_TIMEO ESP_INTRPT /* wo Timeout for sel/resel 0x14 */ | ||
18 | #define ESP_SSTEP 0x06UL /* ro Sequence step register 0x18 */ | ||
19 | #define ESP_STP ESP_SSTEP /* wo Transfer period/sync 0x18 */ | ||
20 | #define ESP_FFLAGS 0x07UL /* ro Bits current FIFO info 0x1c */ | ||
21 | #define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */ | ||
22 | #define ESP_CFG1 0x08UL /* rw First cfg register 0x20 */ | ||
23 | #define ESP_CFACT 0x09UL /* wo Clock conv factor 0x24 */ | ||
24 | #define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */ | ||
25 | #define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */ | ||
26 | #define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */ | ||
27 | #define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */ | ||
28 | #define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */ | ||
29 | #define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ | ||
30 | #define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ | ||
31 | #define ESP_FGRND 0x0fUL /* rw Data base for fifo 0x3c */ | ||
32 | #define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */ | ||
33 | |||
34 | #define SBUS_ESP_REG_SIZE 0x40UL | ||
35 | |||
36 | /* Bitfield meanings for the above registers. */ | ||
37 | |||
38 | /* ESP config reg 1, read-write, found on all ESP chips */ | ||
39 | #define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */ | ||
40 | #define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */ | ||
41 | #define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */ | ||
42 | #define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */ | ||
43 | #define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */ | ||
44 | #define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */ | ||
45 | |||
46 | /* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */ | ||
47 | #define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */ | ||
48 | #define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */ | ||
49 | #define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */ | ||
50 | #define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tgtmode) */ | ||
51 | #define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */ | ||
52 | #define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */ | ||
53 | #define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */ | ||
54 | #define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */ | ||
55 | #define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,216) */ | ||
56 | #define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (236) */ | ||
57 | #define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */ | ||
58 | #define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */ | ||
59 | #define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */ | ||
60 | |||
61 | /* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */ | ||
62 | #define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */ | ||
63 | #define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */ | ||
64 | #define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */ | ||
65 | #define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */ | ||
66 | #define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */ | ||
67 | #define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */ | ||
68 | #define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */ | ||
69 | #define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */ | ||
70 | #define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */ | ||
71 | #define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */ | ||
72 | #define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */ | ||
73 | #define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */ | ||
74 | #define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */ | ||
75 | #define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */ | ||
76 | #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ | ||
77 | #define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ | ||
78 | |||
79 | /* ESP command register read-write */ | ||
80 | /* Group 1 commands: These may be sent at any point in time to the ESP | ||
81 | * chip. None of them can generate interrupts 'cept | ||
82 | * the "SCSI bus reset" command if you have not disabled | ||
83 | * SCSI reset interrupts in the config1 ESP register. | ||
84 | */ | ||
85 | #define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */ | ||
86 | #define ESP_CMD_FLUSH 0x01 /* FIFO Flush */ | ||
87 | #define ESP_CMD_RC 0x02 /* Chip reset */ | ||
88 | #define ESP_CMD_RS 0x03 /* SCSI bus reset */ | ||
89 | |||
90 | /* Group 2 commands: ESP must be an initiator and connected to a target | ||
91 | * for these commands to work. | ||
92 | */ | ||
93 | #define ESP_CMD_TI 0x10 /* Transfer Information */ | ||
94 | #define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */ | ||
95 | #define ESP_CMD_MOK 0x12 /* Message okie-dokie */ | ||
96 | #define ESP_CMD_TPAD 0x18 /* Transfer Pad */ | ||
97 | #define ESP_CMD_SATN 0x1a /* Set ATN */ | ||
98 | #define ESP_CMD_RATN 0x1b /* De-assert ATN */ | ||
99 | |||
100 | /* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected | ||
101 | * to a target as the initiator for these commands to work. | ||
102 | */ | ||
103 | #define ESP_CMD_SMSG 0x20 /* Send message */ | ||
104 | #define ESP_CMD_SSTAT 0x21 /* Send status */ | ||
105 | #define ESP_CMD_SDATA 0x22 /* Send data */ | ||
106 | #define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */ | ||
107 | #define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */ | ||
108 | #define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */ | ||
109 | #define ESP_CMD_DCNCT 0x27 /* Disconnect */ | ||
110 | #define ESP_CMD_RMSG 0x28 /* Receive Message */ | ||
111 | #define ESP_CMD_RCMD 0x29 /* Receive Command */ | ||
112 | #define ESP_CMD_RDATA 0x2a /* Receive Data */ | ||
113 | #define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */ | ||
114 | |||
115 | /* Group 4 commands: The ESP must be in the disconnected state and must | ||
116 | * not be connected to any targets as initiator for | ||
117 | * these commands to work. | ||
118 | */ | ||
119 | #define ESP_CMD_RSEL 0x40 /* Reselect */ | ||
120 | #define ESP_CMD_SEL 0x41 /* Select w/o ATN */ | ||
121 | #define ESP_CMD_SELA 0x42 /* Select w/ATN */ | ||
122 | #define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */ | ||
123 | #define ESP_CMD_ESEL 0x44 /* Enable selection */ | ||
124 | #define ESP_CMD_DSEL 0x45 /* Disable selections */ | ||
125 | #define ESP_CMD_SA3 0x46 /* Select w/ATN3 */ | ||
126 | #define ESP_CMD_RSEL3 0x47 /* Reselect3 */ | ||
127 | |||
128 | /* This bit enables the ESP's DMA on the SBus */ | ||
129 | #define ESP_CMD_DMA 0x80 /* Do DMA? */ | ||
130 | |||
131 | /* ESP status register read-only */ | ||
132 | #define ESP_STAT_PIO 0x01 /* IO phase bit */ | ||
133 | #define ESP_STAT_PCD 0x02 /* CD phase bit */ | ||
134 | #define ESP_STAT_PMSG 0x04 /* MSG phase bit */ | ||
135 | #define ESP_STAT_PMASK 0x07 /* Mask of phase bits */ | ||
136 | #define ESP_STAT_TDONE 0x08 /* Transfer Completed */ | ||
137 | #define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */ | ||
138 | #define ESP_STAT_PERR 0x20 /* Parity error */ | ||
139 | #define ESP_STAT_SPAM 0x40 /* Real bad error */ | ||
140 | /* This indicates the 'interrupt pending' condition on esp236, it is a reserved | ||
141 | * bit on other revs of the ESP. | ||
142 | */ | ||
143 | #define ESP_STAT_INTR 0x80 /* Interrupt */ | ||
144 | |||
145 | /* The status register can be masked with ESP_STAT_PMASK and compared | ||
146 | * with the following values to determine the current phase the ESP | ||
147 | * (at least thinks it) is in. For our purposes we also add our own | ||
148 | * software 'done' bit for our phase management engine. | ||
149 | */ | ||
150 | #define ESP_DOP (0) /* Data Out */ | ||
151 | #define ESP_DIP (ESP_STAT_PIO) /* Data In */ | ||
152 | #define ESP_CMDP (ESP_STAT_PCD) /* Command */ | ||
153 | #define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */ | ||
154 | #define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */ | ||
155 | #define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */ | ||
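
A minimal sketch of how these phase values are meant to be used (the driver's interrupt path does the equivalent against the cached esp->sreg; an `esp` pointer is assumed in scope):

	u8 phase = esp_read8(ESP_STATUS) & ESP_STAT_PMASK;

	if (phase == ESP_MIP) {
		/* Target has put the bus in Message In phase. */
		esp_event(esp, ESP_EVENT_MSGIN);
	}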
156 | |||
157 | /* HME only: status 2 register */ | ||
158 | #define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */ | ||
159 | #define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */ | ||
160 | #define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */ | ||
161 | #define ESP_STAT2_CREGA 0x08 /* The command reg is active now */ | ||
162 | #define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */ | ||
163 | #define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */ | ||
164 | #define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */ | ||
165 | #define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */ | ||
166 | |||
167 | /* ESP interrupt register read-only */ | ||
168 | #define ESP_INTR_S 0x01 /* Select w/o ATN */ | ||
169 | #define ESP_INTR_SATN 0x02 /* Select w/ATN */ | ||
170 | #define ESP_INTR_RSEL 0x04 /* Reselected */ | ||
171 | #define ESP_INTR_FDONE 0x08 /* Function done */ | ||
172 | #define ESP_INTR_BSERV 0x10 /* Bus service */ | ||
173 | #define ESP_INTR_DC 0x20 /* Disconnect */ | ||
174 | #define ESP_INTR_IC 0x40 /* Illegal command given */ | ||
175 | #define ESP_INTR_SR 0x80 /* SCSI bus reset detected */ | ||
176 | |||
177 | /* ESP sequence step register read-only */ | ||
178 | #define ESP_STEP_VBITS 0x07 /* Valid bits */ | ||
179 | #define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */ | ||
180 | #define ESP_STEP_SID 0x01 /* One msg byte sent */ | ||
181 | #define ESP_STEP_NCMD 0x02 /* Was not in command phase */ | ||
182 | #define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd | ||
183 | * bytes to be lost | ||
184 | */ | ||
185 | #define ESP_STEP_FINI4 0x04 /* Command was sent ok */ | ||
186 | |||
187 | /* Ho hum, some ESP's set the step register to this as well... */ | ||
188 | #define ESP_STEP_FINI5 0x05 | ||
189 | #define ESP_STEP_FINI6 0x06 | ||
190 | #define ESP_STEP_FINI7 0x07 | ||
191 | |||
192 | /* ESP chip-test register read-write */ | ||
193 | #define ESP_TEST_TARG 0x01 /* Target test mode */ | ||
194 | #define ESP_TEST_INI 0x02 /* Initiator test mode */ | ||
195 | #define ESP_TEST_TS 0x04 /* Tristate test mode */ | ||
196 | |||
197 | /* ESP unique ID register read-only, found on fas236+fas100a only */ | ||
198 | #define ESP_UID_F100A 0x00 /* ESP FAS100A */ | ||
199 | #define ESP_UID_F236 0x02 /* ESP FAS236 */ | ||
200 | #define ESP_UID_REV 0x07 /* ESP revision */ | ||
201 | #define ESP_UID_FAM 0xf8 /* ESP family */ | ||
202 | |||
203 | /* ESP fifo flags register read-only */ | ||
204 | /* Note that the following implies a 16 byte FIFO on the ESP. */ | ||
205 | #define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */ | ||
206 | #define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */ | ||
207 | #define ESP_FF_SSTEP 0xe0 /* Sequence step */ | ||
208 | |||
209 | /* ESP clock conversion factor register write-only */ | ||
210 | #define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */ | ||
211 | #define ESP_CCF_NEVER 0x01 /* Set it to this and die */ | ||
212 | #define ESP_CCF_F2 0x02 /* 10MHz */ | ||
213 | #define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */ | ||
214 | #define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */ | ||
215 | #define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */ | ||
216 | #define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */ | ||
217 | #define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */ | ||
218 | |||
219 | /* HME only... */ | ||
220 | #define ESP_BUSID_RESELID 0x10 | ||
221 | #define ESP_BUSID_CTR32BIT 0x40 | ||
222 | |||
223 | #define ESP_BUS_TIMEOUT 250 /* In milli-seconds */ | ||
224 | #define ESP_TIMEO_CONST 8192 | ||
225 | #define ESP_NEG_DEFP(mhz, cfact) \ | ||
226 | ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) | ||
227 | #define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000)) | ||
228 | #define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) | ||
229 | |||
230 | /* For slow to medium speed input clock rates we shoot for 5mb/s, but for high | ||
231 | * input clock rates we try to do 10mb/s although I don't think a transfer can | ||
232 | * even run that fast with an ESP even with DMA2 scatter gather pipelining. | ||
233 | */ | ||
234 | #define SYNC_DEFP_SLOW 0x32 /* 5mb/s */ | ||
235 | #define SYNC_DEFP_FAST 0x19 /* 10mb/s */ | ||
236 | |||
237 | struct esp_cmd_priv { | ||
238 | union { | ||
239 | dma_addr_t dma_addr; | ||
240 | int num_sg; | ||
241 | } u; | ||
242 | |||
243 | unsigned int cur_residue; | ||
244 | struct scatterlist *cur_sg; | ||
245 | unsigned int tot_residue; | ||
246 | }; | ||
247 | #define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp)) | ||
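
ESP_CMD_PRIV overlays this bookkeeping on the scsi_pointer (SCp) scratch area already embedded in every struct scsi_cmnd, so no per-command allocation is needed; the BUILD_BUG_ON in esp_init() (see esp_scsi.c above) verifies that it fits. A sketch of the intended access pattern (the `done` count is hypothetical):

	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	unsigned int done = 64;		/* hypothetical bytes completed */

	spriv->cur_residue -= done;
	spriv->tot_residue -= done;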
248 | |||
249 | enum esp_rev { | ||
250 | ESP100 = 0x00, /* NCR53C90 - very broken */ | ||
251 | ESP100A = 0x01, /* NCR53C90A */ | ||
252 | ESP236 = 0x02, | ||
253 | FAS236 = 0x03, | ||
254 | FAS100A = 0x04, | ||
255 | FAST = 0x05, | ||
256 | FASHME = 0x06, | ||
257 | }; | ||
258 | |||
259 | struct esp_cmd_entry { | ||
260 | struct list_head list; | ||
261 | |||
262 | struct scsi_cmnd *cmd; | ||
263 | |||
264 | unsigned int saved_cur_residue; | ||
265 | struct scatterlist *saved_cur_sg; | ||
266 | unsigned int saved_tot_residue; | ||
267 | |||
268 | u8 flags; | ||
269 | #define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */ | ||
270 | #define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */ | ||
271 | #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ | ||
272 | |||
273 | u8 tag[2]; | ||
274 | |||
275 | u8 status; | ||
276 | u8 message; | ||
277 | |||
278 | unsigned char *sense_ptr; | ||
279 | unsigned char *saved_sense_ptr; | ||
280 | dma_addr_t sense_dma; | ||
281 | |||
282 | struct completion *eh_done; | ||
283 | }; | ||
284 | |||
285 | /* XXX make this configurable somehow XXX */ | ||
286 | #define ESP_DEFAULT_TAGS 16 | ||
287 | |||
288 | #define ESP_MAX_TARGET 16 | ||
289 | #define ESP_MAX_LUN 8 | ||
290 | #define ESP_MAX_TAG 256 | ||
291 | |||
292 | struct esp_lun_data { | ||
293 | struct esp_cmd_entry *non_tagged_cmd; | ||
294 | int num_tagged; | ||
295 | int hold; | ||
296 | struct esp_cmd_entry *tagged_cmds[ESP_MAX_TAG]; | ||
297 | }; | ||
298 | |||
299 | struct esp_target_data { | ||
300 | /* These are the ESP_STP, ESP_SOFF, and ESP_CFG3 register values which | ||
301 | * match the currently negotiated settings for this target. The SCSI | ||
302 | * protocol values are maintained in spi_{offset,period,wide}(starget). | ||
303 | */ | ||
304 | u8 esp_period; | ||
305 | u8 esp_offset; | ||
306 | u8 esp_config3; | ||
307 | |||
308 | u8 flags; | ||
309 | #define ESP_TGT_WIDE 0x01 | ||
310 | #define ESP_TGT_DISCONNECT 0x02 | ||
311 | #define ESP_TGT_NEGO_WIDE 0x04 | ||
312 | #define ESP_TGT_NEGO_SYNC 0x08 | ||
313 | #define ESP_TGT_CHECK_NEGO 0x40 | ||
314 | #define ESP_TGT_BROKEN 0x80 | ||
315 | |||
316 | /* When ESP_TGT_CHECK_NEGO is set, on the next scsi command to this | ||
317 | * device we will try to negotiate the following parameters. | ||
318 | */ | ||
319 | u8 nego_goal_period; | ||
320 | u8 nego_goal_offset; | ||
321 | u8 nego_goal_width; | ||
322 | u8 nego_goal_tags; | ||
323 | |||
324 | struct scsi_target *starget; | ||
325 | }; | ||
326 | |||
327 | struct esp_event_ent { | ||
328 | u8 type; | ||
329 | #define ESP_EVENT_TYPE_EVENT 0x01 | ||
330 | #define ESP_EVENT_TYPE_CMD 0x02 | ||
331 | u8 val; | ||
332 | |||
333 | u8 sreg; | ||
334 | u8 seqreg; | ||
335 | u8 sreg2; | ||
336 | u8 ireg; | ||
337 | u8 select_state; | ||
338 | u8 event; | ||
339 | u8 __pad; | ||
340 | }; | ||
341 | |||
342 | struct esp; | ||
343 | struct esp_driver_ops { | ||
344 | /* Read and write the ESP 8-bit registers. On some | ||
345 | * applications of the ESP chip the registers are at 4-byte | ||
346 | * instead of 1-byte intervals. | ||
347 | */ | ||
348 | void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg); | ||
349 | u8 (*esp_read8)(struct esp *esp, unsigned long reg); | ||
350 | |||
351 | /* Map and unmap DMA memory. Eventually the driver will be | ||
352 | * converted to the generic DMA API as soon as SBUS is able to | ||
353 | * cope with that. At such time we can remove this. | ||
354 | */ | ||
355 | dma_addr_t (*map_single)(struct esp *esp, void *buf, | ||
356 | size_t sz, int dir); | ||
357 | int (*map_sg)(struct esp *esp, struct scatterlist *sg, | ||
358 | int num_sg, int dir); | ||
359 | void (*unmap_single)(struct esp *esp, dma_addr_t addr, | ||
360 | size_t sz, int dir); | ||
361 | void (*unmap_sg)(struct esp *esp, struct scatterlist *sg, | ||
362 | int num_sg, int dir); | ||
363 | |||
364 | /* Return non-zero if there is an IRQ pending. Usually this | ||
365 | * status bit lives in the DMA controller sitting in front of | ||
366 | * the ESP. This has to be accurate or else the ESP interrupt | ||
367 | * handler will not run. | ||
368 | */ | ||
369 | int (*irq_pending)(struct esp *esp); | ||
370 | |||
371 | /* Reset the DMA engine entirely. On return, ESP interrupts | ||
372 | * should be enabled. Often the interrupt enabling is | ||
373 | * controlled in the DMA engine. | ||
374 | */ | ||
375 | void (*reset_dma)(struct esp *esp); | ||
376 | |||
377 | /* Drain any pending DMA in the DMA engine after a transfer. | ||
378 | * This is for writes to memory. | ||
379 | */ | ||
380 | void (*dma_drain)(struct esp *esp); | ||
381 | |||
382 | /* Invalidate the DMA engine after a DMA transfer. */ | ||
383 | void (*dma_invalidate)(struct esp *esp); | ||
384 | |||
385 | /* Setup an ESP command that will use a DMA transfer. | ||
386 | * The 'esp_count' specifies what transfer length should be | ||
387 | * programmed into the ESP transfer counter registers, whereas | ||
388 | * the 'dma_count' is the length that should be programmed into | ||
389 | * the DMA controller. Usually they are the same. If 'write' | ||
390 | * is non-zero, this transfer is a write into memory. 'cmd' | ||
391 | * holds the ESP command that should be issued by calling | ||
392 | * scsi_esp_cmd() at the appropriate time while programming | ||
393 | * the DMA hardware. | ||
394 | */ | ||
395 | void (*send_dma_cmd)(struct esp *esp, u32 dma_addr, u32 esp_count, | ||
396 | u32 dma_count, int write, u8 cmd); | ||
397 | |||
398 | /* Return non-zero if the DMA engine is reporting an error | ||
399 | * currently. | ||
400 | */ | ||
401 | int (*dma_error)(struct esp *esp); | ||
402 | }; | ||
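
A front-end supplies one of these per bus implementation. A minimal sketch with hypothetical names, assuming 4-byte register spacing as on SBUS (the real SBUS front-end appears in sun_esp.c below):

	static void my_esp_write8(struct esp *esp, u8 val, unsigned long reg)
	{
		sbus_writeb(val, esp->regs + (reg * 4UL));
	}

	static u8 my_esp_read8(struct esp *esp, unsigned long reg)
	{
		return sbus_readb(esp->regs + (reg * 4UL));
	}

	static const struct esp_driver_ops my_esp_ops = {
		.esp_write8	= my_esp_write8,
		.esp_read8	= my_esp_read8,
		/* .map_single, .irq_pending, .send_dma_cmd, etc. as above */
	};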
403 | |||
404 | #define ESP_MAX_MSG_SZ 8 | ||
405 | #define ESP_EVENT_LOG_SZ 32 | ||
406 | |||
407 | #define ESP_QUICKIRQ_LIMIT 100 | ||
408 | #define ESP_RESELECT_TAG_LIMIT 2500 | ||
409 | |||
410 | struct esp { | ||
411 | void __iomem *regs; | ||
412 | void __iomem *dma_regs; | ||
413 | |||
414 | const struct esp_driver_ops *ops; | ||
415 | |||
416 | struct Scsi_Host *host; | ||
417 | void *dev; | ||
418 | |||
419 | struct esp_cmd_entry *active_cmd; | ||
420 | |||
421 | struct list_head queued_cmds; | ||
422 | struct list_head active_cmds; | ||
423 | |||
424 | u8 *command_block; | ||
425 | dma_addr_t command_block_dma; | ||
426 | |||
427 | unsigned int data_dma_len; | ||
428 | |||
429 | /* The following are used to determine the cause of an IRQ. Upon every | ||
430 | * IRQ entry we synchronize these with the hardware registers. | ||
431 | */ | ||
432 | u8 sreg; | ||
433 | u8 seqreg; | ||
434 | u8 sreg2; | ||
435 | u8 ireg; | ||
436 | |||
437 | u32 prev_hme_dmacsr; | ||
438 | u8 prev_soff; | ||
439 | u8 prev_stp; | ||
440 | u8 prev_cfg3; | ||
441 | u8 __pad; | ||
442 | |||
443 | struct list_head esp_cmd_pool; | ||
444 | |||
445 | struct esp_target_data target[ESP_MAX_TARGET]; | ||
446 | |||
447 | int fifo_cnt; | ||
448 | u8 fifo[16]; | ||
449 | |||
450 | struct esp_event_ent esp_event_log[ESP_EVENT_LOG_SZ]; | ||
451 | int esp_event_cur; | ||
452 | |||
453 | u8 msg_out[ESP_MAX_MSG_SZ]; | ||
454 | int msg_out_len; | ||
455 | |||
456 | u8 msg_in[ESP_MAX_MSG_SZ]; | ||
457 | int msg_in_len; | ||
458 | |||
459 | u8 bursts; | ||
460 | u8 config1; | ||
461 | u8 config2; | ||
462 | |||
463 | u8 scsi_id; | ||
464 | u32 scsi_id_mask; | ||
465 | |||
466 | enum esp_rev rev; | ||
467 | |||
468 | u32 flags; | ||
469 | #define ESP_FLAG_DIFFERENTIAL 0x00000001 | ||
470 | #define ESP_FLAG_RESETTING 0x00000002 | ||
471 | #define ESP_FLAG_DOING_SLOWCMD 0x00000004 | ||
472 | #define ESP_FLAG_WIDE_CAPABLE 0x00000008 | ||
473 | #define ESP_FLAG_QUICKIRQ_CHECK 0x00000010 | ||
474 | |||
475 | u8 select_state; | ||
476 | #define ESP_SELECT_NONE 0x00 /* Not selecting */ | ||
477 | #define ESP_SELECT_BASIC 0x01 /* Select w/o MSGOUT phase */ | ||
478 | #define ESP_SELECT_MSGOUT 0x02 /* Select with MSGOUT */ | ||
479 | |||
480 | /* When we are not selecting, we are expecting an event. */ | ||
481 | u8 event; | ||
482 | #define ESP_EVENT_NONE 0x00 | ||
483 | #define ESP_EVENT_CMD_START 0x01 | ||
484 | #define ESP_EVENT_CMD_DONE 0x02 | ||
485 | #define ESP_EVENT_DATA_IN 0x03 | ||
486 | #define ESP_EVENT_DATA_OUT 0x04 | ||
487 | #define ESP_EVENT_DATA_DONE 0x05 | ||
488 | #define ESP_EVENT_MSGIN 0x06 | ||
489 | #define ESP_EVENT_MSGIN_MORE 0x07 | ||
490 | #define ESP_EVENT_MSGIN_DONE 0x08 | ||
491 | #define ESP_EVENT_MSGOUT 0x09 | ||
492 | #define ESP_EVENT_MSGOUT_DONE 0x0a | ||
493 | #define ESP_EVENT_STATUS 0x0b | ||
494 | #define ESP_EVENT_FREE_BUS 0x0c | ||
495 | #define ESP_EVENT_CHECK_PHASE 0x0d | ||
496 | #define ESP_EVENT_RESET 0x10 | ||
497 | |||
498 | /* Probed in esp_get_clock_params() */ | ||
499 | u32 cfact; | ||
500 | u32 cfreq; | ||
501 | u32 ccycle; | ||
502 | u32 ctick; | ||
503 | u32 neg_defp; | ||
504 | u32 sync_defp; | ||
505 | |||
506 | /* Computed in esp_reset_esp() */ | ||
507 | u32 max_period; | ||
508 | u32 min_period; | ||
509 | u32 radelay; | ||
510 | |||
511 | /* Slow command state. */ | ||
512 | u8 *cmd_bytes_ptr; | ||
513 | int cmd_bytes_left; | ||
514 | |||
515 | struct completion *eh_reset; | ||
516 | |||
517 | struct sbus_dma *dma; | ||
518 | }; | ||
519 | |||
520 | #define host_to_esp(host) ((struct esp *)(host)->hostdata) | ||
521 | |||
522 | /* A front-end driver for the ESP chip should do the following in | ||
523 | * its device probe routine: | ||
524 | * 1) Allocate the host and private area using scsi_host_alloc() | ||
525 | * with size 'sizeof(struct esp)'. The first argument to | ||
526 | * scsi_host_alloc() should be &scsi_esp_template. | ||
527 | * 2) Set host->max_id as appropriate. | ||
528 | * 3) Set esp->host to the scsi_host itself, and esp->dev | ||
529 | * to the device object pointer. | ||
530 | * 4) Hook up esp->ops to the front-end implementation. | ||
531 | * 5) If the ESP chip supports wide transfers, set ESP_FLAG_WIDE_CAPABLE | ||
532 | * in esp->flags. | ||
533 | * 6) Map the DMA and ESP chip registers. | ||
534 | * 7) DMA map the ESP command block, store the DMA address | ||
535 | * in esp->command_block_dma. | ||
536 | * 8) Register the scsi_esp_intr() interrupt handler. | ||
537 | * 9) Probe for and provide the following chip properties: | ||
538 | * esp->scsi_id (assign to esp->host->this_id too) | ||
539 | * esp->scsi_id_mask | ||
540 | * If ESP bus is differential, set ESP_FLAG_DIFFERENTIAL | ||
541 | * esp->cfreq | ||
542 | * DMA burst bit mask in esp->bursts, if necessary | ||
543 | * 10) Perform any actions necessary before the ESP device can | ||
544 | * be programmed for the first time. On some configs, for | ||
545 | * example, the DMA engine has to be reset before ESP can | ||
546 | * be programmed. | ||
547 | * 11) If necessary, call dev_set_drvdata(). | ||
548 | * 12) Call scsi_esp_register() with prepared 'esp' structure | ||
549 | * and a device pointer if possible. | ||
550 | * 13) Check scsi_esp_register() return value, release all resources | ||
551 | * if an error was returned. | ||
552 | */ | ||
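
Condensed into a skeleton, a hypothetical front-end probe routine following the steps above might look like this (names are illustrative and error unwinding is abbreviated):

	static int __devinit my_esp_probe(struct device *dev)
	{
		struct Scsi_Host *host;
		struct esp *esp;
		int err;

		host = scsi_host_alloc(&scsi_esp_template,
				       sizeof(struct esp));	/* steps 1-2 */
		if (!host)
			return -ENOMEM;
		host->max_id = 8;

		esp = host_to_esp(host);			/* step 3 */
		esp->host = host;
		esp->dev = dev;
		esp->ops = &my_esp_ops;				/* step 4 */

		/* Steps 5-11: set ESP_FLAG_WIDE_CAPABLE if appropriate, map
		 * the DMA and ESP registers, DMA-map the command block into
		 * esp->command_block_dma, request_irq(scsi_esp_intr), and
		 * probe esp->scsi_id, esp->scsi_id_mask and esp->cfreq.
		 */

		err = scsi_esp_register(esp, dev);		/* steps 12-13 */
		if (err)
			scsi_host_put(host);
		return err;
	}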
553 | extern struct scsi_host_template scsi_esp_template; | ||
554 | extern int scsi_esp_register(struct esp *, struct device *); | ||
555 | |||
556 | extern void scsi_esp_unregister(struct esp *); | ||
557 | extern irqreturn_t scsi_esp_intr(int, void *); | ||
558 | extern void scsi_esp_cmd(struct esp *, u8); | ||
559 | |||
560 | #endif /* !(_ESP_SCSI_H) */ | ||
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 9f10689905a8..c4195ea869e9 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -1403,7 +1403,7 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi | |||
1403 | struct scsi_host_template *tpnt = match->data; | 1403 | struct scsi_host_template *tpnt = match->data; |
1404 | struct Scsi_Host *host; | 1404 | struct Scsi_Host *host; |
1405 | struct qlogicpti *qpti; | 1405 | struct qlogicpti *qpti; |
1406 | char *fcode; | 1406 | const char *fcode; |
1407 | 1407 | ||
1408 | /* Sometimes Antares cards come up not completely | 1408 | /* Sometimes Antares cards come up not completely |
1409 | * setup, and we get a report of a zero IRQ. | 1409 | * setup, and we get a report of a zero IRQ. |
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index 1b59b27e887f..4bf9aa547c78 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c | |||
@@ -50,7 +50,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb) | |||
50 | while (skb->len >= NLMSG_SPACE(0)) { | 50 | while (skb->len >= NLMSG_SPACE(0)) { |
51 | err = 0; | 51 | err = 0; |
52 | 52 | ||
53 | nlh = (struct nlmsghdr *) skb->data; | 53 | nlh = nlmsg_hdr(skb); |
54 | if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || | 54 | if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || |
55 | (skb->len < nlh->nlmsg_len)) { | 55 | (skb->len < nlh->nlmsg_len)) { |
56 | printk(KERN_WARNING "%s: discarding partial skb\n", | 56 | printk(KERN_WARNING "%s: discarding partial skb\n", |
@@ -168,7 +168,8 @@ scsi_netlink_init(void) | |||
168 | } | 168 | } |
169 | 169 | ||
170 | scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT, | 170 | scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT, |
171 | SCSI_NL_GRP_CNT, scsi_nl_rcv, THIS_MODULE); | 171 | SCSI_NL_GRP_CNT, scsi_nl_rcv, NULL, |
172 | THIS_MODULE); | ||
172 | if (!scsi_nl_sock) { | 173 | if (!scsi_nl_sock) { |
173 | printk(KERN_ERR "%s: register of receive handler failed\n", | 174 | printk(KERN_ERR "%s: register of receive handler failed\n", |
174 | __FUNCTION__); | 175 | __FUNCTION__); |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index ce0d14af33c8..aabaa0576ab4 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -1081,7 +1081,7 @@ iscsi_if_rx(struct sock *sk, int len) | |||
1081 | struct nlmsghdr *nlh; | 1081 | struct nlmsghdr *nlh; |
1082 | struct iscsi_uevent *ev; | 1082 | struct iscsi_uevent *ev; |
1083 | 1083 | ||
1084 | nlh = (struct nlmsghdr *)skb->data; | 1084 | nlh = nlmsg_hdr(skb); |
1085 | if (nlh->nlmsg_len < sizeof(*nlh) || | 1085 | if (nlh->nlmsg_len < sizeof(*nlh) || |
1086 | skb->len < nlh->nlmsg_len) { | 1086 | skb->len < nlh->nlmsg_len) { |
1087 | break; | 1087 | break; |
@@ -1435,7 +1435,7 @@ static __init int iscsi_transport_init(void) | |||
1435 | if (err) | 1435 | if (err) |
1436 | goto unregister_conn_class; | 1436 | goto unregister_conn_class; |
1437 | 1437 | ||
1438 | nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, | 1438 | nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL, |
1439 | THIS_MODULE); | 1439 | THIS_MODULE); |
1440 | if (!nls) { | 1440 | if (!nls) { |
1441 | err = -ENOBUFS; | 1441 | err = -ENOBUFS; |
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c new file mode 100644 index 000000000000..8c766bcd1095 --- /dev/null +++ b/drivers/scsi/sun_esp.c | |||
@@ -0,0 +1,634 @@ | |||
1 | /* sun_esp.c: ESP front-end for Sparc SBUS systems. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/init.h> | ||
10 | |||
11 | #include <asm/irq.h> | ||
12 | #include <asm/io.h> | ||
13 | #include <asm/dma.h> | ||
14 | |||
15 | #include <asm/sbus.h> | ||
16 | |||
17 | #include <scsi/scsi_host.h> | ||
18 | |||
19 | #include "esp_scsi.h" | ||
20 | |||
21 | #define DRV_MODULE_NAME "sun_esp" | ||
22 | #define PFX DRV_MODULE_NAME ": " | ||
23 | #define DRV_VERSION "1.000" | ||
24 | #define DRV_MODULE_RELDATE "April 19, 2007" | ||
25 | |||
26 | #define dma_read32(REG) \ | ||
27 | sbus_readl(esp->dma_regs + (REG)) | ||
28 | #define dma_write32(VAL, REG) \ | ||
29 | sbus_writel((VAL), esp->dma_regs + (REG)) | ||
30 | |||
31 | static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev) | ||
32 | { | ||
33 | struct sbus_dev *sdev = esp->dev; | ||
34 | struct sbus_dma *dma; | ||
35 | |||
36 | if (dma_sdev != NULL) { | ||
37 | for_each_dvma(dma) { | ||
38 | if (dma->sdev == dma_sdev) | ||
39 | break; | ||
40 | } | ||
41 | } else { | ||
42 | for_each_dvma(dma) { | ||
43 | if (dma->sdev == NULL) | ||
44 | break; | ||
45 | |||
46 | /* If bus + slot are the same and it has the | ||
47 | * correct OBP name, it's ours. | ||
48 | */ | ||
49 | if (sdev->bus == dma->sdev->bus && | ||
50 | sdev->slot == dma->sdev->slot && | ||
51 | (!strcmp(dma->sdev->prom_name, "dma") || | ||
52 | !strcmp(dma->sdev->prom_name, "espdma"))) | ||
53 | break; | ||
54 | } | ||
55 | } | ||
56 | |||
57 | if (dma == NULL) { | ||
58 | printk(KERN_ERR PFX "[%s] Cannot find dma.\n", | ||
59 | sdev->ofdev.node->full_name); | ||
60 | return -ENODEV; | ||
61 | } | ||
62 | esp->dma = dma; | ||
63 | esp->dma_regs = dma->regs; | ||
64 | |||
65 | return 0; | ||
66 | |||
67 | } | ||
68 | |||
69 | static int __devinit esp_sbus_map_regs(struct esp *esp, int hme) | ||
70 | { | ||
71 | struct sbus_dev *sdev = esp->dev; | ||
72 | struct resource *res; | ||
73 | |||
74 | /* On HME, two reg sets exist, first is DVMA, | ||
75 | * second is ESP registers. | ||
76 | */ | ||
77 | if (hme) | ||
78 | res = &sdev->resource[1]; | ||
79 | else | ||
80 | res = &sdev->resource[0]; | ||
81 | |||
82 | esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP"); | ||
83 | if (!esp->regs) | ||
84 | return -ENOMEM; | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
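/* A small coherent buffer; the ESP core presumably DMAs command and
 * message bytes out of this block, so it must be consistently mapped.
 */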
89 | static int __devinit esp_sbus_map_command_block(struct esp *esp) | ||
90 | { | ||
91 | struct sbus_dev *sdev = esp->dev; | ||
92 | |||
93 | esp->command_block = sbus_alloc_consistent(sdev, 16, | ||
94 | &esp->command_block_dma); | ||
95 | if (!esp->command_block) | ||
96 | return -ENOMEM; | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static int __devinit esp_sbus_register_irq(struct esp *esp) | ||
101 | { | ||
102 | struct Scsi_Host *host = esp->host; | ||
103 | struct sbus_dev *sdev = esp->dev; | ||
104 | |||
105 | host->irq = sdev->irqs[0]; | ||
106 | return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); | ||
107 | } | ||
108 | |||
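/* Our initiator ID comes from OBP: try "initiator-id", then
 * "scsi-initiator-id" on the device node, then the same property on
 * the parent bus node, defaulting to 7 (SUN4 has no bus node at all).
 */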
109 | static void __devinit esp_get_scsi_id(struct esp *esp) | ||
110 | { | ||
111 | struct sbus_dev *sdev = esp->dev; | ||
112 | struct device_node *dp = sdev->ofdev.node; | ||
113 | |||
114 | esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff); | ||
115 | if (esp->scsi_id != 0xff) | ||
116 | goto done; | ||
117 | |||
118 | esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff); | ||
119 | if (esp->scsi_id != 0xff) | ||
120 | goto done; | ||
121 | |||
122 | if (!sdev->bus) { | ||
123 | /* SUN4 */ | ||
124 | esp->scsi_id = 7; | ||
125 | goto done; | ||
126 | } | ||
127 | |||
128 | esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node, | ||
129 | "scsi-initiator-id", 7); | ||
130 | |||
131 | done: | ||
132 | esp->host->this_id = esp->scsi_id; | ||
133 | esp->scsi_id_mask = (1 << esp->scsi_id); | ||
134 | } | ||
135 | |||
136 | static void __devinit esp_get_differential(struct esp *esp) | ||
137 | { | ||
138 | struct sbus_dev *sdev = esp->dev; | ||
139 | struct device_node *dp = sdev->ofdev.node; | ||
140 | |||
141 | if (of_find_property(dp, "differential", NULL)) | ||
142 | esp->flags |= ESP_FLAG_DIFFERENTIAL; | ||
143 | else | ||
144 | esp->flags &= ~ESP_FLAG_DIFFERENTIAL; | ||
145 | } | ||
146 | |||
147 | static void __devinit esp_get_clock_params(struct esp *esp) | ||
148 | { | ||
149 | struct sbus_dev *sdev = esp->dev; | ||
150 | struct device_node *dp = sdev->ofdev.node; | ||
151 | struct device_node *bus_dp; | ||
152 | int fmhz; | ||
153 | |||
154 | bus_dp = NULL; | ||
155 | if (sdev != NULL && sdev->bus != NULL) | ||
156 | bus_dp = sdev->bus->ofdev.node; | ||
157 | |||
158 | fmhz = of_getintprop_default(dp, "clock-frequency", 0); | ||
159 | if (fmhz == 0) | ||
160 | fmhz = (!bus_dp) ? 0 : | ||
161 | of_getintprop_default(bus_dp, "clock-frequency", 0); | ||
162 | |||
163 | esp->cfreq = fmhz; | ||
164 | } | ||
165 | |||
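/* The usable burst sizes are the intersection of the "burst-sizes"
 * properties on the ESP node, the DMA node, and the parent bus.  If
 * the result is unknown or lacks 16- and 32-byte bursts, fall back
 * to everything below 32 bytes.
 */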
166 | static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma) | ||
167 | { | ||
168 | struct sbus_dev *sdev = esp->dev; | ||
169 | struct device_node *dp = sdev->ofdev.node; | ||
170 | u8 bursts; | ||
171 | |||
172 | bursts = of_getintprop_default(dp, "burst-sizes", 0xff); | ||
173 | if (dma) { | ||
174 | struct device_node *dma_dp = dma->ofdev.node; | ||
175 | u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff); | ||
176 | if (val != 0xff) | ||
177 | bursts &= val; | ||
178 | } | ||
179 | |||
180 | if (sdev->bus) { | ||
181 | u8 val = of_getintprop_default(sdev->bus->ofdev.node, | ||
182 | "burst-sizes", 0xff); | ||
183 | if (val != 0xff) | ||
184 | bursts &= val; | ||
185 | } | ||
186 | |||
187 | if (bursts == 0xff || | ||
188 | (bursts & DMA_BURST16) == 0 || | ||
189 | (bursts & DMA_BURST32) == 0) | ||
190 | bursts = (DMA_BURST32 - 1); | ||
191 | |||
192 | esp->bursts = bursts; | ||
193 | } | ||
194 | |||
195 | static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma) | ||
196 | { | ||
197 | esp_get_scsi_id(esp); | ||
198 | esp_get_differential(esp); | ||
199 | esp_get_clock_params(esp); | ||
200 | esp_get_bursts(esp, espdma); | ||
201 | } | ||
202 | |||
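/* ESP registers are byte-wide and spaced four bytes apart on SBUS,
 * hence the reg * 4 scaling in these accessors.
 */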
203 | static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg) | ||
204 | { | ||
205 | sbus_writeb(val, esp->regs + (reg * 4UL)); | ||
206 | } | ||
207 | |||
208 | static u8 sbus_esp_read8(struct esp *esp, unsigned long reg) | ||
209 | { | ||
210 | return sbus_readb(esp->regs + (reg * 4UL)); | ||
211 | } | ||
212 | |||
213 | static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf, | ||
214 | size_t sz, int dir) | ||
215 | { | ||
216 | return sbus_map_single(esp->dev, buf, sz, dir); | ||
217 | } | ||
218 | |||
219 | static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg, | ||
220 | int num_sg, int dir) | ||
221 | { | ||
222 | return sbus_map_sg(esp->dev, sg, num_sg, dir); | ||
223 | } | ||
224 | |||
225 | static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr, | ||
226 | size_t sz, int dir) | ||
227 | { | ||
228 | sbus_unmap_single(esp->dev, addr, sz, dir); | ||
229 | } | ||
230 | |||
231 | static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, | ||
232 | int num_sg, int dir) | ||
233 | { | ||
234 | sbus_unmap_sg(esp->dev, sg, num_sg, dir); | ||
235 | } | ||
236 | |||
237 | static int sbus_esp_irq_pending(struct esp *esp) | ||
238 | { | ||
239 | if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)) | ||
240 | return 1; | ||
241 | return 0; | ||
242 | } | ||
243 | |||
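/* Reset the DVMA engine and reprogram it to match the burst and
 * SBUS64 capabilities probed earlier.  HME (FAS366) keeps a shadow
 * of its CSR in prev_hme_dmacsr; older revisions are pulsed through
 * DMA_RST_SCSI and then tuned per revision.
 */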
244 | static void sbus_esp_reset_dma(struct esp *esp) | ||
245 | { | ||
246 | int can_do_burst16, can_do_burst32, can_do_burst64; | ||
247 | int can_do_sbus64, lim; | ||
248 | u32 val; | ||
249 | |||
250 | can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; | ||
251 | can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; | ||
252 | can_do_burst64 = 0; | ||
253 | can_do_sbus64 = 0; | ||
254 | if (sbus_can_dma_64bit(esp->dev)) | ||
255 | can_do_sbus64 = 1; | ||
256 | if (sbus_can_burst64(esp->dev)) | ||
257 | can_do_burst64 = (esp->bursts & DMA_BURST64) != 0; | ||
258 | |||
259 | /* Put the DVMA into a known state. */ | ||
260 | if (esp->dma->revision != dvmahme) { | ||
261 | val = dma_read32(DMA_CSR); | ||
262 | dma_write32(val | DMA_RST_SCSI, DMA_CSR); | ||
263 | dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); | ||
264 | } | ||
265 | switch (esp->dma->revision) { | ||
266 | case dvmahme: | ||
267 | dma_write32(DMA_RESET_FAS366, DMA_CSR); | ||
268 | dma_write32(DMA_RST_SCSI, DMA_CSR); | ||
269 | |||
270 | esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS | | ||
271 | DMA_SCSI_DISAB | DMA_INT_ENAB); | ||
272 | |||
273 | esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE | | ||
274 | DMA_BRST_SZ); | ||
275 | |||
276 | if (can_do_burst64) | ||
277 | esp->prev_hme_dmacsr |= DMA_BRST64; | ||
278 | else if (can_do_burst32) | ||
279 | esp->prev_hme_dmacsr |= DMA_BRST32; | ||
280 | |||
281 | if (can_do_sbus64) { | ||
282 | esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64; | ||
283 | sbus_set_sbus64(esp->dev, esp->bursts); | ||
284 | } | ||
285 | |||
286 | lim = 1000; | ||
287 | while (dma_read32(DMA_CSR) & DMA_PEND_READ) { | ||
288 | if (--lim == 0) { | ||
289 | printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ " | ||
290 | "will not clear!\n", | ||
291 | esp->host->unique_id); | ||
292 | break; | ||
293 | } | ||
294 | udelay(1); | ||
295 | } | ||
296 | |||
297 | dma_write32(0, DMA_CSR); | ||
298 | dma_write32(esp->prev_hme_dmacsr, DMA_CSR); | ||
299 | |||
300 | dma_write32(0, DMA_ADDR); | ||
301 | break; | ||
302 | |||
303 | case dvmarev2: | ||
304 | if (esp->rev != ESP100) { | ||
305 | val = dma_read32(DMA_CSR); | ||
306 | dma_write32(val | DMA_3CLKS, DMA_CSR); | ||
307 | } | ||
308 | break; | ||
309 | |||
310 | case dvmarev3: | ||
311 | val = dma_read32(DMA_CSR); | ||
312 | val &= ~DMA_3CLKS; | ||
313 | val |= DMA_2CLKS; | ||
314 | if (can_do_burst32) { | ||
315 | val &= ~DMA_BRST_SZ; | ||
316 | val |= DMA_BRST32; | ||
317 | } | ||
318 | dma_write32(val, DMA_CSR); | ||
319 | break; | ||
320 | |||
321 | case dvmaesc1: | ||
322 | val = dma_read32(DMA_CSR); | ||
323 | val |= DMA_ADD_ENABLE; | ||
324 | val &= ~DMA_BCNT_ENAB; | ||
325 | if (!can_do_burst32 && can_do_burst16) { | ||
326 | val |= DMA_ESC_BURST; | ||
327 | } else { | ||
328 | val &= ~(DMA_ESC_BURST); | ||
329 | } | ||
330 | dma_write32(val, DMA_CSR); | ||
331 | break; | ||
332 | |||
333 | default: | ||
334 | break; | ||
335 | } | ||
336 | |||
337 | /* Enable interrupts. */ | ||
338 | val = dma_read32(DMA_CSR); | ||
339 | dma_write32(val | DMA_INT_ENAB, DMA_CSR); | ||
340 | } | ||
341 | |||
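/* Wait for the DVMA FIFO to drain to memory.  HME never needs this,
 * and rev3/ESC1 drain on their own; older revisions must be kicked
 * with DMA_FIFO_STDRAIN first.  Poll for at most ~1ms.
 */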
342 | static void sbus_esp_dma_drain(struct esp *esp) | ||
343 | { | ||
344 | u32 csr; | ||
345 | int lim; | ||
346 | |||
347 | if (esp->dma->revision == dvmahme) | ||
348 | return; | ||
349 | |||
350 | csr = dma_read32(DMA_CSR); | ||
351 | if (!(csr & DMA_FIFO_ISDRAIN)) | ||
352 | return; | ||
353 | |||
354 | if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1) | ||
355 | dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR); | ||
356 | |||
357 | lim = 1000; | ||
358 | while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) { | ||
359 | if (--lim == 0) { | ||
360 | printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n", | ||
361 | esp->host->unique_id); | ||
362 | break; | ||
363 | } | ||
364 | udelay(1); | ||
365 | } | ||
366 | } | ||
367 | |||
368 | static void sbus_esp_dma_invalidate(struct esp *esp) | ||
369 | { | ||
370 | if (esp->dma->revision == dvmahme) { | ||
371 | dma_write32(DMA_RST_SCSI, DMA_CSR); | ||
372 | |||
373 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
374 | (DMA_PARITY_OFF | DMA_2CLKS | | ||
375 | DMA_SCSI_DISAB | DMA_INT_ENAB)) & | ||
376 | ~(DMA_ST_WRITE | DMA_ENABLE)); | ||
377 | |||
378 | dma_write32(0, DMA_CSR); | ||
379 | dma_write32(esp->prev_hme_dmacsr, DMA_CSR); | ||
380 | |||
381 | /* This is necessary to avoid having the SCSI channel | ||
382 | * engine lock up on us. | ||
383 | */ | ||
384 | dma_write32(0, DMA_ADDR); | ||
385 | } else { | ||
386 | u32 val; | ||
387 | int lim; | ||
388 | |||
389 | lim = 1000; | ||
390 | while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) { | ||
391 | if (--lim == 0) { | ||
392 | printk(KERN_ALERT PFX "esp%d: DMA will not " | ||
393 | "invalidate!\n", esp->host->unique_id); | ||
394 | break; | ||
395 | } | ||
396 | udelay(1); | ||
397 | } | ||
398 | |||
399 | val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); | ||
400 | val |= DMA_FIFO_INV; | ||
401 | dma_write32(val, DMA_CSR); | ||
402 | val &= ~DMA_FIFO_INV; | ||
403 | dma_write32(val, DMA_CSR); | ||
404 | } | ||
405 | } | ||
406 | |||
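/* Load the transfer count into the ESP (TCLOW/TCMED, plus the
 * extended FAS_RLO/FAS_RHI count bytes on HME), then program and
 * enable the DVMA.  ESC1 wants the byte count rounded out to a page
 * boundary, hence the PAGE_ALIGN dance.
 */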
407 | static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, | ||
408 | u32 dma_count, int write, u8 cmd) | ||
409 | { | ||
410 | u32 csr; | ||
411 | |||
412 | BUG_ON(!(cmd & ESP_CMD_DMA)); | ||
413 | |||
414 | sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); | ||
415 | sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); | ||
416 | if (esp->rev == FASHME) { | ||
417 | sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO); | ||
418 | sbus_esp_write8(esp, 0, FAS_RHI); | ||
419 | |||
420 | scsi_esp_cmd(esp, cmd); | ||
421 | |||
422 | csr = esp->prev_hme_dmacsr; | ||
423 | csr |= DMA_SCSI_DISAB | DMA_ENABLE; | ||
424 | if (write) | ||
425 | csr |= DMA_ST_WRITE; | ||
426 | else | ||
427 | csr &= ~DMA_ST_WRITE; | ||
428 | esp->prev_hme_dmacsr = csr; | ||
429 | |||
430 | dma_write32(dma_count, DMA_COUNT); | ||
431 | dma_write32(addr, DMA_ADDR); | ||
432 | dma_write32(csr, DMA_CSR); | ||
433 | } else { | ||
434 | csr = dma_read32(DMA_CSR); | ||
435 | csr |= DMA_ENABLE; | ||
436 | if (write) | ||
437 | csr |= DMA_ST_WRITE; | ||
438 | else | ||
439 | csr &= ~DMA_ST_WRITE; | ||
440 | dma_write32(csr, DMA_CSR); | ||
441 | if (esp->dma->revision == dvmaesc1) { | ||
442 | u32 end = PAGE_ALIGN(addr + dma_count + 16U); | ||
443 | dma_write32(end - addr, DMA_COUNT); | ||
444 | } | ||
445 | dma_write32(addr, DMA_ADDR); | ||
446 | |||
447 | scsi_esp_cmd(esp, cmd); | ||
448 | } | ||
449 | |||
450 | } | ||
451 | |||
452 | static int sbus_esp_dma_error(struct esp *esp) | ||
453 | { | ||
454 | u32 csr = dma_read32(DMA_CSR); | ||
455 | |||
456 | if (csr & DMA_HNDL_ERROR) | ||
457 | return 1; | ||
458 | |||
459 | return 0; | ||
460 | } | ||
461 | |||
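/* Glue between the bus-independent ESP core and the SBUS/DVMA
 * specifics implemented above.
 */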
462 | static const struct esp_driver_ops sbus_esp_ops = { | ||
463 | .esp_write8 = sbus_esp_write8, | ||
464 | .esp_read8 = sbus_esp_read8, | ||
465 | .map_single = sbus_esp_map_single, | ||
466 | .map_sg = sbus_esp_map_sg, | ||
467 | .unmap_single = sbus_esp_unmap_single, | ||
468 | .unmap_sg = sbus_esp_unmap_sg, | ||
469 | .irq_pending = sbus_esp_irq_pending, | ||
470 | .reset_dma = sbus_esp_reset_dma, | ||
471 | .dma_drain = sbus_esp_dma_drain, | ||
472 | .dma_invalidate = sbus_esp_dma_invalidate, | ||
473 | .send_dma_cmd = sbus_esp_send_dma_cmd, | ||
474 | .dma_error = sbus_esp_dma_error, | ||
475 | }; | ||
476 | |||
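/* Bring up one host: allocate the Scsi_Host, find the DVMA, map the
 * registers and command block, grab the IRQ, read the OBP properties,
 * and hand the result to the ESP core, unwinding in reverse on error.
 */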
477 | static int __devinit esp_sbus_probe_one(struct device *dev, | ||
478 | struct sbus_dev *esp_dev, | ||
479 | struct sbus_dev *espdma, | ||
480 | struct sbus_bus *sbus, | ||
481 | int hme) | ||
482 | { | ||
483 | struct scsi_host_template *tpnt = &scsi_esp_template; | ||
484 | struct Scsi_Host *host; | ||
485 | struct esp *esp; | ||
486 | int err; | ||
487 | |||
488 | host = scsi_host_alloc(tpnt, sizeof(struct esp)); | ||
489 | |||
490 | err = -ENOMEM; | ||
491 | if (!host) | ||
492 | goto fail; | ||
493 | |||
494 | host->max_id = (hme ? 16 : 8); | ||
495 | esp = host_to_esp(host); | ||
496 | |||
497 | esp->host = host; | ||
498 | esp->dev = esp_dev; | ||
499 | esp->ops = &sbus_esp_ops; | ||
500 | |||
501 | if (hme) | ||
502 | esp->flags |= ESP_FLAG_WIDE_CAPABLE; | ||
503 | |||
504 | err = esp_sbus_find_dma(esp, espdma); | ||
505 | if (err < 0) | ||
506 | goto fail_unlink; | ||
507 | |||
508 | err = esp_sbus_map_regs(esp, hme); | ||
509 | if (err < 0) | ||
510 | goto fail_unlink; | ||
511 | |||
512 | err = esp_sbus_map_command_block(esp); | ||
513 | if (err < 0) | ||
514 | goto fail_unmap_regs; | ||
515 | |||
516 | err = esp_sbus_register_irq(esp); | ||
517 | if (err < 0) | ||
518 | goto fail_unmap_command_block; | ||
519 | |||
520 | esp_sbus_get_props(esp, espdma); | ||
521 | |||
522 | /* Before we try to touch the ESP chip, ESC1 dma can | ||
523 | * come up with the reset bit set, so make sure that | ||
524 | * is clear first. | ||
525 | */ | ||
526 | if (esp->dma->revision == dvmaesc1) { | ||
527 | u32 val = dma_read32(DMA_CSR); | ||
528 | |||
529 | dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); | ||
530 | } | ||
531 | |||
532 | dev_set_drvdata(&esp_dev->ofdev.dev, esp); | ||
533 | |||
534 | err = scsi_esp_register(esp, dev); | ||
535 | if (err) | ||
536 | goto fail_free_irq; | ||
537 | |||
538 | return 0; | ||
539 | |||
540 | fail_free_irq: | ||
541 | free_irq(host->irq, esp); | ||
542 | fail_unmap_command_block: | ||
543 | sbus_free_consistent(esp->dev, 16, | ||
544 | esp->command_block, | ||
545 | esp->command_block_dma); | ||
546 | fail_unmap_regs: | ||
547 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); | ||
548 | fail_unlink: | ||
549 | scsi_host_put(host); | ||
550 | fail: | ||
551 | return err; | ||
552 | } | ||
553 | |||
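/* A node named "SUNW,fas" is a FAS366 (HME) with the DMA engine on
 * the chip itself; otherwise a parent "dma" or "espdma" node is the
 * companion DVMA device.
 */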
554 | static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match) | ||
555 | { | ||
556 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); | ||
557 | struct device_node *dp = dev->node; | ||
558 | struct sbus_dev *dma_sdev = NULL; | ||
559 | int hme = 0; | ||
560 | |||
561 | if (dp->parent && | ||
562 | (!strcmp(dp->parent->name, "espdma") || | ||
563 | !strcmp(dp->parent->name, "dma"))) | ||
564 | dma_sdev = sdev->parent; | ||
565 | else if (!strcmp(dp->name, "SUNW,fas")) { | ||
566 | dma_sdev = sdev; | ||
567 | hme = 1; | ||
568 | } | ||
569 | |||
570 | return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev, | ||
571 | sdev->bus, hme); | ||
572 | } | ||
573 | |||
574 | static int __devexit esp_sbus_remove(struct of_device *dev) | ||
575 | { | ||
576 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
577 | unsigned int irq = esp->host->irq; | ||
578 | u32 val; | ||
579 | |||
580 | scsi_esp_unregister(esp); | ||
581 | |||
582 | /* Disable interrupts. */ | ||
583 | val = dma_read32(DMA_CSR); | ||
584 | dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); | ||
585 | |||
586 | free_irq(irq, esp); | ||
587 | sbus_free_consistent(esp->dev, 16, | ||
588 | esp->command_block, | ||
589 | esp->command_block_dma); | ||
590 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); | ||
591 | |||
592 | scsi_host_put(esp->host); | ||
593 | |||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | static struct of_device_id esp_match[] = { | ||
598 | { | ||
599 | .name = "SUNW,esp", | ||
600 | }, | ||
601 | { | ||
602 | .name = "SUNW,fas", | ||
603 | }, | ||
604 | { | ||
605 | .name = "esp", | ||
606 | }, | ||
607 | {}, | ||
608 | }; | ||
609 | MODULE_DEVICE_TABLE(of, esp_match); | ||
610 | |||
611 | static struct of_platform_driver esp_sbus_driver = { | ||
612 | .name = "esp", | ||
613 | .match_table = esp_match, | ||
614 | .probe = esp_sbus_probe, | ||
615 | .remove = __devexit_p(esp_sbus_remove), | ||
616 | }; | ||
617 | |||
618 | static int __init sunesp_init(void) | ||
619 | { | ||
620 | return of_register_driver(&esp_sbus_driver, &sbus_bus_type); | ||
621 | } | ||
622 | |||
623 | static void __exit sunesp_exit(void) | ||
624 | { | ||
625 | of_unregister_driver(&esp_sbus_driver); | ||
626 | } | ||
627 | |||
628 | MODULE_DESCRIPTION("Sun ESP SCSI driver"); | ||
629 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
630 | MODULE_LICENSE("GPL"); | ||
631 | MODULE_VERSION(DRV_VERSION); | ||
632 | |||
633 | module_init(sunesp_init); | ||
634 | module_exit(sunesp_exit); | ||
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 96a852aa1903..bfd44177a215 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c | |||
@@ -1387,8 +1387,8 @@ static enum su_type __devinit su_get_type(struct device_node *dp) | |||
1387 | struct device_node *ap = of_find_node_by_path("/aliases"); | 1387 | struct device_node *ap = of_find_node_by_path("/aliases"); |
1388 | 1388 | ||
1389 | if (ap) { | 1389 | if (ap) { |
1390 | char *keyb = of_get_property(ap, "keyboard", NULL); | 1390 | const char *keyb = of_get_property(ap, "keyboard", NULL); |
1391 | char *ms = of_get_property(ap, "mouse", NULL); | 1391 | const char *ms = of_get_property(ap, "mouse", NULL); |
1392 | 1392 | ||
1393 | if (keyb) { | 1393 | if (keyb) { |
1394 | if (dp == of_find_node_by_path(keyb)) | 1394 | if (dp == of_find_node_by_path(keyb)) |
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index ec63b0ee0743..d3e2c5f90a26 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c | |||
@@ -343,7 +343,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char | |||
343 | UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end); | 343 | UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end); |
344 | } | 344 | } |
345 | 345 | ||
346 | memcpy(sarb->tail, source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD); | 346 | memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD); |
347 | __skb_put(sarb, ATM_CELL_PAYLOAD); | 347 | __skb_put(sarb, ATM_CELL_PAYLOAD); |
348 | 348 | ||
349 | if (pti & 1) { | 349 | if (pti & 1) { |
@@ -370,7 +370,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char | |||
370 | goto out; | 370 | goto out; |
371 | } | 371 | } |
372 | 372 | ||
373 | if (crc32_be(~0, sarb->tail - pdu_length, pdu_length) != 0xc704dd7b) { | 373 | if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { |
374 | atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", | 374 | atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", |
375 | __func__, vcc); | 375 | __func__, vcc); |
376 | atomic_inc(&vcc->stats->rx_err); | 376 | atomic_inc(&vcc->stats->rx_err); |
@@ -396,7 +396,9 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char | |||
396 | goto out; /* atm_charge increments rx_drop */ | 396 | goto out; /* atm_charge increments rx_drop */ |
397 | } | 397 | } |
398 | 398 | ||
399 | memcpy(skb->data, sarb->tail - pdu_length, length); | 399 | skb_copy_to_linear_data(skb, |
400 | skb_tail_pointer(sarb) - pdu_length, | ||
401 | length); | ||
400 | __skb_put(skb, length); | 402 | __skb_put(skb, length); |
401 | 403 | ||
402 | vdbg("%s: sending skb 0x%p, skb->len %u, skb->truesize %u", | 404 | vdbg("%s: sending skb 0x%p, skb->len %u, skb->truesize %u", |
@@ -484,7 +486,7 @@ static unsigned int usbatm_write_cells(struct usbatm_data *instance, | |||
484 | ptr[4] = 0xec; | 486 | ptr[4] = 0xec; |
485 | ptr += ATM_CELL_HEADER; | 487 | ptr += ATM_CELL_HEADER; |
486 | 488 | ||
487 | memcpy(ptr, skb->data, data_len); | 489 | skb_copy_from_linear_data(skb, ptr, data_len); |
488 | ptr += data_len; | 490 | ptr += data_len; |
489 | __skb_pull(skb, data_len); | 491 | __skb_pull(skb, data_len); |
490 | 492 | ||
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index 04e6b8508fb6..8f9f217e0a68 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c | |||
@@ -1766,7 +1766,6 @@ static void rx_complete (struct usb_ep *ep, struct usb_request *req) | |||
1766 | break; | 1766 | break; |
1767 | } | 1767 | } |
1768 | 1768 | ||
1769 | skb->dev = dev->net; | ||
1770 | skb->protocol = eth_type_trans (skb, dev->net); | 1769 | skb->protocol = eth_type_trans (skb, dev->net); |
1771 | dev->stats.rx_packets++; | 1770 | dev->stats.rx_packets++; |
1772 | dev->stats.rx_bytes += skb->len; | 1771 | dev->stats.rx_bytes += skb->len; |
diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c index 5808ea082459..d5ef97bc4d01 100644 --- a/drivers/usb/net/asix.c +++ b/drivers/usb/net/asix.c | |||
@@ -298,7 +298,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
298 | if (ax_skb) { | 298 | if (ax_skb) { |
299 | ax_skb->len = size; | 299 | ax_skb->len = size; |
300 | ax_skb->data = packet; | 300 | ax_skb->data = packet; |
301 | ax_skb->tail = packet + size; | 301 | skb_set_tail_pointer(ax_skb, size); |
302 | usbnet_skb_return(dev, ax_skb); | 302 | usbnet_skb_return(dev, ax_skb); |
303 | } else { | 303 | } else { |
304 | return 0; | 304 | return 0; |
@@ -338,7 +338,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
338 | && ((headroom + tailroom) >= (4 + padlen))) { | 338 | && ((headroom + tailroom) >= (4 + padlen))) { |
339 | if ((headroom < 4) || (tailroom < padlen)) { | 339 | if ((headroom < 4) || (tailroom < padlen)) { |
340 | skb->data = memmove(skb->head + 4, skb->data, skb->len); | 340 | skb->data = memmove(skb->head + 4, skb->data, skb->len); |
341 | skb->tail = skb->data + skb->len; | 341 | skb_set_tail_pointer(skb, skb->len); |
342 | } | 342 | } |
343 | } else { | 343 | } else { |
344 | struct sk_buff *skb2; | 344 | struct sk_buff *skb2; |
@@ -352,11 +352,11 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
352 | skb_push(skb, 4); | 352 | skb_push(skb, 4); |
353 | packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4); | 353 | packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4); |
354 | cpu_to_le32s(&packet_len); | 354 | cpu_to_le32s(&packet_len); |
355 | memcpy(skb->data, &packet_len, sizeof(packet_len)); | 355 | skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); |
356 | 356 | ||
357 | if ((skb->len % 512) == 0) { | 357 | if ((skb->len % 512) == 0) { |
358 | cpu_to_le32s(&padbytes); | 358 | cpu_to_le32s(&padbytes); |
359 | memcpy( skb->tail, &padbytes, sizeof(padbytes)); | 359 | memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); |
360 | skb_put(skb, sizeof(padbytes)); | 360 | skb_put(skb, sizeof(padbytes)); |
361 | } | 361 | } |
362 | return skb; | 362 | return skb; |
diff --git a/drivers/usb/net/catc.c b/drivers/usb/net/catc.c index 4852012735f6..ffec2e01b896 100644 --- a/drivers/usb/net/catc.c +++ b/drivers/usb/net/catc.c | |||
@@ -255,7 +255,6 @@ static void catc_rx_done(struct urb *urb) | |||
255 | if (!(skb = dev_alloc_skb(pkt_len))) | 255 | if (!(skb = dev_alloc_skb(pkt_len))) |
256 | return; | 256 | return; |
257 | 257 | ||
258 | skb->dev = catc->netdev; | ||
259 | eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0); | 258 | eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0); |
260 | skb_put(skb, pkt_len); | 259 | skb_put(skb, pkt_len); |
261 | 260 | ||
@@ -419,7 +418,7 @@ static int catc_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
419 | catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; | 418 | catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; |
420 | tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; | 419 | tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; |
421 | *((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len); | 420 | *((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len); |
422 | memcpy(tx_buf + 2, skb->data, skb->len); | 421 | skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); |
423 | catc->tx_ptr += skb->len + 2; | 422 | catc->tx_ptr += skb->len + 2; |
424 | 423 | ||
425 | if (!test_and_set_bit(TX_RUNNING, &catc->flags)) | 424 | if (!test_and_set_bit(TX_RUNNING, &catc->flags)) |
diff --git a/drivers/usb/net/gl620a.c b/drivers/usb/net/gl620a.c index d257a8e026d6..031cf5ca4dbb 100644 --- a/drivers/usb/net/gl620a.c +++ b/drivers/usb/net/gl620a.c | |||
@@ -157,7 +157,7 @@ genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | |||
157 | if ((headroom < (4 + 4*1)) || (tailroom < padlen)) { | 157 | if ((headroom < (4 + 4*1)) || (tailroom < padlen)) { |
158 | skb->data = memmove(skb->head + (4 + 4*1), | 158 | skb->data = memmove(skb->head + (4 + 4*1), |
159 | skb->data, skb->len); | 159 | skb->data, skb->len); |
160 | skb->tail = skb->data + skb->len; | 160 | skb_set_tail_pointer(skb, skb->len); |
161 | } | 161 | } |
162 | } else { | 162 | } else { |
163 | struct sk_buff *skb2; | 163 | struct sk_buff *skb2; |
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c index de95268ae4b8..a0cc05d21a6a 100644 --- a/drivers/usb/net/kaweth.c +++ b/drivers/usb/net/kaweth.c | |||
@@ -636,8 +636,6 @@ static void kaweth_usb_receive(struct urb *urb) | |||
636 | 636 | ||
637 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 637 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
638 | 638 | ||
639 | skb->dev = net; | ||
640 | |||
641 | eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0); | 639 | eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0); |
642 | 640 | ||
643 | skb_put(skb, pkt_len); | 641 | skb_put(skb, pkt_len); |
diff --git a/drivers/usb/net/net1080.c b/drivers/usb/net/net1080.c index ccebfdef4751..19bf8dae70c9 100644 --- a/drivers/usb/net/net1080.c +++ b/drivers/usb/net/net1080.c | |||
@@ -520,7 +520,7 @@ net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | |||
520 | skb->data = memmove(skb->head | 520 | skb->data = memmove(skb->head |
521 | + sizeof (struct nc_header), | 521 | + sizeof (struct nc_header), |
522 | skb->data, skb->len); | 522 | skb->data, skb->len); |
523 | skb->tail = skb->data + len; | 523 | skb_set_tail_pointer(skb, len); |
524 | goto encapsulate; | 524 | goto encapsulate; |
525 | } | 525 | } |
526 | } | 526 | } |
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c index 6d12961cf9f9..1ad4ee54b186 100644 --- a/drivers/usb/net/pegasus.c +++ b/drivers/usb/net/pegasus.c | |||
@@ -575,7 +575,6 @@ static void fill_skb_pool(pegasus_t * pegasus) | |||
575 | */ | 575 | */ |
576 | if (pegasus->rx_pool[i] == NULL) | 576 | if (pegasus->rx_pool[i] == NULL) |
577 | return; | 577 | return; |
578 | pegasus->rx_pool[i]->dev = pegasus->net; | ||
579 | skb_reserve(pegasus->rx_pool[i], 2); | 578 | skb_reserve(pegasus->rx_pool[i], 2); |
580 | } | 579 | } |
581 | } | 580 | } |
@@ -890,7 +889,7 @@ static int pegasus_start_xmit(struct sk_buff *skb, struct net_device *net) | |||
890 | netif_stop_queue(net); | 889 | netif_stop_queue(net); |
891 | 890 | ||
892 | ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16); | 891 | ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16); |
893 | memcpy(pegasus->tx_buff + 2, skb->data, skb->len); | 892 | skb_copy_from_linear_data(skb, pegasus->tx_buff + 2, skb->len); |
894 | usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb, | 893 | usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb, |
895 | usb_sndbulkpipe(pegasus->usb, 2), | 894 | usb_sndbulkpipe(pegasus->usb, 2), |
896 | pegasus->tx_buff, count, | 895 | pegasus->tx_buff, count, |
@@ -1415,8 +1414,10 @@ static void pegasus_disconnect(struct usb_interface *intf) | |||
1415 | unlink_all_urbs(pegasus); | 1414 | unlink_all_urbs(pegasus); |
1416 | free_all_urbs(pegasus); | 1415 | free_all_urbs(pegasus); |
1417 | free_skb_pool(pegasus); | 1416 | free_skb_pool(pegasus); |
1418 | if (pegasus->rx_skb) | 1417 | if (pegasus->rx_skb != NULL) { |
1419 | dev_kfree_skb(pegasus->rx_skb); | 1418 | dev_kfree_skb(pegasus->rx_skb); |
1419 | pegasus->rx_skb = NULL; | ||
1420 | } | ||
1420 | free_netdev(pegasus->net); | 1421 | free_netdev(pegasus->net); |
1421 | } | 1422 | } |
1422 | 1423 | ||
diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c index 39a21c74fdf4..1d36772ba6e1 100644 --- a/drivers/usb/net/rndis_host.c +++ b/drivers/usb/net/rndis_host.c | |||
@@ -588,7 +588,7 @@ rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | |||
588 | if (likely((sizeof *hdr) <= room)) { | 588 | if (likely((sizeof *hdr) <= room)) { |
589 | skb->data = memmove(skb->head + sizeof *hdr, | 589 | skb->data = memmove(skb->head + sizeof *hdr, |
590 | skb->data, len); | 590 | skb->data, len); |
591 | skb->tail = skb->data + len; | 591 | skb_set_tail_pointer(skb, len); |
592 | goto fill; | 592 | goto fill; |
593 | } | 593 | } |
594 | } | 594 | } |
diff --git a/drivers/usb/net/rtl8150.c b/drivers/usb/net/rtl8150.c index ea153dc9b0ac..fa598f0340cf 100644 --- a/drivers/usb/net/rtl8150.c +++ b/drivers/usb/net/rtl8150.c | |||
@@ -646,7 +646,6 @@ static void fill_skb_pool(rtl8150_t *dev) | |||
646 | if (!skb) { | 646 | if (!skb) { |
647 | return; | 647 | return; |
648 | } | 648 | } |
649 | skb->dev = dev->netdev; | ||
650 | skb_reserve(skb, 2); | 649 | skb_reserve(skb, 2); |
651 | dev->rx_skb_pool[i] = skb; | 650 | dev->rx_skb_pool[i] = skb; |
652 | } | 651 | } |
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c index de69b183bd2f..0c5465a7909b 100644 --- a/drivers/usb/net/usbnet.c +++ b/drivers/usb/net/usbnet.c | |||
@@ -203,7 +203,6 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) | |||
203 | { | 203 | { |
204 | int status; | 204 | int status; |
205 | 205 | ||
206 | skb->dev = dev->net; | ||
207 | skb->protocol = eth_type_trans (skb, dev->net); | 206 | skb->protocol = eth_type_trans (skb, dev->net); |
208 | dev->stats.rx_packets++; | 207 | dev->stats.rx_packets++; |
209 | dev->stats.rx_bytes += skb->len; | 208 | dev->stats.rx_bytes += skb->len; |
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index d7627fc4f11e..8514f2a6f060 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c | |||
@@ -2899,7 +2899,7 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev, | |||
2899 | struct fb_info *info, unsigned long addr) | 2899 | struct fb_info *info, unsigned long addr) |
2900 | { | 2900 | { |
2901 | struct atyfb_par *par = info->par; | 2901 | struct atyfb_par *par = info->par; |
2902 | struct pcidev_cookie *pcp; | 2902 | struct device_node *dp; |
2903 | char prop[128]; | 2903 | char prop[128]; |
2904 | int node, len, i, j, ret; | 2904 | int node, len, i, j, ret; |
2905 | u32 mem, chip_id; | 2905 | u32 mem, chip_id; |
@@ -3037,8 +3037,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev, | |||
3037 | node = 0; | 3037 | node = 0; |
3038 | } | 3038 | } |
3039 | 3039 | ||
3040 | pcp = pdev->sysdata; | 3040 | dp = pci_device_to_OF_node(pdev); |
3041 | if (node == pcp->prom_node->node) { | 3041 | if (node == dp->node) { |
3042 | struct fb_var_screeninfo *var = &default_var; | 3042 | struct fb_var_screeninfo *var = &default_var; |
3043 | unsigned int N, P, Q, M, T, R; | 3043 | unsigned int N, P, Q, M, T, R; |
3044 | u32 v_total, h_total; | 3044 | u32 v_total, h_total; |
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index 1bf6f42eb400..a4b3fd185de7 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c | |||
@@ -410,7 +410,7 @@ static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo) | |||
410 | } | 410 | } |
411 | #endif | 411 | #endif |
412 | 412 | ||
413 | #ifdef CONFIG_PPC_OF | 413 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
414 | /* | 414 | /* |
415 | * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device | 415 | * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device |
416 | * tree. Hopefully, ATI OF driver is kind enough to fill these | 416 | * tree. Hopefully, ATI OF driver is kind enough to fill these |
@@ -440,7 +440,7 @@ static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo) | |||
440 | 440 | ||
441 | return 0; | 441 | return 0; |
442 | } | 442 | } |
443 | #endif /* CONFIG_PPC_OF */ | 443 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
444 | 444 | ||
445 | /* | 445 | /* |
446 | * Read PLL infos from chip registers | 446 | * Read PLL infos from chip registers |
@@ -645,7 +645,7 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo) | |||
645 | rinfo->pll.ref_div = INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK; | 645 | rinfo->pll.ref_div = INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK; |
646 | 646 | ||
647 | 647 | ||
648 | #ifdef CONFIG_PPC_OF | 648 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
649 | /* | 649 | /* |
650 | * Retrieve PLL infos from Open Firmware first | 650 | * Retrieve PLL infos from Open Firmware first |
651 | */ | 651 | */ |
@@ -653,7 +653,7 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo) | |||
653 | printk(KERN_INFO "radeonfb: Retrieved PLL infos from Open Firmware\n"); | 653 | printk(KERN_INFO "radeonfb: Retrieved PLL infos from Open Firmware\n"); |
654 | goto found; | 654 | goto found; |
655 | } | 655 | } |
656 | #endif /* CONFIG_PPC_OF */ | 656 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
657 | 657 | ||
658 | /* | 658 | /* |
659 | * Check out if we have an X86 which gave us some PLL information | 659 | * Check out if we have an X86 which gave us some PLL information |
@@ -2231,7 +2231,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev, | |||
2231 | rinfo->family == CHIP_FAMILY_RS200) | 2231 | rinfo->family == CHIP_FAMILY_RS200) |
2232 | rinfo->errata |= CHIP_ERRATA_PLL_DELAY; | 2232 | rinfo->errata |= CHIP_ERRATA_PLL_DELAY; |
2233 | 2233 | ||
2234 | #ifdef CONFIG_PPC_OF | 2234 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
2235 | /* On PPC, we obtain the OF device-node pointer to the firmware | 2235 | /* On PPC, we obtain the OF device-node pointer to the firmware |
2236 | * data for this chip | 2236 | * data for this chip |
2237 | */ | 2237 | */ |
@@ -2240,6 +2240,8 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev, | |||
2240 | printk(KERN_WARNING "radeonfb (%s): Cannot match card to OF node !\n", | 2240 | printk(KERN_WARNING "radeonfb (%s): Cannot match card to OF node !\n", |
2241 | pci_name(rinfo->pdev)); | 2241 | pci_name(rinfo->pdev)); |
2242 | 2242 | ||
2243 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ | ||
2244 | #ifdef CONFIG_PPC_OF | ||
2243 | /* On PPC, the firmware sets up a memory mapping that tends | 2245 | /* On PPC, the firmware sets up a memory mapping that tends |
2244 | * to cause lockups when enabling the engine. We reconfigure | 2246 | * to cause lockups when enabling the engine. We reconfigure |
2245 | * the card internal memory mappings properly | 2247 | * the card internal memory mappings properly |
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c index 38c7dbf8c151..737b5c09dbdb 100644 --- a/drivers/video/aty/radeon_monitor.c +++ b/drivers/video/aty/radeon_monitor.c | |||
@@ -52,7 +52,7 @@ static char *radeon_get_mon_name(int type) | |||
52 | } | 52 | } |
53 | 53 | ||
54 | 54 | ||
55 | #ifdef CONFIG_PPC_OF | 55 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
56 | /* | 56 | /* |
57 | * Try to find monitor information & EDID data out of the Open Firmware | 57 | * Try to find monitor information & EDID data out of the Open Firmware |
58 | * device-tree. This also contains some "hacks" to work around a few machine | 58 | * device-tree. This also contains some "hacks" to work around a few machine |
@@ -156,7 +156,7 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_ | |||
156 | } | 156 | } |
157 | return MT_NONE; | 157 | return MT_NONE; |
158 | } | 158 | } |
159 | #endif /* CONFIG_PPC_OF */ | 159 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
160 | 160 | ||
161 | 161 | ||
162 | static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo) | 162 | static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo) |
@@ -495,11 +495,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo, | |||
495 | * Old single head cards | 495 | * Old single head cards |
496 | */ | 496 | */ |
497 | if (!rinfo->has_CRTC2) { | 497 | if (!rinfo->has_CRTC2) { |
498 | #ifdef CONFIG_PPC_OF | 498 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
499 | if (rinfo->mon1_type == MT_NONE) | 499 | if (rinfo->mon1_type == MT_NONE) |
500 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, | 500 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, |
501 | &rinfo->mon1_EDID); | 501 | &rinfo->mon1_EDID); |
502 | #endif /* CONFIG_PPC_OF */ | 502 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
503 | #ifdef CONFIG_FB_RADEON_I2C | 503 | #ifdef CONFIG_FB_RADEON_I2C |
504 | if (rinfo->mon1_type == MT_NONE) | 504 | if (rinfo->mon1_type == MT_NONE) |
505 | rinfo->mon1_type = | 505 | rinfo->mon1_type = |
@@ -544,11 +544,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo, | |||
544 | /* | 544 | /* |
545 | * Probe primary head (DVI or laptop internal panel) | 545 | * Probe primary head (DVI or laptop internal panel) |
546 | */ | 546 | */ |
547 | #ifdef CONFIG_PPC_OF | 547 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
548 | if (rinfo->mon1_type == MT_NONE) | 548 | if (rinfo->mon1_type == MT_NONE) |
549 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, | 549 | rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0, |
550 | &rinfo->mon1_EDID); | 550 | &rinfo->mon1_EDID); |
551 | #endif /* CONFIG_PPC_OF */ | 551 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
552 | #ifdef CONFIG_FB_RADEON_I2C | 552 | #ifdef CONFIG_FB_RADEON_I2C |
553 | if (rinfo->mon1_type == MT_NONE) | 553 | if (rinfo->mon1_type == MT_NONE) |
554 | rinfo->mon1_type = radeon_probe_i2c_connector(rinfo, ddc_dvi, | 554 | rinfo->mon1_type = radeon_probe_i2c_connector(rinfo, ddc_dvi, |
@@ -572,11 +572,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo, | |||
572 | /* | 572 | /* |
573 | * Probe secondary head (mostly VGA, can be DVI) | 573 | * Probe secondary head (mostly VGA, can be DVI) |
574 | */ | 574 | */ |
575 | #ifdef CONFIG_PPC_OF | 575 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
576 | if (rinfo->mon2_type == MT_NONE) | 576 | if (rinfo->mon2_type == MT_NONE) |
577 | rinfo->mon2_type = radeon_probe_OF_head(rinfo, 1, | 577 | rinfo->mon2_type = radeon_probe_OF_head(rinfo, 1, |
578 | &rinfo->mon2_EDID); | 578 | &rinfo->mon2_EDID); |
579 | #endif /* CONFIG_PPC_OF */ | 579 | #endif /* CONFIG_PPC_OF || CONFIG_SPARC */ |
580 | #ifdef CONFIG_FB_RADEON_I2C | 580 | #ifdef CONFIG_FB_RADEON_I2C |
581 | if (rinfo->mon2_type == MT_NONE) | 581 | if (rinfo->mon2_type == MT_NONE) |
582 | rinfo->mon2_type = radeon_probe_i2c_connector(rinfo, ddc_vga, | 582 | rinfo->mon2_type = radeon_probe_i2c_connector(rinfo, ddc_vga, |
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h index d5ff224a6258..319000360285 100644 --- a/drivers/video/aty/radeonfb.h +++ b/drivers/video/aty/radeonfb.h | |||
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | #ifdef CONFIG_PPC_OF | 19 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
20 | #include <asm/prom.h> | 20 | #include <asm/prom.h> |
21 | #endif | 21 | #endif |
22 | 22 | ||
@@ -292,7 +292,7 @@ struct radeonfb_info { | |||
292 | unsigned long fb_local_base; | 292 | unsigned long fb_local_base; |
293 | 293 | ||
294 | struct pci_dev *pdev; | 294 | struct pci_dev *pdev; |
295 | #ifdef CONFIG_PPC_OF | 295 | #if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC) |
296 | struct device_node *of_node; | 296 | struct device_node *of_node; |
297 | #endif | 297 | #endif |
298 | 298 | ||
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c index 767c850f8eb7..f042428a84f4 100644 --- a/drivers/video/cg3.c +++ b/drivers/video/cg3.c | |||
@@ -266,7 +266,7 @@ static void __devinit cg3_init_fix(struct fb_info *info, int linebytes, | |||
266 | static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var, | 266 | static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var, |
267 | struct device_node *dp) | 267 | struct device_node *dp) |
268 | { | 268 | { |
269 | char *params; | 269 | const char *params; |
270 | char *p; | 270 | char *p; |
271 | int ww, hh; | 271 | int ww, hh; |
272 | 272 | ||
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c index 90592fb59156..eb1a4812ad1d 100644 --- a/drivers/video/igafb.c +++ b/drivers/video/igafb.c | |||
@@ -44,8 +44,8 @@ | |||
44 | 44 | ||
45 | #include <asm/io.h> | 45 | #include <asm/io.h> |
46 | 46 | ||
47 | #ifdef __sparc__ | 47 | #ifdef CONFIG_SPARC |
48 | #include <asm/pbm.h> | 48 | #include <asm/prom.h> |
49 | #include <asm/pcic.h> | 49 | #include <asm/pcic.h> |
50 | #endif | 50 | #endif |
51 | 51 | ||
@@ -96,7 +96,7 @@ struct fb_var_screeninfo default_var = { | |||
96 | .vmode = FB_VMODE_NONINTERLACED | 96 | .vmode = FB_VMODE_NONINTERLACED |
97 | }; | 97 | }; |
98 | 98 | ||
99 | #ifdef __sparc__ | 99 | #ifdef CONFIG_SPARC |
100 | struct fb_var_screeninfo default_var_1024x768 __initdata = { | 100 | struct fb_var_screeninfo default_var_1024x768 __initdata = { |
101 | /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ | 101 | /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ |
102 | .xres = 1024, | 102 | .xres = 1024, |
@@ -188,7 +188,7 @@ static inline void iga_outb(struct iga_par *par, unsigned char val, | |||
188 | pci_outb(par, val, reg+1); | 188 | pci_outb(par, val, reg+1); |
189 | } | 189 | } |
190 | 190 | ||
191 | #endif /* __sparc__ */ | 191 | #endif /* CONFIG_SPARC */ |
192 | 192 | ||
193 | /* | 193 | /* |
194 | * Very important functionality for the JavaEngine1 computer: | 194 | * Very important functionality for the JavaEngine1 computer: |
@@ -217,7 +217,7 @@ static void iga_blank_border(struct iga_par *par) | |||
217 | iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i); | 217 | iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i); |
218 | } | 218 | } |
219 | 219 | ||
220 | #ifdef __sparc__ | 220 | #ifdef CONFIG_SPARC |
221 | static int igafb_mmap(struct fb_info *info, | 221 | static int igafb_mmap(struct fb_info *info, |
222 | struct vm_area_struct *vma) | 222 | struct vm_area_struct *vma) |
223 | { | 223 | { |
@@ -271,7 +271,7 @@ static int igafb_mmap(struct fb_info *info, | |||
271 | vma->vm_flags |= VM_IO; | 271 | vma->vm_flags |= VM_IO; |
272 | return 0; | 272 | return 0; |
273 | } | 273 | } |
274 | #endif /* __sparc__ */ | 274 | #endif /* CONFIG_SPARC */ |
275 | 275 | ||
276 | static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green, | 276 | static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green, |
277 | unsigned blue, unsigned transp, | 277 | unsigned blue, unsigned transp, |
@@ -323,7 +323,7 @@ static struct fb_ops igafb_ops = { | |||
323 | .fb_fillrect = cfb_fillrect, | 323 | .fb_fillrect = cfb_fillrect, |
324 | .fb_copyarea = cfb_copyarea, | 324 | .fb_copyarea = cfb_copyarea, |
325 | .fb_imageblit = cfb_imageblit, | 325 | .fb_imageblit = cfb_imageblit, |
326 | #ifdef __sparc__ | 326 | #ifdef CONFIG_SPARC |
327 | .fb_mmap = igafb_mmap, | 327 | .fb_mmap = igafb_mmap, |
328 | #endif | 328 | #endif |
329 | }; | 329 | }; |
@@ -424,7 +424,7 @@ int __init igafb_init(void) | |||
424 | 424 | ||
425 | par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK; | 425 | par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK; |
426 | 426 | ||
427 | #ifdef __sparc__ | 427 | #ifdef CONFIG_SPARC |
428 | /* | 428 | /* |
429 | * The following is sparc specific and this is why: | 429 | * The following is sparc specific and this is why: |
430 | * | 430 | * |
@@ -477,8 +477,8 @@ int __init igafb_init(void) | |||
477 | * Set default vmode and cmode from PROM properties. | 477 | * Set default vmode and cmode from PROM properties. |
478 | */ | 478 | */ |
479 | { | 479 | { |
480 | struct pcidev_cookie *cookie = pdev->sysdata; | 480 | struct device_node *dp = pci_device_to_OF_node(pdev); |
481 | int node = cookie->prom_node; | 481 | int node = dp->node; |
482 | int width = prom_getintdefault(node, "width", 1024); | 482 | int width = prom_getintdefault(node, "width", 1024); |
483 | int height = prom_getintdefault(node, "height", 768); | 483 | int height = prom_getintdefault(node, "height", 768); |
484 | int depth = prom_getintdefault(node, "depth", 8); | 484 | int depth = prom_getintdefault(node, "depth", 8); |
@@ -534,7 +534,7 @@ int __init igafb_init(void) | |||
534 | kfree(info); | 534 | kfree(info); |
535 | } | 535 | } |
536 | 536 | ||
537 | #ifdef __sparc__ | 537 | #ifdef CONFIG_SPARC |
538 | /* | 538 | /* |
539 | * Add /dev/fb mmap values. | 539 | * Add /dev/fb mmap values. |
540 | */ | 540 | */ |
@@ -552,7 +552,7 @@ int __init igafb_init(void) | |||
552 | par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */ | 552 | par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */ |
553 | par->mmap_map[1].prot_mask = SRMMU_CACHE; | 553 | par->mmap_map[1].prot_mask = SRMMU_CACHE; |
554 | par->mmap_map[1].prot_flag = SRMMU_WRITE; | 554 | par->mmap_map[1].prot_flag = SRMMU_WRITE; |
555 | #endif /* __sparc__ */ | 555 | #endif /* CONFIG_SPARC */ |
556 | 556 | ||
557 | return 0; | 557 | return 0; |
558 | } | 558 | } |