author    Dave Jiang <dave.jiang@intel.com>        2013-04-10 19:44:39 -0400
committer Vinod Koul <vinod.koul@intel.com>        2013-04-15 13:16:15 -0400
commit    75c6f0ab480657269b5014e0e457c7b18ba8597e (patch)
tree      8e3625f894c0c0ea5706096a47639932b77b31be /drivers/dma/ioat
parent    d302398da99956a329c467f195b50d5aaf38fb75 (diff)
ioatdma: Adding write back descriptor error status support for ioatdma 3.3
v3.3 provides support for write back descriptor error status. This allows
errors to be reported in a descriptor field. With this support, certain
errors such as P/Q validation errors no longer halt the channel. The DMA
engine can continue executing to the end of the chain and let software
report the "errors" up the stack. Those error interrupts are also masked
and handled once the chain has completed.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <djbw@fb.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--  drivers/dma/ioat/dma.h        |   1
-rw-r--r--  drivers/dma/ioat/dma_v3.c     | 111
-rw-r--r--  drivers/dma/ioat/hw.h         |  17
-rw-r--r--  drivers/dma/ioat/registers.h  |   1
4 files changed, 105 insertions, 25 deletions
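
For readers coming to this from outside the driver, here is a minimal
standalone sketch of the mechanism the patch wires up. The descriptor
layout follows the hw.h hunk below and the folding logic mirrors the new
desc_get_errstat(); the SUM_CHECK_* values match linux/dmaengine.h, but
everything else here (names, main) is illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

/* First dword of a CB3.3 PQ descriptor: written by software as the
 * transfer size on submit, overwritten by hardware with write-back
 * error status (dwbes) on completion when wb_en was set. */
struct pq_dword0 {
	union {
		uint32_t size;
		uint32_t dwbes;
		struct {
			unsigned int rsvd:25;
			unsigned int p_val_err:1;
			unsigned int q_val_err:1;
			unsigned int rsvd1:4;
			unsigned int wbes:1;
		} dwbes_f;
	};
};

#define SUM_CHECK_P_RESULT (1 << 0)	/* values as in linux/dmaengine.h */
#define SUM_CHECK_Q_RESULT (1 << 1)

/* Fold per-descriptor status into the caller's result mask, the way the
 * new desc_get_errstat() does during ring cleanup. */
static void fold_errstat(const struct pq_dword0 *d, unsigned int *result)
{
	if (!d->dwbes_f.wbes)		/* hardware wrote no status back */
		return;
	if (d->dwbes_f.p_val_err)
		*result |= SUM_CHECK_P_RESULT;
	if (d->dwbes_f.q_val_err)
		*result |= SUM_CHECK_Q_RESULT;
}

int main(void)
{
	struct pq_dword0 d = { .dwbes_f = { .wbes = 1, .q_val_err = 1 } };
	unsigned int result = 0;

	fold_errstat(&d, &result);
	printf("result mask: %#x\n", result);	/* 0x2: Q mismatch */
	return 0;
}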
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 35d74028773a..54fb7b9ff9aa 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -90,6 +90,7 @@ struct ioatdma_device {
 	struct ioat_chan_common *idx[4];
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
+	u32 cap;
 	void (*intr_quirk)(struct ioatdma_device *device);
 	int (*enumerate_channels)(struct ioatdma_device *device);
 	int (*reset_hw)(struct ioat_chan_common *chan);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b00b000f238b..28f8957bafe2 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -510,6 +510,36 @@ static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
 	return true;
 }
 
+static void
+desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
+{
+	struct ioat_dma_descriptor *hw = desc->hw;
+
+	switch (hw->ctl_f.op) {
+	case IOAT_OP_PQ_VAL:
+	case IOAT_OP_PQ_VAL_16S:
+	{
+		struct ioat_pq_descriptor *pq = desc->pq;
+
+		/* check if there's error written */
+		if (!pq->dwbes_f.wbes)
+			return;
+
+		/* need to set a chanerr var for checking to clear later */
+
+		if (pq->dwbes_f.p_val_err)
+			*desc->result |= SUM_CHECK_P_RESULT;
+
+		if (pq->dwbes_f.q_val_err)
+			*desc->result |= SUM_CHECK_Q_RESULT;
+
+		return;
+	}
+	default:
+		return;
+	}
+}
+
 /**
  * __cleanup - reclaim used descriptors
  * @ioat: channel (ring) to clean
@@ -547,6 +577,11 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
 		desc = ioat2_get_ring_ent(ioat, idx + i);
 		dump_desc_dbg(ioat, desc);
+
+		/* set err stat if we are using dwbes */
+		if (device->cap & IOAT_CAP_DWBES)
+			desc_get_errstat(ioat, desc);
+
 		tx = &desc->txd;
 		if (tx->cookie) {
 			dma_cookie_complete(tx);
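
The upshot for dmaengine clients, sketched below as a hypothetical caller
(not from this patch): a P/Q validate that finds a mismatch now completes
normally and delivers its verdict through the sum_check_flags pointer the
client supplied, instead of wedging the channel. The prep callback and its
signature are the stock dmaengine ones; the function name and the
wait-for-completion strategy are assumptions.

#include <linux/dmaengine.h>

static void validate_stripe(struct dma_chan *chan, dma_addr_t *pq,
			    dma_addr_t *src, unsigned int src_cnt,
			    const unsigned char *scf, size_t len,
			    enum sum_check_flags *result)
{
	struct dma_async_tx_descriptor *tx;

	*result = 0;
	tx = chan->device->device_prep_dma_pq_val(chan, pq, src, src_cnt,
						  scf, len, result, 0);
	if (!tx)
		return;		/* no descriptor; caller falls back */

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* ... wait for completion (callback or polling) elsewhere;
	 * *result then carries SUM_CHECK_P_RESULT / SUM_CHECK_Q_RESULT
	 * folded in by the cleanup path above ... */
}

Note in the two prep hunks below that wb_en is only set when a result
pointer was passed, so plain PQ generation descriptors keep using the
first dword as a size field.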
@@ -1090,6 +1125,9 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	pq->q_addr = dst[1] + offset;
 	pq->ctl = 0;
 	pq->ctl_f.op = op;
+	/* we turn on descriptor write back error status */
+	if (device->cap & IOAT_CAP_DWBES)
+		pq->ctl_f.wb_en = result ? 1 : 0;
 	pq->ctl_f.src_cnt = src_cnt_to_hw(s);
 	pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
 	pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
@@ -1206,6 +1244,9 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
 	pq->ctl = 0;
 	pq->ctl_f.op = op;
 	pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
+	/* we turn on descriptor write back error status */
+	if (device->cap & IOAT_CAP_DWBES)
+		pq->ctl_f.wb_en = result ? 1 : 0;
 	pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
 	pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
 
@@ -1792,6 +1833,32 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
 	return err;
 }
 
+static void ioat3_intr_quirk(struct ioatdma_device *device)
+{
+	struct dma_device *dma;
+	struct dma_chan *c;
+	struct ioat_chan_common *chan;
+	u32 errmask;
+
+	dma = &device->common;
+
+	/*
+	 * if we have descriptor write back error status, we mask the
+	 * error interrupts
+	 */
+	if (device->cap & IOAT_CAP_DWBES) {
+		list_for_each_entry(c, &dma->channels, device_node) {
+			chan = to_chan_common(c);
+			errmask = readl(chan->reg_base +
+					IOAT_CHANERR_MASK_OFFSET);
+			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
+				   IOAT_CHANERR_XOR_Q_ERR;
+			writel(errmask, chan->reg_base +
+			       IOAT_CHANERR_MASK_OFFSET);
+		}
+	}
+}
+
 int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
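
With those interrupt sources masked, the corresponding bits in CHANERR are
still latched by the hardware; the "need to set a chanerr var for checking
to clear later" comment in desc_get_errstat() above anticipates
acknowledging them once the chain completes. That follow-up is not part of
this patch; a sketch of what it might look like, assuming the
write-the-set-bits-back idiom the driver already uses on CHANERR and the
existing IOAT_CHANERR_OFFSET register:

/* Hypothetical follow-up, not in this patch: ack latched P/Q validation
 * errors at end-of-chain so they do not leak into the next chain. */
static void ioat3_ack_pq_errors(struct ioat_chan_common *chan)
{
	u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	u32 pq_errs = chanerr & (IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				 IOAT_CHANERR_XOR_Q_ERR);

	if (pq_errs)
		writel(pq_errs, chan->reg_base + IOAT_CHANERR_OFFSET);
}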
@@ -1801,11 +1868,11 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	struct ioat_chan_common *chan;
 	bool is_raid_device = false;
 	int err;
-	u32 cap;
 
 	device->enumerate_channels = ioat2_enumerate_channels;
 	device->reset_hw = ioat3_reset_hw;
 	device->self_test = ioat3_dma_self_test;
+	device->intr_quirk = ioat3_intr_quirk;
 	dma = &device->common;
 	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
 	dma->device_issue_pending = ioat2_issue_pending;
@@ -1818,16 +1885,16 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
-	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
 
 	if (is_bwd_noraid(pdev))
-		cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
+		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
 
 	/* dca is incompatible with raid operations */
-	if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
-		cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+	if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+		device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
 
-	if (cap & IOAT_CAP_XOR) {
+	if (device->cap & IOAT_CAP_XOR) {
 		is_raid_device = true;
 		dma->max_xor = 8;
 		dma->xor_align = 6;
@@ -1839,10 +1906,15 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
 	}
 
-	if (cap & IOAT_CAP_PQ) {
+	if (device->cap & IOAT_CAP_PQ) {
 		is_raid_device = true;
 
-		if (cap & IOAT_CAP_RAID16SS) {
+		dma->device_prep_dma_pq = ioat3_prep_pq;
+		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
+		dma_cap_set(DMA_PQ, dma->cap_mask);
+		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
+
+		if (device->cap & IOAT_CAP_RAID16SS) {
 			dma_set_maxpq(dma, 16, 0);
 			dma->pq_align = 0;
 		} else {
@@ -1853,14 +1925,13 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 			dma->pq_align = 0;
 		}
 
-		dma_cap_set(DMA_PQ, dma->cap_mask);
-		dma->device_prep_dma_pq = ioat3_prep_pq;
-
-		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
-		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
+		if (!(device->cap & IOAT_CAP_XOR)) {
+			dma->device_prep_dma_xor = ioat3_prep_pqxor;
+			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
+			dma_cap_set(DMA_XOR, dma->cap_mask);
+			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
 
-		if (!(cap & IOAT_CAP_XOR)) {
-			if (cap & IOAT_CAP_RAID16SS) {
+			if (device->cap & IOAT_CAP_RAID16SS) {
 				dma->max_xor = 16;
 				dma->xor_align = 0;
 			} else {
@@ -1870,16 +1941,10 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 			else
 				dma->xor_align = 0;
 			}
-
-			dma_cap_set(DMA_XOR, dma->cap_mask);
-			dma->device_prep_dma_xor = ioat3_prep_pqxor;
-
-			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
-			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
 		}
 	}
 
-	if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
+	if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) {
 		dma_cap_set(DMA_MEMSET, dma->cap_mask);
 		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
 	}
@@ -1898,7 +1963,7 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	}
 
 	/* starting with CB3.3 super extended descriptors are supported */
-	if (cap & IOAT_CAP_RAID16SS) {
+	if (device->cap & IOAT_CAP_RAID16SS) {
 		char pool_name[14];
 		int i;
 
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index d10570db6e7d..5ee57d402a6e 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -165,7 +165,17 @@ struct ioat_xor_ext_descriptor {
 };
 
 struct ioat_pq_descriptor {
-	uint32_t size;
+	union {
+		uint32_t size;
+		uint32_t dwbes;
+		struct {
+			unsigned int rsvd:25;
+			unsigned int p_val_err:1;
+			unsigned int q_val_err:1;
+			unsigned int rsvd1:4;
+			unsigned int wbes:1;
+		} dwbes_f;
+	};
 	union {
 		uint32_t ctl;
 		struct {
@@ -180,7 +190,10 @@ struct ioat_pq_descriptor {
 			unsigned int hint:1;
 			unsigned int p_disable:1;
 			unsigned int q_disable:1;
-			unsigned int rsvd:11;
+			unsigned int rsvd2:2;
+			unsigned int wb_en:1;
+			unsigned int prl_en:1;
+			unsigned int rsvd3:7;
 			#define IOAT_OP_PQ 0x89
 			#define IOAT_OP_PQ_VAL 0x8a
 			#define IOAT_OP_PQ_16S 0xa0
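
One invariant worth calling out in these two hunks: both bitfield edits are
size-preserving. The dwbes overlay spans 25+1+1+4+1 = 32 bits, exactly the
old size dword, and the control-word split replaces rsvd:11 with
2+1+1+7 = 11 bits, so no later field (including the op byte) moves. A
standalone compile-time check of the control word, illustrative only: the
leading:10 placeholder stands in for the fields before hint that the hunk
does not show, and the kernel itself would express this as a BUILD_BUG_ON.

#include <stdint.h>
#include <assert.h>

union pq_ctl {
	uint32_t ctl;
	struct {
		unsigned int leading:10;	/* int_en ... dest_dca, per hw.h */
		unsigned int hint:1;
		unsigned int p_disable:1;
		unsigned int q_disable:1;
		unsigned int rsvd2:2;
		unsigned int wb_en:1;
		unsigned int prl_en:1;
		unsigned int rsvd3:7;
		unsigned int op:8;
	} ctl_f;
};

/* If the split summed to anything but the old 11 bits, this would trip. */
static_assert(sizeof(union pq_ctl) == sizeof(uint32_t),
	      "control word must remain exactly 32 bits");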
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index efdd47e47b82..2f1cfa0f1f47 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -79,6 +79,7 @@
 #define IOAT_CAP_APIC        0x00000080
 #define IOAT_CAP_XOR         0x00000100
 #define IOAT_CAP_PQ          0x00000200
+#define IOAT_CAP_DWBES       0x00002000
 #define IOAT_CAP_RAID16SS    0x00020000
 
 #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */