 Documentation/devicetree/bindings/usb/da8xx-usb.txt |  41
 drivers/dma/Kconfig                                 |   7
 drivers/dma/amba-pl08x.c                            |  20
 drivers/dma/cppi41.c                                | 168
 drivers/dma/dmatest.c                               |  11
 drivers/dma/imx-sdma.c                              |  19
 drivers/dma/ioat/init.c                             |   4
 drivers/dma/mv_xor.c                                |   9
 drivers/dma/qcom/hidma.c                            |  15
 drivers/dma/qcom/hidma_ll.c                         |   6
 drivers/dma/sh/rcar-dmac.c                          |  52
 drivers/dma/stm32-dma.c                             |   2
 drivers/dma/sun4i-dma.c                             |   2
 drivers/dma/virt-dma.c                              |  11
 include/linux/amba/pl080.h                          |  50
 lib/dma-debug.c                                     |   4
 16 files changed, 261 insertions(+), 160 deletions(-)
diff --git a/Documentation/devicetree/bindings/usb/da8xx-usb.txt b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
index ccb844aba7d4..717c5f656237 100644
--- a/Documentation/devicetree/bindings/usb/da8xx-usb.txt
+++ b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
@@ -18,10 +18,26 @@ Required properties:
 
 - phy-names: Should be "usb-phy"
 
+- dmas: specifies the dma channels
+
+- dma-names: specifies the names of the channels. Use "rxN" for receive
+  and "txN" for transmit endpoints. N specifies the endpoint number.
+
 Optional properties:
 ~~~~~~~~~~~~~~~~~~~~
 - vbus-supply: Phandle to a regulator providing the USB bus power.
 
+DMA
+~~~
+- compatible: ti,da830-cppi41
+- reg: offset and length of the following register spaces: CPPI DMA Controller,
+  CPPI DMA Scheduler, Queue Manager
+- reg-names: "controller", "scheduler", "queuemgr"
+- #dma-cells: should be set to 2. The first number represents the
+  channel number (0 … 3 for endpoints 1 … 4).
+  The second number is 0 for RX and 1 for TX transfers.
+- #dma-channels: should be set to 4 representing the 4 endpoints.
+
 Example:
         usb_phy: usb-phy {
                 compatible = "ti,da830-usb-phy";
@@ -30,7 +46,10 @@ Example:
         };
         usb0: usb@200000 {
                 compatible = "ti,da830-musb";
-                reg = <0x00200000 0x10000>;
+                reg = <0x00200000 0x1000>;
+                ranges;
+                #address-cells = <1>;
+                #size-cells = <1>;
                 interrupts = <58>;
                 interrupt-names = "mc";
 
@@ -39,5 +58,25 @@ Example:
                 phys = <&usb_phy 0>;
                 phy-names = "usb-phy";
 
+                dmas = <&cppi41dma 0 0 &cppi41dma 1 0
+                        &cppi41dma 2 0 &cppi41dma 3 0
+                        &cppi41dma 0 1 &cppi41dma 1 1
+                        &cppi41dma 2 1 &cppi41dma 3 1>;
+                dma-names =
+                        "rx1", "rx2", "rx3", "rx4",
+                        "tx1", "tx2", "tx3", "tx4";
+
                 status = "okay";
+
+                cppi41dma: dma-controller@201000 {
+                        compatible = "ti,da830-cppi41";
+                        reg = <0x201000 0x1000
+                               0x202000 0x1000
+                               0x204000 0x4000>;
+                        reg-names = "controller", "scheduler", "queuemgr";
+                        interrupts = <58>;
+                        #dma-cells = <2>;
+                        #dma-channels = <4>;
+                };
+
         };
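
A client driver resolves the "rxN"/"txN" names above through the generic dmaengine API. A minimal sketch of how a glue driver could request one endpoint channel; the helper name and error handling are illustrative, not part of this patch:

    #include <linux/dmaengine.h>
    #include <linux/err.h>
    #include <linux/kernel.h>

    static struct dma_chan *da8xx_request_ep_chan(struct device *dev,
                                                  unsigned int ep, bool tx)
    {
            struct dma_chan *chan;
            char name[8];

            /* builds "rx1".."rx4" or "tx1".."tx4" per the dma-names above */
            snprintf(name, sizeof(name), "%s%u", tx ? "tx" : "rx", ep);

            /* resolved via the dmas/dma-names properties of the usb0 node */
            chan = dma_request_chan(dev, name);
            return IS_ERR(chan) ? NULL : chan;
    }
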
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d01d59812cf3..24e8597b2c3e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -514,12 +514,12 @@ config TIMB_DMA
           Enable support for the Timberdale FPGA DMA engine.
 
 config TI_CPPI41
-        tristate "AM33xx CPPI41 DMA support"
-        depends on ARCH_OMAP
+        tristate "CPPI 4.1 DMA support"
+        depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX)
         select DMA_ENGINE
         help
           The Communications Port Programming Interface (CPPI) 4.1 DMA engine
-          is currently used by the USB driver on AM335x platforms.
+          is currently used by the USB driver on AM335x and DA8xx platforms.
 
 config TI_DMA_CROSSBAR
         bool
@@ -608,6 +608,7 @@ config ASYNC_TX_DMA
 config DMATEST
         tristate "DMA Test client"
         depends on DMA_ENGINE
+        select DMA_ENGINE_RAID
         help
           Simple DMA test client. Say N unless you're debugging a
           DMA Device driver.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 0b7c6ce629a6..6bb8813ca275 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -106,6 +106,7 @@ struct pl08x_driver_data;
 
 /**
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
+ * @config_offset: offset to the configuration register
  * @channels: the number of channels available in this variant
  * @signals: the number of request signals available from the hardware
  * @dualmaster: whether this version supports dual AHB masters or not.
@@ -145,6 +146,8 @@ struct pl08x_bus_data {
 /**
  * struct pl08x_phy_chan - holder for the physical channels
  * @id: physical index to this channel
+ * @base: memory base address for this physical channel
+ * @reg_config: configuration address for this physical channel
  * @lock: a lock to use when altering an instance of this struct
  * @serving: the virtual channel currently being served by this physical
  * channel
@@ -203,7 +206,7 @@ struct pl08x_txd {
 };
 
 /**
- * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
  * states
  * @PL08X_CHAN_IDLE: the channel is idle
  * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
@@ -226,9 +229,8 @@ enum pl08x_dma_chan_state {
  * @phychan: the physical channel utilized by this channel, if there is one
  * @name: name of channel
  * @cd: channel platform data
- * @runtime_addr: address for RX/TX according to the runtime config
+ * @cfg: slave configuration
  * @at: active transaction on this channel
- * @lock: a lock for this channel data
  * @host: a pointer to the host (internal use)
  * @state: whether the channel is idle, paused, running etc
  * @slave: whether this channel is a device (slave) or for memcpy
@@ -262,7 +264,7 @@ struct pl08x_dma_chan {
  * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
  * fetches
  * @mem_buses: set to indicate memory transfers on AHB2.
- * @lock: a spinlock for this struct
+ * @lli_words: how many words are used in each LLI item for this variant
  */
 struct pl08x_driver_data {
         struct dma_device slave;
@@ -417,7 +419,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 
         /* Enable the DMA channel */
         /* Do not access config register until channel shows as disabled */
-        while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
+        while (readl(pl08x->base + PL080_EN_CHAN) & BIT(phychan->id))
                 cpu_relax();
 
         /* Do not access config register until channel shows as inactive */
@@ -484,8 +486,8 @@ static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
 
         writel(val, ch->reg_config);
 
-        writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
-        writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
+        writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR);
+        writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);
 }
 
 static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -1834,7 +1836,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
                 return IRQ_NONE;
 
         for (i = 0; i < pl08x->vd->channels; i++) {
-                if (((1 << i) & err) || ((1 << i) & tc)) {
+                if ((BIT(i) & err) || (BIT(i) & tc)) {
                         /* Locate physical channel */
                         struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
                         struct pl08x_dma_chan *plchan = phychan->serving;
@@ -1872,7 +1874,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
                         }
                         spin_unlock(&plchan->vc.lock);
 
-                        mask |= (1 << i);
+                        mask |= BIT(i);
                 }
         }
 
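
The 1 << n to BIT(n) conversions in this file (and in pl080.h further down) are mechanical. BIT() comes from <linux/bitops.h>; the point is the unsigned-long expansion, which keeps bit 31 well-defined where (1 << 31) overflows a signed int:

    /* from include/linux/bitops.h */
    #define BIT(nr) (1UL << (nr))
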
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d74cee077842..f7e965f63274 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -68,7 +68,6 @@
 #define QMGR_MEMCTRL_IDX_SH     16
 #define QMGR_MEMCTRL_DESC_SH    8
 
-#define QMGR_NUM_PEND   5
 #define QMGR_PEND(x)    (0x90 + (x) * 4)
 
 #define QMGR_PENDING_SLOT_Q(x)  (x / 32)
@@ -131,7 +130,6 @@ struct cppi41_dd {
         u32 first_td_desc;
         struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];
 
-        void __iomem *usbss_mem;
         void __iomem *ctrl_mem;
         void __iomem *sched_mem;
         void __iomem *qmgr_mem;
@@ -139,6 +137,10 @@ struct cppi41_dd {
         const struct chan_queues *queues_rx;
         const struct chan_queues *queues_tx;
         struct chan_queues td_queue;
+        u16 first_completion_queue;
+        u16 qmgr_num_pend;
+        u32 n_chans;
+        u8 platform;
 
         struct list_head pending;       /* Pending queued transfers */
         spinlock_t lock;                /* Lock for pending list */
@@ -149,8 +151,7 @@ struct cppi41_dd {
         bool is_suspended;
 };
 
-#define FIST_COMPLETION_QUEUE   93
-static struct chan_queues usb_queues_tx[] = {
+static struct chan_queues am335x_usb_queues_tx[] = {
         /* USB0 ENDP 1 */
         [ 0] = { .submit = 32, .complete = 93},
         [ 1] = { .submit = 34, .complete = 94},
@@ -186,7 +187,7 @@ static struct chan_queues usb_queues_tx[] = {
         [29] = { .submit = 90, .complete = 139},
 };
 
-static const struct chan_queues usb_queues_rx[] = {
+static const struct chan_queues am335x_usb_queues_rx[] = {
         /* USB0 ENDP 1 */
         [ 0] = { .submit = 1, .complete = 109},
         [ 1] = { .submit = 2, .complete = 110},
@@ -222,11 +223,26 @@ static const struct chan_queues usb_queues_rx[] = {
         [29] = { .submit = 30, .complete = 155},
 };
 
+static const struct chan_queues da8xx_usb_queues_tx[] = {
+        [0] = { .submit = 16, .complete = 24},
+        [1] = { .submit = 18, .complete = 24},
+        [2] = { .submit = 20, .complete = 24},
+        [3] = { .submit = 22, .complete = 24},
+};
+
+static const struct chan_queues da8xx_usb_queues_rx[] = {
+        [0] = { .submit = 1, .complete = 26},
+        [1] = { .submit = 3, .complete = 26},
+        [2] = { .submit = 5, .complete = 26},
+        [3] = { .submit = 7, .complete = 26},
+};
+
 struct cppi_glue_infos {
-        irqreturn_t (*isr)(int irq, void *data);
         const struct chan_queues *queues_rx;
         const struct chan_queues *queues_tx;
         struct chan_queues td_queue;
+        u16 first_completion_queue;
+        u16 qmgr_num_pend;
 };
 
 static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
@@ -285,19 +301,21 @@ static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
 static irqreturn_t cppi41_irq(int irq, void *data)
 {
         struct cppi41_dd *cdd = data;
+        u16 first_completion_queue = cdd->first_completion_queue;
+        u16 qmgr_num_pend = cdd->qmgr_num_pend;
         struct cppi41_channel *c;
         int i;
 
-        for (i = QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE); i < QMGR_NUM_PEND;
+        for (i = QMGR_PENDING_SLOT_Q(first_completion_queue); i < qmgr_num_pend;
                         i++) {
                 u32 val;
                 u32 q_num;
 
                 val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
-                if (i == QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE) && val) {
+                if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) {
                         u32 mask;
                         /* set corresponding bit for completetion Q 93 */
-                        mask = 1 << QMGR_PENDING_BIT_Q(FIST_COMPLETION_QUEUE);
+                        mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue);
                         /* not set all bits for queues less than Q 93 */
                         mask--;
                         /* now invert and keep only Q 93+ set */
@@ -402,11 +420,9 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
         struct cppi41_channel *c = to_cpp41_chan(chan);
         enum dma_status ret;
 
-        /* lock */
         ret = dma_cookie_status(chan, cookie, txstate);
-        if (txstate && ret == DMA_COMPLETE)
-                txstate->residue = c->residue;
-        /* unlock */
+
+        dma_set_residue(txstate, c->residue);
 
         return ret;
 }
@@ -630,7 +646,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
         if (!c->is_tx) {
                 reg |= GCR_STARV_RETRY;
                 reg |= GCR_DESC_TYPE_HOST;
-                reg |= c->q_comp_num;
+                reg |= cdd->td_queue.complete;
         }
         reg |= GCR_TEARDOWN;
         cppi_writel(reg, c->gcr_reg);
@@ -641,7 +657,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
         if (!c->td_seen || !c->td_desc_seen) {
 
                 desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
-                if (!desc_phys)
+                if (!desc_phys && c->is_tx)
                         desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
 
                 if (desc_phys == c->desc_phys) {
@@ -723,39 +739,24 @@ static int cppi41_stop_chan(struct dma_chan *chan)
         return 0;
 }
 
-static void cleanup_chans(struct cppi41_dd *cdd)
-{
-        while (!list_empty(&cdd->ddev.channels)) {
-                struct cppi41_channel *cchan;
-
-                cchan = list_first_entry(&cdd->ddev.channels,
-                                struct cppi41_channel, chan.device_node);
-                list_del(&cchan->chan.device_node);
-                kfree(cchan);
-        }
-}
-
 static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
 {
-        struct cppi41_channel *cchan;
+        struct cppi41_channel *cchan, *chans;
         int i;
-        int ret;
-        u32 n_chans;
+        u32 n_chans = cdd->n_chans;
 
-        ret = of_property_read_u32(dev->of_node, "#dma-channels",
-                        &n_chans);
-        if (ret)
-                return ret;
         /*
          * The channels can only be used as TX or as RX. So we add twice
          * that much dma channels because USB can only do RX or TX.
          */
         n_chans *= 2;
 
+        chans = devm_kcalloc(dev, n_chans, sizeof(*chans), GFP_KERNEL);
+        if (!chans)
+                return -ENOMEM;
+
         for (i = 0; i < n_chans; i++) {
-                cchan = kzalloc(sizeof(*cchan), GFP_KERNEL);
-                if (!cchan)
-                        goto err;
+                cchan = &chans[i];
 
                 cchan->cdd = cdd;
                 if (i & 1) {
@@ -775,9 +776,6 @@ static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
         cdd->first_td_desc = n_chans;
 
         return 0;
-err:
-        cleanup_chans(cdd);
-        return -ENOMEM;
 }
 
 static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
@@ -859,7 +857,7 @@ static void init_sched(struct cppi41_dd *cdd)
 
         word = 0;
         cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
-        for (ch = 0; ch < 15 * 2; ch += 2) {
+        for (ch = 0; ch < cdd->n_chans; ch += 2) {
 
                 reg = SCHED_ENTRY0_CHAN(ch);
                 reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;
@@ -869,7 +867,7 @@ static void init_sched(struct cppi41_dd *cdd)
                 cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
                 word++;
         }
-        reg = 15 * 2 * 2 - 1;
+        reg = cdd->n_chans * 2 - 1;
         reg |= DMA_SCHED_CTRL_EN;
         cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
 }
@@ -885,7 +883,7 @@ static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
                 return -ENOMEM;
 
         cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
-        cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
+        cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE);
         cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
 
         ret = init_descs(dev, cdd);
@@ -894,6 +892,7 @@ static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
 
         cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
         init_sched(cdd);
+
         return 0;
 err_td:
         deinit_cppi41(dev, cdd);
@@ -933,8 +932,9 @@ static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
         else
                 queues = cdd->queues_rx;
 
-        BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx));
-        if (WARN_ON(cchan->port_num > ARRAY_SIZE(usb_queues_rx)))
+        BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
+                     ARRAY_SIZE(am335x_usb_queues_tx));
+        if (WARN_ON(cchan->port_num > ARRAY_SIZE(am335x_usb_queues_rx)))
                 return false;
 
         cchan->q_num = queues[cchan->port_num].submit;
@@ -962,15 +962,25 @@ static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
                         &dma_spec->args[0]);
 }
 
-static const struct cppi_glue_infos usb_infos = {
-        .isr = cppi41_irq,
-        .queues_rx = usb_queues_rx,
-        .queues_tx = usb_queues_tx,
+static const struct cppi_glue_infos am335x_usb_infos = {
+        .queues_rx = am335x_usb_queues_rx,
+        .queues_tx = am335x_usb_queues_tx,
         .td_queue = { .submit = 31, .complete = 0 },
+        .first_completion_queue = 93,
+        .qmgr_num_pend = 5,
+};
+
+static const struct cppi_glue_infos da8xx_usb_infos = {
+        .queues_rx = da8xx_usb_queues_rx,
+        .queues_tx = da8xx_usb_queues_tx,
+        .td_queue = { .submit = 31, .complete = 0 },
+        .first_completion_queue = 24,
+        .qmgr_num_pend = 2,
 };
 
 static const struct of_device_id cppi41_dma_ids[] = {
-        { .compatible = "ti,am3359-cppi41", .data = &usb_infos},
+        { .compatible = "ti,am3359-cppi41", .data = &am335x_usb_infos},
+        { .compatible = "ti,da830-cppi41", .data = &da8xx_usb_infos},
         {},
 };
 MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
@@ -995,6 +1005,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
         struct cppi41_dd *cdd;
         struct device *dev = &pdev->dev;
         const struct cppi_glue_infos *glue_info;
+        struct resource *mem;
+        int index;
         int irq;
         int ret;
 
@@ -1021,19 +1033,31 @@ static int cppi41_dma_probe(struct platform_device *pdev)
         INIT_LIST_HEAD(&cdd->ddev.channels);
         cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
 
-        cdd->usbss_mem = of_iomap(dev->of_node, 0);
-        cdd->ctrl_mem = of_iomap(dev->of_node, 1);
-        cdd->sched_mem = of_iomap(dev->of_node, 2);
-        cdd->qmgr_mem = of_iomap(dev->of_node, 3);
+        index = of_property_match_string(dev->of_node,
+                                         "reg-names", "controller");
+        if (index < 0)
+                return index;
+
+        mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
+        cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
+        if (IS_ERR(cdd->ctrl_mem))
+                return PTR_ERR(cdd->ctrl_mem);
+
+        mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
+        cdd->sched_mem = devm_ioremap_resource(dev, mem);
+        if (IS_ERR(cdd->sched_mem))
+                return PTR_ERR(cdd->sched_mem);
+
+        mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
+        cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
+        if (IS_ERR(cdd->qmgr_mem))
+                return PTR_ERR(cdd->qmgr_mem);
+
         spin_lock_init(&cdd->lock);
         INIT_LIST_HEAD(&cdd->pending);
 
         platform_set_drvdata(pdev, cdd);
 
-        if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
-                        !cdd->qmgr_mem)
-                return -ENXIO;
-
         pm_runtime_enable(dev);
         pm_runtime_set_autosuspend_delay(dev, 100);
         pm_runtime_use_autosuspend(dev);
@@ -1044,6 +1068,13 @@ static int cppi41_dma_probe(struct platform_device *pdev)
         cdd->queues_rx = glue_info->queues_rx;
         cdd->queues_tx = glue_info->queues_tx;
         cdd->td_queue = glue_info->td_queue;
+        cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
+        cdd->first_completion_queue = glue_info->first_completion_queue;
+
+        ret = of_property_read_u32(dev->of_node,
+                                   "#dma-channels", &cdd->n_chans);
+        if (ret)
+                goto err_get_n_chans;
 
         ret = init_cppi41(dev, cdd);
         if (ret)
@@ -1056,18 +1087,18 @@ static int cppi41_dma_probe(struct platform_device *pdev)
         irq = irq_of_parse_and_map(dev->of_node, 0);
         if (!irq) {
                 ret = -EINVAL;
-                goto err_irq;
+                goto err_chans;
         }
 
-        ret = devm_request_irq(&pdev->dev, irq, glue_info->isr, IRQF_SHARED,
+        ret = devm_request_irq(&pdev->dev, irq, cppi41_irq, IRQF_SHARED,
                         dev_name(dev), cdd);
         if (ret)
-                goto err_irq;
+                goto err_chans;
         cdd->irq = irq;
 
         ret = dma_async_device_register(&cdd->ddev);
         if (ret)
-                goto err_dma_reg;
+                goto err_chans;
 
         ret = of_dma_controller_register(dev->of_node,
                         cppi41_dma_xlate, &cpp41_dma_info);
@@ -1080,20 +1111,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
         return 0;
 err_of:
         dma_async_device_unregister(&cdd->ddev);
-err_dma_reg:
-err_irq:
-        cleanup_chans(cdd);
 err_chans:
         deinit_cppi41(dev, cdd);
 err_init_cppi:
         pm_runtime_dont_use_autosuspend(dev);
+err_get_n_chans:
 err_get_sync:
         pm_runtime_put_sync(dev);
         pm_runtime_disable(dev);
-        iounmap(cdd->usbss_mem);
-        iounmap(cdd->ctrl_mem);
-        iounmap(cdd->sched_mem);
-        iounmap(cdd->qmgr_mem);
         return ret;
 }
 
@@ -1110,12 +1135,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
         dma_async_device_unregister(&cdd->ddev);
 
         devm_free_irq(&pdev->dev, cdd->irq, cdd);
-        cleanup_chans(cdd);
         deinit_cppi41(&pdev->dev, cdd);
-        iounmap(cdd->usbss_mem);
-        iounmap(cdd->ctrl_mem);
-        iounmap(cdd->sched_mem);
-        iounmap(cdd->qmgr_mem);
         pm_runtime_dont_use_autosuspend(&pdev->dev);
         pm_runtime_put_sync(&pdev->dev);
         pm_runtime_disable(&pdev->dev);
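
The masking in cppi41_irq() is easier to see with concrete numbers. On AM335x (first completion queue 93) the first pending register covering it is slot 93 / 32 = 2, where queue 93 is bit 93 % 32 = 29; the decrement-and-invert trick clears the bits of the lower submit queues in that slot. A standalone illustration, assuming QMGR_PENDING_BIT_Q(x) is (x % 32), the counterpart of the SLOT macro shown above:

    #include <stdio.h>
    #include <stdint.h>

    #define QMGR_PENDING_SLOT_Q(x)  ((x) / 32)
    #define QMGR_PENDING_BIT_Q(x)   ((x) % 32)

    int main(void)
    {
            uint32_t first_completion_queue = 93;   /* am335x value */
            uint32_t val = 0xffffffff;              /* pretend every queue pends */
            uint32_t mask;

            mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue); /* bit 29 */
            mask--;         /* bits 0..28: submit queues 64..92 of slot 2 */
            val &= ~mask;   /* keep only completion queues 93..95 */

            printf("slot %u, surviving bits 0x%08x\n",
                   (unsigned)QMGR_PENDING_SLOT_Q(first_completion_queue),
                   (unsigned)val);
            return 0;
    }

For DA8xx the same code starts at slot 24 / 32 = 0 and scans only 2 pending registers, which is exactly what the new first_completion_queue/qmgr_num_pend fields parameterize.
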
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 54d581d407aa..a07ef3d6b3ec 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -535,6 +535,13 @@ static int dmatest_func(void *data)
 
                 total_tests++;
 
+                /* Check if buffer count fits into map count variable (u8) */
+                if ((src_cnt + dst_cnt) >= 255) {
+                        pr_err("too many buffers (%d of 255 supported)\n",
+                               src_cnt + dst_cnt);
+                        break;
+                }
+
                 if (1 << align > params->buf_size) {
                         pr_err("%u-byte buffer too small for %d-byte alignment\n",
                                params->buf_size, 1 << align);
@@ -585,7 +592,7 @@ static int dmatest_func(void *data)
                 for (i = 0; i < src_cnt; i++) {
                         void *buf = thread->srcs[i];
                         struct page *pg = virt_to_page(buf);
-                        unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
+                        unsigned long pg_off = offset_in_page(buf);
 
                         um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
                                                    um->len, DMA_TO_DEVICE);
@@ -605,7 +612,7 @@ static int dmatest_func(void *data)
                 for (i = 0; i < dst_cnt; i++) {
                         void *buf = thread->dsts[i];
                         struct page *pg = virt_to_page(buf);
-                        unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
+                        unsigned long pg_off = offset_in_page(buf);
 
                         dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
                                                DMA_BIDIRECTIONAL);
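
The offset_in_page() conversions here (and in mv_xor.c and lib/dma-debug.c below) are equivalent rewrites of the open-coded expression, with the cast folded into the helper from <linux/mm.h>:

    #define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)

Besides being shorter, the new code also gives pg_off the matching type: the old declarations narrowed the unsigned long result into a plain unsigned int, harmless for sub-page offsets but inconsistent.
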
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d1651a50c349..085993cb2ccc 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -937,6 +937,21 @@ static int sdma_disable_channel(struct dma_chan *chan)
         return 0;
 }
 
+static int sdma_disable_channel_with_delay(struct dma_chan *chan)
+{
+        sdma_disable_channel(chan);
+
+        /*
+         * According to NXP R&D team a delay of one BD SDMA cost time
+         * (maximum is 1ms) should be added after disable of the channel
+         * bit, to ensure SDMA core has really been stopped after SDMA
+         * clients call .device_terminate_all.
+         */
+        mdelay(1);
+
+        return 0;
+}
+
 static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
 {
         struct sdma_engine *sdma = sdmac->sdma;
@@ -1828,11 +1843,11 @@ static int sdma_probe(struct platform_device *pdev)
         sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
         sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
         sdma->dma_device.device_config = sdma_config;
-        sdma->dma_device.device_terminate_all = sdma_disable_channel;
+        sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
         sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
         sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
         sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-        sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+        sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
         sdma->dma_device.device_issue_pending = sdma_issue_pending;
         sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
         dma_set_max_seg_size(sdma->dma_device.dev, 65535);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index cc5259b881d4..6ad4384b3fa8 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -760,9 +760,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
         dma_cookie_init(&ioat_chan->dma_chan);
         list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
         ioat_dma->idx[idx] = ioat_chan;
-        init_timer(&ioat_chan->timer);
-        ioat_chan->timer.function = ioat_timer_event;
-        ioat_chan->timer.data = data;
+        setup_timer(&ioat_chan->timer, ioat_timer_event, data);
         tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
 }
 
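
setup_timer() folds the three-step open-coded initialisation into a single call. Conceptually it behaves like the sketch below (simplified; the real macro in <linux/timer.h> of this era routes through __setup_timer() for lockdep annotation):

    #define setup_timer(timer, fn, data)            \
            do {                                    \
                    init_timer(timer);              \
                    (timer)->function = (fn);       \
                    (timer)->data = (data);         \
            } while (0)
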
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 0cb951b743a6..25bc5b103aa2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -960,7 +960,7 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
         }
 
         src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
-                               (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+                               offset_in_page(src), PAGE_SIZE,
                                DMA_TO_DEVICE);
         unmap->addr[0] = src_dma;
 
@@ -972,7 +972,7 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
         unmap->to_cnt = 1;
 
         dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
-                                (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+                                offset_in_page(dest), PAGE_SIZE,
                                 DMA_FROM_DEVICE);
         unmap->addr[1] = dest_dma;
 
@@ -1580,11 +1580,6 @@ static int mv_xor_probe(struct platform_device *pdev)
                 int irq;
 
                 cd = &pdata->channels[i];
-                if (!cd) {
-                        ret = -ENODEV;
-                        goto err_channel_add;
-                }
-
                 irq = platform_get_irq(pdev, i);
                 if (irq < 0) {
                         ret = irq;
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 3c982c96b4b7..5072a7d306d4 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -865,6 +865,20 @@ bailout:
         return rc;
 }
 
+static void hidma_shutdown(struct platform_device *pdev)
+{
+        struct hidma_dev *dmadev = platform_get_drvdata(pdev);
+
+        dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");
+
+        pm_runtime_get_sync(dmadev->ddev.dev);
+        if (hidma_ll_disable(dmadev->lldev))
+                dev_warn(dmadev->ddev.dev, "channel did not stop\n");
+        pm_runtime_mark_last_busy(dmadev->ddev.dev);
+        pm_runtime_put_autosuspend(dmadev->ddev.dev);
+
+}
+
 static int hidma_remove(struct platform_device *pdev)
 {
         struct hidma_dev *dmadev = platform_get_drvdata(pdev);
@@ -908,6 +922,7 @@ MODULE_DEVICE_TABLE(of, hidma_match);
 static struct platform_driver hidma_driver = {
         .probe = hidma_probe,
         .remove = hidma_remove,
+        .shutdown = hidma_shutdown,
         .driver = {
                 .name = "hidma",
                 .of_match_table = hidma_match,
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 6645bdf0d151..1530a661518d 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -499,6 +499,9 @@ int hidma_ll_enable(struct hidma_lldev *lldev)
         lldev->trch_state = HIDMA_CH_ENABLED;
         lldev->evch_state = HIDMA_CH_ENABLED;
 
+        /* enable irqs */
+        writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
         return 0;
 }
 
@@ -596,6 +599,9 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
 
         lldev->trch_state = HIDMA_CH_SUSPENDED;
         lldev->evch_state = HIDMA_CH_SUSPENDED;
+
+        /* disable interrupts */
+        writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
         return 0;
 }
 
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 48b22d5c8602..db41795fe42a 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -344,13 +344,19 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
                 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
 
         if (desc->hwdescs.use) {
-                struct rcar_dmac_xfer_chunk *chunk;
+                struct rcar_dmac_xfer_chunk *chunk =
+                        list_first_entry(&desc->chunks,
+                                         struct rcar_dmac_xfer_chunk, node);
 
                 dev_dbg(chan->chan.device->dev,
                         "chan%u: queue desc %p: %u@%pad\n",
                         chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+                rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
+                                     chunk->src_addr >> 32);
+                rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
+                                     chunk->dst_addr >> 32);
                 rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
                                      desc->hwdescs.dma >> 32);
 #endif
@@ -368,8 +374,6 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
                  * should. Initialize it manually with the destination address
                  * of the first chunk.
                  */
-                chunk = list_first_entry(&desc->chunks,
-                                         struct rcar_dmac_xfer_chunk, node);
                 rcar_dmac_chan_write(chan, RCAR_DMADAR,
                                      chunk->dst_addr & 0xffffffff);
 
@@ -855,8 +859,12 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
         unsigned int nchunks = 0;
         unsigned int max_chunk_size;
         unsigned int full_size = 0;
-        bool highmem = false;
+        bool cross_boundary = false;
         unsigned int i;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+        u32 high_dev_addr;
+        u32 high_mem_addr;
+#endif
 
         desc = rcar_dmac_desc_get(chan);
         if (!desc)
@@ -882,6 +890,16 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
 
                 full_size += len;
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+                if (i == 0) {
+                        high_dev_addr = dev_addr >> 32;
+                        high_mem_addr = mem_addr >> 32;
+                }
+
+                if ((dev_addr >> 32 != high_dev_addr) ||
+                    (mem_addr >> 32 != high_mem_addr))
+                        cross_boundary = true;
+#endif
                 while (len) {
                         unsigned int size = min(len, max_chunk_size);
 
@@ -890,18 +908,14 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
                          * Prevent individual transfers from crossing 4GB
                          * boundaries.
                          */
-                        if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
+                        if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
                                 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
-                        if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
+                                cross_boundary = true;
+                        }
+                        if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
                                 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
-
-                        /*
-                         * Check if either of the source or destination address
-                         * can't be expressed in 32 bits. If so we can't use
-                         * hardware descriptor lists.
-                         */
-                        if (dev_addr >> 32 || mem_addr >> 32)
-                                highmem = true;
+                                cross_boundary = true;
+                        }
 #endif
 
                         chunk = rcar_dmac_xfer_chunk_get(chan);
@@ -943,13 +957,11 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
          * Use hardware descriptor lists if possible when more than one chunk
          * needs to be transferred (otherwise they don't make much sense).
          *
-         * The highmem check currently covers the whole transfer. As an
-         * optimization we could use descriptor lists for consecutive lowmem
-         * chunks and direct manual mode for highmem chunks. Whether the
-         * performance improvement would be significant enough compared to the
-         * additional complexity remains to be investigated.
+         * Source/Destination address should be located in same 4GiB region
+         * in the 40bit address space when it uses Hardware descriptor,
+         * and cross_boundary is checking it.
          */
-        desc->hwdescs.use = !highmem && nchunks > 1;
+        desc->hwdescs.use = !cross_boundary && nchunks > 1;
         if (desc->hwdescs.use) {
                 if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
                         desc->hwdescs.use = false;
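
The rule the rewritten check enforces: hardware descriptor lists may only be used when every source and destination address of the transfer stays inside a single 4 GiB region of the 40-bit space, because the high 32 bits are programmed once per transfer via RCAR_DMAFIXSAR/RCAR_DMAFIXDAR in the first hunk above. A minimal model of that predicate, not driver code:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * True if [addr, addr + len) lies entirely in the 4 GiB region
     * selected by the fixed high word - the invariant that hardware
     * descriptors rely on.
     */
    static bool in_fixed_region(uint64_t addr, uint64_t len, uint32_t high)
    {
            return (addr >> 32) == high && ((addr + len - 1) >> 32) == high;
    }
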
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 49f86cabcfec..786fc8fcc38e 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1008,7 +1008,7 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
 
         c = dma_get_slave_channel(&chan->vchan.chan);
         if (!c) {
-                dev_err(dev, "No more channel avalaible\n");
+                dev_err(dev, "No more channels available\n");
                 return NULL;
         }
 
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 57aa227bfadb..f4ed3f17607c 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -238,7 +238,7 @@ static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
         }
 
         spin_lock_irqsave(&priv->lock, flags);
-        for_each_clear_bit_from(i, &priv->pchans_used, max) {
+        for_each_clear_bit_from(i, priv->pchans_used, max) {
                 pchan = &pchans[i];
                 pchan->vchan = vchan;
                 set_bit(i, priv->pchans_used);
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index e47fc9b0944f..545e97279083 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(vchan_find_desc);
 static void vchan_complete(unsigned long arg)
 {
         struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
-        struct virt_dma_desc *vd;
+        struct virt_dma_desc *vd, *_vd;
         struct dmaengine_desc_callback cb;
         LIST_HEAD(head);
 
@@ -103,8 +103,7 @@ static void vchan_complete(unsigned long arg)
 
         dmaengine_desc_callback_invoke(&cb, NULL);
 
-        while (!list_empty(&head)) {
-                vd = list_first_entry(&head, struct virt_dma_desc, node);
+        list_for_each_entry_safe(vd, _vd, &head, node) {
                 dmaengine_desc_get_callback(&vd->tx, &cb);
 
                 list_del(&vd->node);
@@ -119,9 +118,9 @@ static void vchan_complete(unsigned long arg)
 
 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 {
-        while (!list_empty(head)) {
-                struct virt_dma_desc *vd = list_first_entry(head,
-                        struct virt_dma_desc, node);
+        struct virt_dma_desc *vd, *_vd;
+
+        list_for_each_entry_safe(vd, _vd, head, node) {
                 if (dmaengine_desc_test_reuse(&vd->tx)) {
                         list_move_tail(&vd->node, &vc->desc_allocated);
                 } else {
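
Both hunks replace a pop-the-head loop with list_for_each_entry_safe(), the right tool when the loop body unlinks the current entry: the _safe variant caches the next node (_vd) before the body runs, so deletion cannot break the traversal. A freestanding illustration of the pattern:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            struct list_head node;
            int payload;
    };

    /*
     * Drains 'head', freeing every entry. A plain list_for_each_entry()
     * would advance via the ->next pointer of a node that was just freed.
     */
    static void drain_items(struct list_head *head)
    {
            struct item *it, *tmp;

            list_for_each_entry_safe(it, tmp, head, node) {
                    list_del(&it->node);
                    kfree(it);
            }
    }
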
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
index 91b84a7f0539..580b5323a717 100644
--- a/include/linux/amba/pl080.h
+++ b/include/linux/amba/pl080.h
@@ -38,24 +38,16 @@
 #define PL080_SOFT_LSREQ                        (0x2C)
 
 #define PL080_CONFIG                            (0x30)
-#define PL080_CONFIG_M2_BE                      (1 << 2)
-#define PL080_CONFIG_M1_BE                      (1 << 1)
-#define PL080_CONFIG_ENABLE                     (1 << 0)
+#define PL080_CONFIG_M2_BE                      BIT(2)
+#define PL080_CONFIG_M1_BE                      BIT(1)
+#define PL080_CONFIG_ENABLE                     BIT(0)
 
 #define PL080_SYNC                              (0x34)
 
 /* Per channel configuration registers */
 
-#define PL080_Cx_STRIDE                         (0x20)
+/* Per channel configuration registers */
 #define PL080_Cx_BASE(x)                        ((0x100 + (x * 0x20)))
-#define PL080_Cx_SRC_ADDR(x)                    ((0x100 + (x * 0x20)))
-#define PL080_Cx_DST_ADDR(x)                    ((0x104 + (x * 0x20)))
-#define PL080_Cx_LLI(x)                         ((0x108 + (x * 0x20)))
-#define PL080_Cx_CONTROL(x)                     ((0x10C + (x * 0x20)))
-#define PL080_Cx_CONFIG(x)                      ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONTROL2(x)                   ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONFIG(x)                     ((0x114 + (x * 0x20)))
-
 #define PL080_CH_SRC_ADDR                       (0x00)
 #define PL080_CH_DST_ADDR                       (0x04)
 #define PL080_CH_LLI                            (0x08)
@@ -66,18 +58,18 @@
 
 #define PL080_LLI_ADDR_MASK                     (0x3fffffff << 2)
 #define PL080_LLI_ADDR_SHIFT                    (2)
-#define PL080_LLI_LM_AHB2                       (1 << 0)
+#define PL080_LLI_LM_AHB2                       BIT(0)
 
-#define PL080_CONTROL_TC_IRQ_EN                 (1 << 31)
+#define PL080_CONTROL_TC_IRQ_EN                 BIT(31)
 #define PL080_CONTROL_PROT_MASK                 (0x7 << 28)
 #define PL080_CONTROL_PROT_SHIFT                (28)
-#define PL080_CONTROL_PROT_CACHE                (1 << 30)
-#define PL080_CONTROL_PROT_BUFF                 (1 << 29)
-#define PL080_CONTROL_PROT_SYS                  (1 << 28)
-#define PL080_CONTROL_DST_INCR                  (1 << 27)
-#define PL080_CONTROL_SRC_INCR                  (1 << 26)
-#define PL080_CONTROL_DST_AHB2                  (1 << 25)
-#define PL080_CONTROL_SRC_AHB2                  (1 << 24)
+#define PL080_CONTROL_PROT_CACHE                BIT(30)
+#define PL080_CONTROL_PROT_BUFF                 BIT(29)
+#define PL080_CONTROL_PROT_SYS                  BIT(28)
+#define PL080_CONTROL_DST_INCR                  BIT(27)
+#define PL080_CONTROL_SRC_INCR                  BIT(26)
+#define PL080_CONTROL_DST_AHB2                  BIT(25)
+#define PL080_CONTROL_SRC_AHB2                  BIT(24)
 #define PL080_CONTROL_DWIDTH_MASK               (0x7 << 21)
 #define PL080_CONTROL_DWIDTH_SHIFT              (21)
 #define PL080_CONTROL_SWIDTH_MASK               (0x7 << 18)
@@ -103,20 +95,20 @@
 #define PL080_WIDTH_16BIT                       (0x1)
 #define PL080_WIDTH_32BIT                       (0x2)
 
-#define PL080N_CONFIG_ITPROT                    (1 << 20)
-#define PL080N_CONFIG_SECPROT                   (1 << 19)
-#define PL080_CONFIG_HALT                       (1 << 18)
-#define PL080_CONFIG_ACTIVE                     (1 << 17)  /* RO */
-#define PL080_CONFIG_LOCK                       (1 << 16)
-#define PL080_CONFIG_TC_IRQ_MASK                (1 << 15)
-#define PL080_CONFIG_ERR_IRQ_MASK               (1 << 14)
+#define PL080N_CONFIG_ITPROT                    BIT(20)
+#define PL080N_CONFIG_SECPROT                   BIT(19)
+#define PL080_CONFIG_HALT                       BIT(18)
+#define PL080_CONFIG_ACTIVE                     BIT(17)  /* RO */
+#define PL080_CONFIG_LOCK                       BIT(16)
+#define PL080_CONFIG_TC_IRQ_MASK                BIT(15)
+#define PL080_CONFIG_ERR_IRQ_MASK               BIT(14)
 #define PL080_CONFIG_FLOW_CONTROL_MASK          (0x7 << 11)
 #define PL080_CONFIG_FLOW_CONTROL_SHIFT         (11)
 #define PL080_CONFIG_DST_SEL_MASK               (0xf << 6)
 #define PL080_CONFIG_DST_SEL_SHIFT              (6)
 #define PL080_CONFIG_SRC_SEL_MASK               (0xf << 1)
 #define PL080_CONFIG_SRC_SEL_SHIFT              (1)
-#define PL080_CONFIG_ENABLE                     (1 << 0)
+#define PL080_CONFIG_ENABLE                     BIT(0)
 
 #define PL080_FLOW_MEM2MEM                      (0x0)
 #define PL080_FLOW_MEM2PER                      (0x1)
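
The removed PL080_Cx_* absolute accessors were redundant with the base-plus-offset scheme: each per-channel register is PL080_Cx_BASE(ch) plus one of the PL080_CH_* offsets kept above, which is how the driver addresses them through the per-channel @base documented earlier. An illustrative helper, not from the patch:

    static inline u32 pl08x_readl_chan(void __iomem *base, unsigned int ch,
                                       unsigned int reg)
    {
            /* reg is PL080_CH_SRC_ADDR, PL080_CH_CONTROL, ... */
            return readl(base + PL080_Cx_BASE(ch) + reg);
    }
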
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index b157b46cc9a6..cd5a5a426ef1 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1502,7 +1502,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
         entry->type = dma_debug_coherent;
         entry->dev = dev;
         entry->pfn = page_to_pfn(virt_to_page(virt));
-        entry->offset = (size_t) virt & ~PAGE_MASK;
+        entry->offset = offset_in_page(virt);
         entry->size = size;
         entry->dev_addr = dma_addr;
         entry->direction = DMA_BIDIRECTIONAL;
@@ -1518,7 +1518,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
                 .type = dma_debug_coherent,
                 .dev = dev,
                 .pfn = page_to_pfn(virt_to_page(virt)),
-                .offset = (size_t) virt & ~PAGE_MASK,
+                .offset = offset_in_page(virt),
                 .dev_addr = addr,
                 .size = size,
                 .direction = DMA_BIDIRECTIONAL,