Diffstat (limited to 'drivers/dma'):

 drivers/dma/Kconfig         |  11
 drivers/dma/dw_dmac_regs.h  |  18
 drivers/dma/imx-dma.c       | 141
 drivers/dma/imx-sdma.c      |   1
 drivers/dma/ipu/ipu_idmac.c |   3
 drivers/dma/ipu/ipu_irq.c   |   3
 drivers/dma/omap-dma.c      |   5
 drivers/dma/sirf-dma.c      |   4
 8 files changed, 113 insertions(+), 73 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 677cd6e4e1a1..d4c12180c654 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -90,6 +90,17 @@ config DW_DMAC
 	  Support the Synopsys DesignWare AHB DMA controller. This
 	  can be integrated in chips such as the Atmel AT32ap7000.
 
+config DW_DMAC_BIG_ENDIAN_IO
+	bool "Use big endian I/O register access"
+	default y if AVR32
+	depends on DW_DMAC
+	help
+	  Say yes here to use big endian I/O access when reading and writing
+	  to the DMA controller registers. This is needed on some platforms,
+	  like the Atmel AVR32 architecture.
+
+	  If unsure, use the default setting.
+
 config AT_HDMAC
 	tristate "Atmel AHB DMA support"
 	depends on ARCH_AT91
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index ff39fa6cd2bc..88965597b7d0 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -98,9 +98,17 @@ struct dw_dma_regs {
 	u32	DW_PARAMS;
 };
 
+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+#define dma_readl_native ioread32be
+#define dma_writel_native iowrite32be
+#else
+#define dma_readl_native readl
+#define dma_writel_native writel
+#endif
+
 /* To access the registers in early stage of probe */
 #define dma_read_byaddr(addr, name) \
-	readl((addr) + offsetof(struct dw_dma_regs, name))
+	dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
 
 /* Bitfields in DW_PARAMS */
 #define DW_PARAMS_NR_CHAN	8	/* number of channels */
@@ -216,9 +224,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
 }
 
 #define channel_readl(dwc, name) \
-	readl(&(__dwc_regs(dwc)->name))
+	dma_readl_native(&(__dwc_regs(dwc)->name))
 #define channel_writel(dwc, name, val) \
-	writel((val), &(__dwc_regs(dwc)->name))
+	dma_writel_native((val), &(__dwc_regs(dwc)->name))
 
 static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 {
@@ -246,9 +254,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
 }
 
 #define dma_readl(dw, name) \
-	readl(&(__dw_regs(dw)->name))
+	dma_readl_native(&(__dw_regs(dw)->name))
 #define dma_writel(dw, name, val) \
-	writel((val), &(__dw_regs(dw)->name))
+	dma_writel_native((val), &(__dw_regs(dw)->name))
 
 #define channel_set_bit(dw, reg, mask) \
 	dma_writel(dw, reg, ((mask) << 8) | (mask))
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index f11b5b2b1a1c..dbf0e6f8de8a 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -29,7 +29,6 @@
 
 #include <asm/irq.h>
 #include <linux/platform_data/dma-imx.h>
-#include <mach/hardware.h>
 
 #include "dmaengine.h"
 #define IMXDMA_MAX_CHAN_DESCRIPTORS	16
@@ -167,6 +166,12 @@ struct imxdma_channel {
 	int				slot_2d;
 };
 
+enum imx_dma_type {
+	IMX1_DMA,
+	IMX21_DMA,
+	IMX27_DMA,
+};
+
 struct imxdma_engine {
 	struct device			*dev;
 	struct device_dma_parameters	dma_parms;
@@ -177,8 +182,40 @@ struct imxdma_engine {
 	spinlock_t			lock;
 	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
 	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
+	enum imx_dma_type		devtype;
 };
 
+static struct platform_device_id imx_dma_devtype[] = {
+	{
+		.name = "imx1-dma",
+		.driver_data = IMX1_DMA,
+	}, {
+		.name = "imx21-dma",
+		.driver_data = IMX21_DMA,
+	}, {
+		.name = "imx27-dma",
+		.driver_data = IMX27_DMA,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
+
+static inline int is_imx1_dma(struct imxdma_engine *imxdma)
+{
+	return imxdma->devtype == IMX1_DMA;
+}
+
+static inline int is_imx21_dma(struct imxdma_engine *imxdma)
+{
+	return imxdma->devtype == IMX21_DMA;
+}
+
+static inline int is_imx27_dma(struct imxdma_engine *imxdma)
+{
+	return imxdma->devtype == IMX27_DMA;
+}
+
 static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
 {
 	return container_of(chan, struct imxdma_channel, chan);
@@ -212,7 +249,9 @@ static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
 
 static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
 {
-	if (cpu_is_mx27())
+	struct imxdma_engine *imxdma = imxdmac->imxdma;
+
+	if (is_imx27_dma(imxdma))
 		return imxdmac->hw_chaining;
 	else
 		return 0;
@@ -267,7 +306,7 @@ static void imxdma_enable_hw(struct imxdma_desc *d)
 	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
 		CCR_CEN | CCR_ACRPT, DMA_CCR(channel));
 
-	if ((cpu_is_mx21() || cpu_is_mx27()) &&
+	if (!is_imx1_dma(imxdma) &&
 	    d->sg && imxdma_hw_chain(imxdmac)) {
 		d->sg = sg_next(d->sg);
 		if (d->sg) {
@@ -436,7 +475,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
 	struct imxdma_engine *imxdma = dev_id;
 	int i, disr;
 
-	if (cpu_is_mx21() || cpu_is_mx27())
+	if (!is_imx1_dma(imxdma))
 		imxdma_err_handler(irq, dev_id);
 
 	disr = imx_dmav1_readl(imxdma, DMA_DISR);
@@ -474,8 +513,10 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 			slot = i;
 			break;
 		}
-		if (slot < 0)
+		if (slot < 0) {
+			spin_unlock_irqrestore(&imxdma->lock, flags);
 			return -EBUSY;
+		}
 
 		imxdma->slots_2d[slot].xsr = d->x;
 		imxdma->slots_2d[slot].ysr = d->y;
@@ -959,35 +1000,32 @@ static void imxdma_issue_pending(struct dma_chan *chan)
 static int __init imxdma_probe(struct platform_device *pdev)
 {
 	struct imxdma_engine *imxdma;
+	struct resource *res;
 	int ret, i;
+	int irq, irq_err;
 
-
-	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
+	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
 	if (!imxdma)
 		return -ENOMEM;
 
-	if (cpu_is_mx1()) {
-		imxdma->base = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
-	} else if (cpu_is_mx21()) {
-		imxdma->base = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
-	} else if (cpu_is_mx27()) {
-		imxdma->base = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
-	} else {
-		kfree(imxdma);
-		return 0;
-	}
+	imxdma->devtype = pdev->id_entry->driver_data;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	imxdma->base = devm_request_and_ioremap(&pdev->dev, res);
+	if (!imxdma->base)
+		return -EADDRNOTAVAIL;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
 
 	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
-	if (IS_ERR(imxdma->dma_ipg)) {
-		ret = PTR_ERR(imxdma->dma_ipg);
-		goto err_clk;
-	}
+	if (IS_ERR(imxdma->dma_ipg))
+		return PTR_ERR(imxdma->dma_ipg);
 
 	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
-	if (IS_ERR(imxdma->dma_ahb)) {
-		ret = PTR_ERR(imxdma->dma_ahb);
-		goto err_clk;
-	}
+	if (IS_ERR(imxdma->dma_ahb))
+		return PTR_ERR(imxdma->dma_ahb);
 
 	clk_prepare_enable(imxdma->dma_ipg);
 	clk_prepare_enable(imxdma->dma_ahb);
@@ -995,18 +1033,25 @@ static int __init imxdma_probe(struct platform_device *pdev)
 	/* reset DMA module */
 	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
 
-	if (cpu_is_mx1()) {
-		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
+	if (is_imx1_dma(imxdma)) {
+		ret = devm_request_irq(&pdev->dev, irq,
+				       dma_irq_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
-			goto err_enable;
+			goto err;
+		}
+
+		irq_err = platform_get_irq(pdev, 1);
+		if (irq_err < 0) {
+			ret = irq_err;
+			goto err;
 		}
 
-		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
+		ret = devm_request_irq(&pdev->dev, irq_err,
+				       imxdma_err_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
-			free_irq(MX1_DMA_INT, NULL);
-			goto err_enable;
+			goto err;
 		}
 	}
 
@@ -1036,14 +1081,14 @@ static int __init imxdma_probe(struct platform_device *pdev)
 	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
 		struct imxdma_channel *imxdmac = &imxdma->channel[i];
 
-		if (cpu_is_mx21() || cpu_is_mx27()) {
-			ret = request_irq(MX2x_INT_DMACH0 + i,
+		if (!is_imx1_dma(imxdma)) {
+			ret = devm_request_irq(&pdev->dev, irq + i,
 				dma_irq_handler, 0, "DMA", imxdma);
 			if (ret) {
 				dev_warn(imxdma->dev, "Can't register IRQ %d "
 					 "for DMA channel %d\n",
-					 MX2x_INT_DMACH0 + i, i);
-				goto err_init;
+					 irq + i, i);
+				goto err;
 			}
 			init_timer(&imxdmac->watchdog);
 			imxdmac->watchdog.function = &imxdma_watchdog;
@@ -1089,46 +1134,25 @@ static int __init imxdma_probe(struct platform_device *pdev)
 	ret = dma_async_device_register(&imxdma->dma_device);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register\n");
-		goto err_init;
+		goto err;
 	}
 
 	return 0;
 
-err_init:
-
-	if (cpu_is_mx21() || cpu_is_mx27()) {
-		while (--i >= 0)
-			free_irq(MX2x_INT_DMACH0 + i, NULL);
-	} else if cpu_is_mx1() {
-		free_irq(MX1_DMA_INT, NULL);
-		free_irq(MX1_DMA_ERR, NULL);
-	}
-err_enable:
+err:
 	clk_disable_unprepare(imxdma->dma_ipg);
 	clk_disable_unprepare(imxdma->dma_ahb);
-err_clk:
-	kfree(imxdma);
 	return ret;
 }
 
 static int __exit imxdma_remove(struct platform_device *pdev)
 {
 	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
-	int i;
 
 	dma_async_device_unregister(&imxdma->dma_device);
 
-	if (cpu_is_mx21() || cpu_is_mx27()) {
-		for (i = 0; i < IMX_DMA_CHANNELS; i++)
-			free_irq(MX2x_INT_DMACH0 + i, NULL);
-	} else if cpu_is_mx1() {
-		free_irq(MX1_DMA_INT, NULL);
-		free_irq(MX1_DMA_ERR, NULL);
-	}
-
 	clk_disable_unprepare(imxdma->dma_ipg);
 	clk_disable_unprepare(imxdma->dma_ahb);
-	kfree(imxdma);
 
 	return 0;
 }
@@ -1137,6 +1161,7 @@ static struct platform_driver imxdma_driver = {
 	.driver		= {
 		.name	= "imx-dma",
 	},
+	.id_table	= imx_dma_devtype,
 	.remove		= __exit_p(imxdma_remove),
 };
 
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c099ca0846f4..f082aa3a918c 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -40,7 +40,6 @@
 #include <asm/irq.h>
 #include <linux/platform_data/dma-imx-sdma.h>
 #include <linux/platform_data/dma-imx.h>
-#include <mach/hardware.h>
 
 #include "dmaengine.h"
 
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index c7573e50aa14..65855373cee6 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -22,8 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
-
-#include <mach/ipu.h>
+#include <linux/dma/ipu-dma.h>
 
 #include "../dmaengine.h"
 #include "ipu_intern.h"
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index fa95bcc3de1f..a5ee37d5320f 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -15,8 +15,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/module.h>
-
-#include <mach/ipu.h>
+#include <linux/dma/ipu-dma.h>
 
 #include "ipu_intern.h"
 
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index bb2d8e7029eb..7d35c237fbf1 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -19,8 +19,7 @@
 
 #include "virt-dma.h"
 
-#include <plat/cpu.h>
-#include <plat/dma.h>
+#include <plat-omap/dma-omap.h>
 
 struct omap_dmadev {
 	struct dma_device ddev;
@@ -438,7 +437,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
 		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
 	}
 
-	if (!cpu_class_is_omap1()) {
+	if (dma_omap2plus()) {
 		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
 		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
 	}
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 64385cde044b..d451caace806 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -109,7 +109,7 @@ static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
 	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
 		node);
 	/* Move the first queued descriptor to active list */
-	list_move_tail(&schan->queued, &schan->active);
+	list_move_tail(&sdesc->node, &schan->active);
 
 	/* Start the DMA transfer */
 	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
@@ -428,7 +428,7 @@ static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
 	unsigned long iflags;
 	int ret;
 
-	if ((xt->dir != DMA_MEM_TO_DEV) || (xt->dir != DMA_DEV_TO_MEM)) {
+	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
 		ret = -EINVAL;
 		goto err_dir;
 	}