Diffstat (limited to 'drivers/char')
 drivers/char/Kconfig                  |    2
 drivers/char/Makefile                 |    1
 drivers/char/hw_random/Kconfig        |   13
 drivers/char/hw_random/Makefile       |    1
 drivers/char/hw_random/amd-rng.c      |    4
 drivers/char/hw_random/geode-rng.c    |    4
 drivers/char/hw_random/intel-rng.c    |   13
 drivers/char/hw_random/pasemi-rng.c   |    2
 drivers/char/hw_random/pseries-rng.c  |    2
 drivers/char/hw_random/via-rng.c      |    8
 drivers/char/hw_random/virtio-rng.c   |    7
 drivers/char/hw_random/xgene-rng.c    |  423
 drivers/char/ipmi/ipmi_msghandler.c   |   15
 drivers/char/ipmi/ipmi_si_intf.c      |   12
 drivers/char/tile-srom.c              |   13
 drivers/char/xillybus/Kconfig         |   33
 drivers/char/xillybus/Makefile        |    7
 drivers/char/xillybus/xillybus.h      |  160
 drivers/char/xillybus/xillybus_core.c | 2103
 drivers/char/xillybus/xillybus_of.c   |  187
 drivers/char/xillybus/xillybus_pcie.c |  228
 21 files changed, 3211 insertions(+), 27 deletions(-)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 6e9f74a5c095..efefd12a0f7b 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -600,5 +600,7 @@ config TILE_SROM
 	  device appear much like a simple EEPROM, and knows
 	  how to partition a single ROM for multiple purposes.
 
+source "drivers/char/xillybus/Kconfig"
+
 endmenu
 
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index a324f9303e36..d06cde26031b 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -61,3 +61,4 @@ obj-$(CONFIG_JS_RTC) += js-rtc.o
 js-rtc-y = rtc.o
 
 obj-$(CONFIG_TILE_SROM)	+= tile-srom.o
+obj-$(CONFIG_XILLYBUS)	+= xillybus/
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 836b061ced35..91a04ae8003c 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -333,6 +333,19 @@ config HW_RANDOM_MSM
 
 	  If unsure, say Y.
 
+config HW_RANDOM_XGENE
+	tristate "APM X-Gene True Random Number Generator (TRNG) support"
+	depends on HW_RANDOM && ARCH_XGENE
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on APM X-Gene SoC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called xgene_rng.
+
+	  If unsure, say Y.
+
 endif # HW_RANDOM
 
 config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 199ed283e149..0b4cd57f4e24 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
 obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
 obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
+obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index c6af038682f1..48f6a83cdd61 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -142,10 +142,10 @@ found:
 	amd_rng.priv = (unsigned long)pmbase;
 	amd_pdev = pdev;
 
-	printk(KERN_INFO "AMD768 RNG detected\n");
+	pr_info("AMD768 RNG detected\n");
 	err = hwrng_register(&amd_rng);
 	if (err) {
-		printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+		pr_err(PFX "RNG registering failed (%d)\n",
 		       err);
 		release_region(pmbase + 0xF0, 8);
 		goto out;
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 4c4d4e140f98..0d0579fe465e 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -109,10 +109,10 @@ found:
 		goto out;
 	geode_rng.priv = (unsigned long)mem;
 
-	printk(KERN_INFO "AMD Geode RNG detected\n");
+	pr_info("AMD Geode RNG detected\n");
 	err = hwrng_register(&geode_rng);
 	if (err) {
-		printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+		pr_err(PFX "RNG registering failed (%d)\n",
 		       err);
 		goto err_unmap;
 	}
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 86fe45c19968..290c880266bf 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -199,7 +199,7 @@ static int intel_rng_init(struct hwrng *rng)
 	if ((hw_status & INTEL_RNG_ENABLED) == 0)
 		hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED);
 	if ((hw_status & INTEL_RNG_ENABLED) == 0) {
-		printk(KERN_ERR PFX "cannot enable RNG, aborting\n");
+		pr_err(PFX "cannot enable RNG, aborting\n");
 		goto out;
 	}
 	err = 0;
@@ -216,7 +216,7 @@ static void intel_rng_cleanup(struct hwrng *rng)
 	if (hw_status & INTEL_RNG_ENABLED)
 		hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED);
 	else
-		printk(KERN_WARNING PFX "unusual: RNG already disabled\n");
+		pr_warn(PFX "unusual: RNG already disabled\n");
 }
 
 
@@ -274,7 +274,7 @@ static int __init intel_rng_hw_init(void *_intel_rng_hw)
 	if (mfc != INTEL_FWH_MANUFACTURER_CODE ||
 	    (dvc != INTEL_FWH_DEVICE_CODE_8M &&
 	     dvc != INTEL_FWH_DEVICE_CODE_4M)) {
-		printk(KERN_NOTICE PFX "FWH not detected\n");
+		pr_notice(PFX "FWH not detected\n");
 		return -ENODEV;
 	}
 
@@ -306,7 +306,6 @@ static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw,
 	    (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
 	    == BIOS_CNTL_LOCK_ENABLE_MASK) {
 		static __initdata /*const*/ char warning[] =
-			KERN_WARNING
 PFX "Firmware space is locked read-only. If you can't or\n"
 PFX "don't want to disable this in firmware setup, and if\n"
 PFX "you are certain that your system has a functional\n"
@@ -314,7 +313,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
 
 	if (no_fwh_detect)
 		return -ENODEV;
-	printk(warning);
+	pr_warn("%s", warning);
 	return -EBUSY;
 	}
 
@@ -392,10 +391,10 @@ fwh_done:
 		goto out;
 	}
 
-	printk(KERN_INFO "Intel 82802 RNG detected\n");
+	pr_info("Intel 82802 RNG detected\n");
 	err = hwrng_register(&intel_rng);
 	if (err) {
-		printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+		pr_err(PFX "RNG registering failed (%d)\n",
 		       err);
 		iounmap(mem);
 	}
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index c66279bb6ef3..c0347d1dded0 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -113,7 +113,7 @@ static int rng_probe(struct platform_device *ofdev)
 
 	pasemi_rng.priv = (unsigned long)rng_regs;
 
-	printk(KERN_INFO "Registering PA Semi RNG\n");
+	pr_info("Registering PA Semi RNG\n");
 
 	err = hwrng_register(&pasemi_rng);
 
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index ab7ffdec0ec3..6226aa08c36a 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -86,7 +86,7 @@ static struct vio_driver pseries_rng_driver = {
 
 static int __init rng_init(void)
 {
-	printk(KERN_INFO "Registering IBM pSeries RNG driver\n");
+	pr_info("Registering IBM pSeries RNG driver\n");
 	return vio_register_driver(&pseries_rng_driver);
 }
 
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index de5a6dcfb3e2..a3bebef255ad 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -141,7 +141,7 @@ static int via_rng_init(struct hwrng *rng)
 	 * register */
 	if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
 		if (!cpu_has_xstore_enabled) {
-			printk(KERN_ERR PFX "can't enable hardware RNG "
+			pr_err(PFX "can't enable hardware RNG "
 				"if XSTORE is not enabled\n");
 			return -ENODEV;
 		}
@@ -180,7 +180,7 @@ static int via_rng_init(struct hwrng *rng)
 	 * unneeded */
 	rdmsr(MSR_VIA_RNG, lo, hi);
 	if ((lo & VIA_RNG_ENABLE) == 0) {
-		printk(KERN_ERR PFX "cannot enable VIA C3 RNG, aborting\n");
+		pr_err(PFX "cannot enable VIA C3 RNG, aborting\n");
 		return -ENODEV;
 	}
 
@@ -202,10 +202,10 @@ static int __init mod_init(void)
 
 	if (!cpu_has_xstore)
 		return -ENODEV;
-	printk(KERN_INFO "VIA RNG detected\n");
+	pr_info("VIA RNG detected\n");
 	err = hwrng_register(&via_rng);
 	if (err) {
-		printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+		pr_err(PFX "RNG registering failed (%d)\n",
 		       err);
 		goto out;
 	}
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 2e3139eda93b..132c9ccfdc62 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -36,6 +36,7 @@ struct virtrng_info {
 	int index;
 	bool busy;
 	bool hwrng_register_done;
+	bool hwrng_removed;
 };
 
 
@@ -68,6 +69,9 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
 	int ret;
 	struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
 
+	if (vi->hwrng_removed)
+		return -ENODEV;
+
 	if (!vi->busy) {
 		vi->busy = true;
 		init_completion(&vi->have_data);
@@ -137,6 +141,9 @@ static void remove_common(struct virtio_device *vdev)
 {
 	struct virtrng_info *vi = vdev->priv;
 
+	vi->hwrng_removed = true;
+	vi->data_avail = 0;
+	complete(&vi->have_data);
 	vdev->config->reset(vdev);
 	vi->busy = false;
 	if (vi->hwrng_register_done)
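The virtio-rng change above unblocks pending readers on hot-unplug: remove_common() raises hwrng_removed, zeroes data_avail and completes have_data, so a reader sleeping in virtio_read() wakes up empty-handed and later calls fail fast with -ENODEV. A minimal sketch of this unblock-on-removal pattern; the demo_* names are hypothetical, and the real driver's killable waits and busy handling are omitted:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_dev {
	bool removed;			/* Set once by the removal path */
	unsigned int data_avail;
	struct completion have_data;
};

static int demo_read(struct demo_dev *d)
{
	if (d->removed)			/* Device already torn down */
		return -ENODEV;

	wait_for_completion(&d->have_data);

	/* Woken by demo_remove() rather than by data: report nothing */
	return d->removed ? -ENODEV : d->data_avail;
}

static void demo_remove(struct demo_dev *d)
{
	d->removed = true;
	d->data_avail = 0;
	complete(&d->have_data);	/* Kick any sleeping reader */
}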
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
new file mode 100644
index 000000000000..23caa05380a8
--- /dev/null
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -0,0 +1,423 @@
+/*
+ * APM X-Gene SoC RNG Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Rameshwar Prasad Sahu <rsahu@apm.com>
+ *	   Shamal Winchurkar <swinchurkar@apm.com>
+ *	   Feng Kan <fkan@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/timer.h>
+
+#define RNG_MAX_DATUM			4
+#define MAX_TRY				100
+#define XGENE_RNG_RETRY_COUNT		20
+#define XGENE_RNG_RETRY_INTERVAL	10
+
+/* RNG Registers */
+#define RNG_INOUT_0			0x00
+#define RNG_INTR_STS_ACK		0x10
+#define RNG_CONTROL			0x14
+#define RNG_CONFIG			0x18
+#define RNG_ALARMCNT			0x1c
+#define RNG_FROENABLE			0x20
+#define RNG_FRODETUNE			0x24
+#define RNG_ALARMMASK			0x28
+#define RNG_ALARMSTOP			0x2c
+#define RNG_OPTIONS			0x78
+#define RNG_EIP_REV			0x7c
+
+#define MONOBIT_FAIL_MASK		BIT(7)
+#define POKER_FAIL_MASK			BIT(6)
+#define LONG_RUN_FAIL_MASK		BIT(5)
+#define RUN_FAIL_MASK			BIT(4)
+#define NOISE_FAIL_MASK			BIT(3)
+#define STUCK_OUT_MASK			BIT(2)
+#define SHUTDOWN_OFLO_MASK		BIT(1)
+#define READY_MASK			BIT(0)
+
+#define MAJOR_HW_REV_RD(src)		(((src) & 0x0f000000) >> 24)
+#define MINOR_HW_REV_RD(src)		(((src) & 0x00f00000) >> 20)
+#define HW_PATCH_LEVEL_RD(src)		(((src) & 0x000f0000) >> 16)
+#define MAX_REFILL_CYCLES_SET(dst, src) \
+			((dst & ~0xffff0000) | (((u32)src << 16) & 0xffff0000))
+#define MIN_REFILL_CYCLES_SET(dst, src) \
+			((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ALARM_THRESHOLD_SET(dst, src) \
+			((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ENABLE_RNG_SET(dst, src) \
+			((dst & ~BIT(10)) | (((u32)src << 10) & BIT(10)))
+#define REGSPEC_TEST_MODE_SET(dst, src) \
+			((dst & ~BIT(8)) | (((u32)src << 8) & BIT(8)))
+#define MONOBIT_FAIL_MASK_SET(dst, src) \
+			((dst & ~BIT(7)) | (((u32)src << 7) & BIT(7)))
+#define POKER_FAIL_MASK_SET(dst, src) \
+			((dst & ~BIT(6)) | (((u32)src << 6) & BIT(6)))
+#define LONG_RUN_FAIL_MASK_SET(dst, src) \
+			((dst & ~BIT(5)) | (((u32)src << 5) & BIT(5)))
+#define RUN_FAIL_MASK_SET(dst, src) \
+			((dst & ~BIT(4)) | (((u32)src << 4) & BIT(4)))
+#define NOISE_FAIL_MASK_SET(dst, src) \
+			((dst & ~BIT(3)) | (((u32)src << 3) & BIT(3)))
+#define STUCK_OUT_MASK_SET(dst, src) \
+			((dst & ~BIT(2)) | (((u32)src << 2) & BIT(2)))
+#define SHUTDOWN_OFLO_MASK_SET(dst, src) \
+			((dst & ~BIT(1)) | (((u32)src << 1) & BIT(1)))
+
+struct xgene_rng_dev {
+	u32 irq;
+	void __iomem *csr_base;
+	u32 revision;
+	u32 datum_size;
+	u32 failure_cnt;	/* Failure count last minute */
+	unsigned long failure_ts;	/* First failure timestamp */
+	struct timer_list failure_timer;
+	struct device *dev;
+	struct clk *clk;
+};
+
+static void xgene_rng_expired_timer(unsigned long arg)
+{
+	struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) arg;
+
+	/* Clear failure counter as timer expired */
+	disable_irq(ctx->irq);
+	ctx->failure_cnt = 0;
+	del_timer(&ctx->failure_timer);
+	enable_irq(ctx->irq);
+}
+
+static void xgene_rng_start_timer(struct xgene_rng_dev *ctx)
+{
+	ctx->failure_timer.data = (unsigned long) ctx;
+	ctx->failure_timer.function = xgene_rng_expired_timer;
+	ctx->failure_timer.expires = jiffies + 120 * HZ;
+	add_timer(&ctx->failure_timer);
+}
+
+/*
+ * Initialize or reinit free running oscillators (FROs)
+ */
+static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val)
+{
+	writel(fro_val, ctx->csr_base + RNG_FRODETUNE);
+	writel(0x00000000, ctx->csr_base + RNG_ALARMMASK);
+	writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP);
+	writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE);
+}
+
+static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
+{
+	u32 val;
+
+	val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+	if (val & MONOBIT_FAIL_MASK)
+		/*
+		 * LFSR detected an out-of-bounds number of 1s after
+		 * checking 20,000 bits (test T1 as specified in the
+		 * AIS-31 standard)
+		 */
+		dev_err(ctx->dev, "test monobit failure error 0x%08X\n", val);
+	if (val & POKER_FAIL_MASK)
+		/*
+		 * LFSR detected an out-of-bounds value in at least one
+		 * of the 16 poker_count_X counters or an out of bounds sum
+		 * of squares value after checking 20,000 bits (test T2 as
+		 * specified in the AIS-31 standard)
+		 */
+		dev_err(ctx->dev, "test poker failure error 0x%08X\n", val);
+	if (val & LONG_RUN_FAIL_MASK)
+		/*
+		 * LFSR detected a sequence of 34 identical bits
+		 * (test T4 as specified in the AIS-31 standard)
+		 */
+		dev_err(ctx->dev, "test long run failure error 0x%08X\n", val);
+	if (val & RUN_FAIL_MASK)
+		/*
+		 * LFSR detected an out-of-bounds value for at least one
+		 * of the running counters after checking 20,000 bits
+		 * (test T3 as specified in the AIS-31 standard)
+		 */
+		dev_err(ctx->dev, "test run failure error 0x%08X\n", val);
+	if (val & NOISE_FAIL_MASK)
+		/* LFSR detected a sequence of 48 identical bits */
+		dev_err(ctx->dev, "noise failure error 0x%08X\n", val);
+	if (val & STUCK_OUT_MASK)
+		/*
+		 * Detected output data registers generated same value twice
+		 * in a row
+		 */
+		dev_err(ctx->dev, "stuck out failure error 0x%08X\n", val);
+
+	if (val & SHUTDOWN_OFLO_MASK) {
+		u32 frostopped;
+
+		/* FROs shut down after a second error event. Try to recover. */
+		if (++ctx->failure_cnt == 1) {
+			/* 1st time, just recover */
+			ctx->failure_ts = jiffies;
+			frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+			xgene_rng_init_fro(ctx, frostopped);
+
+			/*
+			 * We must start a timer to clear out this error
+			 * in case the system timer wraps around
+			 */
+			xgene_rng_start_timer(ctx);
+		} else {
+			/* 2nd time failure in less than 1 minute? */
+			if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) {
+				dev_err(ctx->dev,
+					"FRO shutdown failure error 0x%08X\n",
+					val);
+			} else {
+				/* 2nd time failure after 1 minute, recover */
+				ctx->failure_ts = jiffies;
+				ctx->failure_cnt = 1;
+				/*
+				 * We must start a timer to clear out this
+				 * error in case the system timer wraps
+				 * around
+				 */
+				xgene_rng_start_timer(ctx);
+			}
+			frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+			xgene_rng_init_fro(ctx, frostopped);
+		}
+	}
+	/* Clear them all */
+	writel(val, ctx->csr_base + RNG_INTR_STS_ACK);
+}
+
+static irqreturn_t xgene_rng_irq_handler(int irq, void *id)
+{
+	struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id;
+
+	/* RNG Alarm Counter overflow */
+	xgene_rng_chk_overflow(ctx);
+
+	return IRQ_HANDLED;
+}
+
+static int xgene_rng_data_present(struct hwrng *rng, int wait)
+{
+	struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+	u32 i, val = 0;
+
+	for (i = 0; i < XGENE_RNG_RETRY_COUNT; i++) {
+		val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+		if ((val & READY_MASK) || !wait)
+			break;
+		udelay(XGENE_RNG_RETRY_INTERVAL);
+	}
+
+	return (val & READY_MASK);
+}
+
+static int xgene_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+	int i;
+
+	for (i = 0; i < ctx->datum_size; i++)
+		data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4);
+
+	/* Clear ready bit to start next transaction */
+	writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
+	return ctx->datum_size << 2;
+}
+
+static void xgene_rng_init_internal(struct xgene_rng_dev *ctx)
+{
+	u32 val;
+
+	writel(0x00000000, ctx->csr_base + RNG_CONTROL);
+
+	val = MAX_REFILL_CYCLES_SET(0, 10);
+	val = MIN_REFILL_CYCLES_SET(val, 10);
+	writel(val, ctx->csr_base + RNG_CONFIG);
+
+	val = ALARM_THRESHOLD_SET(0, 0xFF);
+	writel(val, ctx->csr_base + RNG_ALARMCNT);
+
+	xgene_rng_init_fro(ctx, 0);
+
+	writel(MONOBIT_FAIL_MASK |
+		POKER_FAIL_MASK	|
+		LONG_RUN_FAIL_MASK |
+		RUN_FAIL_MASK |
+		NOISE_FAIL_MASK |
+		STUCK_OUT_MASK |
+		SHUTDOWN_OFLO_MASK |
+		READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
+	val = ENABLE_RNG_SET(0, 1);
+	val = MONOBIT_FAIL_MASK_SET(val, 1);
+	val = POKER_FAIL_MASK_SET(val, 1);
+	val = LONG_RUN_FAIL_MASK_SET(val, 1);
+	val = RUN_FAIL_MASK_SET(val, 1);
+	val = NOISE_FAIL_MASK_SET(val, 1);
+	val = STUCK_OUT_MASK_SET(val, 1);
+	val = SHUTDOWN_OFLO_MASK_SET(val, 1);
+	writel(val, ctx->csr_base + RNG_CONTROL);
+}
+
+static int xgene_rng_init(struct hwrng *rng)
+{
+	struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+
+	ctx->failure_cnt = 0;
+	init_timer(&ctx->failure_timer);
+
+	ctx->revision = readl(ctx->csr_base + RNG_EIP_REV);
+
+	dev_dbg(ctx->dev, "Rev %d.%d.%d\n",
+		MAJOR_HW_REV_RD(ctx->revision),
+		MINOR_HW_REV_RD(ctx->revision),
+		HW_PATCH_LEVEL_RD(ctx->revision));
+
+	dev_dbg(ctx->dev, "Options 0x%08X",
+		readl(ctx->csr_base + RNG_OPTIONS));
+
+	xgene_rng_init_internal(ctx);
+
+	ctx->datum_size = RNG_MAX_DATUM;
+
+	return 0;
+}
+
+static struct hwrng xgene_rng_func = {
+	.name		= "xgene-rng",
+	.init		= xgene_rng_init,
+	.data_present	= xgene_rng_data_present,
+	.data_read	= xgene_rng_data_read,
+};
+
+static int xgene_rng_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct xgene_rng_dev *ctx;
+	int rc = 0;
+
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->dev = &pdev->dev;
+	platform_set_drvdata(pdev, ctx);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctx->csr_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ctx->csr_base))
+		return PTR_ERR(ctx->csr_base);
+
+	ctx->irq = platform_get_irq(pdev, 0);
+	if (ctx->irq < 0) {
+		dev_err(&pdev->dev, "No IRQ resource\n");
+		return ctx->irq;
+	}
+
+	dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
+		ctx->csr_base, ctx->irq);
+
+	rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
+			      dev_name(&pdev->dev), ctx);
+	if (rc) {
+		dev_err(&pdev->dev, "Could not request RNG alarm IRQ\n");
+		return rc;
+	}
+
+	/* Enable IP clock */
+	ctx->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(ctx->clk)) {
+		dev_warn(&pdev->dev, "Couldn't get the clock for RNG\n");
+	} else {
+		rc = clk_prepare_enable(ctx->clk);
+		if (rc) {
+			dev_warn(&pdev->dev,
+				 "clock prepare enable failed for RNG");
+			return rc;
+		}
+	}
+
+	xgene_rng_func.priv = (unsigned long) ctx;
+
+	rc = hwrng_register(&xgene_rng_func);
+	if (rc) {
+		dev_err(&pdev->dev, "RNG registering failed error %d\n", rc);
+		if (!IS_ERR(ctx->clk))
+			clk_disable_unprepare(ctx->clk);
+		return rc;
+	}
+
+	rc = device_init_wakeup(&pdev->dev, 1);
+	if (rc) {
+		dev_err(&pdev->dev, "RNG device_init_wakeup failed error %d\n",
+			rc);
+		if (!IS_ERR(ctx->clk))
+			clk_disable_unprepare(ctx->clk);
+		hwrng_unregister(&xgene_rng_func);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int xgene_rng_remove(struct platform_device *pdev)
+{
+	struct xgene_rng_dev *ctx = platform_get_drvdata(pdev);
+	int rc;
+
+	rc = device_init_wakeup(&pdev->dev, 0);
+	if (rc)
+		dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
+	if (!IS_ERR(ctx->clk))
+		clk_disable_unprepare(ctx->clk);
+	hwrng_unregister(&xgene_rng_func);
+
+	return rc;
+}
+
+static const struct of_device_id xgene_rng_of_match[] = {
+	{ .compatible = "apm,xgene-rng" },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
+
+static struct platform_driver xgene_rng_driver = {
+	.probe = xgene_rng_probe,
+	.remove = xgene_rng_remove,
+	.driver = {
+		.name		= "xgene-rng",
+		.of_match_table = xgene_rng_of_match,
+	},
+};
+
+module_platform_driver(xgene_rng_driver);
+MODULE_DESCRIPTION("APM X-Gene RNG driver");
+MODULE_LICENSE("GPL");
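xgene-rng plugs into the hw_random core through the older data_present()/data_read() callbacks: the core polls data_present() (bounded here by XGENE_RNG_RETRY_COUNT udelay() retries) and then pulls up to RNG_MAX_DATUM 32-bit words per data_read() call. From userspace, any registered and selected hwrng source is consumed the same way, through /dev/hwrng. A sketch, illustrative only, assuming the device node exists and this RNG is the current source:

#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("read");
		close(fd);
		return 1;
	}
	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);	/* Hex-dump the random bytes */
	putchar('\n');
	close(fd);
	return 0;
}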
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e6db9381b2c7..f816211f062f 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2796,7 +2796,6 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
 				= IPMI_CHANNEL_MEDIUM_IPMB;
 			intf->channels[0].protocol
 				= IPMI_CHANNEL_PROTOCOL_IPMB;
-			rv = -ENOSYS;
 
 			intf->curr_channel = IPMI_MAX_CHANNELS;
 			wake_up(&intf->waitq);
@@ -2821,12 +2820,12 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
 
 		if (rv) {
 			/* Got an error somehow, just give up. */
+			printk(KERN_WARNING PFX
+			       "Error sending channel information for channel"
+			       " %d: %d\n", intf->curr_channel, rv);
+
 			intf->curr_channel = IPMI_MAX_CHANNELS;
 			wake_up(&intf->waitq);
-
-			printk(KERN_WARNING PFX
-			       "Error sending channel information: %d\n",
-			       rv);
 		}
 	}
  out:
@@ -2964,8 +2963,12 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 		intf->null_user_handler = channel_handler;
 		intf->curr_channel = 0;
 		rv = send_channel_info_cmd(intf, 0);
-		if (rv)
+		if (rv) {
+			printk(KERN_WARNING PFX
+			       "Error sending channel information for channel"
+			       " 0, %d\n", rv);
 			goto out;
+		}
 
 		/* Wait for the channel info to be read. */
 		wait_event(intf->waitq,
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 5d665680ae33..5c4e1f625bbb 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -965,9 +965,9 @@ static inline int ipmi_si_is_busy(struct timespec *ts)
 	return ts->tv_nsec != -1;
 }
 
-static int ipmi_thread_busy_wait(enum si_sm_result smi_result,
+static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
 				 const struct smi_info *smi_info,
 				 struct timespec *busy_until)
 {
 	unsigned int max_busy_us = 0;
 
@@ -2658,6 +2658,9 @@ static int ipmi_probe(struct platform_device *dev)
 	if (!match)
 		return -EINVAL;
 
+	if (!of_device_is_available(np))
+		return -EINVAL;
+
 	ret = of_address_to_resource(np, 0, &resource);
 	if (ret) {
 		dev_warn(&dev->dev, PFX "invalid address from OF\n");
@@ -3655,6 +3658,9 @@ static void cleanup_one_si(struct smi_info *to_clean)
 	if (!to_clean)
 		return;
 
+	if (to_clean->dev)
+		dev_set_drvdata(to_clean->dev, NULL);
+
 	list_del(&to_clean->link);
 
 	/* Tell the driver that we are shutting down. */
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index bd377472dcfb..02e76ac6d282 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -76,6 +76,7 @@ MODULE_LICENSE("GPL");
 
 static int srom_devs;			/* Number of SROM partitions */
 static struct cdev srom_cdev;
+static struct platform_device *srom_parent;
 static struct class *srom_class;
 static struct srom_dev *srom_devices;
 
@@ -350,7 +351,7 @@ static int srom_setup_minor(struct srom_dev *srom, int index)
 			 SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0)
 		return -EIO;
 
-	dev = device_create(srom_class, &platform_bus,
+	dev = device_create(srom_class, &srom_parent->dev,
 			    MKDEV(srom_major, index), srom, "%d", index);
 	return PTR_ERR_OR_ZERO(dev);
 }
@@ -415,6 +416,13 @@ static int srom_init(void)
 	if (result < 0)
 		goto fail_chrdev;
 
+	/* Create a parent device */
+	srom_parent = platform_device_register_simple("srom", -1, NULL, 0);
+	if (IS_ERR(srom_parent)) {
+		result = PTR_ERR(srom_parent);
+		goto fail_pdev;
+	}
+
 	/* Create a sysfs class. */
 	srom_class = class_create(THIS_MODULE, "srom");
 	if (IS_ERR(srom_class)) {
@@ -438,6 +446,8 @@ fail_class:
 		device_destroy(srom_class, MKDEV(srom_major, i));
 	class_destroy(srom_class);
 fail_cdev:
+	platform_device_unregister(srom_parent);
+fail_pdev:
 	cdev_del(&srom_cdev);
 fail_chrdev:
 	unregister_chrdev_region(dev, srom_devs);
@@ -454,6 +464,7 @@ static void srom_cleanup(void)
 		device_destroy(srom_class, MKDEV(srom_major, i));
 	class_destroy(srom_class);
 	cdev_del(&srom_cdev);
+	platform_device_unregister(srom_parent);
 	unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs);
 	kfree(srom_devices);
 }
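The tile-srom change swaps the deprecated &platform_bus parent for a driver-owned dummy platform device, so the class devices get a proper sysfs parent and the error path can unwind it. Condensed, the pattern looks like the sketch below; the demo_* names and the major number are illustrative, and error handling is abbreviated:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_device *demo_parent;
static struct class *demo_class;

static int __init demo_init(void)
{
	/* One dummy device to act as sysfs parent for all minors */
	demo_parent = platform_device_register_simple("demo", -1, NULL, 0);
	if (IS_ERR(demo_parent))
		return PTR_ERR(demo_parent);

	demo_class = class_create(THIS_MODULE, "demo");
	if (IS_ERR(demo_class)) {
		platform_device_unregister(demo_parent);
		return PTR_ERR(demo_class);
	}

	/* Children hang off demo_parent->dev instead of platform_bus */
	device_create(demo_class, &demo_parent->dev, MKDEV(240, 0),
		      NULL, "demo0");
	return 0;
}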
diff --git a/drivers/char/xillybus/Kconfig b/drivers/char/xillybus/Kconfig
new file mode 100644
index 000000000000..b53bdf12da0d
--- /dev/null
+++ b/drivers/char/xillybus/Kconfig
@@ -0,0 +1,33 @@
+#
+# Xillybus devices
+#
+
+config XILLYBUS
+	tristate "Xillybus generic FPGA interface"
+	depends on PCI || (OF_ADDRESS && OF_IRQ)
+	select CRC32
+	help
+	  Xillybus is a generic interface for peripherals designed on
+	  programmable logic (FPGA). The driver probes the hardware for
+	  its capabilities, and creates device files accordingly.
+
+	  If unsure, say N.
+
+if XILLYBUS
+
+config XILLYBUS_PCIE
+	tristate "Xillybus over PCIe"
+	depends on PCI_MSI
+	help
+	  Set to M if you want Xillybus to use PCI Express for communicating
+	  with the FPGA.
+
+config XILLYBUS_OF
+	tristate "Xillybus over Device Tree"
+	depends on OF_ADDRESS && OF_IRQ
+	help
+	  Set to M if you want Xillybus to find its resources from the
+	  Open Firmware Flattened Device Tree. If the target is an embedded
+	  system, say M.
+
+endif # if XILLYBUS
diff --git a/drivers/char/xillybus/Makefile b/drivers/char/xillybus/Makefile
new file mode 100644
index 000000000000..b68b7ebfd381
--- /dev/null
+++ b/drivers/char/xillybus/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Xillybus driver
+#
+
+obj-$(CONFIG_XILLYBUS)		+= xillybus_core.o
+obj-$(CONFIG_XILLYBUS_PCIE)	+= xillybus_pcie.o
+obj-$(CONFIG_XILLYBUS_OF)	+= xillybus_of.o
diff --git a/drivers/char/xillybus/xillybus.h b/drivers/char/xillybus/xillybus.h
new file mode 100644
index 000000000000..b9a9eb6d4f72
--- /dev/null
+++ b/drivers/char/xillybus/xillybus.h
@@ -0,0 +1,160 @@
+/*
+ * linux/drivers/misc/xillybus.h
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Header file for the Xillybus FPGA/host framework.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __XILLYBUS_H
+#define __XILLYBUS_H
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/cdev.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct xilly_endpoint_hardware;
+
+struct xilly_buffer {
+	void *addr;
+	dma_addr_t dma_addr;
+	int end_offset; /* Counting elements, not bytes */
+};
+
+struct xilly_idt_handle {
+	unsigned char *chandesc;
+	unsigned char *idt;
+	int entries;
+};
+
+/*
+ * Read-write confusion: wr_* and rd_* notation sticks to FPGA view, so
+ * wr_* buffers are those consumed by read(), since the FPGA writes to them
+ * and vice versa.
+ */
+
+struct xilly_channel {
+	struct xilly_endpoint *endpoint;
+	int chan_num;
+	int log2_element_size;
+	int seekable;
+
+	struct xilly_buffer **wr_buffers; /* FPGA writes, driver reads! */
+	int num_wr_buffers;
+	unsigned int wr_buf_size; /* In bytes */
+	int wr_fpga_buf_idx;
+	int wr_host_buf_idx;
+	int wr_host_buf_pos;
+	int wr_empty;
+	int wr_ready; /* Significant only when wr_empty == 1 */
+	int wr_sleepy;
+	int wr_eof;
+	int wr_hangup;
+	spinlock_t wr_spinlock;
+	struct mutex wr_mutex;
+	wait_queue_head_t wr_wait;
+	wait_queue_head_t wr_ready_wait;
+	int wr_ref_count;
+	int wr_synchronous;
+	int wr_allow_partial;
+	int wr_exclusive_open;
+	int wr_supports_nonempty;
+
+	struct xilly_buffer **rd_buffers; /* FPGA reads, driver writes! */
+	int num_rd_buffers;
+	unsigned int rd_buf_size; /* In bytes */
+	int rd_fpga_buf_idx;
+	int rd_host_buf_pos;
+	int rd_host_buf_idx;
+	int rd_full;
+	spinlock_t rd_spinlock;
+	struct mutex rd_mutex;
+	wait_queue_head_t rd_wait;
+	int rd_ref_count;
+	int rd_allow_partial;
+	int rd_synchronous;
+	int rd_exclusive_open;
+	struct delayed_work rd_workitem;
+	unsigned char rd_leftovers[4];
+};
+
+struct xilly_endpoint {
+	/*
+	 * One of pdev and dev is always NULL, and the other is a valid
+	 * pointer, depending on the type of device
+	 */
+	struct pci_dev *pdev;
+	struct device *dev;
+	struct xilly_endpoint_hardware *ephw;
+
+	struct list_head ep_list;
+	int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
+	__iomem void *registers;
+	int fatal_error;
+
+	struct mutex register_mutex;
+	wait_queue_head_t ep_wait;
+
+	/* Channels and message handling */
+	struct cdev cdev;
+
+	int major;
+	int lowest_minor; /* Highest minor = lowest_minor + num_channels - 1 */
+
+	int num_channels; /* EXCLUDING message buffer */
+	struct xilly_channel **channels;
+	int msg_counter;
+	int failed_messages;
+	int idtlen;
+
+	u32 *msgbuf_addr;
+	dma_addr_t msgbuf_dma_addr;
+	unsigned int msg_buf_size;
+};
+
+struct xilly_endpoint_hardware {
+	struct module *owner;
+	void (*hw_sync_sgl_for_cpu)(struct xilly_endpoint *,
+				    dma_addr_t,
+				    size_t,
+				    int);
+	void (*hw_sync_sgl_for_device)(struct xilly_endpoint *,
+				       dma_addr_t,
+				       size_t,
+				       int);
+	int (*map_single)(struct xilly_endpoint *,
+			  void *,
+			  size_t,
+			  int,
+			  dma_addr_t *);
+};
+
+struct xilly_mapping {
+	void *device;
+	dma_addr_t dma_addr;
+	size_t size;
+	int direction;
+};
+
+irqreturn_t xillybus_isr(int irq, void *data);
+
+struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
+					      struct device *dev,
+					      struct xilly_endpoint_hardware
+					      *ephw);
+
+int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint);
+
+void xillybus_endpoint_remove(struct xilly_endpoint *endpoint);
+
+#endif /* __XILLYBUS_H */
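As the comment in xillybus.h stresses, the wr_*/rd_* names follow the FPGA's point of view: wr_* buffers (which the FPGA writes) feed the host's read(), and rd_* buffers (which the FPGA reads) absorb the host's write(). From userspace that inversion is invisible; a sketch using example pipe names from a typical Xillybus configuration (the actual /dev names depend on the pipes defined in the IP core):

#include <fcntl.h>
#include <unistd.h>

int demo(void)
{
	char buf[64];
	/* read() drains wr_* buffers, write() fills rd_* buffers */
	int rfd = open("/dev/xillybus_read_32", O_RDONLY);
	int wfd = open("/dev/xillybus_write_32", O_WRONLY);

	if (rfd < 0 || wfd < 0)
		return -1;

	write(wfd, "hello", 5);		/* Host -> rd_* -> FPGA FIFO */
	read(rfd, buf, sizeof(buf));	/* FPGA FIFO -> wr_* -> host */

	close(rfd);
	close(wfd);
	return 0;
}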
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c new file mode 100644 index 000000000000..b827fa095f1b --- /dev/null +++ b/drivers/char/xillybus/xillybus_core.c | |||
@@ -0,0 +1,2103 @@ | |||
1 | /* | ||
2 | * linux/drivers/misc/xillybus_core.c | ||
3 | * | ||
4 | * Copyright 2011 Xillybus Ltd, http://xillybus.com | ||
5 | * | ||
6 | * Driver for the Xillybus FPGA/host framework. | ||
7 | * | ||
8 | * This driver interfaces with a special IP core in an FPGA, setting up | ||
9 | * a pipe between a hardware FIFO in the programmable logic and a device | ||
10 | * file in the host. The number of such pipes and their attributes are | ||
11 | * set up on the logic. This driver detects these automatically and | ||
12 | * creates the device files accordingly. | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the smems of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; version 2 of the License. | ||
17 | */ | ||
18 | |||
19 | #include <linux/list.h> | ||
20 | #include <linux/device.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/dma-mapping.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/fs.h> | ||
27 | #include <linux/cdev.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/mutex.h> | ||
30 | #include <linux/crc32.h> | ||
31 | #include <linux/poll.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/workqueue.h> | ||
35 | #include "xillybus.h" | ||
36 | |||
37 | MODULE_DESCRIPTION("Xillybus core functions"); | ||
38 | MODULE_AUTHOR("Eli Billauer, Xillybus Ltd."); | ||
39 | MODULE_VERSION("1.07"); | ||
40 | MODULE_ALIAS("xillybus_core"); | ||
41 | MODULE_LICENSE("GPL v2"); | ||
42 | |||
43 | /* General timeout is 100 ms, rx timeout is 10 ms */ | ||
44 | #define XILLY_RX_TIMEOUT (10*HZ/1000) | ||
45 | #define XILLY_TIMEOUT (100*HZ/1000) | ||
46 | |||
47 | #define fpga_msg_ctrl_reg 0x0008 | ||
48 | #define fpga_dma_control_reg 0x0020 | ||
49 | #define fpga_dma_bufno_reg 0x0024 | ||
50 | #define fpga_dma_bufaddr_lowaddr_reg 0x0028 | ||
51 | #define fpga_dma_bufaddr_highaddr_reg 0x002c | ||
52 | #define fpga_buf_ctrl_reg 0x0030 | ||
53 | #define fpga_buf_offset_reg 0x0034 | ||
54 | #define fpga_endian_reg 0x0040 | ||
55 | |||
56 | #define XILLYMSG_OPCODE_RELEASEBUF 1 | ||
57 | #define XILLYMSG_OPCODE_QUIESCEACK 2 | ||
58 | #define XILLYMSG_OPCODE_FIFOEOF 3 | ||
59 | #define XILLYMSG_OPCODE_FATAL_ERROR 4 | ||
60 | #define XILLYMSG_OPCODE_NONEMPTY 5 | ||
61 | |||
62 | static const char xillyname[] = "xillybus"; | ||
63 | |||
64 | static struct class *xillybus_class; | ||
65 | |||
66 | /* | ||
67 | * ep_list_lock is the last lock to be taken; No other lock requests are | ||
68 | * allowed while holding it. It merely protects list_of_endpoints, and not | ||
69 | * the endpoints listed in it. | ||
70 | */ | ||
71 | |||
72 | static LIST_HEAD(list_of_endpoints); | ||
73 | static struct mutex ep_list_lock; | ||
74 | static struct workqueue_struct *xillybus_wq; | ||
75 | |||
76 | /* | ||
77 | * Locking scheme: Mutexes protect invocations of character device methods. | ||
78 | * If both locks are taken, wr_mutex is taken first, rd_mutex second. | ||
79 | * | ||
80 | * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the | ||
81 | * buffers' end_offset fields against changes made by IRQ handler (and in | ||
82 | * theory, other file request handlers, but the mutex handles that). Nothing | ||
83 | * else. | ||
84 | * They are held for short direct memory manipulations. Needless to say, | ||
85 | * no mutex locking is allowed when a spinlock is held. | ||
86 | * | ||
87 | * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset. | ||
88 | * | ||
89 | * register_mutex is endpoint-specific, and is held when non-atomic | ||
90 | * register operations are performed. wr_mutex and rd_mutex may be | ||
91 | * held when register_mutex is taken, but none of the spinlocks. Note that | ||
92 | * register_mutex doesn't protect against sporadic buf_ctrl_reg writes | ||
93 | * which are unrelated to buf_offset_reg, since they are harmless. | ||
94 | * | ||
95 | * Blocking on the wait queues is allowed with mutexes held, but not with | ||
96 | * spinlocks. | ||
97 | * | ||
98 | * Only interruptible blocking is allowed on mutexes and wait queues. | ||
99 | * | ||
100 | * All in all, the locking order goes (with skips allowed, of course): | ||
101 | * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock | ||
102 | */ | ||
103 | |||
104 | static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf) | ||
105 | { | ||
106 | int opcode; | ||
107 | int msg_channel, msg_bufno, msg_data, msg_dir; | ||
108 | |||
109 | opcode = (buf[0] >> 24) & 0xff; | ||
110 | msg_dir = buf[0] & 1; | ||
111 | msg_channel = (buf[0] >> 1) & 0x7ff; | ||
112 | msg_bufno = (buf[0] >> 12) & 0x3ff; | ||
113 | msg_data = buf[1] & 0xfffffff; | ||
114 | |||
115 | dev_warn(endpoint->dev, | ||
116 | "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n", | ||
117 | opcode, msg_channel, msg_dir, msg_bufno, msg_data); | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * xillybus_isr assumes the interrupt is allocated exclusively to it, | ||
122 | * which is the natural case MSI and several other hardware-oriented | ||
123 | * interrupts. Sharing is not allowed. | ||
124 | */ | ||
125 | |||
126 | irqreturn_t xillybus_isr(int irq, void *data) | ||
127 | { | ||
128 | struct xilly_endpoint *ep = data; | ||
129 | u32 *buf; | ||
130 | unsigned int buf_size; | ||
131 | int i; | ||
132 | int opcode; | ||
133 | unsigned int msg_channel, msg_bufno, msg_data, msg_dir; | ||
134 | struct xilly_channel *channel; | ||
135 | |||
136 | buf = ep->msgbuf_addr; | ||
137 | buf_size = ep->msg_buf_size/sizeof(u32); | ||
138 | |||
139 | ep->ephw->hw_sync_sgl_for_cpu(ep, | ||
140 | ep->msgbuf_dma_addr, | ||
141 | ep->msg_buf_size, | ||
142 | DMA_FROM_DEVICE); | ||
143 | |||
144 | for (i = 0; i < buf_size; i += 2) { | ||
145 | if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) { | ||
146 | malformed_message(ep, &buf[i]); | ||
147 | dev_warn(ep->dev, | ||
148 | "Sending a NACK on counter %x (instead of %x) on entry %d\n", | ||
149 | ((buf[i+1] >> 28) & 0xf), | ||
150 | ep->msg_counter, | ||
151 | i/2); | ||
152 | |||
153 | if (++ep->failed_messages > 10) { | ||
154 | dev_err(ep->dev, | ||
155 | "Lost sync with interrupt messages. Stopping.\n"); | ||
156 | } else { | ||
157 | ep->ephw->hw_sync_sgl_for_device( | ||
158 | ep, | ||
159 | ep->msgbuf_dma_addr, | ||
160 | ep->msg_buf_size, | ||
161 | DMA_FROM_DEVICE); | ||
162 | |||
163 | iowrite32(0x01, /* Message NACK */ | ||
164 | ep->registers + fpga_msg_ctrl_reg); | ||
165 | } | ||
166 | return IRQ_HANDLED; | ||
167 | } else if (buf[i] & (1 << 22)) /* Last message */ | ||
168 | break; | ||
169 | } | ||
170 | |||
171 | if (i >= buf_size) { | ||
172 | dev_err(ep->dev, "Bad interrupt message. Stopping.\n"); | ||
173 | return IRQ_HANDLED; | ||
174 | } | ||
175 | |||
176 | buf_size = i + 2; | ||
177 | |||
178 | for (i = 0; i < buf_size; i += 2) { /* Scan through messages */ | ||
179 | opcode = (buf[i] >> 24) & 0xff; | ||
180 | |||
181 | msg_dir = buf[i] & 1; | ||
182 | msg_channel = (buf[i] >> 1) & 0x7ff; | ||
183 | msg_bufno = (buf[i] >> 12) & 0x3ff; | ||
184 | msg_data = buf[i+1] & 0xfffffff; | ||
185 | |||
186 | switch (opcode) { | ||
187 | case XILLYMSG_OPCODE_RELEASEBUF: | ||
188 | if ((msg_channel > ep->num_channels) || | ||
189 | (msg_channel == 0)) { | ||
190 | malformed_message(ep, &buf[i]); | ||
191 | break; | ||
192 | } | ||
193 | |||
194 | channel = ep->channels[msg_channel]; | ||
195 | |||
196 | if (msg_dir) { /* Write channel */ | ||
197 | if (msg_bufno >= channel->num_wr_buffers) { | ||
198 | malformed_message(ep, &buf[i]); | ||
199 | break; | ||
200 | } | ||
201 | spin_lock(&channel->wr_spinlock); | ||
202 | channel->wr_buffers[msg_bufno]->end_offset = | ||
203 | msg_data; | ||
204 | channel->wr_fpga_buf_idx = msg_bufno; | ||
205 | channel->wr_empty = 0; | ||
206 | channel->wr_sleepy = 0; | ||
207 | spin_unlock(&channel->wr_spinlock); | ||
208 | |||
209 | wake_up_interruptible(&channel->wr_wait); | ||
210 | |||
211 | } else { | ||
212 | /* Read channel */ | ||
213 | |||
214 | if (msg_bufno >= channel->num_rd_buffers) { | ||
215 | malformed_message(ep, &buf[i]); | ||
216 | break; | ||
217 | } | ||
218 | |||
219 | spin_lock(&channel->rd_spinlock); | ||
220 | channel->rd_fpga_buf_idx = msg_bufno; | ||
221 | channel->rd_full = 0; | ||
222 | spin_unlock(&channel->rd_spinlock); | ||
223 | |||
224 | wake_up_interruptible(&channel->rd_wait); | ||
225 | if (!channel->rd_synchronous) | ||
226 | queue_delayed_work( | ||
227 | xillybus_wq, | ||
228 | &channel->rd_workitem, | ||
229 | XILLY_RX_TIMEOUT); | ||
230 | } | ||
231 | |||
232 | break; | ||
233 | case XILLYMSG_OPCODE_NONEMPTY: | ||
234 | if ((msg_channel > ep->num_channels) || | ||
235 | (msg_channel == 0) || (!msg_dir) || | ||
236 | !ep->channels[msg_channel]->wr_supports_nonempty) { | ||
237 | malformed_message(ep, &buf[i]); | ||
238 | break; | ||
239 | } | ||
240 | |||
241 | channel = ep->channels[msg_channel]; | ||
242 | |||
243 | if (msg_bufno >= channel->num_wr_buffers) { | ||
244 | malformed_message(ep, &buf[i]); | ||
245 | break; | ||
246 | } | ||
247 | spin_lock(&channel->wr_spinlock); | ||
248 | if (msg_bufno == channel->wr_host_buf_idx) | ||
249 | channel->wr_ready = 1; | ||
250 | spin_unlock(&channel->wr_spinlock); | ||
251 | |||
252 | wake_up_interruptible(&channel->wr_ready_wait); | ||
253 | |||
254 | break; | ||
255 | case XILLYMSG_OPCODE_QUIESCEACK: | ||
256 | ep->idtlen = msg_data; | ||
257 | wake_up_interruptible(&ep->ep_wait); | ||
258 | |||
259 | break; | ||
260 | case XILLYMSG_OPCODE_FIFOEOF: | ||
261 | if ((msg_channel > ep->num_channels) || | ||
262 | (msg_channel == 0) || (!msg_dir) || | ||
263 | !ep->channels[msg_channel]->num_wr_buffers) { | ||
264 | malformed_message(ep, &buf[i]); | ||
265 | break; | ||
266 | } | ||
267 | channel = ep->channels[msg_channel]; | ||
268 | spin_lock(&channel->wr_spinlock); | ||
269 | channel->wr_eof = msg_bufno; | ||
270 | channel->wr_sleepy = 0; | ||
271 | |||
272 | channel->wr_hangup = channel->wr_empty && | ||
273 | (channel->wr_host_buf_idx == msg_bufno); | ||
274 | |||
275 | spin_unlock(&channel->wr_spinlock); | ||
276 | |||
277 | wake_up_interruptible(&channel->wr_wait); | ||
278 | |||
279 | break; | ||
280 | case XILLYMSG_OPCODE_FATAL_ERROR: | ||
281 | ep->fatal_error = 1; | ||
282 | wake_up_interruptible(&ep->ep_wait); /* For select() */ | ||
283 | dev_err(ep->dev, | ||
284 | "FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n"); | ||
285 | break; | ||
286 | default: | ||
287 | malformed_message(ep, &buf[i]); | ||
288 | break; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | ep->ephw->hw_sync_sgl_for_device(ep, | ||
293 | ep->msgbuf_dma_addr, | ||
294 | ep->msg_buf_size, | ||
295 | DMA_FROM_DEVICE); | ||
296 | |||
297 | ep->msg_counter = (ep->msg_counter + 1) & 0xf; | ||
298 | ep->failed_messages = 0; | ||
299 | iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */ | ||
300 | |||
301 | return IRQ_HANDLED; | ||
302 | } | ||
303 | EXPORT_SYMBOL(xillybus_isr); | ||
304 | |||
305 | /* | ||
306 | * A few trivial memory management functions. | ||
307 | * NOTE: These functions are used only on probe and remove, and therefore | ||
308 | * no locks are applied! | ||
309 | */ | ||
310 | |||
311 | static void xillybus_autoflush(struct work_struct *work); | ||
312 | |||
313 | struct xilly_alloc_state { | ||
314 | void *salami; | ||
315 | int left_of_salami; | ||
316 | int nbuffer; | ||
317 | enum dma_data_direction direction; | ||
318 | u32 regdirection; | ||
319 | }; | ||
320 | |||
321 | static int xilly_get_dma_buffers(struct xilly_endpoint *ep, | ||
322 | struct xilly_alloc_state *s, | ||
323 | struct xilly_buffer **buffers, | ||
324 | int bufnum, int bytebufsize) | ||
325 | { | ||
326 | int i, rc; | ||
327 | dma_addr_t dma_addr; | ||
328 | struct device *dev = ep->dev; | ||
329 | struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */ | ||
330 | |||
331 | if (buffers) { /* Not the message buffer */ | ||
332 | this_buffer = devm_kcalloc(dev, bufnum, | ||
333 | sizeof(struct xilly_buffer), | ||
334 | GFP_KERNEL); | ||
335 | if (!this_buffer) | ||
336 | return -ENOMEM; | ||
337 | } | ||
338 | |||
339 | for (i = 0; i < bufnum; i++) { | ||
340 | /* | ||
341 | * Buffers are expected in descending size order, so there | ||
342 | * is either enough space for this buffer or none at all. | ||
343 | */ | ||
344 | |||
345 | if ((s->left_of_salami < bytebufsize) && | ||
346 | (s->left_of_salami > 0)) { | ||
347 | dev_err(ep->dev, | ||
348 | "Corrupt buffer allocation in IDT. Aborting.\n"); | ||
349 | return -ENODEV; | ||
350 | } | ||
351 | |||
352 | if (s->left_of_salami == 0) { | ||
353 | int allocorder, allocsize; | ||
354 | |||
355 | allocsize = PAGE_SIZE; | ||
356 | allocorder = 0; | ||
357 | while (bytebufsize > allocsize) { | ||
358 | allocsize *= 2; | ||
359 | allocorder++; | ||
360 | } | ||
361 | |||
362 | s->salami = (void *) devm_get_free_pages( | ||
363 | dev, | ||
364 | GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO, | ||
365 | allocorder); | ||
366 | if (!s->salami) | ||
367 | return -ENOMEM; | ||
368 | |||
369 | s->left_of_salami = allocsize; | ||
370 | } | ||
371 | |||
372 | rc = ep->ephw->map_single(ep, s->salami, | ||
373 | bytebufsize, s->direction, | ||
374 | &dma_addr); | ||
375 | if (rc) | ||
376 | return rc; | ||
377 | |||
378 | iowrite32((u32) (dma_addr & 0xffffffff), | ||
379 | ep->registers + fpga_dma_bufaddr_lowaddr_reg); | ||
380 | iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)), | ||
381 | ep->registers + fpga_dma_bufaddr_highaddr_reg); | ||
382 | |||
383 | if (buffers) { /* Not the message buffer */ | ||
384 | this_buffer->addr = s->salami; | ||
385 | this_buffer->dma_addr = dma_addr; | ||
386 | buffers[i] = this_buffer++; | ||
387 | |||
388 | iowrite32(s->regdirection | s->nbuffer++, | ||
389 | ep->registers + fpga_dma_bufno_reg); | ||
390 | } else { | ||
391 | ep->msgbuf_addr = s->salami; | ||
392 | ep->msgbuf_dma_addr = dma_addr; | ||
393 | ep->msg_buf_size = bytebufsize; | ||
394 | |||
395 | iowrite32(s->regdirection, | ||
396 | ep->registers + fpga_dma_bufno_reg); | ||
397 | } | ||
398 | |||
399 | s->left_of_salami -= bytebufsize; | ||
400 | s->salami += bytebufsize; | ||
401 | } | ||
402 | return 0; | ||
403 | } | ||
404 | |||
405 | static int xilly_setupchannels(struct xilly_endpoint *ep, | ||
406 | unsigned char *chandesc, | ||
407 | int entries) | ||
408 | { | ||
409 | struct device *dev = ep->dev; | ||
410 | int i, entry, rc; | ||
411 | struct xilly_channel *channel; | ||
412 | int channelnum, bufnum, bufsize, format, is_writebuf; | ||
413 | int bytebufsize; | ||
414 | int synchronous, allowpartial, exclusive_open, seekable; | ||
415 | int supports_nonempty; | ||
416 | int msg_buf_done = 0; | ||
417 | |||
418 | struct xilly_alloc_state rd_alloc = { | ||
419 | .salami = NULL, | ||
420 | .left_of_salami = 0, | ||
421 | .nbuffer = 1, | ||
422 | .direction = DMA_TO_DEVICE, | ||
423 | .regdirection = 0, | ||
424 | }; | ||
425 | |||
426 | struct xilly_alloc_state wr_alloc = { | ||
427 | .salami = NULL, | ||
428 | .left_of_salami = 0, | ||
429 | .nbuffer = 1, | ||
430 | .direction = DMA_FROM_DEVICE, | ||
431 | .regdirection = 0x80000000, | ||
432 | }; | ||
433 | |||
434 | channel = devm_kcalloc(dev, ep->num_channels, | ||
435 | sizeof(struct xilly_channel), GFP_KERNEL); | ||
436 | if (!channel) | ||
437 | return -ENOMEM; | ||
438 | |||
439 | ep->channels = devm_kcalloc(dev, ep->num_channels + 1, | ||
440 | sizeof(struct xilly_channel *), | ||
441 | GFP_KERNEL); | ||
442 | if (!ep->channels) | ||
443 | return -ENOMEM; | ||
444 | |||
445 | ep->channels[0] = NULL; /* Channel 0 is message buf. */ | ||
446 | |||
447 | /* Initialize all channels with defaults */ | ||
448 | |||
449 | for (i = 1; i <= ep->num_channels; i++) { | ||
450 | channel->wr_buffers = NULL; | ||
451 | channel->rd_buffers = NULL; | ||
452 | channel->num_wr_buffers = 0; | ||
453 | channel->num_rd_buffers = 0; | ||
454 | channel->wr_fpga_buf_idx = -1; | ||
455 | channel->wr_host_buf_idx = 0; | ||
456 | channel->wr_host_buf_pos = 0; | ||
457 | channel->wr_empty = 1; | ||
458 | channel->wr_ready = 0; | ||
459 | channel->wr_sleepy = 1; | ||
460 | channel->rd_fpga_buf_idx = 0; | ||
461 | channel->rd_host_buf_idx = 0; | ||
462 | channel->rd_host_buf_pos = 0; | ||
463 | channel->rd_full = 0; | ||
464 | channel->wr_ref_count = 0; | ||
465 | channel->rd_ref_count = 0; | ||
466 | |||
467 | spin_lock_init(&channel->wr_spinlock); | ||
468 | spin_lock_init(&channel->rd_spinlock); | ||
469 | mutex_init(&channel->wr_mutex); | ||
470 | mutex_init(&channel->rd_mutex); | ||
471 | init_waitqueue_head(&channel->rd_wait); | ||
472 | init_waitqueue_head(&channel->wr_wait); | ||
473 | init_waitqueue_head(&channel->wr_ready_wait); | ||
474 | |||
475 | INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush); | ||
476 | |||
477 | channel->endpoint = ep; | ||
478 | channel->chan_num = i; | ||
479 | |||
480 | channel->log2_element_size = 0; | ||
481 | |||
482 | ep->channels[i] = channel++; | ||
483 | } | ||
484 | |||
485 | for (entry = 0; entry < entries; entry++, chandesc += 4) { | ||
486 | struct xilly_buffer **buffers = NULL; | ||
487 | |||
488 | is_writebuf = chandesc[0] & 0x01; | ||
489 | channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7); | ||
490 | format = (chandesc[1] >> 4) & 0x03; | ||
491 | allowpartial = (chandesc[1] >> 6) & 0x01; | ||
492 | synchronous = (chandesc[1] >> 7) & 0x01; | ||
493 | bufsize = 1 << (chandesc[2] & 0x1f); | ||
494 | bufnum = 1 << (chandesc[3] & 0x0f); | ||
495 | exclusive_open = (chandesc[2] >> 7) & 0x01; | ||
496 | seekable = (chandesc[2] >> 6) & 0x01; | ||
497 | supports_nonempty = (chandesc[2] >> 5) & 0x01; | ||
498 | |||
499 | if ((channelnum > ep->num_channels) || | ||
500 | ((channelnum == 0) && !is_writebuf)) { | ||
501 | dev_err(ep->dev, | ||
502 | "IDT requests channel out of range. Aborting.\n"); | ||
503 | return -ENODEV; | ||
504 | } | ||
505 | |||
506 | channel = ep->channels[channelnum]; /* NULL for msg channel */ | ||
507 | |||
508 | if (!is_writebuf || channelnum > 0) { | ||
509 | channel->log2_element_size = ((format > 2) ? | ||
510 | 2 : format); | ||
511 | |||
512 | bytebufsize = channel->rd_buf_size = bufsize * | ||
513 | (1 << channel->log2_element_size); | ||
514 | |||
515 | buffers = devm_kcalloc(dev, bufnum, | ||
516 | sizeof(struct xilly_buffer *), | ||
517 | GFP_KERNEL); | ||
518 | if (!buffers) | ||
519 | return -ENOMEM; | ||
520 | } else { | ||
521 | bytebufsize = bufsize << 2; | ||
522 | } | ||
523 | |||
524 | if (!is_writebuf) { | ||
525 | channel->num_rd_buffers = bufnum; | ||
526 | channel->rd_allow_partial = allowpartial; | ||
527 | channel->rd_synchronous = synchronous; | ||
528 | channel->rd_exclusive_open = exclusive_open; | ||
529 | channel->seekable = seekable; | ||
530 | |||
531 | channel->rd_buffers = buffers; | ||
532 | rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers, | ||
533 | bufnum, bytebufsize); | ||
534 | } else if (channelnum > 0) { | ||
535 | channel->num_wr_buffers = bufnum; | ||
536 | |||
537 | channel->seekable = seekable; | ||
538 | channel->wr_supports_nonempty = supports_nonempty; | ||
539 | |||
540 | channel->wr_allow_partial = allowpartial; | ||
541 | channel->wr_synchronous = synchronous; | ||
542 | channel->wr_exclusive_open = exclusive_open; | ||
543 | |||
544 | channel->wr_buffers = buffers; | ||
545 | rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers, | ||
546 | bufnum, bytebufsize); | ||
547 | } else { | ||
548 | rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL, | ||
549 | bufnum, bytebufsize); | ||
550 | msg_buf_done++; | ||
551 | } | ||
552 | |||
553 | if (rc) | ||
554 | return rc; /* Pass on -ENOMEM or -ENODEV as appropriate */ | ||
555 | } | ||
556 | |||
557 | if (!msg_buf_done) { | ||
558 | dev_err(ep->dev, | ||
559 | "Corrupt IDT: No message buffer. Aborting.\n"); | ||
560 | return -ENODEV; | ||
561 | } | ||
562 | return 0; | ||
563 | } | ||
564 | |||
565 | static int xilly_scan_idt(struct xilly_endpoint *endpoint, | ||
566 | struct xilly_idt_handle *idt_handle) | ||
567 | { | ||
568 | int count = 0; | ||
569 | unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr; | ||
570 | unsigned char *end_of_idt = idt + endpoint->idtlen - 4; | ||
571 | unsigned char *scan; | ||
572 | int len; | ||
573 | |||
574 | scan = idt; | ||
575 | idt_handle->idt = idt; | ||
576 | |||
577 | scan++; /* Skip version number */ | ||
578 | |||
579 | while ((scan <= end_of_idt) && *scan) { | ||
580 | while ((scan <= end_of_idt) && *scan++) | ||
581 | /* Do nothing, just scan through the string */; | ||
582 | count++; | ||
583 | } | ||
584 | |||
585 | scan++; | ||
586 | |||
587 | if (scan > end_of_idt) { | ||
588 | dev_err(endpoint->dev, | ||
589 | "IDT device name list overflow. Aborting.\n"); | ||
590 | return -ENODEV; | ||
591 | } | ||
592 | idt_handle->chandesc = scan; | ||
593 | |||
594 | len = endpoint->idtlen - (3 + ((int) (scan - idt))); | ||
595 | |||
596 | if (len & 0x03) { | ||
597 | dev_err(endpoint->dev, | ||
598 | "Corrupt IDT device name list. Aborting.\n"); | ||
599 | return -ENODEV; | ||
600 | } | ||
601 | |||
602 | idt_handle->entries = len >> 2; | ||
603 | endpoint->num_channels = count; | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | static int xilly_obtain_idt(struct xilly_endpoint *endpoint) | ||
609 | { | ||
610 | struct xilly_channel *channel; | ||
611 | unsigned char *version; | ||
612 | long t; | ||
613 | |||
614 | channel = endpoint->channels[1]; /* This should be generated ad-hoc */ | ||
615 | |||
616 | channel->wr_sleepy = 1; | ||
617 | |||
618 | iowrite32(1 | | ||
619 | (3 << 24), /* Opcode 3 for channel 0 = Send IDT */ | ||
620 | endpoint->registers + fpga_buf_ctrl_reg); | ||
621 | |||
622 | t = wait_event_interruptible_timeout(channel->wr_wait, | ||
623 | (!channel->wr_sleepy), | ||
624 | XILLY_TIMEOUT); | ||
625 | |||
626 | if (t <= 0) { | ||
627 | dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n"); | ||
628 | |||
629 | if (endpoint->fatal_error) | ||
630 | return -EIO; | ||
631 | |||
632 | return -ENODEV; | ||
633 | } | ||
634 | |||
635 | endpoint->ephw->hw_sync_sgl_for_cpu( | ||
636 | channel->endpoint, | ||
637 | channel->wr_buffers[0]->dma_addr, | ||
638 | channel->wr_buf_size, | ||
639 | DMA_FROM_DEVICE); | ||
640 | |||
641 | if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) { | ||
642 | dev_err(endpoint->dev, | ||
643 | "IDT length mismatch (%d != %d). Aborting.\n", | ||
644 | channel->wr_buffers[0]->end_offset, endpoint->idtlen); | ||
645 | return -ENODEV; | ||
646 | } | ||
647 | |||
648 | if (crc32_le(~0, channel->wr_buffers[0]->addr, | ||
649 | endpoint->idtlen+1) != 0) { | ||
650 | dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n"); | ||
651 | return -ENODEV; | ||
652 | } | ||
653 | |||
654 | version = channel->wr_buffers[0]->addr; | ||
655 | |||
656 | /* Check version number. Accept anything up to and including 0x82 for now. */ | ||
657 | if (*version > 0x82) { | ||
658 | dev_err(endpoint->dev, | ||
659 | "No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgarde. Aborting.\n", | ||
660 | *version); | ||
661 | return -ENODEV; | ||
662 | } | ||
663 | |||
664 | return 0; | ||
665 | } | ||
666 | |||
667 | static ssize_t xillybus_read(struct file *filp, char __user *userbuf, | ||
668 | size_t count, loff_t *f_pos) | ||
669 | { | ||
670 | ssize_t rc; | ||
671 | unsigned long flags; | ||
672 | int bytes_done = 0; | ||
673 | int no_time_left = 0; | ||
674 | long deadline, left_to_sleep; | ||
675 | struct xilly_channel *channel = filp->private_data; | ||
676 | |||
677 | int empty, reached_eof, exhausted, ready; | ||
678 | /* Initializations are there only to silence warnings */ | ||
679 | |||
680 | int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0; | ||
681 | int waiting_bufidx; | ||
682 | |||
683 | if (channel->endpoint->fatal_error) | ||
684 | return -EIO; | ||
685 | |||
686 | deadline = jiffies + 1 + XILLY_RX_TIMEOUT; | ||
687 | |||
688 | rc = mutex_lock_interruptible(&channel->wr_mutex); | ||
689 | if (rc) | ||
690 | return rc; | ||
691 | |||
692 | while (1) { /* Note that we may drop mutex within this loop */ | ||
693 | int bytes_to_do = count - bytes_done; | ||
694 | |||
695 | spin_lock_irqsave(&channel->wr_spinlock, flags); | ||
696 | |||
697 | empty = channel->wr_empty; | ||
698 | ready = !empty || channel->wr_ready; | ||
699 | |||
700 | if (!empty) { | ||
701 | bufidx = channel->wr_host_buf_idx; | ||
702 | bufpos = channel->wr_host_buf_pos; | ||
703 | howmany = ((channel->wr_buffers[bufidx]->end_offset | ||
704 | + 1) << channel->log2_element_size) | ||
705 | - bufpos; | ||
706 | |||
707 | /* Update wr_host_* to its post-operation state */ | ||
708 | if (howmany > bytes_to_do) { | ||
709 | bufferdone = 0; | ||
710 | |||
711 | howmany = bytes_to_do; | ||
712 | channel->wr_host_buf_pos += howmany; | ||
713 | } else { | ||
714 | bufferdone = 1; | ||
715 | |||
716 | channel->wr_host_buf_pos = 0; | ||
717 | |||
718 | if (bufidx == channel->wr_fpga_buf_idx) { | ||
719 | channel->wr_empty = 1; | ||
720 | channel->wr_sleepy = 1; | ||
721 | channel->wr_ready = 0; | ||
722 | } | ||
723 | |||
724 | if (bufidx >= (channel->num_wr_buffers - 1)) | ||
725 | channel->wr_host_buf_idx = 0; | ||
726 | else | ||
727 | channel->wr_host_buf_idx++; | ||
728 | } | ||
729 | } | ||
730 | |||
731 | /* | ||
732 | * Marking our situation after the possible changes above, | ||
733 | * for use after releasing the spinlock. | ||
734 | * | ||
735 | * empty = empty before change | ||
736 | * exhausted = empty after possible change | ||
737 | */ | ||
738 | |||
739 | reached_eof = channel->wr_empty && | ||
740 | (channel->wr_host_buf_idx == channel->wr_eof); | ||
741 | channel->wr_hangup = reached_eof; | ||
742 | exhausted = channel->wr_empty; | ||
743 | waiting_bufidx = channel->wr_host_buf_idx; | ||
744 | |||
745 | spin_unlock_irqrestore(&channel->wr_spinlock, flags); | ||
746 | |||
747 | if (!empty) { /* Go on, now without the spinlock */ | ||
748 | |||
749 | if (bufpos == 0) /* Position zero means it's virgin */ | ||
750 | channel->endpoint->ephw->hw_sync_sgl_for_cpu( | ||
751 | channel->endpoint, | ||
752 | channel->wr_buffers[bufidx]->dma_addr, | ||
753 | channel->wr_buf_size, | ||
754 | DMA_FROM_DEVICE); | ||
755 | |||
756 | if (copy_to_user( | ||
757 | userbuf, | ||
758 | channel->wr_buffers[bufidx]->addr | ||
759 | + bufpos, howmany)) | ||
760 | rc = -EFAULT; | ||
761 | |||
762 | userbuf += howmany; | ||
763 | bytes_done += howmany; | ||
764 | |||
765 | if (bufferdone) { | ||
766 | channel->endpoint->ephw->hw_sync_sgl_for_device( | ||
767 | channel->endpoint, | ||
768 | channel->wr_buffers[bufidx]->dma_addr, | ||
769 | channel->wr_buf_size, | ||
770 | DMA_FROM_DEVICE); | ||
771 | |||
772 | /* | ||
773 | * Tell the FPGA the buffer is done with. It's an | ||
774 | * atomic operation to the FPGA, so what | ||
775 | * happens with other channels doesn't matter, | ||
776 | * and this particular channel is protected with | ||
777 | * the channel-specific mutex. | ||
778 | */ | ||
779 | |||
780 | iowrite32(1 | (channel->chan_num << 1) | | ||
781 | (bufidx << 12), | ||
782 | channel->endpoint->registers + | ||
783 | fpga_buf_ctrl_reg); | ||
784 | } | ||
785 | |||
786 | if (rc) { | ||
787 | mutex_unlock(&channel->wr_mutex); | ||
788 | return rc; | ||
789 | } | ||
790 | } | ||
791 | |||
792 | /* This includes a zero-count return = EOF */ | ||
793 | if ((bytes_done >= count) || reached_eof) | ||
794 | break; | ||
795 | |||
796 | if (!exhausted) | ||
797 | continue; /* More in RAM buffer(s)? Just go on. */ | ||
798 | |||
799 | if ((bytes_done > 0) && | ||
800 | (no_time_left || | ||
801 | (channel->wr_synchronous && channel->wr_allow_partial))) | ||
802 | break; | ||
803 | |||
804 | /* | ||
805 | * Nonblocking read: The "ready" flag tells us that the FPGA | ||
806 | * has data to send. In non-blocking mode, if it isn't on, | ||
807 | * just return. But if it is, we jump directly to the point | ||
808 | * where we ask for the FPGA to send all it has, and wait | ||
809 | * until that data arrives. So in a sense, we *do* block in | ||
810 | * nonblocking mode, but only for a very short time. | ||
811 | */ | ||
812 | |||
813 | if (!no_time_left && (filp->f_flags & O_NONBLOCK)) { | ||
814 | if (bytes_done > 0) | ||
815 | break; | ||
816 | |||
817 | if (ready) | ||
818 | goto desperate; | ||
819 | |||
820 | rc = -EAGAIN; | ||
821 | break; | ||
822 | } | ||
823 | |||
824 | if (!no_time_left || (bytes_done > 0)) { | ||
825 | /* | ||
826 | * Note that in case of an element-misaligned read | ||
827 | * request, offsetlimit will include the last element, | ||
828 | * which will be partially read from. | ||
829 | */ | ||
830 | int offsetlimit = ((count - bytes_done) - 1) >> | ||
831 | channel->log2_element_size; | ||
832 | int buf_elements = channel->wr_buf_size >> | ||
833 | channel->log2_element_size; | ||
834 | |||
835 | /* | ||
836 | * In synchronous mode, always send an offset limit. | ||
837 | * Just don't send a value too big. | ||
838 | */ | ||
839 | |||
840 | if (channel->wr_synchronous) { | ||
841 | /* Don't request more than one buffer */ | ||
842 | if (channel->wr_allow_partial && | ||
843 | (offsetlimit >= buf_elements)) | ||
844 | offsetlimit = buf_elements - 1; | ||
845 | |||
846 | /* Don't request more than all buffers */ | ||
847 | if (!channel->wr_allow_partial && | ||
848 | (offsetlimit >= | ||
849 | (buf_elements * channel->num_wr_buffers))) | ||
850 | offsetlimit = buf_elements * | ||
851 | channel->num_wr_buffers - 1; | ||
852 | } | ||
853 | |||
854 | /* | ||
855 | * In asynchronous mode, force early flush of a buffer | ||
856 | * only if that will allow returning a full count. The | ||
857 | * "offsetlimit < ( ... )" rather than "<=" excludes | ||
858 | * requesting a full buffer, which would obviously | ||
859 | * cause a buffer transmission anyhow | ||
860 | */ | ||
861 | |||
862 | if (channel->wr_synchronous || | ||
863 | (offsetlimit < (buf_elements - 1))) { | ||
864 | mutex_lock(&channel->endpoint->register_mutex); | ||
865 | |||
866 | iowrite32(offsetlimit, | ||
867 | channel->endpoint->registers + | ||
868 | fpga_buf_offset_reg); | ||
869 | |||
870 | iowrite32(1 | (channel->chan_num << 1) | | ||
871 | (2 << 24) | /* 2 = offset limit */ | ||
872 | (waiting_bufidx << 12), | ||
873 | channel->endpoint->registers + | ||
874 | fpga_buf_ctrl_reg); | ||
875 | |||
876 | mutex_unlock(&channel->endpoint-> | ||
877 | register_mutex); | ||
878 | } | ||
879 | } | ||
880 | |||
881 | /* | ||
882 | * If partial completion is disallowed, there is no point in | ||
883 | * timeout sleeping. Neither if no_time_left is set and | ||
884 | * there's no data. | ||
885 | */ | ||
886 | |||
887 | if (!channel->wr_allow_partial || | ||
888 | (no_time_left && (bytes_done == 0))) { | ||
889 | /* | ||
890 | * This do-loop will run more than once if another | ||
891 | * thread reasserted wr_sleepy before we got the mutex | ||
892 | * back, so we try again. | ||
893 | */ | ||
894 | |||
895 | do { | ||
896 | mutex_unlock(&channel->wr_mutex); | ||
897 | |||
898 | if (wait_event_interruptible( | ||
899 | channel->wr_wait, | ||
900 | (!channel->wr_sleepy))) | ||
901 | goto interrupted; | ||
902 | |||
903 | if (mutex_lock_interruptible( | ||
904 | &channel->wr_mutex)) | ||
905 | goto interrupted; | ||
906 | } while (channel->wr_sleepy); | ||
907 | |||
908 | continue; | ||
909 | |||
910 | interrupted: /* Mutex is not held if got here */ | ||
911 | if (channel->endpoint->fatal_error) | ||
912 | return -EIO; | ||
913 | if (bytes_done) | ||
914 | return bytes_done; | ||
915 | if (filp->f_flags & O_NONBLOCK) | ||
916 | return -EAGAIN; /* Don't admit snoozing */ | ||
917 | return -EINTR; | ||
918 | } | ||
919 | |||
920 | left_to_sleep = deadline - ((long) jiffies); | ||
921 | |||
922 | /* | ||
923 | * If our time is out, skip the waiting. We may miss wr_sleepy | ||
924 | * being deasserted but hey, almost missing the train is like | ||
925 | * missing it. | ||
926 | */ | ||
927 | |||
928 | if (left_to_sleep > 0) { | ||
929 | left_to_sleep = | ||
930 | wait_event_interruptible_timeout( | ||
931 | channel->wr_wait, | ||
932 | (!channel->wr_sleepy), | ||
933 | left_to_sleep); | ||
934 | |||
935 | if (left_to_sleep > 0) /* wr_sleepy deasserted */ | ||
936 | continue; | ||
937 | |||
938 | if (left_to_sleep < 0) { /* Interrupt */ | ||
939 | mutex_unlock(&channel->wr_mutex); | ||
940 | if (channel->endpoint->fatal_error) | ||
941 | return -EIO; | ||
942 | if (bytes_done) | ||
943 | return bytes_done; | ||
944 | return -EINTR; | ||
945 | } | ||
946 | } | ||
947 | |||
948 | desperate: | ||
949 | no_time_left = 1; /* We're out of sleeping time. Desperate! */ | ||
950 | |||
951 | if (bytes_done == 0) { | ||
952 | /* | ||
953 | * Reaching here means that we allow partial return, | ||
954 | * that we've run out of time, and that we have | ||
955 | * nothing to return. | ||
956 | * So tell the FPGA to send anything it has or gets. | ||
957 | */ | ||
958 | |||
959 | iowrite32(1 | (channel->chan_num << 1) | | ||
960 | (3 << 24) | /* Opcode 3, flush it all! */ | ||
961 | (waiting_bufidx << 12), | ||
962 | channel->endpoint->registers + | ||
963 | fpga_buf_ctrl_reg); | ||
964 | } | ||
965 | |||
966 | /* | ||
967 | * Reaching here means that we *do* have data in the buffer, | ||
968 | * but the "partial" flag disallows returning less than | ||
969 | * required. And we don't have enough. So loop again, | ||
970 | * which is likely to end up blocking indefinitely until | ||
971 | * enough data has arrived. | ||
972 | */ | ||
973 | } | ||
974 | |||
975 | mutex_unlock(&channel->wr_mutex); | ||
976 | |||
977 | if (channel->endpoint->fatal_error) | ||
978 | return -EIO; | ||
979 | |||
980 | if (rc) | ||
981 | return rc; | ||
982 | |||
983 | return bytes_done; | ||
984 | } | ||
985 | |||
986 | /* | ||
987 | * The timeout argument takes values as follows: | ||
988 | * >0 : Flush with timeout | ||
989 | * ==0 : Flush, and wait indefinitely for the flush to complete | ||
990 | * <0 : Autoflush: Flush only if there's a single buffer occupied | ||
991 | */ | ||
992 | |||
993 | static int xillybus_myflush(struct xilly_channel *channel, long timeout) | ||
994 | { | ||
995 | int rc; | ||
996 | unsigned long flags; | ||
997 | |||
998 | int end_offset_plus1; | ||
999 | int bufidx, bufidx_minus1; | ||
1000 | int i; | ||
1001 | int empty; | ||
1002 | int new_rd_host_buf_pos; | ||
1003 | |||
1004 | if (channel->endpoint->fatal_error) | ||
1005 | return -EIO; | ||
1006 | rc = mutex_lock_interruptible(&channel->rd_mutex); | ||
1007 | if (rc) | ||
1008 | return rc; | ||
1009 | |||
1010 | /* | ||
1011 | * Don't flush a closed channel. This can happen when the queued | ||
1012 | * autoflush work item fires off after the file has closed. This is not | ||
1013 | * an error, just something to dismiss. | ||
1014 | */ | ||
1015 | |||
1016 | if (!channel->rd_ref_count) | ||
1017 | goto done; | ||
1018 | |||
1019 | bufidx = channel->rd_host_buf_idx; | ||
1020 | |||
1021 | bufidx_minus1 = (bufidx == 0) ? | ||
1022 | channel->num_rd_buffers - 1 : | ||
1023 | bufidx - 1; | ||
1024 | |||
1025 | end_offset_plus1 = channel->rd_host_buf_pos >> | ||
1026 | channel->log2_element_size; | ||
1027 | |||
1028 | new_rd_host_buf_pos = channel->rd_host_buf_pos - | ||
1029 | (end_offset_plus1 << channel->log2_element_size); | ||
1030 | |||
1031 | /* Submit the current buffer if it's nonempty */ | ||
1032 | if (end_offset_plus1) { | ||
1033 | unsigned char *tail = channel->rd_buffers[bufidx]->addr + | ||
1034 | (end_offset_plus1 << channel->log2_element_size); | ||
1035 | |||
1036 | /* Copy unflushed data, so we can put it in next buffer */ | ||
1037 | for (i = 0; i < new_rd_host_buf_pos; i++) | ||
1038 | channel->rd_leftovers[i] = *tail++; | ||
1039 | |||
1040 | spin_lock_irqsave(&channel->rd_spinlock, flags); | ||
1041 | |||
1042 | /* Autoflush only if a single buffer is occupied */ | ||
1043 | |||
1044 | if ((timeout < 0) && | ||
1045 | (channel->rd_full || | ||
1046 | (bufidx_minus1 != channel->rd_fpga_buf_idx))) { | ||
1047 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | ||
1048 | /* | ||
1049 | * A new work item may be queued by the ISR exactly | ||
1050 | * now, since the execution of a work item allows the | ||
1051 | * queuing of a new one while it's running. | ||
1052 | */ | ||
1053 | goto done; | ||
1054 | } | ||
1055 | |||
1056 | /* The 4th element is never needed for data, so it's a flag */ | ||
1057 | channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0); | ||
1058 | |||
1059 | /* Set up rd_full to reflect a certain moment's state */ | ||
1060 | |||
1061 | if (bufidx == channel->rd_fpga_buf_idx) | ||
1062 | channel->rd_full = 1; | ||
1063 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | ||
1064 | |||
1065 | if (bufidx >= (channel->num_rd_buffers - 1)) | ||
1066 | channel->rd_host_buf_idx = 0; | ||
1067 | else | ||
1068 | channel->rd_host_buf_idx++; | ||
1069 | |||
1070 | channel->endpoint->ephw->hw_sync_sgl_for_device( | ||
1071 | channel->endpoint, | ||
1072 | channel->rd_buffers[bufidx]->dma_addr, | ||
1073 | channel->rd_buf_size, | ||
1074 | DMA_TO_DEVICE); | ||
1075 | |||
1076 | mutex_lock(&channel->endpoint->register_mutex); | ||
1077 | |||
1078 | iowrite32(end_offset_plus1 - 1, | ||
1079 | channel->endpoint->registers + fpga_buf_offset_reg); | ||
1080 | |||
1081 | iowrite32((channel->chan_num << 1) | /* Channel ID */ | ||
1082 | (2 << 24) | /* Opcode 2, submit buffer */ | ||
1083 | (bufidx << 12), | ||
1084 | channel->endpoint->registers + fpga_buf_ctrl_reg); | ||
1085 | |||
1086 | mutex_unlock(&channel->endpoint->register_mutex); | ||
1087 | } else if (bufidx == 0) { | ||
1088 | bufidx = channel->num_rd_buffers - 1; | ||
1089 | } else { | ||
1090 | bufidx--; | ||
1091 | } | ||
1092 | |||
1093 | channel->rd_host_buf_pos = new_rd_host_buf_pos; | ||
1094 | |||
1095 | if (timeout < 0) | ||
1096 | goto done; /* Autoflush */ | ||
1097 | |||
1098 | /* | ||
1099 | * bufidx is now the last buffer written to (or equal to | ||
1100 | * rd_fpga_buf_idx if buffer was never written to), and | ||
1101 | * channel->rd_host_buf_idx the one after it. | ||
1102 | * | ||
1103 | * If bufidx == channel->rd_fpga_buf_idx we're either empty or full. | ||
1104 | */ | ||
1105 | |||
1106 | while (1) { /* Loop waiting for draining of buffers */ | ||
1107 | spin_lock_irqsave(&channel->rd_spinlock, flags); | ||
1108 | |||
1109 | if (bufidx != channel->rd_fpga_buf_idx) | ||
1110 | channel->rd_full = 1; /* | ||
1111 | * Not really full, | ||
1112 | * but needs waiting. | ||
1113 | */ | ||
1114 | |||
1115 | empty = !channel->rd_full; | ||
1116 | |||
1117 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | ||
1118 | |||
1119 | if (empty) | ||
1120 | break; | ||
1121 | |||
1122 | /* | ||
1123 | * Indefinite sleep with mutex taken. With data waiting for | ||
1124 | * flushing, the user should not be surprised if open() for write | ||
1125 | * sleeps. | ||
1126 | */ | ||
1127 | if (timeout == 0) | ||
1128 | wait_event_interruptible(channel->rd_wait, | ||
1129 | (!channel->rd_full)); | ||
1130 | |||
1131 | else if (wait_event_interruptible_timeout( | ||
1132 | channel->rd_wait, | ||
1133 | (!channel->rd_full), | ||
1134 | timeout) == 0) { | ||
1135 | dev_warn(channel->endpoint->dev, | ||
1136 | "Timed out while flushing. Output data may be lost.\n"); | ||
1137 | |||
1138 | rc = -ETIMEDOUT; | ||
1139 | break; | ||
1140 | } | ||
1141 | |||
1142 | if (channel->rd_full) { | ||
1143 | rc = -EINTR; | ||
1144 | break; | ||
1145 | } | ||
1146 | } | ||
1147 | |||
1148 | done: | ||
1149 | mutex_unlock(&channel->rd_mutex); | ||
1150 | |||
1151 | if (channel->endpoint->fatal_error) | ||
1152 | return -EIO; | ||
1153 | |||
1154 | return rc; | ||
1155 | } | ||
1156 | |||
1157 | static int xillybus_flush(struct file *filp, fl_owner_t id) | ||
1158 | { | ||
1159 | if (!(filp->f_mode & FMODE_WRITE)) | ||
1160 | return 0; | ||
1161 | |||
1162 | return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */ | ||
1163 | } | ||
1164 | |||
1165 | static void xillybus_autoflush(struct work_struct *work) | ||
1166 | { | ||
1167 | struct delayed_work *workitem = container_of( | ||
1168 | work, struct delayed_work, work); | ||
1169 | struct xilly_channel *channel = container_of( | ||
1170 | workitem, struct xilly_channel, rd_workitem); | ||
1171 | int rc; | ||
1172 | |||
1173 | rc = xillybus_myflush(channel, -1); | ||
1174 | if (rc == -EINTR) | ||
1175 | dev_warn(channel->endpoint->dev, | ||
1176 | "Autoflush failed because work queue thread got a signal.\n"); | ||
1177 | else if (rc) | ||
1178 | dev_err(channel->endpoint->dev, | ||
1179 | "Autoflush failed under weird circumstances.\n"); | ||
1180 | } | ||
1181 | |||
1182 | static ssize_t xillybus_write(struct file *filp, const char __user *userbuf, | ||
1183 | size_t count, loff_t *f_pos) | ||
1184 | { | ||
1185 | ssize_t rc; | ||
1186 | unsigned long flags; | ||
1187 | int bytes_done = 0; | ||
1188 | struct xilly_channel *channel = filp->private_data; | ||
1189 | |||
1190 | int full, exhausted; | ||
1191 | /* Initializations are there only to silence warnings */ | ||
1192 | |||
1193 | int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0; | ||
1194 | int end_offset_plus1 = 0; | ||
1195 | |||
1196 | if (channel->endpoint->fatal_error) | ||
1197 | return -EIO; | ||
1198 | |||
1199 | rc = mutex_lock_interruptible(&channel->rd_mutex); | ||
1200 | if (rc) | ||
1201 | return rc; | ||
1202 | |||
1203 | while (1) { | ||
1204 | int bytes_to_do = count - bytes_done; | ||
1205 | |||
1206 | spin_lock_irqsave(&channel->rd_spinlock, flags); | ||
1207 | |||
1208 | full = channel->rd_full; | ||
1209 | |||
1210 | if (!full) { | ||
1211 | bufidx = channel->rd_host_buf_idx; | ||
1212 | bufpos = channel->rd_host_buf_pos; | ||
1213 | howmany = channel->rd_buf_size - bufpos; | ||
1214 | |||
1215 | /* | ||
1216 | * Update rd_host_* to its state after this operation. | ||
1217 | * count=0 means committing the buffer immediately, | ||
1218 | * which is like flushing, but doesn't necessarily block. | ||
1219 | */ | ||
1220 | |||
1221 | if ((howmany > bytes_to_do) && | ||
1222 | (count || | ||
1223 | ((bufpos >> channel->log2_element_size) == 0))) { | ||
1224 | bufferdone = 0; | ||
1225 | |||
1226 | howmany = bytes_to_do; | ||
1227 | channel->rd_host_buf_pos += howmany; | ||
1228 | } else { | ||
1229 | bufferdone = 1; | ||
1230 | |||
1231 | if (count) { | ||
1232 | end_offset_plus1 = | ||
1233 | channel->rd_buf_size >> | ||
1234 | channel->log2_element_size; | ||
1235 | channel->rd_host_buf_pos = 0; | ||
1236 | } else { | ||
1237 | unsigned char *tail; | ||
1238 | int i; | ||
1239 | |||
1240 | end_offset_plus1 = bufpos >> | ||
1241 | channel->log2_element_size; | ||
1242 | |||
1243 | channel->rd_host_buf_pos -= | ||
1244 | end_offset_plus1 << | ||
1245 | channel->log2_element_size; | ||
1246 | |||
1247 | tail = channel-> | ||
1248 | rd_buffers[bufidx]->addr + | ||
1249 | (end_offset_plus1 << | ||
1250 | channel->log2_element_size); | ||
1251 | |||
1252 | for (i = 0; | ||
1253 | i < channel->rd_host_buf_pos; | ||
1254 | i++) | ||
1255 | channel->rd_leftovers[i] = | ||
1256 | *tail++; | ||
1257 | } | ||
1258 | |||
1259 | if (bufidx == channel->rd_fpga_buf_idx) | ||
1260 | channel->rd_full = 1; | ||
1261 | |||
1262 | if (bufidx >= (channel->num_rd_buffers - 1)) | ||
1263 | channel->rd_host_buf_idx = 0; | ||
1264 | else | ||
1265 | channel->rd_host_buf_idx++; | ||
1266 | } | ||
1267 | } | ||
1268 | |||
1269 | /* | ||
1270 | * Marking our situation after the possible changes above, | ||
1271 | * for use after releasing the spinlock. | ||
1272 | * | ||
1273 | * full = full before change | ||
1274 | * exhausted = full after possible change | ||
1275 | */ | ||
1276 | |||
1277 | exhausted = channel->rd_full; | ||
1278 | |||
1279 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | ||
1280 | |||
1281 | if (!full) { /* Go on, now without the spinlock */ | ||
1282 | unsigned char *head = | ||
1283 | channel->rd_buffers[bufidx]->addr; | ||
1284 | int i; | ||
1285 | |||
1286 | if ((bufpos == 0) || /* Zero means it's virgin */ | ||
1287 | (channel->rd_leftovers[3] != 0)) { | ||
1288 | channel->endpoint->ephw->hw_sync_sgl_for_cpu( | ||
1289 | channel->endpoint, | ||
1290 | channel->rd_buffers[bufidx]->dma_addr, | ||
1291 | channel->rd_buf_size, | ||
1292 | DMA_TO_DEVICE); | ||
1293 | |||
1294 | /* Virgin, but leftovers are due */ | ||
1295 | for (i = 0; i < bufpos; i++) | ||
1296 | *head++ = channel->rd_leftovers[i]; | ||
1297 | |||
1298 | channel->rd_leftovers[3] = 0; /* Clear flag */ | ||
1299 | } | ||
1300 | |||
1301 | if (copy_from_user( | ||
1302 | channel->rd_buffers[bufidx]->addr + bufpos, | ||
1303 | userbuf, howmany)) | ||
1304 | rc = -EFAULT; | ||
1305 | |||
1306 | userbuf += howmany; | ||
1307 | bytes_done += howmany; | ||
1308 | |||
1309 | if (bufferdone) { | ||
1310 | channel->endpoint->ephw->hw_sync_sgl_for_device( | ||
1311 | channel->endpoint, | ||
1312 | channel->rd_buffers[bufidx]->dma_addr, | ||
1313 | channel->rd_buf_size, | ||
1314 | DMA_TO_DEVICE); | ||
1315 | |||
1316 | mutex_lock(&channel->endpoint->register_mutex); | ||
1317 | |||
1318 | iowrite32(end_offset_plus1 - 1, | ||
1319 | channel->endpoint->registers + | ||
1320 | fpga_buf_offset_reg); | ||
1321 | |||
1322 | iowrite32((channel->chan_num << 1) | | ||
1323 | (2 << 24) | /* 2 = submit buffer */ | ||
1324 | (bufidx << 12), | ||
1325 | channel->endpoint->registers + | ||
1326 | fpga_buf_ctrl_reg); | ||
1327 | |||
1328 | mutex_unlock(&channel->endpoint-> | ||
1329 | register_mutex); | ||
1330 | |||
1331 | channel->rd_leftovers[3] = | ||
1332 | (channel->rd_host_buf_pos != 0); | ||
1333 | } | ||
1334 | |||
1335 | if (rc) { | ||
1336 | mutex_unlock(&channel->rd_mutex); | ||
1337 | |||
1338 | if (channel->endpoint->fatal_error) | ||
1339 | return -EIO; | ||
1340 | |||
1341 | if (!channel->rd_synchronous) | ||
1342 | queue_delayed_work( | ||
1343 | xillybus_wq, | ||
1344 | &channel->rd_workitem, | ||
1345 | XILLY_RX_TIMEOUT); | ||
1346 | |||
1347 | return rc; | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1351 | if (bytes_done >= count) | ||
1352 | break; | ||
1353 | |||
1354 | if (!exhausted) | ||
1355 | continue; /* If there's more space, just go on */ | ||
1356 | |||
1357 | if ((bytes_done > 0) && channel->rd_allow_partial) | ||
1358 | break; | ||
1359 | |||
1360 | /* | ||
1361 | * Indefinite sleep with mutex taken. With data waiting for | ||
1362 | * flushing, the user should not be surprised if open() for write | ||
1363 | * sleeps. | ||
1364 | */ | ||
1365 | |||
1366 | if (filp->f_flags & O_NONBLOCK) { | ||
1367 | rc = -EAGAIN; | ||
1368 | break; | ||
1369 | } | ||
1370 | |||
1371 | if (wait_event_interruptible(channel->rd_wait, | ||
1372 | (!channel->rd_full))) { | ||
1373 | mutex_unlock(&channel->rd_mutex); | ||
1374 | |||
1375 | if (channel->endpoint->fatal_error) | ||
1376 | return -EIO; | ||
1377 | |||
1378 | if (bytes_done) | ||
1379 | return bytes_done; | ||
1380 | return -EINTR; | ||
1381 | } | ||
1382 | } | ||
1383 | |||
1384 | mutex_unlock(&channel->rd_mutex); | ||
1385 | |||
1386 | if (!channel->rd_synchronous) | ||
1387 | queue_delayed_work(xillybus_wq, | ||
1388 | &channel->rd_workitem, | ||
1389 | XILLY_RX_TIMEOUT); | ||
1390 | |||
1391 | if (channel->endpoint->fatal_error) | ||
1392 | return -EIO; | ||
1393 | |||
1394 | if (rc) | ||
1395 | return rc; | ||
1396 | |||
1397 | if ((channel->rd_synchronous) && (bytes_done > 0)) { | ||
1398 | rc = xillybus_myflush(filp->private_data, 0); /* No timeout */ | ||
1399 | |||
1400 | if (rc && (rc != -EINTR)) | ||
1401 | return rc; | ||
1402 | } | ||
1403 | |||
1404 | return bytes_done; | ||
1405 | } | ||
1406 | |||
1407 | static int xillybus_open(struct inode *inode, struct file *filp) | ||
1408 | { | ||
1409 | int rc = 0; | ||
1410 | unsigned long flags; | ||
1411 | int minor = iminor(inode); | ||
1412 | int major = imajor(inode); | ||
1413 | struct xilly_endpoint *ep_iter, *endpoint = NULL; | ||
1414 | struct xilly_channel *channel; | ||
1415 | |||
1416 | mutex_lock(&ep_list_lock); | ||
1417 | |||
1418 | list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) { | ||
1419 | if ((ep_iter->major == major) && | ||
1420 | (minor >= ep_iter->lowest_minor) && | ||
1421 | (minor < (ep_iter->lowest_minor + | ||
1422 | ep_iter->num_channels))) { | ||
1423 | endpoint = ep_iter; | ||
1424 | break; | ||
1425 | } | ||
1426 | } | ||
1427 | mutex_unlock(&ep_list_lock); | ||
1428 | |||
1429 | if (!endpoint) { | ||
1430 | pr_err("xillybus: open() failed to find a device for major=%d and minor=%d\n", | ||
1431 | major, minor); | ||
1432 | return -ENODEV; | ||
1433 | } | ||
1434 | |||
1435 | if (endpoint->fatal_error) | ||
1436 | return -EIO; | ||
1437 | |||
1438 | channel = endpoint->channels[1 + minor - endpoint->lowest_minor]; | ||
1439 | filp->private_data = channel; | ||
1440 | |||
1441 | /* | ||
1442 | * It gets complicated because: | ||
1443 | * 1. We don't want to take a mutex we don't have to | ||
1444 | * 2. We don't want to open one direction if the other will fail. | ||
1445 | */ | ||
1446 | |||
1447 | if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers)) | ||
1448 | return -ENODEV; | ||
1449 | |||
1450 | if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers)) | ||
1451 | return -ENODEV; | ||
1452 | |||
1453 | if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) && | ||
1454 | (channel->wr_synchronous || !channel->wr_allow_partial || | ||
1455 | !channel->wr_supports_nonempty)) { | ||
1456 | dev_err(endpoint->dev, | ||
1457 | "open() failed: O_NONBLOCK not allowed for read on this device\n"); | ||
1458 | return -ENODEV; | ||
1459 | } | ||
1460 | |||
1461 | if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) && | ||
1462 | (channel->rd_synchronous || !channel->rd_allow_partial)) { | ||
1463 | dev_err(endpoint->dev, | ||
1464 | "open() failed: O_NONBLOCK not allowed for write on this device\n"); | ||
1465 | return -ENODEV; | ||
1466 | } | ||
1467 | |||
1468 | /* | ||
1469 | * Note: open() may block on getting mutexes despite O_NONBLOCK. | ||
1470 | * This shouldn't occur normally, since multiple opens of the same | ||
1471 | * device file are almost always prohibited anyhow | ||
1472 | * (*_exclusive_open is normally set in real-life systems). | ||
1473 | */ | ||
1474 | |||
1475 | if (filp->f_mode & FMODE_READ) { | ||
1476 | rc = mutex_lock_interruptible(&channel->wr_mutex); | ||
1477 | if (rc) | ||
1478 | return rc; | ||
1479 | } | ||
1480 | |||
1481 | if (filp->f_mode & FMODE_WRITE) { | ||
1482 | rc = mutex_lock_interruptible(&channel->rd_mutex); | ||
1483 | if (rc) | ||
1484 | goto unlock_wr; | ||
1485 | } | ||
1486 | |||
1487 | if ((filp->f_mode & FMODE_READ) && | ||
1488 | (channel->wr_ref_count != 0) && | ||
1489 | (channel->wr_exclusive_open)) { | ||
1490 | rc = -EBUSY; | ||
1491 | goto unlock; | ||
1492 | } | ||
1493 | |||
1494 | if ((filp->f_mode & FMODE_WRITE) && | ||
1495 | (channel->rd_ref_count != 0) && | ||
1496 | (channel->rd_exclusive_open)) { | ||
1497 | rc = -EBUSY; | ||
1498 | goto unlock; | ||
1499 | } | ||
1500 | |||
1501 | if (filp->f_mode & FMODE_READ) { | ||
1502 | if (channel->wr_ref_count == 0) { /* First open of file */ | ||
1503 | /* Move the host to first buffer */ | ||
1504 | spin_lock_irqsave(&channel->wr_spinlock, flags); | ||
1505 | channel->wr_host_buf_idx = 0; | ||
1506 | channel->wr_host_buf_pos = 0; | ||
1507 | channel->wr_fpga_buf_idx = -1; | ||
1508 | channel->wr_empty = 1; | ||
1509 | channel->wr_ready = 0; | ||
1510 | channel->wr_sleepy = 1; | ||
1511 | channel->wr_eof = -1; | ||
1512 | channel->wr_hangup = 0; | ||
1513 | |||
1514 | spin_unlock_irqrestore(&channel->wr_spinlock, flags); | ||
1515 | |||
1516 | iowrite32(1 | (channel->chan_num << 1) | | ||
1517 | (4 << 24) | /* Opcode 4, open channel */ | ||
1518 | ((channel->wr_synchronous & 1) << 23), | ||
1519 | channel->endpoint->registers + | ||
1520 | fpga_buf_ctrl_reg); | ||
1521 | } | ||
1522 | |||
1523 | channel->wr_ref_count++; | ||
1524 | } | ||
1525 | |||
1526 | if (filp->f_mode & FMODE_WRITE) { | ||
1527 | if (channel->rd_ref_count == 0) { /* First open of file */ | ||
1528 | /* Move the host to first buffer */ | ||
1529 | spin_lock_irqsave(&channel->rd_spinlock, flags); | ||
1530 | channel->rd_host_buf_idx = 0; | ||
1531 | channel->rd_host_buf_pos = 0; | ||
1532 | channel->rd_leftovers[3] = 0; /* No leftovers. */ | ||
1533 | channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1; | ||
1534 | channel->rd_full = 0; | ||
1535 | |||
1536 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | ||
1537 | |||
1538 | iowrite32((channel->chan_num << 1) | | ||
1539 | (4 << 24), /* Opcode 4, open channel */ | ||
1540 | channel->endpoint->registers + | ||
1541 | fpga_buf_ctrl_reg); | ||
1542 | } | ||
1543 | |||
1544 | channel->rd_ref_count++; | ||
1545 | } | ||
1546 | |||
1547 | unlock: | ||
1548 | if (filp->f_mode & FMODE_WRITE) | ||
1549 | mutex_unlock(&channel->rd_mutex); | ||
1550 | unlock_wr: | ||
1551 | if (filp->f_mode & FMODE_READ) | ||
1552 | mutex_unlock(&channel->wr_mutex); | ||
1553 | |||
1554 | if (!rc && (!channel->seekable)) | ||
1555 | return nonseekable_open(inode, filp); | ||
1556 | |||
1557 | return rc; | ||
1558 | } | ||
1559 | |||
1560 | static int xillybus_release(struct inode *inode, struct file *filp) | ||
1561 | { | ||
1562 | unsigned long flags; | ||
1563 | struct xilly_channel *channel = filp->private_data; | ||
1564 | |||
1565 | int buf_idx; | ||
1566 | int eof; | ||
1567 | |||
1568 | if (channel->endpoint->fatal_error) | ||
1569 | return -EIO; | ||
1570 | |||
1571 | if (filp->f_mode & FMODE_WRITE) { | ||
1572 | mutex_lock(&channel->rd_mutex); | ||
1573 | |||
1574 | channel->rd_ref_count--; | ||
1575 | |||
1576 | if (channel->rd_ref_count == 0) { | ||
1577 | /* | ||
1578 | * We rely on the kernel calling flush() | ||
1579 | * before we get here. | ||
1580 | */ | ||
1581 | |||
1582 | iowrite32((channel->chan_num << 1) | /* Channel ID */ | ||
1583 | (5 << 24), /* Opcode 5, close channel */ | ||
1584 | channel->endpoint->registers + | ||
1585 | fpga_buf_ctrl_reg); | ||
1586 | } | ||
1587 | mutex_unlock(&channel->rd_mutex); | ||
1588 | } | ||
1589 | |||
1590 | if (filp->f_mode & FMODE_READ) { | ||
1591 | mutex_lock(&channel->wr_mutex); | ||
1592 | |||
1593 | channel->wr_ref_count--; | ||
1594 | |||
1595 | if (channel->wr_ref_count == 0) { | ||
1596 | iowrite32(1 | (channel->chan_num << 1) | | ||
1597 | (5 << 24), /* Opcode 5, close channel */ | ||
1598 | channel->endpoint->registers + | ||
1599 | fpga_buf_ctrl_reg); | ||
1600 | |||
1601 | /* | ||
1602 | * This is crazily cautious: We not only make sure | ||
1603 | * that we got an EOF (be it because we closed | ||
1604 | * the channel or because of a user's EOF), but also verify | ||
1605 | * that it's one beyond the last buffer arrived, so | ||
1606 | * we have no leftover buffers pending before wrapping | ||
1607 | * up (which can only happen in asynchronous channels, | ||
1608 | * BTW) | ||
1609 | */ | ||
1610 | |||
1611 | while (1) { | ||
1612 | spin_lock_irqsave(&channel->wr_spinlock, | ||
1613 | flags); | ||
1614 | buf_idx = channel->wr_fpga_buf_idx; | ||
1615 | eof = channel->wr_eof; | ||
1616 | channel->wr_sleepy = 1; | ||
1617 | spin_unlock_irqrestore(&channel->wr_spinlock, | ||
1618 | flags); | ||
1619 | |||
1620 | /* | ||
1621 | * Check if eof points at the buffer after | ||
1622 | * the last one the FPGA submitted. Note that | ||
1623 | * no EOF is marked by negative eof. | ||
1624 | */ | ||
1625 | |||
1626 | buf_idx++; | ||
1627 | if (buf_idx == channel->num_wr_buffers) | ||
1628 | buf_idx = 0; | ||
1629 | |||
1630 | if (buf_idx == eof) | ||
1631 | break; | ||
1632 | |||
1633 | /* | ||
1634 | * Steal an extra 100 ms if awakened by an interrupt. | ||
1635 | * This is a simple workaround for an | ||
1636 | * interrupt pending when entering, which would | ||
1637 | * otherwise result in declaring the hardware | ||
1638 | * non-responsive. | ||
1639 | */ | ||
1640 | |||
1641 | if (wait_event_interruptible( | ||
1642 | channel->wr_wait, | ||
1643 | (!channel->wr_sleepy))) | ||
1644 | msleep(100); | ||
1645 | |||
1646 | if (channel->wr_sleepy) { | ||
1647 | mutex_unlock(&channel->wr_mutex); | ||
1648 | dev_warn(channel->endpoint->dev, | ||
1649 | "Hardware failed to respond to close command, therefore left in messy state.\n"); | ||
1650 | return -EINTR; | ||
1651 | } | ||
1652 | } | ||
1653 | } | ||
1654 | |||
1655 | mutex_unlock(&channel->wr_mutex); | ||
1656 | } | ||
1657 | |||
1658 | return 0; | ||
1659 | } | ||
1660 | |||
1661 | static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence) | ||
1662 | { | ||
1663 | struct xilly_channel *channel = filp->private_data; | ||
1664 | loff_t pos = filp->f_pos; | ||
1665 | int rc = 0; | ||
1666 | |||
1667 | /* | ||
1668 | * Take both mutexes not allowing interrupts, since it seems like | ||
1669 | * common applications don't expect an -EINTR here. Besides, multiple | ||
1670 | * access to a single file descriptor on seekable devices is a mess | ||
1671 | * anyhow. | ||
1672 | */ | ||
1673 | |||
1674 | if (channel->endpoint->fatal_error) | ||
1675 | return -EIO; | ||
1676 | |||
1677 | mutex_lock(&channel->wr_mutex); | ||
1678 | mutex_lock(&channel->rd_mutex); | ||
1679 | |||
1680 | switch (whence) { | ||
1681 | case SEEK_SET: | ||
1682 | pos = offset; | ||
1683 | break; | ||
1684 | case SEEK_CUR: | ||
1685 | pos += offset; | ||
1686 | break; | ||
1687 | case SEEK_END: | ||
1688 | pos = offset; /* Going to the end => to the beginning */ | ||
1689 | break; | ||
1690 | default: | ||
1691 | rc = -EINVAL; | ||
1692 | goto end; | ||
1693 | } | ||
1694 | |||
1695 | /* In any case, we must finish on an element boundary */ | ||
1696 | if (pos & ((1 << channel->log2_element_size) - 1)) { | ||
1697 | rc = -EINVAL; | ||
1698 | goto end; | ||
1699 | } | ||
1700 | |||
1701 | mutex_lock(&channel->endpoint->register_mutex); | ||
1702 | |||
1703 | iowrite32(pos >> channel->log2_element_size, | ||
1704 | channel->endpoint->registers + fpga_buf_offset_reg); | ||
1705 | |||
1706 | iowrite32((channel->chan_num << 1) | | ||
1707 | (6 << 24), /* Opcode 6, set address */ | ||
1708 | channel->endpoint->registers + fpga_buf_ctrl_reg); | ||
1709 | |||
1710 | mutex_unlock(&channel->endpoint->register_mutex); | ||
1711 | |||
1712 | end: | ||
1713 | mutex_unlock(&channel->rd_mutex); | ||
1714 | mutex_unlock(&channel->wr_mutex); | ||
1715 | |||
1716 | if (rc) /* Return error after releasing mutexes */ | ||
1717 | return rc; | ||
1718 | |||
1719 | filp->f_pos = pos; | ||
1720 | |||
1721 | /* | ||
1722 | * Since seekable devices are allowed only when the channel is | ||
1723 | * synchronous, we assume that there is no data pending in either | ||
1724 | * direction (which holds true as long as no concurrent access on the | ||
1725 | * file descriptor takes place). | ||
1726 | * The only thing we may need to throw away is leftovers from partial | ||
1727 | * write() flush. | ||
1728 | */ | ||
1729 | |||
1730 | channel->rd_leftovers[3] = 0; | ||
1731 | |||
1732 | return pos; | ||
1733 | } | ||
1734 | |||
1735 | static unsigned int xillybus_poll(struct file *filp, poll_table *wait) | ||
1736 | { | ||
1737 | struct xilly_channel *channel = filp->private_data; | ||
1738 | unsigned int mask = 0; | ||
1739 | unsigned long flags; | ||
1740 | |||
1741 | poll_wait(filp, &channel->endpoint->ep_wait, wait); | ||
1742 | |||
1743 | /* | ||
1744 | * poll() is not supported for read() channels unless they are | ||
1745 | * asynchronous and support the nonempty message. Allowing it | ||
1746 | * otherwise would create situations where data has arrived at | ||
1747 | * the FPGA while users expect select() to wake up, which it may | ||
1748 | * not. | ||
1749 | */ | ||
1750 | |||
1751 | if (!channel->wr_synchronous && channel->wr_supports_nonempty) { | ||
1752 | poll_wait(filp, &channel->wr_wait, wait); | ||
1753 | poll_wait(filp, &channel->wr_ready_wait, wait); | ||
1754 | |||
1755 | spin_lock_irqsave(&channel->wr_spinlock, flags); | ||
1756 | if (!channel->wr_empty || channel->wr_ready) | ||
1757 | mask |= POLLIN | POLLRDNORM; | ||
1758 | |||
1759 | if (channel->wr_hangup) | ||
1760 | /* | ||
1761 | * Not POLLHUP, because its semantics are murky, | ||
1762 | * and POLLIN does what we want: Wake up | ||
1763 | * the read file descriptor so it sees EOF. | ||
1764 | */ | ||
1765 | mask |= POLLIN | POLLRDNORM; | ||
1766 | spin_unlock_irqrestore(&channel->wr_spinlock, flags); | ||
1767 | } | ||
1768 | |||
1769 | /* | ||
1770 | * If partial data write is disallowed on a write() channel, | ||
1771 | * it's pointless to ever signal OK to write, because it could | ||
1772 | * block despite some space being available. | ||
1773 | */ | ||
1774 | |||
1775 | if (channel->rd_allow_partial) { | ||
1776 | poll_wait(filp, &channel->rd_wait, wait); | ||
1777 | |||
1778 | spin_lock_irqsave(&channel->rd_spinlock, flags); | ||
1779 | if (!channel->rd_full) | ||
1780 | mask |= POLLOUT | POLLWRNORM; | ||
1781 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | ||
1782 | } | ||
1783 | |||
1784 | if (channel->endpoint->fatal_error) | ||
1785 | mask |= POLLERR; | ||
1786 | |||
1787 | return mask; | ||
1788 | } | ||
1789 | |||
1790 | static const struct file_operations xillybus_fops = { | ||
1791 | .owner = THIS_MODULE, | ||
1792 | .read = xillybus_read, | ||
1793 | .write = xillybus_write, | ||
1794 | .open = xillybus_open, | ||
1795 | .flush = xillybus_flush, | ||
1796 | .release = xillybus_release, | ||
1797 | .llseek = xillybus_llseek, | ||
1798 | .poll = xillybus_poll, | ||
1799 | }; | ||
1800 | |||
1801 | static int xillybus_init_chrdev(struct xilly_endpoint *endpoint, | ||
1802 | const unsigned char *idt) | ||
1803 | { | ||
1804 | int rc; | ||
1805 | dev_t dev; | ||
1806 | int devnum, i, minor, major; | ||
1807 | char devname[48]; | ||
1808 | struct device *device; | ||
1809 | |||
1810 | rc = alloc_chrdev_region(&dev, 0, /* minor start */ | ||
1811 | endpoint->num_channels, | ||
1812 | xillyname); | ||
1813 | if (rc) { | ||
1814 | dev_warn(endpoint->dev, "Failed to obtain major/minors\n"); | ||
1815 | return rc; | ||
1816 | } | ||
1817 | |||
1818 | endpoint->major = major = MAJOR(dev); | ||
1819 | endpoint->lowest_minor = minor = MINOR(dev); | ||
1820 | |||
1821 | cdev_init(&endpoint->cdev, &xillybus_fops); | ||
1822 | endpoint->cdev.owner = endpoint->ephw->owner; | ||
1823 | rc = cdev_add(&endpoint->cdev, MKDEV(major, minor), | ||
1824 | endpoint->num_channels); | ||
1825 | if (rc) { | ||
1826 | dev_warn(endpoint->dev, "Failed to add cdev. Aborting.\n"); | ||
1827 | goto unregister_chrdev; | ||
1828 | } | ||
1829 | |||
1830 | idt++; | ||
1831 | |||
1832 | for (i = minor, devnum = 0; | ||
1833 | devnum < endpoint->num_channels; | ||
1834 | devnum++, i++) { | ||
1835 | snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt); | ||
1836 | |||
1837 | devname[sizeof(devname)-1] = 0; /* Should never matter */ | ||
1838 | |||
1839 | while (*idt++) | ||
1840 | /* Skip to next */; | ||
1841 | |||
1842 | device = device_create(xillybus_class, | ||
1843 | NULL, | ||
1844 | MKDEV(major, i), | ||
1845 | NULL, | ||
1846 | "%s", devname); | ||
1847 | |||
1848 | if (IS_ERR(device)) { | ||
1849 | dev_warn(endpoint->dev, | ||
1850 | "Failed to create %s device. Aborting.\n", | ||
1851 | devname); | ||
1852 | rc = -ENODEV; | ||
1853 | goto unroll_device_create; | ||
1854 | } | ||
1855 | } | ||
1856 | |||
1857 | dev_info(endpoint->dev, "Created %d device files.\n", | ||
1858 | endpoint->num_channels); | ||
1859 | return 0; /* succeed */ | ||
1860 | |||
1861 | unroll_device_create: | ||
1862 | devnum--; i--; | ||
1863 | for (; devnum >= 0; devnum--, i--) | ||
1864 | device_destroy(xillybus_class, MKDEV(major, i)); | ||
1865 | |||
1866 | cdev_del(&endpoint->cdev); | ||
1867 | unregister_chrdev: | ||
1868 | unregister_chrdev_region(MKDEV(major, minor), endpoint->num_channels); | ||
1869 | |||
1870 | return rc; | ||
1871 | } | ||
1872 | |||
1873 | static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint) | ||
1874 | { | ||
1875 | int minor; | ||
1876 | |||
1877 | for (minor = endpoint->lowest_minor; | ||
1878 | minor < (endpoint->lowest_minor + endpoint->num_channels); | ||
1879 | minor++) | ||
1880 | device_destroy(xillybus_class, MKDEV(endpoint->major, minor)); | ||
1881 | cdev_del(&endpoint->cdev); | ||
1882 | unregister_chrdev_region(MKDEV(endpoint->major, | ||
1883 | endpoint->lowest_minor), | ||
1884 | endpoint->num_channels); | ||
1885 | |||
1886 | dev_info(endpoint->dev, "Removed %d device files.\n", | ||
1887 | endpoint->num_channels); | ||
1888 | } | ||
1889 | |||
1890 | struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev, | ||
1891 | struct device *dev, | ||
1892 | struct xilly_endpoint_hardware | ||
1893 | *ephw) | ||
1894 | { | ||
1895 | struct xilly_endpoint *endpoint; | ||
1896 | |||
1897 | endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL); | ||
1898 | if (!endpoint) | ||
1899 | return NULL; | ||
1900 | |||
1901 | endpoint->pdev = pdev; | ||
1902 | endpoint->dev = dev; | ||
1903 | endpoint->ephw = ephw; | ||
1904 | endpoint->msg_counter = 0x0b; | ||
1905 | endpoint->failed_messages = 0; | ||
1906 | endpoint->fatal_error = 0; | ||
1907 | |||
1908 | init_waitqueue_head(&endpoint->ep_wait); | ||
1909 | mutex_init(&endpoint->register_mutex); | ||
1910 | |||
1911 | return endpoint; | ||
1912 | } | ||
1913 | EXPORT_SYMBOL(xillybus_init_endpoint); | ||
1914 | |||
1915 | static int xilly_quiesce(struct xilly_endpoint *endpoint) | ||
1916 | { | ||
1917 | long t; | ||
1918 | |||
1919 | endpoint->idtlen = -1; | ||
1920 | |||
1921 | iowrite32((u32) (endpoint->dma_using_dac & 0x0001), | ||
1922 | endpoint->registers + fpga_dma_control_reg); | ||
1923 | |||
1924 | t = wait_event_interruptible_timeout(endpoint->ep_wait, | ||
1925 | (endpoint->idtlen >= 0), | ||
1926 | XILLY_TIMEOUT); | ||
1927 | if (t <= 0) { | ||
1928 | dev_err(endpoint->dev, | ||
1929 | "Failed to quiesce the device on exit.\n"); | ||
1930 | return -ENODEV; | ||
1931 | } | ||
1932 | return 0; | ||
1933 | } | ||
1934 | |||
1935 | int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint) | ||
1936 | { | ||
1937 | int rc; | ||
1938 | long t; | ||
1939 | |||
1940 | void *bootstrap_resources; | ||
1941 | int idtbuffersize = (1 << PAGE_SHIFT); | ||
1942 | struct device *dev = endpoint->dev; | ||
1943 | |||
1944 | /* | ||
1945 | * The bogus IDT is used during bootstrap: first for allocating the | ||
1946 | * initial message buffer, and later for the message buffer plus the | ||
1947 | * space for the IDT itself. The initial message buffer is one page in | ||
1948 | * size, but it's soon replaced with a more modest one (and memory is freed). | ||
1949 | */ | ||
1950 | |||
1951 | unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0, | ||
1952 | 3, 192, PAGE_SHIFT, 0 }; | ||
1953 | struct xilly_idt_handle idt_handle; | ||
1954 | |||
1955 | /* | ||
1956 | * Writing the value 0x00000001 to Endianness register signals which | ||
1957 | * endianness this processor is using, so the FPGA can swap words as | ||
1958 | * necessary. | ||
1959 | */ | ||
1960 | |||
1961 | iowrite32(1, endpoint->registers + fpga_endian_reg); | ||
1962 | |||
1963 | /* Bootstrap phase I: Allocate temporary message buffer */ | ||
1964 | |||
1965 | bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL); | ||
1966 | if (!bootstrap_resources) | ||
1967 | return -ENOMEM; | ||
1968 | |||
1969 | endpoint->num_channels = 0; | ||
1970 | |||
1971 | rc = xilly_setupchannels(endpoint, bogus_idt, 1); | ||
1972 | if (rc) | ||
1973 | return rc; | ||
1974 | |||
1975 | /* Clear the message subsystem (and counter in particular) */ | ||
1976 | iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg); | ||
1977 | |||
1978 | endpoint->idtlen = -1; | ||
1979 | |||
1980 | /* | ||
1981 | * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT | ||
1982 | * buffer size. | ||
1983 | */ | ||
1984 | iowrite32((u32) (endpoint->dma_using_dac & 0x0001), | ||
1985 | endpoint->registers + fpga_dma_control_reg); | ||
1986 | |||
1987 | t = wait_event_interruptible_timeout(endpoint->ep_wait, | ||
1988 | (endpoint->idtlen >= 0), | ||
1989 | XILLY_TIMEOUT); | ||
1990 | if (t <= 0) { | ||
1991 | dev_err(endpoint->dev, "No response from FPGA. Aborting.\n"); | ||
1992 | return -ENODEV; | ||
1993 | } | ||
1994 | |||
1995 | /* Enable DMA */ | ||
1996 | iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)), | ||
1997 | endpoint->registers + fpga_dma_control_reg); | ||
1998 | |||
1999 | /* Bootstrap phase II: Allocate buffer for IDT and obtain it */ | ||
2000 | while (endpoint->idtlen >= idtbuffersize) { | ||
2001 | idtbuffersize *= 2; | ||
2002 | bogus_idt[6]++; | ||
2003 | } | ||
2004 | |||
2005 | endpoint->num_channels = 1; | ||
2006 | |||
2007 | rc = xilly_setupchannels(endpoint, bogus_idt, 2); | ||
2008 | if (rc) | ||
2009 | goto failed_idt; | ||
2010 | |||
2011 | rc = xilly_obtain_idt(endpoint); | ||
2012 | if (rc) | ||
2013 | goto failed_idt; | ||
2014 | |||
2015 | rc = xilly_scan_idt(endpoint, &idt_handle); | ||
2016 | if (rc) | ||
2017 | goto failed_idt; | ||
2018 | |||
2019 | devres_close_group(dev, bootstrap_resources); | ||
2020 | |||
2021 | /* Bootstrap phase III: Allocate buffers according to IDT */ | ||
2022 | |||
2023 | rc = xilly_setupchannels(endpoint, | ||
2024 | idt_handle.chandesc, | ||
2025 | idt_handle.entries); | ||
2026 | if (rc) | ||
2027 | goto failed_idt; | ||
2028 | |||
2029 | /* | ||
2030 | * The endpoint is now completely configured. We put it on the list | ||
2031 | * available to open() before registering the char device(s). | ||
2032 | */ | ||
2033 | |||
2034 | mutex_lock(&ep_list_lock); | ||
2035 | list_add_tail(&endpoint->ep_list, &list_of_endpoints); | ||
2036 | mutex_unlock(&ep_list_lock); | ||
2037 | |||
2038 | rc = xillybus_init_chrdev(endpoint, idt_handle.idt); | ||
2039 | if (rc) | ||
2040 | goto failed_chrdevs; | ||
2041 | |||
2042 | devres_release_group(dev, bootstrap_resources); | ||
2043 | |||
2044 | return 0; | ||
2045 | |||
2046 | failed_chrdevs: | ||
2047 | mutex_lock(&ep_list_lock); | ||
2048 | list_del(&endpoint->ep_list); | ||
2049 | mutex_unlock(&ep_list_lock); | ||
2050 | |||
2051 | failed_idt: | ||
2052 | xilly_quiesce(endpoint); | ||
2053 | flush_workqueue(xillybus_wq); | ||
2054 | |||
2055 | return rc; | ||
2056 | } | ||
2057 | EXPORT_SYMBOL(xillybus_endpoint_discovery); | ||
2058 | |||
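The three bootstrap phases above hinge on devres groups: allocations made while a group is open can later be released wholesale, which is how the temporary bootstrap buffers disappear once the real ones exist. The idiom in isolation, as a sketch (my_probe and the 4096-byte buffer are hypothetical):

#include <linux/device.h>
#include <linux/slab.h>

static int my_probe(struct device *dev)
{
	void *group, *buf;

	group = devres_open_group(dev, NULL, GFP_KERNEL);
	if (!group)
		return -ENOMEM;

	buf = devm_kzalloc(dev, 4096, GFP_KERNEL);	/* tracked in the group */
	if (!buf)
		return -ENOMEM;	/* devres frees the open group if probe fails */

	devres_close_group(dev, group);		/* seal: later allocations go elsewhere */
	devres_release_group(dev, group);	/* free everything in the group now */

	return 0;
}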
2059 | void xillybus_endpoint_remove(struct xilly_endpoint *endpoint) | ||
2060 | { | ||
2061 | xillybus_cleanup_chrdev(endpoint); | ||
2062 | |||
2063 | mutex_lock(&ep_list_lock); | ||
2064 | list_del(&endpoint->ep_list); | ||
2065 | mutex_unlock(&ep_list_lock); | ||
2066 | |||
2067 | xilly_quiesce(endpoint); | ||
2068 | |||
2069 | /* | ||
2070 | * Flushing is done upon endpoint release to prevent access to memory | ||
2071 | * just about to be released. This makes the quiesce complete. | ||
2072 | */ | ||
2073 | flush_workqueue(xillybus_wq); | ||
2074 | } | ||
2075 | EXPORT_SYMBOL(xillybus_endpoint_remove); | ||
2076 | |||
2077 | static int __init xillybus_init(void) | ||
2078 | { | ||
2079 | mutex_init(&ep_list_lock); | ||
2080 | |||
2081 | xillybus_class = class_create(THIS_MODULE, xillyname); | ||
2082 | if (IS_ERR(xillybus_class)) | ||
2083 | return PTR_ERR(xillybus_class); | ||
2084 | |||
2085 | xillybus_wq = alloc_workqueue(xillyname, 0, 0); | ||
2086 | if (!xillybus_wq) { | ||
2087 | class_destroy(xillybus_class); | ||
2088 | return -ENOMEM; | ||
2089 | } | ||
2090 | |||
2091 | return 0; | ||
2092 | } | ||
2093 | |||
2094 | static void __exit xillybus_exit(void) | ||
2095 | { | ||
2096 | /* flush_workqueue() was called for each endpoint released */ | ||
2097 | destroy_workqueue(xillybus_wq); | ||
2098 | |||
2099 | class_destroy(xillybus_class); | ||
2100 | } | ||
2101 | |||
2102 | module_init(xillybus_init); | ||
2103 | module_exit(xillybus_exit); | ||
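From userspace, the result of a successful endpoint discovery is a set of character devices, one per IDT entry. A minimal reader as an example; the device name /dev/xillybus_read_32 is an assumption here, since the actual names come from the FPGA's IDT:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/xillybus_read_32", O_RDONLY);	/* name from IDT */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* copy the FPGA stream to stdout */

	close(fd);
	return 0;
}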
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c new file mode 100644 index 000000000000..1ca0c7a4f1be --- /dev/null +++ b/drivers/char/xillybus/xillybus_of.c | |||
@@ -0,0 +1,187 @@ | |||
1 | /* | ||
2 | * linux/drivers/char/xillybus/xillybus_of.c | ||
3 | * | ||
4 | * Copyright 2011 Xillybus Ltd, http://xillybus.com | ||
5 | * | ||
6 | * Driver for the Xillybus FPGA/host framework using Open Firmware. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; version 2 of the License. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/device.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/of.h> | ||
18 | #include <linux/of_irq.h> | ||
19 | #include <linux/of_address.h> | ||
20 | #include <linux/of_device.h> | ||
21 | #include <linux/of_platform.h> | ||
22 | #include <linux/err.h> | ||
23 | #include "xillybus.h" | ||
24 | |||
25 | MODULE_DESCRIPTION("Xillybus driver for Open Firmware"); | ||
26 | MODULE_AUTHOR("Eli Billauer, Xillybus Ltd."); | ||
27 | MODULE_VERSION("1.06"); | ||
28 | MODULE_ALIAS("xillybus_of"); | ||
29 | MODULE_LICENSE("GPL v2"); | ||
30 | |||
31 | static const char xillyname[] = "xillybus_of"; | ||
32 | |||
33 | /* Match table for of_platform binding */ | ||
34 | static const struct of_device_id xillybus_of_match[] = { | ||
35 | { .compatible = "xillybus,xillybus-1.00.a", }, | ||
36 | { .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */ | ||
37 | {} | ||
38 | }; | ||
39 | |||
40 | MODULE_DEVICE_TABLE(of, xillybus_of_match); | ||
41 | |||
42 | static void xilly_dma_sync_single_for_cpu_of(struct xilly_endpoint *ep, | ||
43 | dma_addr_t dma_handle, | ||
44 | size_t size, | ||
45 | int direction) | ||
46 | { | ||
47 | dma_sync_single_for_cpu(ep->dev, dma_handle, size, direction); | ||
48 | } | ||
49 | |||
50 | static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep, | ||
51 | dma_addr_t dma_handle, | ||
52 | size_t size, | ||
53 | int direction) | ||
54 | { | ||
55 | dma_sync_single_for_device(ep->dev, dma_handle, size, direction); | ||
56 | } | ||
57 | |||
58 | static void xilly_dma_sync_single_nop(struct xilly_endpoint *ep, | ||
59 | dma_addr_t dma_handle, | ||
60 | size_t size, | ||
61 | int direction) | ||
62 | { | ||
63 | } | ||
64 | |||
65 | static void xilly_of_unmap(void *ptr) | ||
66 | { | ||
67 | struct xilly_mapping *data = ptr; | ||
68 | |||
69 | dma_unmap_single(data->device, data->dma_addr, | ||
70 | data->size, data->direction); | ||
71 | |||
72 | kfree(ptr); | ||
73 | } | ||
74 | |||
75 | static int xilly_map_single_of(struct xilly_endpoint *ep, | ||
76 | void *ptr, | ||
77 | size_t size, | ||
78 | int direction, | ||
79 | dma_addr_t *ret_dma_handle | ||
80 | ) | ||
81 | { | ||
82 | dma_addr_t addr; | ||
83 | struct xilly_mapping *this; | ||
84 | int rc; | ||
85 | |||
86 | this = kzalloc(sizeof(*this), GFP_KERNEL); | ||
87 | if (!this) | ||
88 | return -ENOMEM; | ||
89 | |||
90 | addr = dma_map_single(ep->dev, ptr, size, direction); | ||
91 | |||
92 | if (dma_mapping_error(ep->dev, addr)) { | ||
93 | kfree(this); | ||
94 | return -ENODEV; | ||
95 | } | ||
96 | |||
97 | this->device = ep->dev; | ||
98 | this->dma_addr = addr; | ||
99 | this->size = size; | ||
100 | this->direction = direction; | ||
101 | |||
102 | *ret_dma_handle = addr; | ||
103 | |||
104 | rc = devm_add_action(ep->dev, xilly_of_unmap, this); | ||
105 | |||
106 | if (rc) { | ||
107 | dma_unmap_single(ep->dev, addr, size, direction); | ||
108 | kfree(this); | ||
109 | return rc; | ||
110 | } | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
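xilly_map_single_of() ties each DMA mapping's teardown to device lifetime via devm_add_action(), so neither the error paths nor remove() need an explicit unmap. The idiom in isolation (my_release and my_track are illustrative names, not part of the driver):

#include <linux/device.h>
#include <linux/slab.h>

static void my_release(void *res)
{
	kfree(res);
}

static int my_track(struct device *dev)
{
	void *res = kzalloc(16, GFP_KERNEL);
	int rc;

	if (!res)
		return -ENOMEM;

	rc = devm_add_action(dev, my_release, res);
	if (rc) {
		kfree(res);	/* action was not registered; undo by hand */
		return rc;
	}

	return 0;	/* my_release() now runs automatically at device teardown */
}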
115 | static struct xilly_endpoint_hardware of_hw = { | ||
116 | .owner = THIS_MODULE, | ||
117 | .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_of, | ||
118 | .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_of, | ||
119 | .map_single = xilly_map_single_of, | ||
120 | }; | ||
121 | |||
122 | static struct xilly_endpoint_hardware of_hw_coherent = { | ||
123 | .owner = THIS_MODULE, | ||
124 | .hw_sync_sgl_for_cpu = xilly_dma_sync_single_nop, | ||
125 | .hw_sync_sgl_for_device = xilly_dma_sync_single_nop, | ||
126 | .map_single = xilly_map_single_of, | ||
127 | }; | ||
128 | |||
129 | static int xilly_drv_probe(struct platform_device *op) | ||
130 | { | ||
131 | struct device *dev = &op->dev; | ||
132 | struct xilly_endpoint *endpoint; | ||
133 | int rc; | ||
134 | int irq; | ||
135 | struct resource res; | ||
136 | struct xilly_endpoint_hardware *ephw = &of_hw; | ||
137 | |||
138 | if (of_property_read_bool(dev->of_node, "dma-coherent")) | ||
139 | ephw = &of_hw_coherent; | ||
140 | |||
141 | endpoint = xillybus_init_endpoint(NULL, dev, ephw); | ||
142 | |||
143 | if (!endpoint) | ||
144 | return -ENOMEM; | ||
145 | |||
146 | dev_set_drvdata(dev, endpoint); | ||
147 | |||
148 | rc = of_address_to_resource(dev->of_node, 0, &res); | ||
149 | endpoint->registers = devm_ioremap_resource(dev, &res); | ||
150 | |||
151 | if (IS_ERR(endpoint->registers)) | ||
152 | return PTR_ERR(endpoint->registers); | ||
153 | |||
154 | irq = irq_of_parse_and_map(dev->of_node, 0); | ||
155 | |||
156 | rc = devm_request_irq(dev, irq, xillybus_isr, 0, xillyname, endpoint); | ||
157 | |||
158 | if (rc) { | ||
159 | dev_err(endpoint->dev, | ||
160 | "Failed to register IRQ handler. Aborting.\n"); | ||
161 | return -ENODEV; | ||
162 | } | ||
163 | |||
164 | return xillybus_endpoint_discovery(endpoint); | ||
165 | } | ||
166 | |||
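Two hedged caveats on the probe above: the return value of of_address_to_resource() is assigned to rc but never checked, and irq_of_parse_and_map() returns 0 on failure, which is passed to devm_request_irq() as-is. A sketch of a more defensive IRQ setup (my_setup_irq is a hypothetical helper, not part of the driver):

#include <linux/interrupt.h>
#include <linux/of_irq.h>

static int my_setup_irq(struct device *dev, irq_handler_t isr, void *data)
{
	unsigned int irq = irq_of_parse_and_map(dev->of_node, 0);

	if (!irq)	/* 0 means the mapping failed */
		return -ENODEV;

	return devm_request_irq(dev, irq, isr, 0, dev_name(dev), data);
}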
167 | static int xilly_drv_remove(struct platform_device *op) | ||
168 | { | ||
169 | struct device *dev = &op->dev; | ||
170 | struct xilly_endpoint *endpoint = dev_get_drvdata(dev); | ||
171 | |||
172 | xillybus_endpoint_remove(endpoint); | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | static struct platform_driver xillybus_platform_driver = { | ||
178 | .probe = xilly_drv_probe, | ||
179 | .remove = xilly_drv_remove, | ||
180 | .driver = { | ||
181 | .name = xillyname, | ||
182 | .owner = THIS_MODULE, | ||
183 | .of_match_table = xillybus_of_match, | ||
184 | }, | ||
185 | }; | ||
186 | |||
187 | module_platform_driver(xillybus_platform_driver); | ||
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c new file mode 100644 index 000000000000..d8266bc2ae35 --- /dev/null +++ b/drivers/char/xillybus/xillybus_pcie.c | |||
@@ -0,0 +1,228 @@ | |||
1 | /* | ||
2 | * linux/drivers/char/xillybus/xillybus_pcie.c | ||
3 | * | ||
4 | * Copyright 2011 Xillybus Ltd, http://xillybus.com | ||
5 | * | ||
6 | * Driver for the Xillybus FPGA/host framework using PCI Express. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; version 2 of the License. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/pci-aspm.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include "xillybus.h" | ||
18 | |||
19 | MODULE_DESCRIPTION("Xillybus driver for PCIe"); | ||
20 | MODULE_AUTHOR("Eli Billauer, Xillybus Ltd."); | ||
21 | MODULE_VERSION("1.06"); | ||
22 | MODULE_ALIAS("xillybus_pcie"); | ||
23 | MODULE_LICENSE("GPL v2"); | ||
24 | |||
25 | #define PCI_DEVICE_ID_XILLYBUS 0xebeb | ||
26 | |||
27 | #define PCI_VENDOR_ID_ALTERA 0x1172 | ||
28 | #define PCI_VENDOR_ID_ACTEL 0x11aa | ||
29 | #define PCI_VENDOR_ID_LATTICE 0x1204 | ||
30 | |||
31 | static const char xillyname[] = "xillybus_pcie"; | ||
32 | |||
33 | static const struct pci_device_id xillyids[] = { | ||
34 | {PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILLYBUS)}, | ||
35 | {PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_XILLYBUS)}, | ||
36 | {PCI_DEVICE(PCI_VENDOR_ID_ACTEL, PCI_DEVICE_ID_XILLYBUS)}, | ||
37 | {PCI_DEVICE(PCI_VENDOR_ID_LATTICE, PCI_DEVICE_ID_XILLYBUS)}, | ||
38 | { /* End: all zeroes */ } | ||
39 | }; | ||
40 | |||
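Each PCI_DEVICE() entry in the table matches on vendor and device IDs only, leaving subvendor and subdevice as wildcards. Open-coded, the first entry is equivalent to this (my_id is an illustrative name):

static const struct pci_device_id my_id = {
	.vendor    = PCI_VENDOR_ID_XILINX,
	.device    = PCI_DEVICE_ID_XILLYBUS,
	.subvendor = PCI_ANY_ID,	/* match any subsystem vendor */
	.subdevice = PCI_ANY_ID,	/* match any subsystem device */
};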
41 | static int xilly_pci_direction(int direction) | ||
42 | { | ||
43 | switch (direction) { | ||
44 | case DMA_TO_DEVICE: | ||
45 | return PCI_DMA_TODEVICE; | ||
46 | case DMA_FROM_DEVICE: | ||
47 | return PCI_DMA_FROMDEVICE; | ||
48 | default: | ||
49 | return PCI_DMA_BIDIRECTIONAL; | ||
50 | } | ||
51 | } | ||
52 | |||
53 | static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep, | ||
54 | dma_addr_t dma_handle, | ||
55 | size_t size, | ||
56 | int direction) | ||
57 | { | ||
58 | pci_dma_sync_single_for_cpu(ep->pdev, | ||
59 | dma_handle, | ||
60 | size, | ||
61 | xilly_pci_direction(direction)); | ||
62 | } | ||
63 | |||
64 | static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep, | ||
65 | dma_addr_t dma_handle, | ||
66 | size_t size, | ||
67 | int direction) | ||
68 | { | ||
69 | pci_dma_sync_single_for_device(ep->pdev, | ||
70 | dma_handle, | ||
71 | size, | ||
72 | xilly_pci_direction(direction)); | ||
73 | } | ||
74 | |||
75 | static void xilly_pci_unmap(void *ptr) | ||
76 | { | ||
77 | struct xilly_mapping *data = ptr; | ||
78 | |||
79 | pci_unmap_single(data->device, data->dma_addr, | ||
80 | data->size, data->direction); | ||
81 | |||
82 | kfree(ptr); | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * Map either through the PCI DMA mapper or the non-PCI one. Behind the | ||
87 | * scenes exactly the same functions are called with the same parameters, | ||
88 | * but that can change. | ||
89 | */ | ||
90 | |||
91 | static int xilly_map_single_pci(struct xilly_endpoint *ep, | ||
92 | void *ptr, | ||
93 | size_t size, | ||
94 | int direction, | ||
95 | dma_addr_t *ret_dma_handle | ||
96 | ) | ||
97 | { | ||
98 | int pci_direction; | ||
99 | dma_addr_t addr; | ||
100 | struct xilly_mapping *this; | ||
101 | int rc; | ||
102 | |||
103 | this = kzalloc(sizeof(*this), GFP_KERNEL); | ||
104 | if (!this) | ||
105 | return -ENOMEM; | ||
106 | |||
107 | pci_direction = xilly_pci_direction(direction); | ||
108 | |||
109 | addr = pci_map_single(ep->pdev, ptr, size, pci_direction); | ||
110 | |||
111 | if (pci_dma_mapping_error(ep->pdev, addr)) { | ||
112 | kfree(this); | ||
113 | return -ENODEV; | ||
114 | } | ||
115 | |||
116 | this->device = ep->pdev; | ||
117 | this->dma_addr = addr; | ||
118 | this->size = size; | ||
119 | this->direction = pci_direction; | ||
120 | |||
121 | *ret_dma_handle = addr; | ||
122 | |||
123 | rc = devm_add_action(ep->dev, xilly_pci_unmap, this); | ||
124 | if (rc) { | ||
125 | pci_unmap_single(ep->pdev, addr, size, pci_direction); | ||
126 | kfree(this); | ||
127 | return rc; | ||
128 | } | ||
129 | |||
130 | return 0; | ||
131 | } | ||
132 | |||
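As the comment before xilly_map_single_pci() says, the pci_* DMA calls are thin wrappers over the generic DMA API operating on &pdev->dev. A sketch of the same mapping through the generic entry points (my_map is an illustrative name):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int my_map(struct pci_dev *pdev, void *ptr, size_t size,
		  dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(&pdev->dev, ptr, size,
					 DMA_TO_DEVICE);

	if (dma_mapping_error(&pdev->dev, addr))
		return -ENODEV;	/* mapping failed; nothing to undo */

	*handle = addr;
	return 0;
}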
133 | static struct xilly_endpoint_hardware pci_hw = { | ||
134 | .owner = THIS_MODULE, | ||
135 | .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci, | ||
136 | .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci, | ||
137 | .map_single = xilly_map_single_pci, | ||
138 | }; | ||
139 | |||
140 | static int xilly_probe(struct pci_dev *pdev, | ||
141 | const struct pci_device_id *ent) | ||
142 | { | ||
143 | struct xilly_endpoint *endpoint; | ||
144 | int rc; | ||
145 | |||
146 | endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw); | ||
147 | |||
148 | if (!endpoint) | ||
149 | return -ENOMEM; | ||
150 | |||
151 | pci_set_drvdata(pdev, endpoint); | ||
152 | |||
153 | rc = pcim_enable_device(pdev); | ||
154 | if (rc) { | ||
155 | dev_err(endpoint->dev, | ||
156 | "pcim_enable_device() failed. Aborting.\n"); | ||
157 | return rc; | ||
158 | } | ||
159 | |||
160 | /* L0s has caused packet drops. No power saving, thank you. */ | ||
161 | |||
162 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); | ||
163 | |||
164 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | ||
165 | dev_err(endpoint->dev, | ||
166 | "Incorrect BAR configuration. Aborting.\n"); | ||
167 | return -ENODEV; | ||
168 | } | ||
169 | |||
170 | rc = pcim_iomap_regions(pdev, 0x01, xillyname); | ||
171 | if (rc) { | ||
172 | dev_err(endpoint->dev, | ||
173 | "pcim_iomap_regions() failed. Aborting.\n"); | ||
174 | return rc; | ||
175 | } | ||
176 | |||
177 | endpoint->registers = pcim_iomap_table(pdev)[0]; | ||
178 | |||
179 | pci_set_master(pdev); | ||
180 | |||
181 | /* Set up a single MSI interrupt */ | ||
182 | if (pci_enable_msi(pdev)) { | ||
183 | dev_err(endpoint->dev, | ||
184 | "Failed to enable MSI interrupts. Aborting.\n"); | ||
185 | return -ENODEV; | ||
186 | } | ||
187 | rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0, | ||
188 | xillyname, endpoint); | ||
189 | if (rc) { | ||
190 | dev_err(endpoint->dev, | ||
191 | "Failed to register MSI handler. Aborting.\n"); | ||
192 | return -ENODEV; | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * In theory, an attempt to set the DMA mask to 64 bits (with | ||
197 | * dma_using_dac=1) would be the right thing. But some ill-behaved | ||
198 | * PCIe implementations report success yet drop those 64-bit PCIe | ||
199 | * packets. So trust nobody and use 32-bit DMA addressing in any case. | ||
200 | */ | ||
201 | |||
202 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { | ||
203 | endpoint->dma_using_dac = 0; | ||
204 | } else { | ||
205 | dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n"); | ||
206 | return -ENODEV; | ||
207 | } | ||
208 | |||
209 | return xillybus_endpoint_discovery(endpoint); | ||
210 | } | ||
211 | |||
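For contrast with the comment inside xilly_probe(), this is the conventional try-64-then-32 mask negotiation that the driver deliberately avoids; a sketch using the era's pci_set_dma_mask() (my_set_mask is an illustrative name):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int my_set_mask(struct pci_dev *pdev, int *using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		*using_dac = 1;		/* 64-bit addressing accepted */
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		*using_dac = 0;		/* fall back to 32-bit addressing */
	} else {
		dev_err(&pdev->dev, "No suitable DMA mask available\n");
		return -ENODEV;
	}

	return 0;
}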
212 | static void xilly_remove(struct pci_dev *pdev) | ||
213 | { | ||
214 | struct xilly_endpoint *endpoint = pci_get_drvdata(pdev); | ||
215 | |||
216 | xillybus_endpoint_remove(endpoint); | ||
217 | } | ||
218 | |||
219 | MODULE_DEVICE_TABLE(pci, xillyids); | ||
220 | |||
221 | static struct pci_driver xillybus_driver = { | ||
222 | .name = xillyname, | ||
223 | .id_table = xillyids, | ||
224 | .probe = xilly_probe, | ||
225 | .remove = xilly_remove, | ||
226 | }; | ||
227 | |||
228 | module_pci_driver(xillybus_driver); | ||
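module_pci_driver() above expands to the standard registration boilerplate; open-coded, it is equivalent to the following (the init/exit function names are illustrative):

static int __init xillybus_pcie_init(void)
{
	return pci_register_driver(&xillybus_driver);
}

static void __exit xillybus_pcie_exit(void)
{
	pci_unregister_driver(&xillybus_driver);
}

module_init(xillybus_pcie_init);
module_exit(xillybus_pcie_exit);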