| author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-16 19:05:01 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-16 19:05:01 -0500 |
| commit | cf9b0772f2e410645fece13b749bd56505b998b8 (patch) | |
| tree | 8b171a2c49d1e9e41d4e43fb91602e664cde8551 /drivers | |
| parent | 527d1470744d338c912f94bc1f4dba08ffdff349 (diff) | |
| parent | 339cd0ea082287ea8e2b7e7159a5a33665a2cbe3 (diff) | |
Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC driver updates from Arnd Bergmann:
"This branch contains platform-related driver updates for ARM and
ARM64, these are the areas that bring the changes:
New drivers:
- driver support for Renesas R-Car V3M (R8A77970)
- power management support for Amlogic GX
- a new driver for the Tegra BPMP thermal sensor
- a new bus driver for Technologic Systems NBUS
Changes for subsystems that prefer to merge through arm-soc:
- the usual updates for reset controller drivers from Philipp Zabel,
with five added drivers for SoCs in the arc, meson, socfpga,
uniphier and mediatek families
- updates to the ARM SCPI and PSCI frameworks, from Sudeep Holla,
Heiner Kallweit and Lorenzo Pieralisi
Changes specific to some ARM-based SoCs:
- the Freescale/NXP DPAA QBMan drivers from PowerPC can now work on
ARM as well
- several changes for power management on Broadcom SoCs
- various improvements on Qualcomm, Broadcom, Amlogic, Atmel,
Mediatek
- minor cleanups for Samsung and TI OMAP SoCs"
[ NOTE! This doesn't work without the previous ARM SoC device-tree pull,
because the R8A77970 driver is missing a header file that came from
that pull.
The fact that this got merged afterwards only fixes it at this point,
and bisection of that driver will fail if/when you walk into the
history of that driver. - Linus ]
* tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (96 commits)
soc: amlogic: meson-gx-pwrc-vpu: fix power-off when powered by bootloader
bus: add driver for the Technologic Systems NBUS
memory: omap-gpmc: Remove deprecated gpmc_update_nand_reg()
soc: qcom: remove unused label
soc: amlogic: gx pm domain: add PM and OF dependencies
drivers/firmware: psci_checker: Add missing destroy_timer_on_stack()
dt-bindings: power: add amlogic meson power domain bindings
soc: amlogic: add Meson GX VPU Domains driver
soc: qcom: Remote filesystem memory driver
dt-binding: soc: qcom: Add binding for rmtfs memory
of: reserved_mem: Accessor for acquiring reserved_mem
of/platform: Generalize /reserved-memory handling
soc: mediatek: pwrap: fix fatal compiler error
soc: mediatek: pwrap: fix compiler errors
arm64: mediatek: cleanup message for platform selection
soc: Allow test-building of MediaTek drivers
soc: mediatek: place Kconfig for all SoC drivers under menu
soc: mediatek: pwrap: add support for MT7622 SoC
soc: mediatek: pwrap: add common way for setup CS timing extension
soc: mediatek: pwrap: add MediaTek MT6380 as one slave of pwrap
...
Diffstat (limited to 'drivers')
86 files changed, 6364 insertions, 961 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index fa94a85bf410..dc7b3c7b7d42 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
| @@ -165,6 +165,14 @@ config TI_SYSC | |||
| 165 | Generic driver for Texas Instruments interconnect target module | 165 | Generic driver for Texas Instruments interconnect target module |
| 166 | found on many TI SoCs. | 166 | found on many TI SoCs. |
| 167 | 167 | ||
| 168 | config TS_NBUS | ||
| 169 | tristate "Technologic Systems NBUS Driver" | ||
| 170 | depends on SOC_IMX28 | ||
| 171 | depends on OF_GPIO && PWM | ||
| 172 | help | ||
| 173 | Driver for the Technologic Systems NBUS which is used to interface | ||
| 174 | with the peripherals in the FPGA of the TS-4600 SoM. | ||
| 175 | |||
| 168 | config UNIPHIER_SYSTEM_BUS | 176 | config UNIPHIER_SYSTEM_BUS |
| 169 | tristate "UniPhier System Bus driver" | 177 | tristate "UniPhier System Bus driver" |
| 170 | depends on ARCH_UNIPHIER && OF | 178 | depends on ARCH_UNIPHIER && OF |
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 94a079008cbe..9bcd0bf3954b 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
| @@ -22,6 +22,7 @@ obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o | |||
| 22 | obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o | 22 | obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o |
| 23 | obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o | 23 | obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o |
| 24 | obj-$(CONFIG_TI_SYSC) += ti-sysc.o | 24 | obj-$(CONFIG_TI_SYSC) += ti-sysc.o |
| 25 | obj-$(CONFIG_TS_NBUS) += ts-nbus.o | ||
| 25 | obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o | 26 | obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o |
| 26 | obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o | 27 | obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o |
| 27 | 28 | ||
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
new file mode 100644
index 000000000000..073fd9011154
--- /dev/null
+++ b/drivers/bus/ts-nbus.c
| @@ -0,0 +1,375 @@ | |||
| 1 | /* | ||
| 2 | * NBUS driver for TS-4600 based boards | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016 - Savoir-faire Linux | ||
| 5 | * Author: Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com> | ||
| 6 | * | ||
| 7 | * This file is licensed under the terms of the GNU General Public | ||
| 8 | * License version 2. This program is licensed "as is" without any | ||
| 9 | * warranty of any kind, whether express or implied. | ||
| 10 | * | ||
| 11 | * This driver implements a GPIOs bit-banged bus, called the NBUS by Technologic | ||
| 12 | * Systems. It is used to communicate with the peripherals in the FPGA on the | ||
| 13 | * TS-4600 SoM. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include <linux/bitops.h> | ||
| 17 | #include <linux/gpio/consumer.h> | ||
| 18 | #include <linux/kernel.h> | ||
| 19 | #include <linux/module.h> | ||
| 20 | #include <linux/mutex.h> | ||
| 21 | #include <linux/of_platform.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include <linux/pwm.h> | ||
| 24 | #include <linux/ts-nbus.h> | ||
| 25 | |||
| 26 | #define TS_NBUS_DIRECTION_IN 0 | ||
| 27 | #define TS_NBUS_DIRECTION_OUT 1 | ||
| 28 | #define TS_NBUS_WRITE_ADR 0 | ||
| 29 | #define TS_NBUS_WRITE_VAL 1 | ||
| 30 | |||
| 31 | struct ts_nbus { | ||
| 32 | struct pwm_device *pwm; | ||
| 33 | struct gpio_descs *data; | ||
| 34 | struct gpio_desc *csn; | ||
| 35 | struct gpio_desc *txrx; | ||
| 36 | struct gpio_desc *strobe; | ||
| 37 | struct gpio_desc *ale; | ||
| 38 | struct gpio_desc *rdy; | ||
| 39 | struct mutex lock; | ||
| 40 | }; | ||
| 41 | |||
| 42 | /* | ||
| 43 | * request all gpios required by the bus. | ||
| 44 | */ | ||
| 45 | static int ts_nbus_init_pdata(struct platform_device *pdev, struct ts_nbus | ||
| 46 | *ts_nbus) | ||
| 47 | { | ||
| 48 | ts_nbus->data = devm_gpiod_get_array(&pdev->dev, "ts,data", | ||
| 49 | GPIOD_OUT_HIGH); | ||
| 50 | if (IS_ERR(ts_nbus->data)) { | ||
| 51 | dev_err(&pdev->dev, "failed to retrieve ts,data-gpio from dts\n"); | ||
| 52 | return PTR_ERR(ts_nbus->data); | ||
| 53 | } | ||
| 54 | |||
| 55 | ts_nbus->csn = devm_gpiod_get(&pdev->dev, "ts,csn", GPIOD_OUT_HIGH); | ||
| 56 | if (IS_ERR(ts_nbus->csn)) { | ||
| 57 | dev_err(&pdev->dev, "failed to retrieve ts,csn-gpio from dts\n"); | ||
| 58 | return PTR_ERR(ts_nbus->csn); | ||
| 59 | } | ||
| 60 | |||
| 61 | ts_nbus->txrx = devm_gpiod_get(&pdev->dev, "ts,txrx", GPIOD_OUT_HIGH); | ||
| 62 | if (IS_ERR(ts_nbus->txrx)) { | ||
| 63 | dev_err(&pdev->dev, "failed to retrieve ts,txrx-gpio from dts\n"); | ||
| 64 | return PTR_ERR(ts_nbus->txrx); | ||
| 65 | } | ||
| 66 | |||
| 67 | ts_nbus->strobe = devm_gpiod_get(&pdev->dev, "ts,strobe", GPIOD_OUT_HIGH); | ||
| 68 | if (IS_ERR(ts_nbus->strobe)) { | ||
| 69 | dev_err(&pdev->dev, "failed to retrieve ts,strobe-gpio from dts\n"); | ||
| 70 | return PTR_ERR(ts_nbus->strobe); | ||
| 71 | } | ||
| 72 | |||
| 73 | ts_nbus->ale = devm_gpiod_get(&pdev->dev, "ts,ale", GPIOD_OUT_HIGH); | ||
| 74 | if (IS_ERR(ts_nbus->ale)) { | ||
| 75 | dev_err(&pdev->dev, "failed to retrieve ts,ale-gpio from dts\n"); | ||
| 76 | return PTR_ERR(ts_nbus->ale); | ||
| 77 | } | ||
| 78 | |||
| 79 | ts_nbus->rdy = devm_gpiod_get(&pdev->dev, "ts,rdy", GPIOD_IN); | ||
| 80 | if (IS_ERR(ts_nbus->rdy)) { | ||
| 81 | dev_err(&pdev->dev, "failed to retrieve ts,rdy-gpio from dts\n"); | ||
| 82 | return PTR_ERR(ts_nbus->rdy); | ||
| 83 | } | ||
| 84 | |||
| 85 | return 0; | ||
| 86 | } | ||
| 87 | |||
| 88 | /* | ||
| 89 | * the data gpios are used for reading and writing values, their directions | ||
| 90 | * should be adjusted accordingly. | ||
| 91 | */ | ||
| 92 | static void ts_nbus_set_direction(struct ts_nbus *ts_nbus, int direction) | ||
| 93 | { | ||
| 94 | int i; | ||
| 95 | |||
| 96 | for (i = 0; i < 8; i++) { | ||
| 97 | if (direction == TS_NBUS_DIRECTION_IN) | ||
| 98 | gpiod_direction_input(ts_nbus->data->desc[i]); | ||
| 99 | else | ||
| 100 | /* when used as output the default state of the data | ||
| 101 | * lines are set to high */ | ||
| 102 | gpiod_direction_output(ts_nbus->data->desc[i], 1); | ||
| 103 | } | ||
| 104 | } | ||
| 105 | |||
| 106 | /* | ||
| 107 | * reset the bus in its initial state. | ||
| 108 | * The data, csn, strobe and ale lines must be zero'ed to let the FPGA knows a | ||
| 109 | * new transaction can be process. | ||
| 110 | */ | ||
| 111 | static void ts_nbus_reset_bus(struct ts_nbus *ts_nbus) | ||
| 112 | { | ||
| 113 | int i; | ||
| 114 | int values[8]; | ||
| 115 | |||
| 116 | for (i = 0; i < 8; i++) | ||
| 117 | values[i] = 0; | ||
| 118 | |||
| 119 | gpiod_set_array_value_cansleep(8, ts_nbus->data->desc, values); | ||
| 120 | gpiod_set_value_cansleep(ts_nbus->csn, 0); | ||
| 121 | gpiod_set_value_cansleep(ts_nbus->strobe, 0); | ||
| 122 | gpiod_set_value_cansleep(ts_nbus->ale, 0); | ||
| 123 | } | ||
| 124 | |||
| 125 | /* | ||
| 126 | * let the FPGA knows it can process. | ||
| 127 | */ | ||
| 128 | static void ts_nbus_start_transaction(struct ts_nbus *ts_nbus) | ||
| 129 | { | ||
| 130 | gpiod_set_value_cansleep(ts_nbus->strobe, 1); | ||
| 131 | } | ||
| 132 | |||
| 133 | /* | ||
| 134 | * read a byte value from the data gpios. | ||
| 135 | * return 0 on success or negative errno on failure. | ||
| 136 | */ | ||
| 137 | static int ts_nbus_read_byte(struct ts_nbus *ts_nbus, u8 *val) | ||
| 138 | { | ||
| 139 | struct gpio_descs *gpios = ts_nbus->data; | ||
| 140 | int ret, i; | ||
| 141 | |||
| 142 | *val = 0; | ||
| 143 | for (i = 0; i < 8; i++) { | ||
| 144 | ret = gpiod_get_value_cansleep(gpios->desc[i]); | ||
| 145 | if (ret < 0) | ||
| 146 | return ret; | ||
| 147 | if (ret) | ||
| 148 | *val |= BIT(i); | ||
| 149 | } | ||
| 150 | |||
| 151 | return 0; | ||
| 152 | } | ||
| 153 | |||
| 154 | /* | ||
| 155 | * set the data gpios accordingly to the byte value. | ||
| 156 | */ | ||
| 157 | static void ts_nbus_write_byte(struct ts_nbus *ts_nbus, u8 byte) | ||
| 158 | { | ||
| 159 | struct gpio_descs *gpios = ts_nbus->data; | ||
| 160 | int i; | ||
| 161 | int values[8]; | ||
| 162 | |||
| 163 | for (i = 0; i < 8; i++) | ||
| 164 | if (byte & BIT(i)) | ||
| 165 | values[i] = 1; | ||
| 166 | else | ||
| 167 | values[i] = 0; | ||
| 168 | |||
| 169 | gpiod_set_array_value_cansleep(8, gpios->desc, values); | ||
| 170 | } | ||
| 171 | |||
| 172 | /* | ||
| 173 | * reading the bus consists of resetting the bus, then notifying the FPGA to | ||
| 174 | * send the data in the data gpios and return the read value. | ||
| 175 | * return 0 on success or negative errno on failure. | ||
| 176 | */ | ||
| 177 | static int ts_nbus_read_bus(struct ts_nbus *ts_nbus, u8 *val) | ||
| 178 | { | ||
| 179 | ts_nbus_reset_bus(ts_nbus); | ||
| 180 | ts_nbus_start_transaction(ts_nbus); | ||
| 181 | |||
| 182 | return ts_nbus_read_byte(ts_nbus, val); | ||
| 183 | } | ||
| 184 | |||
| 185 | /* | ||
| 186 | * writing to the bus consists of resetting the bus, then define the type of | ||
| 187 | * command (address/value), write the data and notify the FPGA to retrieve the | ||
| 188 | * value in the data gpios. | ||
| 189 | */ | ||
| 190 | static void ts_nbus_write_bus(struct ts_nbus *ts_nbus, int cmd, u8 val) | ||
| 191 | { | ||
| 192 | ts_nbus_reset_bus(ts_nbus); | ||
| 193 | |||
| 194 | if (cmd == TS_NBUS_WRITE_ADR) | ||
| 195 | gpiod_set_value_cansleep(ts_nbus->ale, 1); | ||
| 196 | |||
| 197 | ts_nbus_write_byte(ts_nbus, val); | ||
| 198 | ts_nbus_start_transaction(ts_nbus); | ||
| 199 | } | ||
| 200 | |||
| 201 | /* | ||
| 202 | * read the value in the FPGA register at the given address. | ||
| 203 | * return 0 on success or negative errno on failure. | ||
| 204 | */ | ||
| 205 | int ts_nbus_read(struct ts_nbus *ts_nbus, u8 adr, u16 *val) | ||
| 206 | { | ||
| 207 | int ret, i; | ||
| 208 | u8 byte; | ||
| 209 | |||
| 210 | /* bus access must be atomic */ | ||
| 211 | mutex_lock(&ts_nbus->lock); | ||
| 212 | |||
| 213 | /* set the bus in read mode */ | ||
| 214 | gpiod_set_value_cansleep(ts_nbus->txrx, 0); | ||
| 215 | |||
| 216 | /* write address */ | ||
| 217 | ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr); | ||
| 218 | |||
| 219 | /* set the data gpios direction as input before reading */ | ||
| 220 | ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_IN); | ||
| 221 | |||
| 222 | /* reading value MSB first */ | ||
| 223 | do { | ||
| 224 | *val = 0; | ||
| 225 | byte = 0; | ||
| 226 | for (i = 1; i >= 0; i--) { | ||
| 227 | /* read a byte from the bus, leave on error */ | ||
| 228 | ret = ts_nbus_read_bus(ts_nbus, &byte); | ||
| 229 | if (ret < 0) | ||
| 230 | goto err; | ||
| 231 | |||
| 232 | /* append the byte read to the final value */ | ||
| 233 | *val |= byte << (i * 8); | ||
| 234 | } | ||
| 235 | gpiod_set_value_cansleep(ts_nbus->csn, 1); | ||
| 236 | ret = gpiod_get_value_cansleep(ts_nbus->rdy); | ||
| 237 | } while (ret); | ||
| 238 | |||
| 239 | err: | ||
| 240 | /* restore the data gpios direction as output after reading */ | ||
| 241 | ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_OUT); | ||
| 242 | |||
| 243 | mutex_unlock(&ts_nbus->lock); | ||
| 244 | |||
| 245 | return ret; | ||
| 246 | } | ||
| 247 | EXPORT_SYMBOL_GPL(ts_nbus_read); | ||
| 248 | |||
| 249 | /* | ||
| 250 | * write the desired value in the FPGA register at the given address. | ||
| 251 | */ | ||
| 252 | int ts_nbus_write(struct ts_nbus *ts_nbus, u8 adr, u16 val) | ||
| 253 | { | ||
| 254 | int i; | ||
| 255 | |||
| 256 | /* bus access must be atomic */ | ||
| 257 | mutex_lock(&ts_nbus->lock); | ||
| 258 | |||
| 259 | /* set the bus in write mode */ | ||
| 260 | gpiod_set_value_cansleep(ts_nbus->txrx, 1); | ||
| 261 | |||
| 262 | /* write address */ | ||
| 263 | ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr); | ||
| 264 | |||
| 265 | /* writing value MSB first */ | ||
| 266 | for (i = 1; i >= 0; i--) | ||
| 267 | ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_VAL, (u8)(val >> (i * 8))); | ||
| 268 | |||
| 269 | /* wait for completion */ | ||
| 270 | gpiod_set_value_cansleep(ts_nbus->csn, 1); | ||
| 271 | while (gpiod_get_value_cansleep(ts_nbus->rdy) != 0) { | ||
| 272 | gpiod_set_value_cansleep(ts_nbus->csn, 0); | ||
| 273 | gpiod_set_value_cansleep(ts_nbus->csn, 1); | ||
| 274 | } | ||
| 275 | |||
| 276 | mutex_unlock(&ts_nbus->lock); | ||
| 277 | |||
| 278 | return 0; | ||
| 279 | } | ||
| 280 | EXPORT_SYMBOL_GPL(ts_nbus_write); | ||
| 281 | |||
| 282 | static int ts_nbus_probe(struct platform_device *pdev) | ||
| 283 | { | ||
| 284 | struct pwm_device *pwm; | ||
| 285 | struct pwm_args pargs; | ||
| 286 | struct device *dev = &pdev->dev; | ||
| 287 | struct ts_nbus *ts_nbus; | ||
| 288 | int ret; | ||
| 289 | |||
| 290 | ts_nbus = devm_kzalloc(dev, sizeof(*ts_nbus), GFP_KERNEL); | ||
| 291 | if (!ts_nbus) | ||
| 292 | return -ENOMEM; | ||
| 293 | |||
| 294 | mutex_init(&ts_nbus->lock); | ||
| 295 | |||
| 296 | ret = ts_nbus_init_pdata(pdev, ts_nbus); | ||
| 297 | if (ret < 0) | ||
| 298 | return ret; | ||
| 299 | |||
| 300 | pwm = devm_pwm_get(dev, NULL); | ||
| 301 | if (IS_ERR(pwm)) { | ||
| 302 | ret = PTR_ERR(pwm); | ||
| 303 | if (ret != -EPROBE_DEFER) | ||
| 304 | dev_err(dev, "unable to request PWM\n"); | ||
| 305 | return ret; | ||
| 306 | } | ||
| 307 | |||
| 308 | pwm_get_args(pwm, &pargs); | ||
| 309 | if (!pargs.period) { | ||
| 310 | dev_err(&pdev->dev, "invalid PWM period\n"); | ||
| 311 | return -EINVAL; | ||
| 312 | } | ||
| 313 | |||
| 314 | /* | ||
| 315 | * FIXME: pwm_apply_args() should be removed when switching to | ||
| 316 | * the atomic PWM API. | ||
| 317 | */ | ||
| 318 | pwm_apply_args(pwm); | ||
| 319 | ret = pwm_config(pwm, pargs.period, pargs.period); | ||
| 320 | if (ret < 0) | ||
| 321 | return ret; | ||
| 322 | |||
| 323 | /* | ||
| 324 | * we can now start the FPGA and populate the peripherals. | ||
| 325 | */ | ||
| 326 | pwm_enable(pwm); | ||
| 327 | ts_nbus->pwm = pwm; | ||
| 328 | |||
| 329 | /* | ||
| 330 | * let the child nodes retrieve this instance of the ts-nbus. | ||
| 331 | */ | ||
| 332 | dev_set_drvdata(dev, ts_nbus); | ||
| 333 | |||
| 334 | ret = of_platform_populate(dev->of_node, NULL, NULL, dev); | ||
| 335 | if (ret < 0) | ||
| 336 | return ret; | ||
| 337 | |||
| 338 | dev_info(dev, "initialized\n"); | ||
| 339 | |||
| 340 | return 0; | ||
| 341 | } | ||
| 342 | |||
| 343 | static int ts_nbus_remove(struct platform_device *pdev) | ||
| 344 | { | ||
| 345 | struct ts_nbus *ts_nbus = dev_get_drvdata(&pdev->dev); | ||
| 346 | |||
| 347 | /* shutdown the FPGA */ | ||
| 348 | mutex_lock(&ts_nbus->lock); | ||
| 349 | pwm_disable(ts_nbus->pwm); | ||
| 350 | mutex_unlock(&ts_nbus->lock); | ||
| 351 | |||
| 352 | return 0; | ||
| 353 | } | ||
| 354 | |||
| 355 | static const struct of_device_id ts_nbus_of_match[] = { | ||
| 356 | { .compatible = "technologic,ts-nbus", }, | ||
| 357 | { }, | ||
| 358 | }; | ||
| 359 | MODULE_DEVICE_TABLE(of, ts_nbus_of_match); | ||
| 360 | |||
| 361 | static struct platform_driver ts_nbus_driver = { | ||
| 362 | .probe = ts_nbus_probe, | ||
| 363 | .remove = ts_nbus_remove, | ||
| 364 | .driver = { | ||
| 365 | .name = "ts_nbus", | ||
| 366 | .of_match_table = ts_nbus_of_match, | ||
| 367 | }, | ||
| 368 | }; | ||
| 369 | |||
| 370 | module_platform_driver(ts_nbus_driver); | ||
| 371 | |||
| 372 | MODULE_ALIAS("platform:ts_nbus"); | ||
| 373 | MODULE_AUTHOR("Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>"); | ||
| 374 | MODULE_DESCRIPTION("Technologic Systems NBUS"); | ||
| 375 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index 1d9187df167b..4c4bd85f707c 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
| @@ -30,6 +30,15 @@ config CLK_BCM_CYGNUS | |||
| 30 | help | 30 | help |
| 31 | Enable common clock framework support for the Broadcom Cygnus SoC | 31 | Enable common clock framework support for the Broadcom Cygnus SoC |
| 32 | 32 | ||
| 33 | config CLK_BCM_HR2 | ||
| 34 | bool "Broadcom Hurricane 2 clock support" | ||
| 35 | depends on ARCH_BCM_HR2 || COMPILE_TEST | ||
| 36 | select COMMON_CLK_IPROC | ||
| 37 | default ARCH_BCM_HR2 | ||
| 38 | help | ||
| 39 | Enable common clock framework support for the Broadcom Hurricane 2 | ||
| 40 | SoC | ||
| 41 | |||
| 33 | config CLK_BCM_NSP | 42 | config CLK_BCM_NSP |
| 34 | bool "Broadcom Northstar/Northstar Plus clock support" | 43 | bool "Broadcom Northstar/Northstar Plus clock support" |
| 35 | depends on ARCH_BCM_5301X || ARCH_BCM_NSP || COMPILE_TEST | 44 | depends on ARCH_BCM_5301X || ARCH_BCM_NSP || COMPILE_TEST |
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile
index e3f0cb0d90f3..002661d39128 100644
--- a/drivers/clk/bcm/Makefile
+++ b/drivers/clk/bcm/Makefile
| @@ -9,6 +9,7 @@ obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o | |||
| 9 | obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835-aux.o | 9 | obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835-aux.o |
| 10 | obj-$(CONFIG_ARCH_BCM_53573) += clk-bcm53573-ilp.o | 10 | obj-$(CONFIG_ARCH_BCM_53573) += clk-bcm53573-ilp.o |
| 11 | obj-$(CONFIG_CLK_BCM_CYGNUS) += clk-cygnus.o | 11 | obj-$(CONFIG_CLK_BCM_CYGNUS) += clk-cygnus.o |
| 12 | obj-$(CONFIG_CLK_BCM_HR2) += clk-hr2.o | ||
| 12 | obj-$(CONFIG_CLK_BCM_NSP) += clk-nsp.o | 13 | obj-$(CONFIG_CLK_BCM_NSP) += clk-nsp.o |
| 13 | obj-$(CONFIG_CLK_BCM_NS2) += clk-ns2.o | 14 | obj-$(CONFIG_CLK_BCM_NS2) += clk-ns2.o |
| 14 | obj-$(CONFIG_CLK_BCM_SR) += clk-sr.o | 15 | obj-$(CONFIG_CLK_BCM_SR) += clk-sr.o |
diff --git a/drivers/clk/bcm/clk-hr2.c b/drivers/clk/bcm/clk-hr2.c
new file mode 100644
index 000000000000..f7c5b7379475
--- /dev/null
+++ b/drivers/clk/bcm/clk-hr2.c
| @@ -0,0 +1,27 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2017 Broadcom | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of the GNU General Public License as | ||
| 6 | * published by the Free Software Foundation version 2. | ||
| 7 | * | ||
| 8 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 9 | * kind, whether express or implied; without even the implied warranty | ||
| 10 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/err.h> | ||
| 16 | #include <linux/clk-provider.h> | ||
| 17 | #include <linux/io.h> | ||
| 18 | #include <linux/of.h> | ||
| 19 | #include <linux/of_address.h> | ||
| 20 | |||
| 21 | #include "clk-iproc.h" | ||
| 22 | |||
| 23 | static void __init hr2_armpll_init(struct device_node *node) | ||
| 24 | { | ||
| 25 | iproc_armpll_setup(node); | ||
| 26 | } | ||
| 27 | CLK_OF_DECLARE(hr2_armpll, "brcm,hr2-armpll", hr2_armpll_init); | ||
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 6e4ed5a9c6fd..fa87a055905e 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
| @@ -215,6 +215,17 @@ config QCOM_SCM_64 | |||
| 215 | def_bool y | 215 | def_bool y |
| 216 | depends on QCOM_SCM && ARM64 | 216 | depends on QCOM_SCM && ARM64 |
| 217 | 217 | ||
| 218 | config QCOM_SCM_DOWNLOAD_MODE_DEFAULT | ||
| 219 | bool "Qualcomm download mode enabled by default" | ||
| 220 | depends on QCOM_SCM | ||
| 221 | help | ||
| 222 | A device with "download mode" enabled will upon an unexpected | ||
| 223 | warm-restart enter a special debug mode that allows the user to | ||
| 224 | "download" memory content over USB for offline postmortem analysis. | ||
| 225 | The feature can be enabled/disabled on the kernel command line. | ||
| 226 | |||
| 227 | Say Y here to enable "download mode" by default. | ||
| 228 | |||
| 218 | config TI_SCI_PROTOCOL | 229 | config TI_SCI_PROTOCOL |
| 219 | tristate "TI System Control Interface (TISCI) Message Protocol" | 230 | tristate "TI System Control Interface (TISCI) Message Protocol" |
| 220 | depends on TI_MESSAGE_MANAGER | 231 | depends on TI_MESSAGE_MANAGER |
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 7da9f1b83ebe..dfb373c8ba2a 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
| @@ -28,6 +28,7 @@ | |||
| 28 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 28 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 29 | 29 | ||
| 30 | #include <linux/bitmap.h> | 30 | #include <linux/bitmap.h> |
| 31 | #include <linux/bitfield.h> | ||
| 31 | #include <linux/device.h> | 32 | #include <linux/device.h> |
| 32 | #include <linux/err.h> | 33 | #include <linux/err.h> |
| 33 | #include <linux/export.h> | 34 | #include <linux/export.h> |
| @@ -72,21 +73,13 @@ | |||
| 72 | 73 | ||
| 73 | #define MAX_DVFS_DOMAINS 8 | 74 | #define MAX_DVFS_DOMAINS 8 |
| 74 | #define MAX_DVFS_OPPS 16 | 75 | #define MAX_DVFS_OPPS 16 |
| 75 | #define DVFS_LATENCY(hdr) (le32_to_cpu(hdr) >> 16) | 76 | |
| 76 | #define DVFS_OPP_COUNT(hdr) ((le32_to_cpu(hdr) >> 8) & 0xff) | 77 | #define PROTO_REV_MAJOR_MASK GENMASK(31, 16) |
| 77 | 78 | #define PROTO_REV_MINOR_MASK GENMASK(15, 0) | |
| 78 | #define PROTOCOL_REV_MINOR_BITS 16 | 79 | |
| 79 | #define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1) | 80 | #define FW_REV_MAJOR_MASK GENMASK(31, 24) |
| 80 | #define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS) | 81 | #define FW_REV_MINOR_MASK GENMASK(23, 16) |
| 81 | #define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK) | 82 | #define FW_REV_PATCH_MASK GENMASK(15, 0) |
| 82 | |||
| 83 | #define FW_REV_MAJOR_BITS 24 | ||
| 84 | #define FW_REV_MINOR_BITS 16 | ||
| 85 | #define FW_REV_PATCH_MASK ((1U << FW_REV_MINOR_BITS) - 1) | ||
| 86 | #define FW_REV_MINOR_MASK ((1U << FW_REV_MAJOR_BITS) - 1) | ||
| 87 | #define FW_REV_MAJOR(x) ((x) >> FW_REV_MAJOR_BITS) | ||
| 88 | #define FW_REV_MINOR(x) (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS) | ||
| 89 | #define FW_REV_PATCH(x) ((x) & FW_REV_PATCH_MASK) | ||
| 90 | 83 | ||
| 91 | #define MAX_RX_TIMEOUT (msecs_to_jiffies(30)) | 84 | #define MAX_RX_TIMEOUT (msecs_to_jiffies(30)) |
| 92 | 85 | ||
| @@ -311,10 +304,6 @@ struct clk_get_info { | |||
| 311 | u8 name[20]; | 304 | u8 name[20]; |
| 312 | } __packed; | 305 | } __packed; |
| 313 | 306 | ||
| 314 | struct clk_get_value { | ||
| 315 | __le32 rate; | ||
| 316 | } __packed; | ||
| 317 | |||
| 318 | struct clk_set_value { | 307 | struct clk_set_value { |
| 319 | __le16 id; | 308 | __le16 id; |
| 320 | __le16 reserved; | 309 | __le16 reserved; |
| @@ -328,7 +317,9 @@ struct legacy_clk_set_value { | |||
| 328 | } __packed; | 317 | } __packed; |
| 329 | 318 | ||
| 330 | struct dvfs_info { | 319 | struct dvfs_info { |
| 331 | __le32 header; | 320 | u8 domain; |
| 321 | u8 opp_count; | ||
| 322 | __le16 latency; | ||
| 332 | struct { | 323 | struct { |
| 333 | __le32 freq; | 324 | __le32 freq; |
| 334 | __le32 m_volt; | 325 | __le32 m_volt; |
| @@ -351,11 +342,6 @@ struct _scpi_sensor_info { | |||
| 351 | char name[20]; | 342 | char name[20]; |
| 352 | }; | 343 | }; |
| 353 | 344 | ||
| 354 | struct sensor_value { | ||
| 355 | __le32 lo_val; | ||
| 356 | __le32 hi_val; | ||
| 357 | } __packed; | ||
| 358 | |||
| 359 | struct dev_pstate_set { | 345 | struct dev_pstate_set { |
| 360 | __le16 dev_id; | 346 | __le16 dev_id; |
| 361 | u8 pstate; | 347 | u8 pstate; |
| @@ -419,19 +405,20 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd) | |||
| 419 | unsigned int len; | 405 | unsigned int len; |
| 420 | 406 | ||
| 421 | if (scpi_info->is_legacy) { | 407 | if (scpi_info->is_legacy) { |
| 422 | struct legacy_scpi_shared_mem *mem = ch->rx_payload; | 408 | struct legacy_scpi_shared_mem __iomem *mem = |
| 409 | ch->rx_payload; | ||
| 423 | 410 | ||
| 424 | /* RX Length is not replied by the legacy Firmware */ | 411 | /* RX Length is not replied by the legacy Firmware */ |
| 425 | len = match->rx_len; | 412 | len = match->rx_len; |
| 426 | 413 | ||
| 427 | match->status = le32_to_cpu(mem->status); | 414 | match->status = ioread32(&mem->status); |
| 428 | memcpy_fromio(match->rx_buf, mem->payload, len); | 415 | memcpy_fromio(match->rx_buf, mem->payload, len); |
| 429 | } else { | 416 | } else { |
| 430 | struct scpi_shared_mem *mem = ch->rx_payload; | 417 | struct scpi_shared_mem __iomem *mem = ch->rx_payload; |
| 431 | 418 | ||
| 432 | len = min(match->rx_len, CMD_SIZE(cmd)); | 419 | len = min(match->rx_len, CMD_SIZE(cmd)); |
| 433 | 420 | ||
| 434 | match->status = le32_to_cpu(mem->status); | 421 | match->status = ioread32(&mem->status); |
| 435 | memcpy_fromio(match->rx_buf, mem->payload, len); | 422 | memcpy_fromio(match->rx_buf, mem->payload, len); |
| 436 | } | 423 | } |
| 437 | 424 | ||
| @@ -445,11 +432,11 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd) | |||
| 445 | static void scpi_handle_remote_msg(struct mbox_client *c, void *msg) | 432 | static void scpi_handle_remote_msg(struct mbox_client *c, void *msg) |
| 446 | { | 433 | { |
| 447 | struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); | 434 | struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); |
| 448 | struct scpi_shared_mem *mem = ch->rx_payload; | 435 | struct scpi_shared_mem __iomem *mem = ch->rx_payload; |
| 449 | u32 cmd = 0; | 436 | u32 cmd = 0; |
| 450 | 437 | ||
| 451 | if (!scpi_info->is_legacy) | 438 | if (!scpi_info->is_legacy) |
| 452 | cmd = le32_to_cpu(mem->command); | 439 | cmd = ioread32(&mem->command); |
| 453 | 440 | ||
| 454 | scpi_process_cmd(ch, cmd); | 441 | scpi_process_cmd(ch, cmd); |
| 455 | } | 442 | } |
| @@ -459,7 +446,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg) | |||
| 459 | unsigned long flags; | 446 | unsigned long flags; |
| 460 | struct scpi_xfer *t = msg; | 447 | struct scpi_xfer *t = msg; |
| 461 | struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); | 448 | struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); |
| 462 | struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload; | 449 | struct scpi_shared_mem __iomem *mem = ch->tx_payload; |
| 463 | 450 | ||
| 464 | if (t->tx_buf) { | 451 | if (t->tx_buf) { |
| 465 | if (scpi_info->is_legacy) | 452 | if (scpi_info->is_legacy) |
| @@ -478,7 +465,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg) | |||
| 478 | } | 465 | } |
| 479 | 466 | ||
| 480 | if (!scpi_info->is_legacy) | 467 | if (!scpi_info->is_legacy) |
| 481 | mem->command = cpu_to_le32(t->cmd); | 468 | iowrite32(t->cmd, &mem->command); |
| 482 | } | 469 | } |
| 483 | 470 | ||
| 484 | static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch) | 471 | static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch) |
| @@ -583,13 +570,13 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max) | |||
| 583 | static unsigned long scpi_clk_get_val(u16 clk_id) | 570 | static unsigned long scpi_clk_get_val(u16 clk_id) |
| 584 | { | 571 | { |
| 585 | int ret; | 572 | int ret; |
| 586 | struct clk_get_value clk; | 573 | __le32 rate; |
| 587 | __le16 le_clk_id = cpu_to_le16(clk_id); | 574 | __le16 le_clk_id = cpu_to_le16(clk_id); |
| 588 | 575 | ||
| 589 | ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id, | 576 | ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id, |
| 590 | sizeof(le_clk_id), &clk, sizeof(clk)); | 577 | sizeof(le_clk_id), &rate, sizeof(rate)); |
| 591 | 578 | ||
| 592 | return ret ? ret : le32_to_cpu(clk.rate); | 579 | return ret ? ret : le32_to_cpu(rate); |
| 593 | } | 580 | } |
| 594 | 581 | ||
| 595 | static int scpi_clk_set_val(u16 clk_id, unsigned long rate) | 582 | static int scpi_clk_set_val(u16 clk_id, unsigned long rate) |
| @@ -645,34 +632,34 @@ static int opp_cmp_func(const void *opp1, const void *opp2) | |||
| 645 | 632 | ||
| 646 | static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) | 633 | static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) |
| 647 | { | 634 | { |
| 635 | if (domain >= MAX_DVFS_DOMAINS) | ||
| 636 | return ERR_PTR(-EINVAL); | ||
| 637 | |||
| 638 | return scpi_info->dvfs[domain] ?: ERR_PTR(-EINVAL); | ||
| 639 | } | ||
| 640 | |||
| 641 | static int scpi_dvfs_populate_info(struct device *dev, u8 domain) | ||
| 642 | { | ||
| 648 | struct scpi_dvfs_info *info; | 643 | struct scpi_dvfs_info *info; |
| 649 | struct scpi_opp *opp; | 644 | struct scpi_opp *opp; |
| 650 | struct dvfs_info buf; | 645 | struct dvfs_info buf; |
| 651 | int ret, i; | 646 | int ret, i; |
| 652 | 647 | ||
| 653 | if (domain >= MAX_DVFS_DOMAINS) | ||
| 654 | return ERR_PTR(-EINVAL); | ||
| 655 | |||
| 656 | if (scpi_info->dvfs[domain]) /* data already populated */ | ||
| 657 | return scpi_info->dvfs[domain]; | ||
| 658 | |||
| 659 | ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain), | 648 | ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain), |
| 660 | &buf, sizeof(buf)); | 649 | &buf, sizeof(buf)); |
| 661 | if (ret) | 650 | if (ret) |
| 662 | return ERR_PTR(ret); | 651 | return ret; |
| 663 | 652 | ||
| 664 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 653 | info = devm_kmalloc(dev, sizeof(*info), GFP_KERNEL); |
| 665 | if (!info) | 654 | if (!info) |
| 666 | return ERR_PTR(-ENOMEM); | 655 | return -ENOMEM; |
| 667 | 656 | ||
| 668 | info->count = DVFS_OPP_COUNT(buf.header); | 657 | info->count = buf.opp_count; |
| 669 | info->latency = DVFS_LATENCY(buf.header) * 1000; /* uS to nS */ | 658 | info->latency = le16_to_cpu(buf.latency) * 1000; /* uS to nS */ |
| 670 | 659 | ||
| 671 | info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL); | 660 | info->opps = devm_kcalloc(dev, info->count, sizeof(*opp), GFP_KERNEL); |
| 672 | if (!info->opps) { | 661 | if (!info->opps) |
| 673 | kfree(info); | 662 | return -ENOMEM; |
| 674 | return ERR_PTR(-ENOMEM); | ||
| 675 | } | ||
| 676 | 663 | ||
| 677 | for (i = 0, opp = info->opps; i < info->count; i++, opp++) { | 664 | for (i = 0, opp = info->opps; i < info->count; i++, opp++) { |
| 678 | opp->freq = le32_to_cpu(buf.opps[i].freq); | 665 | opp->freq = le32_to_cpu(buf.opps[i].freq); |
| @@ -682,7 +669,15 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) | |||
| 682 | sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL); | 669 | sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL); |
| 683 | 670 | ||
| 684 | scpi_info->dvfs[domain] = info; | 671 | scpi_info->dvfs[domain] = info; |
| 685 | return info; | 672 | return 0; |
| 673 | } | ||
| 674 | |||
| 675 | static void scpi_dvfs_populate(struct device *dev) | ||
| 676 | { | ||
| 677 | int domain; | ||
| 678 | |||
| 679 | for (domain = 0; domain < MAX_DVFS_DOMAINS; domain++) | ||
| 680 | scpi_dvfs_populate_info(dev, domain); | ||
| 686 | } | 681 | } |
| 687 | 682 | ||
| 688 | static int scpi_dev_domain_id(struct device *dev) | 683 | static int scpi_dev_domain_id(struct device *dev) |
| @@ -713,9 +708,6 @@ static int scpi_dvfs_get_transition_latency(struct device *dev) | |||
| 713 | if (IS_ERR(info)) | 708 | if (IS_ERR(info)) |
| 714 | return PTR_ERR(info); | 709 | return PTR_ERR(info); |
| 715 | 710 | ||
| 716 | if (!info->latency) | ||
| 717 | return 0; | ||
| 718 | |||
| 719 | return info->latency; | 711 | return info->latency; |
| 720 | } | 712 | } |
| 721 | 713 | ||
| @@ -776,20 +768,19 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info) | |||
| 776 | static int scpi_sensor_get_value(u16 sensor, u64 *val) | 768 | static int scpi_sensor_get_value(u16 sensor, u64 *val) |
| 777 | { | 769 | { |
| 778 | __le16 id = cpu_to_le16(sensor); | 770 | __le16 id = cpu_to_le16(sensor); |
| 779 | struct sensor_value buf; | 771 | __le64 value; |
| 780 | int ret; | 772 | int ret; |
| 781 | 773 | ||
| 782 | ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id), | 774 | ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id), |
| 783 | &buf, sizeof(buf)); | 775 | &value, sizeof(value)); |
| 784 | if (ret) | 776 | if (ret) |
| 785 | return ret; | 777 | return ret; |
| 786 | 778 | ||
| 787 | if (scpi_info->is_legacy) | 779 | if (scpi_info->is_legacy) |
| 788 | /* only 32-bits supported, hi_val can be junk */ | 780 | /* only 32-bits supported, upper 32 bits can be junk */ |
| 789 | *val = le32_to_cpu(buf.lo_val); | 781 | *val = le32_to_cpup((__le32 *)&value); |
| 790 | else | 782 | else |
| 791 | *val = (u64)le32_to_cpu(buf.hi_val) << 32 | | 783 | *val = le64_to_cpu(value); |
| 792 | le32_to_cpu(buf.lo_val); | ||
| 793 | 784 | ||
| 794 | return 0; | 785 | return 0; |
| 795 | } | 786 | } |
| @@ -862,23 +853,19 @@ static int scpi_init_versions(struct scpi_drvinfo *info) | |||
| 862 | static ssize_t protocol_version_show(struct device *dev, | 853 | static ssize_t protocol_version_show(struct device *dev, |
| 863 | struct device_attribute *attr, char *buf) | 854 | struct device_attribute *attr, char *buf) |
| 864 | { | 855 | { |
| 865 | struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev); | 856 | return sprintf(buf, "%lu.%lu\n", |
| 866 | 857 | FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version), | |
| 867 | return sprintf(buf, "%d.%d\n", | 858 | FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version)); |
| 868 | PROTOCOL_REV_MAJOR(scpi_info->protocol_version), | ||
| 869 | PROTOCOL_REV_MINOR(scpi_info->protocol_version)); | ||
| 870 | } | 859 | } |
| 871 | static DEVICE_ATTR_RO(protocol_version); | 860 | static DEVICE_ATTR_RO(protocol_version); |
| 872 | 861 | ||
| 873 | static ssize_t firmware_version_show(struct device *dev, | 862 | static ssize_t firmware_version_show(struct device *dev, |
| 874 | struct device_attribute *attr, char *buf) | 863 | struct device_attribute *attr, char *buf) |
| 875 | { | 864 | { |
| 876 | struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev); | 865 | return sprintf(buf, "%lu.%lu.%lu\n", |
| 877 | 866 | FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version), | |
| 878 | return sprintf(buf, "%d.%d.%d\n", | 867 | FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version), |
| 879 | FW_REV_MAJOR(scpi_info->firmware_version), | 868 | FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version)); |
| 880 | FW_REV_MINOR(scpi_info->firmware_version), | ||
| 881 | FW_REV_PATCH(scpi_info->firmware_version)); | ||
| 882 | } | 869 | } |
| 883 | static DEVICE_ATTR_RO(firmware_version); | 870 | static DEVICE_ATTR_RO(firmware_version); |
| 884 | 871 | ||
| @@ -889,39 +876,13 @@ static struct attribute *versions_attrs[] = { | |||
| 889 | }; | 876 | }; |
| 890 | ATTRIBUTE_GROUPS(versions); | 877 | ATTRIBUTE_GROUPS(versions); |
| 891 | 878 | ||
| 892 | static void | 879 | static void scpi_free_channels(void *data) |
| 893 | scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count) | ||
| 894 | { | 880 | { |
| 881 | struct scpi_drvinfo *info = data; | ||
| 895 | int i; | 882 | int i; |
| 896 | 883 | ||
| 897 | for (i = 0; i < count && pchan->chan; i++, pchan++) { | 884 | for (i = 0; i < info->num_chans; i++) |
| 898 | mbox_free_channel(pchan->chan); | 885 | mbox_free_channel(info->channels[i].chan); |
| 899 | devm_kfree(dev, pchan->xfers); | ||
| 900 | devm_iounmap(dev, pchan->rx_payload); | ||
| 901 | } | ||
| 902 | } | ||
| 903 | |||
| 904 | static int scpi_remove(struct platform_device *pdev) | ||
| 905 | { | ||
| 906 | int i; | ||
| 907 | struct device *dev = &pdev->dev; | ||
| 908 | struct scpi_drvinfo *info = platform_get_drvdata(pdev); | ||
| 909 | |||
| 910 | scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */ | ||
| 911 | |||
| 912 | of_platform_depopulate(dev); | ||
| 913 | sysfs_remove_groups(&dev->kobj, versions_groups); | ||
| 914 | scpi_free_channels(dev, info->channels, info->num_chans); | ||
| 915 | platform_set_drvdata(pdev, NULL); | ||
| 916 | |||
| 917 | for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) { | ||
| 918 | kfree(info->dvfs[i]->opps); | ||
| 919 | kfree(info->dvfs[i]); | ||
| 920 | } | ||
| 921 | devm_kfree(dev, info->channels); | ||
| 922 | devm_kfree(dev, info); | ||
| 923 | |||
| 924 | return 0; | ||
| 925 | } | 886 | } |
| 926 | 887 | ||
| 927 | #define MAX_SCPI_XFERS 10 | 888 | #define MAX_SCPI_XFERS 10 |
| @@ -952,7 +913,6 @@ static int scpi_probe(struct platform_device *pdev) | |||
| 952 | { | 913 | { |
| 953 | int count, idx, ret; | 914 | int count, idx, ret; |
| 954 | struct resource res; | 915 | struct resource res; |
| 955 | struct scpi_chan *scpi_chan; | ||
| 956 | struct device *dev = &pdev->dev; | 916 | struct device *dev = &pdev->dev; |
| 957 | struct device_node *np = dev->of_node; | 917 | struct device_node *np = dev->of_node; |
| 958 | 918 | ||
| @@ -969,13 +929,19 @@ static int scpi_probe(struct platform_device *pdev) | |||
| 969 | return -ENODEV; | 929 | return -ENODEV; |
| 970 | } | 930 | } |
| 971 | 931 | ||
| 972 | scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL); | 932 | scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan), |
| 973 | if (!scpi_chan) | 933 | GFP_KERNEL); |
| 934 | if (!scpi_info->channels) | ||
| 974 | return -ENOMEM; | 935 | return -ENOMEM; |
| 975 | 936 | ||
| 976 | for (idx = 0; idx < count; idx++) { | 937 | ret = devm_add_action(dev, scpi_free_channels, scpi_info); |
| 938 | if (ret) | ||
| 939 | return ret; | ||
| 940 | |||
| 941 | for (; scpi_info->num_chans < count; scpi_info->num_chans++) { | ||
| 977 | resource_size_t size; | 942 | resource_size_t size; |
| 978 | struct scpi_chan *pchan = scpi_chan + idx; | 943 | int idx = scpi_info->num_chans; |
| 944 | struct scpi_chan *pchan = scpi_info->channels + idx; | ||
| 979 | struct mbox_client *cl = &pchan->cl; | 945 | struct mbox_client *cl = &pchan->cl; |
| 980 | struct device_node *shmem = of_parse_phandle(np, "shmem", idx); | 946 | struct device_node *shmem = of_parse_phandle(np, "shmem", idx); |
| 981 | 947 | ||
| @@ -983,15 +949,14 @@ static int scpi_probe(struct platform_device *pdev) | |||
| 983 | of_node_put(shmem); | 949 | of_node_put(shmem); |
| 984 | if (ret) { | 950 | if (ret) { |
| 985 | dev_err(dev, "failed to get SCPI payload mem resource\n"); | 951 | dev_err(dev, "failed to get SCPI payload mem resource\n"); |
| 986 | goto err; | 952 | return ret; |
| 987 | } | 953 | } |
| 988 | 954 | ||
| 989 | size = resource_size(&res); | 955 | size = resource_size(&res); |
| 990 | pchan->rx_payload = devm_ioremap(dev, res.start, size); | 956 | pchan->rx_payload = devm_ioremap(dev, res.start, size); |
| 991 | if (!pchan->rx_payload) { | 957 | if (!pchan->rx_payload) { |
| 992 | dev_err(dev, "failed to ioremap SCPI payload\n"); | 958 | dev_err(dev, "failed to ioremap SCPI payload\n"); |
| 993 | ret = -EADDRNOTAVAIL; | 959 | return -EADDRNOTAVAIL; |
| 994 | goto err; | ||
| 995 | } | 960 | } |
| 996 | pchan->tx_payload = pchan->rx_payload + (size >> 1); | 961 | pchan->tx_payload = pchan->rx_payload + (size >> 1); |
| 997 | 962 | ||
| @@ -1017,17 +982,11 @@ static int scpi_probe(struct platform_device *pdev) | |||
| 1017 | dev_err(dev, "failed to get channel%d err %d\n", | 982 | dev_err(dev, "failed to get channel%d err %d\n", |
| 1018 | idx, ret); | 983 | idx, ret); |
| 1019 | } | 984 | } |
| 1020 | err: | ||
| 1021 | scpi_free_channels(dev, scpi_chan, idx); | ||
| 1022 | scpi_info = NULL; | ||
| 1023 | return ret; | 985 | return ret; |
| 1024 | } | 986 | } |
| 1025 | 987 | ||
| 1026 | scpi_info->channels = scpi_chan; | ||
| 1027 | scpi_info->num_chans = count; | ||
| 1028 | scpi_info->commands = scpi_std_commands; | 988 | scpi_info->commands = scpi_std_commands; |
| 1029 | 989 | scpi_info->scpi_ops = &scpi_ops; | |
| 1030 | platform_set_drvdata(pdev, scpi_info); | ||
| 1031 | 990 | ||
| 1032 | if (scpi_info->is_legacy) { | 991 | if (scpi_info->is_legacy) { |
| 1033 | /* Replace with legacy variants */ | 992 | /* Replace with legacy variants */ |
| @@ -1043,23 +1002,23 @@ err: | |||
| 1043 | ret = scpi_init_versions(scpi_info); | 1002 | ret = scpi_init_versions(scpi_info); |
| 1044 | if (ret) { | 1003 | if (ret) { |
| 1045 | dev_err(dev, "incorrect or no SCP firmware found\n"); | 1004 | dev_err(dev, "incorrect or no SCP firmware found\n"); |
| 1046 | scpi_remove(pdev); | ||
| 1047 | return ret; | 1005 | return ret; |
| 1048 | } | 1006 | } |
| 1049 | 1007 | ||
| 1050 | _dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n", | 1008 | scpi_dvfs_populate(dev); |
| 1051 | PROTOCOL_REV_MAJOR(scpi_info->protocol_version), | 1009 | |
| 1052 | PROTOCOL_REV_MINOR(scpi_info->protocol_version), | 1010 | _dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n", |
| 1053 | FW_REV_MAJOR(scpi_info->firmware_version), | 1011 | FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version), |
| 1054 | FW_REV_MINOR(scpi_info->firmware_version), | 1012 | FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version), |
| 1055 | FW_REV_PATCH(scpi_info->firmware_version)); | 1013 | FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version), |
| 1056 | scpi_info->scpi_ops = &scpi_ops; | 1014 | FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version), |
| 1015 | FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version)); | ||
| 1057 | 1016 | ||
| 1058 | ret = sysfs_create_groups(&dev->kobj, versions_groups); | 1017 | ret = devm_device_add_groups(dev, versions_groups); |
| 1059 | if (ret) | 1018 | if (ret) |
| 1060 | dev_err(dev, "unable to create sysfs version group\n"); | 1019 | dev_err(dev, "unable to create sysfs version group\n"); |
| 1061 | 1020 | ||
| 1062 | return of_platform_populate(dev->of_node, NULL, NULL, dev); | 1021 | return devm_of_platform_populate(dev); |
| 1063 | } | 1022 | } |
| 1064 | 1023 | ||
| 1065 | static const struct of_device_id scpi_of_match[] = { | 1024 | static const struct of_device_id scpi_of_match[] = { |
| @@ -1076,7 +1035,6 @@ static struct platform_driver scpi_driver = { | |||
| 1076 | .of_match_table = scpi_of_match, | 1035 | .of_match_table = scpi_of_match, |
| 1077 | }, | 1036 | }, |
| 1078 | .probe = scpi_probe, | 1037 | .probe = scpi_probe, |
| 1079 | .remove = scpi_remove, | ||
| 1080 | }; | 1038 | }; |
| 1081 | module_platform_driver(scpi_driver); | 1039 | module_platform_driver(scpi_driver); |
| 1082 | 1040 | ||
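
The SCPI rework above drops the hand-rolled shift/mask macros in favour of GENMASK() and FIELD_GET() from <linux/bitfield.h>. A minimal sketch of that pattern, reusing the mask definitions the driver now carries (the example value in the comment is arbitrary):

```c
/*
 * Minimal illustration of the GENMASK()/FIELD_GET() pattern the SCPI
 * driver now uses for its firmware-version word; the value used in the
 * comment below is arbitrary.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

#define FW_REV_MAJOR_MASK	GENMASK(31, 24)
#define FW_REV_MINOR_MASK	GENMASK(23, 16)
#define FW_REV_PATCH_MASK	GENMASK(15, 0)

static void scpi_print_fw_rev_example(u32 fw_rev)
{
	/* FIELD_GET() masks and shifts in one step, e.g. 0x01020003 -> 1.2.3 */
	pr_info("SCP firmware %lu.%lu.%lu\n",
		FIELD_GET(FW_REV_MAJOR_MASK, fw_rev),
		FIELD_GET(FW_REV_MINOR_MASK, fw_rev),
		FIELD_GET(FW_REV_PATCH_MASK, fw_rev));
}
```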
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index 6523ce962865..56cf825ed779 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
| @@ -340,6 +340,7 @@ static int suspend_test_thread(void *arg) | |||
| 340 | * later. | 340 | * later. |
| 341 | */ | 341 | */ |
| 342 | del_timer(&wakeup_timer); | 342 | del_timer(&wakeup_timer); |
| 343 | destroy_timer_on_stack(&wakeup_timer); | ||
| 343 | 344 | ||
| 344 | if (atomic_dec_return_relaxed(&nb_active_threads) == 0) | 345 | if (atomic_dec_return_relaxed(&nb_active_threads) == 0) |
| 345 | complete(&suspend_threads_done); | 346 | complete(&suspend_threads_done); |
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 93e3b96b6dfa..68b2033bc30e 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
| @@ -561,6 +561,12 @@ int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) | |||
| 561 | return ret ? : le32_to_cpu(out); | 561 | return ret ? : le32_to_cpu(out); |
| 562 | } | 562 | } |
| 563 | 563 | ||
| 564 | int __qcom_scm_set_dload_mode(struct device *dev, bool enable) | ||
| 565 | { | ||
| 566 | return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE, | ||
| 567 | enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0); | ||
| 568 | } | ||
| 569 | |||
| 564 | int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) | 570 | int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) |
| 565 | { | 571 | { |
| 566 | struct { | 572 | struct { |
| @@ -596,3 +602,21 @@ int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size, | |||
| 596 | { | 602 | { |
| 597 | return -ENODEV; | 603 | return -ENODEV; |
| 598 | } | 604 | } |
| 605 | |||
| 606 | int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, | ||
| 607 | unsigned int *val) | ||
| 608 | { | ||
| 609 | int ret; | ||
| 610 | |||
| 611 | ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr); | ||
| 612 | if (ret >= 0) | ||
| 613 | *val = ret; | ||
| 614 | |||
| 615 | return ret < 0 ? ret : 0; | ||
| 616 | } | ||
| 617 | |||
| 618 | int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val) | ||
| 619 | { | ||
| 620 | return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE, | ||
| 621 | addr, val); | ||
| 622 | } | ||
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 6e6d561708e2..3fea6f563ca9 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
| @@ -439,3 +439,47 @@ int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size, | |||
| 439 | 439 | ||
| 440 | return ret; | 440 | return ret; |
| 441 | } | 441 | } |
| 442 | |||
| 443 | int __qcom_scm_set_dload_mode(struct device *dev, bool enable) | ||
| 444 | { | ||
| 445 | struct qcom_scm_desc desc = {0}; | ||
| 446 | struct arm_smccc_res res; | ||
| 447 | |||
| 448 | desc.args[0] = QCOM_SCM_SET_DLOAD_MODE; | ||
| 449 | desc.args[1] = enable ? QCOM_SCM_SET_DLOAD_MODE : 0; | ||
| 450 | desc.arginfo = QCOM_SCM_ARGS(2); | ||
| 451 | |||
| 452 | return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE, | ||
| 453 | &desc, &res); | ||
| 454 | } | ||
| 455 | |||
| 456 | int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, | ||
| 457 | unsigned int *val) | ||
| 458 | { | ||
| 459 | struct qcom_scm_desc desc = {0}; | ||
| 460 | struct arm_smccc_res res; | ||
| 461 | int ret; | ||
| 462 | |||
| 463 | desc.args[0] = addr; | ||
| 464 | desc.arginfo = QCOM_SCM_ARGS(1); | ||
| 465 | |||
| 466 | ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, | ||
| 467 | &desc, &res); | ||
| 468 | if (ret >= 0) | ||
| 469 | *val = res.a1; | ||
| 470 | |||
| 471 | return ret < 0 ? ret : 0; | ||
| 472 | } | ||
| 473 | |||
| 474 | int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val) | ||
| 475 | { | ||
| 476 | struct qcom_scm_desc desc = {0}; | ||
| 477 | struct arm_smccc_res res; | ||
| 478 | |||
| 479 | desc.args[0] = addr; | ||
| 480 | desc.args[1] = val; | ||
| 481 | desc.arginfo = QCOM_SCM_ARGS(2); | ||
| 482 | |||
| 483 | return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE, | ||
| 484 | &desc, &res); | ||
| 485 | } | ||
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index bb16510d75ba..9064e559a01f 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
| @@ -19,15 +19,20 @@ | |||
| 19 | #include <linux/cpumask.h> | 19 | #include <linux/cpumask.h> |
| 20 | #include <linux/export.h> | 20 | #include <linux/export.h> |
| 21 | #include <linux/dma-mapping.h> | 21 | #include <linux/dma-mapping.h> |
| 22 | #include <linux/module.h> | ||
| 22 | #include <linux/types.h> | 23 | #include <linux/types.h> |
| 23 | #include <linux/qcom_scm.h> | 24 | #include <linux/qcom_scm.h> |
| 24 | #include <linux/of.h> | 25 | #include <linux/of.h> |
| 26 | #include <linux/of_address.h> | ||
| 25 | #include <linux/of_platform.h> | 27 | #include <linux/of_platform.h> |
| 26 | #include <linux/clk.h> | 28 | #include <linux/clk.h> |
| 27 | #include <linux/reset-controller.h> | 29 | #include <linux/reset-controller.h> |
| 28 | 30 | ||
| 29 | #include "qcom_scm.h" | 31 | #include "qcom_scm.h" |
| 30 | 32 | ||
| 33 | static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT); | ||
| 34 | module_param(download_mode, bool, 0); | ||
| 35 | |||
| 31 | #define SCM_HAS_CORE_CLK BIT(0) | 36 | #define SCM_HAS_CORE_CLK BIT(0) |
| 32 | #define SCM_HAS_IFACE_CLK BIT(1) | 37 | #define SCM_HAS_IFACE_CLK BIT(1) |
| 33 | #define SCM_HAS_BUS_CLK BIT(2) | 38 | #define SCM_HAS_BUS_CLK BIT(2) |
| @@ -38,6 +43,8 @@ struct qcom_scm { | |||
| 38 | struct clk *iface_clk; | 43 | struct clk *iface_clk; |
| 39 | struct clk *bus_clk; | 44 | struct clk *bus_clk; |
| 40 | struct reset_controller_dev reset; | 45 | struct reset_controller_dev reset; |
| 46 | |||
| 47 | u64 dload_mode_addr; | ||
| 41 | }; | 48 | }; |
| 42 | 49 | ||
| 43 | static struct qcom_scm *__scm; | 50 | static struct qcom_scm *__scm; |
| @@ -333,6 +340,66 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) | |||
| 333 | } | 340 | } |
| 334 | EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); | 341 | EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); |
| 335 | 342 | ||
| 343 | int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) | ||
| 344 | { | ||
| 345 | return __qcom_scm_io_readl(__scm->dev, addr, val); | ||
| 346 | } | ||
| 347 | EXPORT_SYMBOL(qcom_scm_io_readl); | ||
| 348 | |||
| 349 | int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) | ||
| 350 | { | ||
| 351 | return __qcom_scm_io_writel(__scm->dev, addr, val); | ||
| 352 | } | ||
| 353 | EXPORT_SYMBOL(qcom_scm_io_writel); | ||
| 354 | |||
| 355 | static void qcom_scm_set_download_mode(bool enable) | ||
| 356 | { | ||
| 357 | bool avail; | ||
| 358 | int ret = 0; | ||
| 359 | |||
| 360 | avail = __qcom_scm_is_call_available(__scm->dev, | ||
| 361 | QCOM_SCM_SVC_BOOT, | ||
| 362 | QCOM_SCM_SET_DLOAD_MODE); | ||
| 363 | if (avail) { | ||
| 364 | ret = __qcom_scm_set_dload_mode(__scm->dev, enable); | ||
| 365 | } else if (__scm->dload_mode_addr) { | ||
| 366 | ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr, | ||
| 367 | enable ? QCOM_SCM_SET_DLOAD_MODE : 0); | ||
| 368 | } else { | ||
| 369 | dev_err(__scm->dev, | ||
| 370 | "No available mechanism for setting download mode\n"); | ||
| 371 | } | ||
| 372 | |||
| 373 | if (ret) | ||
| 374 | dev_err(__scm->dev, "failed to set download mode: %d\n", ret); | ||
| 375 | } | ||
| 376 | |||
| 377 | static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) | ||
| 378 | { | ||
| 379 | struct device_node *tcsr; | ||
| 380 | struct device_node *np = dev->of_node; | ||
| 381 | struct resource res; | ||
| 382 | u32 offset; | ||
| 383 | int ret; | ||
| 384 | |||
| 385 | tcsr = of_parse_phandle(np, "qcom,dload-mode", 0); | ||
| 386 | if (!tcsr) | ||
| 387 | return 0; | ||
| 388 | |||
| 389 | ret = of_address_to_resource(tcsr, 0, &res); | ||
| 390 | of_node_put(tcsr); | ||
| 391 | if (ret) | ||
| 392 | return ret; | ||
| 393 | |||
| 394 | ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset); | ||
| 395 | if (ret < 0) | ||
| 396 | return ret; | ||
| 397 | |||
| 398 | *addr = res.start + offset; | ||
| 399 | |||
| 400 | return 0; | ||
| 401 | } | ||
| 402 | |||
| 336 | /** | 403 | /** |
| 337 | * qcom_scm_is_available() - Checks if SCM is available | 404 | * qcom_scm_is_available() - Checks if SCM is available |
| 338 | */ | 405 | */ |
| @@ -358,6 +425,10 @@ static int qcom_scm_probe(struct platform_device *pdev) | |||
| 358 | if (!scm) | 425 | if (!scm) |
| 359 | return -ENOMEM; | 426 | return -ENOMEM; |
| 360 | 427 | ||
| 428 | ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr); | ||
| 429 | if (ret < 0) | ||
| 430 | return ret; | ||
| 431 | |||
| 361 | clks = (unsigned long)of_device_get_match_data(&pdev->dev); | 432 | clks = (unsigned long)of_device_get_match_data(&pdev->dev); |
| 362 | if (clks & SCM_HAS_CORE_CLK) { | 433 | if (clks & SCM_HAS_CORE_CLK) { |
| 363 | scm->core_clk = devm_clk_get(&pdev->dev, "core"); | 434 | scm->core_clk = devm_clk_get(&pdev->dev, "core"); |
| @@ -406,9 +477,24 @@ static int qcom_scm_probe(struct platform_device *pdev) | |||
| 406 | 477 | ||
| 407 | __qcom_scm_init(); | 478 | __qcom_scm_init(); |
| 408 | 479 | ||
| 480 | /* | ||
| 481 | * If requested enable "download mode", from this point on warmboot | ||
| 482 | * will cause the the boot stages to enter download mode, unless | ||
| 483 | * disabled below by a clean shutdown/reboot. | ||
| 484 | */ | ||
| 485 | if (download_mode) | ||
| 486 | qcom_scm_set_download_mode(true); | ||
| 487 | |||
| 409 | return 0; | 488 | return 0; |
| 410 | } | 489 | } |
| 411 | 490 | ||
| 491 | static void qcom_scm_shutdown(struct platform_device *pdev) | ||
| 492 | { | ||
| 493 | /* Clean shutdown, disable download mode to allow normal restart */ | ||
| 494 | if (download_mode) | ||
| 495 | qcom_scm_set_download_mode(false); | ||
| 496 | } | ||
| 497 | |||
| 412 | static const struct of_device_id qcom_scm_dt_match[] = { | 498 | static const struct of_device_id qcom_scm_dt_match[] = { |
| 413 | { .compatible = "qcom,scm-apq8064", | 499 | { .compatible = "qcom,scm-apq8064", |
| 414 | /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */ | 500 | /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */ |
| @@ -436,6 +522,7 @@ static struct platform_driver qcom_scm_driver = { | |||
| 436 | .of_match_table = qcom_scm_dt_match, | 522 | .of_match_table = qcom_scm_dt_match, |
| 437 | }, | 523 | }, |
| 438 | .probe = qcom_scm_probe, | 524 | .probe = qcom_scm_probe, |
| 525 | .shutdown = qcom_scm_shutdown, | ||
| 439 | }; | 526 | }; |
| 440 | 527 | ||
| 441 | static int __init qcom_scm_init(void) | 528 | static int __init qcom_scm_init(void) |
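
The qcom_scm change above exports qcom_scm_io_readl()/qcom_scm_io_writel() for other drivers and adds a download_mode module parameter, which a clean shutdown clears again. A hedged sketch of a caller follows; the register address is a placeholder, and the prototypes are assumed to live in <linux/qcom_scm.h>, whose change is outside this drivers-only diff. Download mode itself would be requested at boot with something like "qcom_scm.download_mode=y" on the kernel command line, or by default via CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT.

```c
/*
 * Hypothetical caller of the newly exported SCM I/O helpers. The TCSR
 * address is a placeholder; only the function signatures come from the
 * diff above, and the declarations are assumed to be in <linux/qcom_scm.h>.
 */
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/qcom_scm.h>

static int example_poke_secure_reg(void)
{
	phys_addr_t addr = 0x01fd3000;	/* placeholder secure register */
	unsigned int val;
	int ret;

	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	ret = qcom_scm_io_readl(addr, &val);
	if (ret)
		return ret;

	/* set one bit and write the word back through the secure monitor */
	return qcom_scm_io_writel(addr, val | BIT(0));
}
```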
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 9bea691f30fb..83f171c23943 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
| @@ -14,9 +14,11 @@ | |||
| 14 | 14 | ||
| 15 | #define QCOM_SCM_SVC_BOOT 0x1 | 15 | #define QCOM_SCM_SVC_BOOT 0x1 |
| 16 | #define QCOM_SCM_BOOT_ADDR 0x1 | 16 | #define QCOM_SCM_BOOT_ADDR 0x1 |
| 17 | #define QCOM_SCM_SET_DLOAD_MODE 0x10 | ||
| 17 | #define QCOM_SCM_BOOT_ADDR_MC 0x11 | 18 | #define QCOM_SCM_BOOT_ADDR_MC 0x11 |
| 18 | #define QCOM_SCM_SET_REMOTE_STATE 0xa | 19 | #define QCOM_SCM_SET_REMOTE_STATE 0xa |
| 19 | extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id); | 20 | extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id); |
| 21 | extern int __qcom_scm_set_dload_mode(struct device *dev, bool enable); | ||
| 20 | 22 | ||
| 21 | #define QCOM_SCM_FLAG_HLOS 0x01 | 23 | #define QCOM_SCM_FLAG_HLOS 0x01 |
| 22 | #define QCOM_SCM_FLAG_COLDBOOT_MC 0x02 | 24 | #define QCOM_SCM_FLAG_COLDBOOT_MC 0x02 |
| @@ -30,6 +32,12 @@ extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); | |||
| 30 | #define QCOM_SCM_CMD_CORE_HOTPLUGGED 0x10 | 32 | #define QCOM_SCM_CMD_CORE_HOTPLUGGED 0x10 |
| 31 | extern void __qcom_scm_cpu_power_down(u32 flags); | 33 | extern void __qcom_scm_cpu_power_down(u32 flags); |
| 32 | 34 | ||
| 35 | #define QCOM_SCM_SVC_IO 0x5 | ||
| 36 | #define QCOM_SCM_IO_READ 0x1 | ||
| 37 | #define QCOM_SCM_IO_WRITE 0x2 | ||
| 38 | extern int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, unsigned int *val); | ||
| 39 | extern int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val); | ||
| 40 | |||
| 33 | #define QCOM_SCM_SVC_INFO 0x6 | 41 | #define QCOM_SCM_SVC_INFO 0x6 |
| 34 | #define QCOM_IS_CALL_AVAIL_CMD 0x1 | 42 | #define QCOM_IS_CALL_AVAIL_CMD 0x1 |
| 35 | extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, | 43 | extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, |
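For context, these new header declarations back the download-mode handling in the probe/shutdown hunks above. Below is a rough, illustrative sketch of how a qcom_scm_set_download_mode() helper could tie them together; it is not necessarily the exact body in qcom_scm.c, and __scm stands for the driver's global state (holding the device pointer and the dload_mode_addr located during probe).

	static void qcom_scm_set_download_mode(bool enable)
	{
		int ret = 0;

		/* Prefer the dedicated SCM boot-service call when the firmware provides it. */
		if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						 QCOM_SCM_SET_DLOAD_MODE))
			ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
		else if (__scm->dload_mode_addr)
			/* Fall back to poking the cookie located by
			 * qcom_scm_find_dload_address() at probe time. */
			ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
						   enable ? QCOM_SCM_SET_DLOAD_MODE : 0);

		if (ret)
			dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
	}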
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile index e34a2f79e1ad..1b826dcca719 100644 --- a/drivers/firmware/tegra/Makefile +++ b/drivers/firmware/tegra/Makefile | |||
| @@ -1,2 +1,4 @@ | |||
| 1 | obj-$(CONFIG_TEGRA_BPMP) += bpmp.o | 1 | tegra-bpmp-y = bpmp.o |
| 2 | tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o | ||
| 3 | obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o | ||
| 2 | obj-$(CONFIG_TEGRA_IVC) += ivc.o | 4 | obj-$(CONFIG_TEGRA_IVC) += ivc.o |
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c new file mode 100644 index 000000000000..f7f6a0a5cb07 --- /dev/null +++ b/drivers/firmware/tegra/bpmp-debugfs.c | |||
| @@ -0,0 +1,444 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | #include <linux/debugfs.h> | ||
| 15 | #include <linux/dma-mapping.h> | ||
| 16 | #include <linux/uaccess.h> | ||
| 17 | |||
| 18 | #include <soc/tegra/bpmp.h> | ||
| 19 | #include <soc/tegra/bpmp-abi.h> | ||
| 20 | |||
| 21 | struct seqbuf { | ||
| 22 | char *buf; | ||
| 23 | size_t pos; | ||
| 24 | size_t size; | ||
| 25 | }; | ||
| 26 | |||
| 27 | static void seqbuf_init(struct seqbuf *seqbuf, void *buf, size_t size) | ||
| 28 | { | ||
| 29 | seqbuf->buf = buf; | ||
| 30 | seqbuf->size = size; | ||
| 31 | seqbuf->pos = 0; | ||
| 32 | } | ||
| 33 | |||
| 34 | static size_t seqbuf_avail(struct seqbuf *seqbuf) | ||
| 35 | { | ||
| 36 | return seqbuf->pos < seqbuf->size ? seqbuf->size - seqbuf->pos : 0; | ||
| 37 | } | ||
| 38 | |||
| 39 | static size_t seqbuf_status(struct seqbuf *seqbuf) | ||
| 40 | { | ||
| 41 | return seqbuf->pos <= seqbuf->size ? 0 : -EOVERFLOW; | ||
| 42 | } | ||
| 43 | |||
| 44 | static int seqbuf_eof(struct seqbuf *seqbuf) | ||
| 45 | { | ||
| 46 | return seqbuf->pos >= seqbuf->size; | ||
| 47 | } | ||
| 48 | |||
| 49 | static int seqbuf_read(struct seqbuf *seqbuf, void *buf, size_t nbyte) | ||
| 50 | { | ||
| 51 | nbyte = min(nbyte, seqbuf_avail(seqbuf)); | ||
| 52 | memcpy(buf, seqbuf->buf + seqbuf->pos, nbyte); | ||
| 53 | seqbuf->pos += nbyte; | ||
| 54 | return seqbuf_status(seqbuf); | ||
| 55 | } | ||
| 56 | |||
| 57 | static int seqbuf_read_u32(struct seqbuf *seqbuf, uint32_t *v) | ||
| 58 | { | ||
| 59 | int err; | ||
| 60 | |||
| 61 | err = seqbuf_read(seqbuf, v, 4); | ||
| 62 | *v = le32_to_cpu(*v); | ||
| 63 | return err; | ||
| 64 | } | ||
| 65 | |||
| 66 | static int seqbuf_read_str(struct seqbuf *seqbuf, const char **str) | ||
| 67 | { | ||
| 68 | *str = seqbuf->buf + seqbuf->pos; | ||
| 69 | seqbuf->pos += strnlen(*str, seqbuf_avail(seqbuf)); | ||
| 70 | seqbuf->pos++; | ||
| 71 | return seqbuf_status(seqbuf); | ||
| 72 | } | ||
| 73 | |||
| 74 | static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset) | ||
| 75 | { | ||
| 76 | seqbuf->pos += offset; | ||
| 77 | } | ||
| 78 | |||
| 79 | /* map filename in Linux debugfs to corresponding entry in BPMP */ | ||
| 80 | static const char *get_filename(struct tegra_bpmp *bpmp, | ||
| 81 | const struct file *file, char *buf, int size) | ||
| 82 | { | ||
| 83 | char root_path_buf[512]; | ||
| 84 | const char *root_path; | ||
| 85 | const char *filename; | ||
| 86 | size_t root_len; | ||
| 87 | |||
| 88 | root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf, | ||
| 89 | sizeof(root_path_buf)); | ||
| 90 | if (IS_ERR(root_path)) | ||
| 91 | return NULL; | ||
| 92 | |||
| 93 | root_len = strlen(root_path); | ||
| 94 | |||
| 95 | filename = dentry_path(file->f_path.dentry, buf, size); | ||
| 96 | if (IS_ERR(filename)) | ||
| 97 | return NULL; | ||
| 98 | |||
| 99 | if (strlen(filename) < root_len || | ||
| 100 | strncmp(filename, root_path, root_len)) | ||
| 101 | return NULL; | ||
| 102 | |||
| 103 | filename += root_len; | ||
| 104 | |||
| 105 | return filename; | ||
| 106 | } | ||
| 107 | |||
| 108 | static int mrq_debugfs_read(struct tegra_bpmp *bpmp, | ||
| 109 | dma_addr_t name, size_t sz_name, | ||
| 110 | dma_addr_t data, size_t sz_data, | ||
| 111 | size_t *nbytes) | ||
| 112 | { | ||
| 113 | struct mrq_debugfs_request req = { | ||
| 114 | .cmd = cpu_to_le32(CMD_DEBUGFS_READ), | ||
| 115 | .fop = { | ||
| 116 | .fnameaddr = cpu_to_le32((uint32_t)name), | ||
| 117 | .fnamelen = cpu_to_le32((uint32_t)sz_name), | ||
| 118 | .dataaddr = cpu_to_le32((uint32_t)data), | ||
| 119 | .datalen = cpu_to_le32((uint32_t)sz_data), | ||
| 120 | }, | ||
| 121 | }; | ||
| 122 | struct mrq_debugfs_response resp; | ||
| 123 | struct tegra_bpmp_message msg = { | ||
| 124 | .mrq = MRQ_DEBUGFS, | ||
| 125 | .tx = { | ||
| 126 | .data = &req, | ||
| 127 | .size = sizeof(req), | ||
| 128 | }, | ||
| 129 | .rx = { | ||
| 130 | .data = &resp, | ||
| 131 | .size = sizeof(resp), | ||
| 132 | }, | ||
| 133 | }; | ||
| 134 | int err; | ||
| 135 | |||
| 136 | err = tegra_bpmp_transfer(bpmp, &msg); | ||
| 137 | if (err < 0) | ||
| 138 | return err; | ||
| 139 | |||
| 140 | *nbytes = (size_t)resp.fop.nbytes; | ||
| 141 | |||
| 142 | return 0; | ||
| 143 | } | ||
| 144 | |||
| 145 | static int mrq_debugfs_write(struct tegra_bpmp *bpmp, | ||
| 146 | dma_addr_t name, size_t sz_name, | ||
| 147 | dma_addr_t data, size_t sz_data) | ||
| 148 | { | ||
| 149 | const struct mrq_debugfs_request req = { | ||
| 150 | .cmd = cpu_to_le32(CMD_DEBUGFS_WRITE), | ||
| 151 | .fop = { | ||
| 152 | .fnameaddr = cpu_to_le32((uint32_t)name), | ||
| 153 | .fnamelen = cpu_to_le32((uint32_t)sz_name), | ||
| 154 | .dataaddr = cpu_to_le32((uint32_t)data), | ||
| 155 | .datalen = cpu_to_le32((uint32_t)sz_data), | ||
| 156 | }, | ||
| 157 | }; | ||
| 158 | struct tegra_bpmp_message msg = { | ||
| 159 | .mrq = MRQ_DEBUGFS, | ||
| 160 | .tx = { | ||
| 161 | .data = &req, | ||
| 162 | .size = sizeof(req), | ||
| 163 | }, | ||
| 164 | }; | ||
| 165 | |||
| 166 | return tegra_bpmp_transfer(bpmp, &msg); | ||
| 167 | } | ||
| 168 | |||
| 169 | static int mrq_debugfs_dumpdir(struct tegra_bpmp *bpmp, dma_addr_t addr, | ||
| 170 | size_t size, size_t *nbytes) | ||
| 171 | { | ||
| 172 | const struct mrq_debugfs_request req = { | ||
| 173 | .cmd = cpu_to_le32(CMD_DEBUGFS_DUMPDIR), | ||
| 174 | .dumpdir = { | ||
| 175 | .dataaddr = cpu_to_le32((uint32_t)addr), | ||
| 176 | .datalen = cpu_to_le32((uint32_t)size), | ||
| 177 | }, | ||
| 178 | }; | ||
| 179 | struct mrq_debugfs_response resp; | ||
| 180 | struct tegra_bpmp_message msg = { | ||
| 181 | .mrq = MRQ_DEBUGFS, | ||
| 182 | .tx = { | ||
| 183 | .data = &req, | ||
| 184 | .size = sizeof(req), | ||
| 185 | }, | ||
| 186 | .rx = { | ||
| 187 | .data = &resp, | ||
| 188 | .size = sizeof(resp), | ||
| 189 | }, | ||
| 190 | }; | ||
| 191 | int err; | ||
| 192 | |||
| 193 | err = tegra_bpmp_transfer(bpmp, &msg); | ||
| 194 | if (err < 0) | ||
| 195 | return err; | ||
| 196 | |||
| 197 | *nbytes = (size_t)resp.dumpdir.nbytes; | ||
| 198 | |||
| 199 | return 0; | ||
| 200 | } | ||
| 201 | |||
| 202 | static int debugfs_show(struct seq_file *m, void *p) | ||
| 203 | { | ||
| 204 | struct file *file = m->private; | ||
| 205 | struct inode *inode = file_inode(file); | ||
| 206 | struct tegra_bpmp *bpmp = inode->i_private; | ||
| 207 | const size_t datasize = m->size; | ||
| 208 | const size_t namesize = SZ_256; | ||
| 209 | void *datavirt, *namevirt; | ||
| 210 | dma_addr_t dataphys, namephys; | ||
| 211 | char buf[256]; | ||
| 212 | const char *filename; | ||
| 213 | size_t len, nbytes; | ||
| 214 | int ret; | ||
| 215 | |||
| 216 | filename = get_filename(bpmp, file, buf, sizeof(buf)); | ||
| 217 | if (!filename) | ||
| 218 | return -ENOENT; | ||
| 219 | |||
| 220 | namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys, | ||
| 221 | GFP_KERNEL | GFP_DMA32); | ||
| 222 | if (!namevirt) | ||
| 223 | return -ENOMEM; | ||
| 224 | |||
| 225 | datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys, | ||
| 226 | GFP_KERNEL | GFP_DMA32); | ||
| 227 | if (!datavirt) { | ||
| 228 | ret = -ENOMEM; | ||
| 229 | goto free_namebuf; | ||
| 230 | } | ||
| 231 | |||
| 232 | len = strlen(filename); | ||
| 233 | strncpy(namevirt, filename, namesize); | ||
| 234 | |||
| 235 | ret = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize, | ||
| 236 | &nbytes); | ||
| 237 | |||
| 238 | if (!ret) | ||
| 239 | seq_write(m, datavirt, nbytes); | ||
| 240 | |||
| 241 | dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys); | ||
| 242 | free_namebuf: | ||
| 243 | dma_free_coherent(bpmp->dev, namesize, namevirt, namephys); | ||
| 244 | |||
| 245 | return ret; | ||
| 246 | } | ||
| 247 | |||
| 248 | static int debugfs_open(struct inode *inode, struct file *file) | ||
| 249 | { | ||
| 250 | return single_open_size(file, debugfs_show, file, SZ_128K); | ||
| 251 | } | ||
| 252 | |||
| 253 | static ssize_t debugfs_store(struct file *file, const char __user *buf, | ||
| 254 | size_t count, loff_t *f_pos) | ||
| 255 | { | ||
| 256 | struct inode *inode = file_inode(file); | ||
| 257 | struct tegra_bpmp *bpmp = inode->i_private; | ||
| 258 | const size_t datasize = count; | ||
| 259 | const size_t namesize = SZ_256; | ||
| 260 | void *datavirt, *namevirt; | ||
| 261 | dma_addr_t dataphys, namephys; | ||
| 262 | char fnamebuf[256]; | ||
| 263 | const char *filename; | ||
| 264 | size_t len; | ||
| 265 | int ret; | ||
| 266 | |||
| 267 | filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf)); | ||
| 268 | if (!filename) | ||
| 269 | return -ENOENT; | ||
| 270 | |||
| 271 | namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys, | ||
| 272 | GFP_KERNEL | GFP_DMA32); | ||
| 273 | if (!namevirt) | ||
| 274 | return -ENOMEM; | ||
| 275 | |||
| 276 | datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys, | ||
| 277 | GFP_KERNEL | GFP_DMA32); | ||
| 278 | if (!datavirt) { | ||
| 279 | ret = -ENOMEM; | ||
| 280 | goto free_namebuf; | ||
| 281 | } | ||
| 282 | |||
| 283 | len = strlen(filename); | ||
| 284 | strncpy(namevirt, filename, namesize); | ||
| 285 | |||
| 286 | if (copy_from_user(datavirt, buf, count)) { | ||
| 287 | ret = -EFAULT; | ||
| 288 | goto free_databuf; | ||
| 289 | } | ||
| 290 | |||
| 291 | ret = mrq_debugfs_write(bpmp, namephys, len, dataphys, | ||
| 292 | count); | ||
| 293 | |||
| 294 | free_databuf: | ||
| 295 | dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys); | ||
| 296 | free_namebuf: | ||
| 297 | dma_free_coherent(bpmp->dev, namesize, namevirt, namephys); | ||
| 298 | |||
| 299 | return ret ?: count; | ||
| 300 | } | ||
| 301 | |||
| 302 | static const struct file_operations debugfs_fops = { | ||
| 303 | .open = debugfs_open, | ||
| 304 | .read = seq_read, | ||
| 305 | .llseek = seq_lseek, | ||
| 306 | .write = debugfs_store, | ||
| 307 | .release = single_release, | ||
| 308 | }; | ||
| 309 | |||
| 310 | static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf, | ||
| 311 | struct dentry *parent, uint32_t depth) | ||
| 312 | { | ||
| 313 | int err; | ||
| 314 | uint32_t d, t; | ||
| 315 | const char *name; | ||
| 316 | struct dentry *dentry; | ||
| 317 | |||
| 318 | while (!seqbuf_eof(seqbuf)) { | ||
| 319 | err = seqbuf_read_u32(seqbuf, &d); | ||
| 320 | if (err < 0) | ||
| 321 | return err; | ||
| 322 | |||
| 323 | if (d < depth) { | ||
| 324 | seqbuf_seek(seqbuf, -4); | ||
| 325 | /* go up a level */ | ||
| 326 | return 0; | ||
| 327 | } else if (d != depth) { | ||
| 328 | /* malformed data received from BPMP */ | ||
| 329 | return -EIO; | ||
| 330 | } | ||
| 331 | |||
| 332 | err = seqbuf_read_u32(seqbuf, &t); | ||
| 333 | if (err < 0) | ||
| 334 | return err; | ||
| 335 | err = seqbuf_read_str(seqbuf, &name); | ||
| 336 | if (err < 0) | ||
| 337 | return err; | ||
| 338 | |||
| 339 | if (t & DEBUGFS_S_ISDIR) { | ||
| 340 | dentry = debugfs_create_dir(name, parent); | ||
| 341 | if (!dentry) | ||
| 342 | return -ENOMEM; | ||
| 343 | err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1); | ||
| 344 | if (err < 0) | ||
| 345 | return err; | ||
| 346 | } else { | ||
| 347 | umode_t mode; | ||
| 348 | |||
| 349 | mode = t & DEBUGFS_S_IRUSR ? S_IRUSR : 0; | ||
| 350 | mode |= t & DEBUGFS_S_IWUSR ? S_IWUSR : 0; | ||
| 351 | dentry = debugfs_create_file(name, mode, | ||
| 352 | parent, bpmp, | ||
| 353 | &debugfs_fops); | ||
| 354 | if (!dentry) | ||
| 355 | return -ENOMEM; | ||
| 356 | } | ||
| 357 | } | ||
| 358 | |||
| 359 | return 0; | ||
| 360 | } | ||
| 361 | |||
| 362 | static int create_debugfs_mirror(struct tegra_bpmp *bpmp, void *buf, | ||
| 363 | size_t bufsize, struct dentry *root) | ||
| 364 | { | ||
| 365 | struct seqbuf seqbuf; | ||
| 366 | int err; | ||
| 367 | |||
| 368 | bpmp->debugfs_mirror = debugfs_create_dir("debug", root); | ||
| 369 | if (!bpmp->debugfs_mirror) | ||
| 370 | return -ENOMEM; | ||
| 371 | |||
| 372 | seqbuf_init(&seqbuf, buf, bufsize); | ||
| 373 | err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0); | ||
| 374 | if (err < 0) { | ||
| 375 | debugfs_remove_recursive(bpmp->debugfs_mirror); | ||
| 376 | bpmp->debugfs_mirror = NULL; | ||
| 377 | } | ||
| 378 | |||
| 379 | return err; | ||
| 380 | } | ||
| 381 | |||
| 382 | static int mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq) | ||
| 383 | { | ||
| 384 | struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) }; | ||
| 385 | struct mrq_query_abi_response resp; | ||
| 386 | struct tegra_bpmp_message msg = { | ||
| 387 | .mrq = MRQ_QUERY_ABI, | ||
| 388 | .tx = { | ||
| 389 | .data = &req, | ||
| 390 | .size = sizeof(req), | ||
| 391 | }, | ||
| 392 | .rx = { | ||
| 393 | .data = &resp, | ||
| 394 | .size = sizeof(resp), | ||
| 395 | }, | ||
| 396 | }; | ||
| 397 | int ret; | ||
| 398 | |||
| 399 | ret = tegra_bpmp_transfer(bpmp, &msg); | ||
| 400 | if (ret < 0) { | ||
| 401 | /* something went wrong; assume not supported */ | ||
| 402 | dev_warn(bpmp->dev, "tegra_bpmp_transfer failed (%d)\n", ret); | ||
| 403 | return 0; | ||
| 404 | } | ||
| 405 | |||
| 406 | return resp.status ? 0 : 1; | ||
| 407 | } | ||
| 408 | |||
| 409 | int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp) | ||
| 410 | { | ||
| 411 | dma_addr_t phys; | ||
| 412 | void *virt; | ||
| 413 | const size_t sz = SZ_256K; | ||
| 414 | size_t nbytes; | ||
| 415 | int ret; | ||
| 416 | struct dentry *root; | ||
| 417 | |||
| 418 | if (!mrq_is_supported(bpmp, MRQ_DEBUGFS)) | ||
| 419 | return 0; | ||
| 420 | |||
| 421 | root = debugfs_create_dir("bpmp", NULL); | ||
| 422 | if (!root) | ||
| 423 | return -ENOMEM; | ||
| 424 | |||
| 425 | virt = dma_alloc_coherent(bpmp->dev, sz, &phys, | ||
| 426 | GFP_KERNEL | GFP_DMA32); | ||
| 427 | if (!virt) { | ||
| 428 | ret = -ENOMEM; | ||
| 429 | goto out; | ||
| 430 | } | ||
| 431 | |||
| 432 | ret = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes); | ||
| 433 | if (ret < 0) | ||
| 434 | goto free; | ||
| 435 | |||
| 436 | ret = create_debugfs_mirror(bpmp, virt, nbytes, root); | ||
| 437 | free: | ||
| 438 | dma_free_coherent(bpmp->dev, sz, virt, phys); | ||
| 439 | out: | ||
| 440 | if (ret < 0) | ||
| 441 | debugfs_remove(root); | ||
| 442 | |||
| 443 | return ret; | ||
| 444 | } | ||
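For reference, the directory listing that bpmp_populate_dir() walks above is a flat stream of records, each consisting of a 32-bit nesting depth, a 32-bit attribute word (DEBUGFS_S_ISDIR / DEBUGFS_S_IRUSR / DEBUGFS_S_IWUSR from the BPMP ABI header) and a NUL-terminated name. A minimal encoder sketch for that framing, inferred from the parser; the helper name is invented and a little-endian host is assumed for brevity:

	#include <stdint.h>
	#include <string.h>

	/* Append one (depth, flags, name) record to the dump buffer;
	 * returns the number of bytes written. */
	static size_t bpmp_dump_emit(uint8_t *p, uint32_t depth, uint32_t flags,
				     const char *name)
	{
		size_t len = strlen(name) + 1;		/* keep the NUL terminator */

		memcpy(p, &depth, sizeof(depth));	/* nesting level, LE on the wire */
		memcpy(p + 4, &flags, sizeof(flags));	/* DEBUGFS_S_* attribute bits */
		memcpy(p + 8, name, len);		/* entry name */

		return 2 * sizeof(uint32_t) + len;
	}

A "clk" directory at depth 0 followed by a "rate" entry at depth 1, for example, would end up mirrored as <debugfs>/bpmp/debug/clk/rate (the entry names here are made up).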
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index 73ca55b7b7ec..a7f461f2e650 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c | |||
| @@ -194,16 +194,24 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel) | |||
| 194 | } | 194 | } |
| 195 | 195 | ||
| 196 | static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, | 196 | static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, |
| 197 | void *data, size_t size) | 197 | void *data, size_t size, int *ret) |
| 198 | { | 198 | { |
| 199 | int err; | ||
| 200 | |||
| 199 | if (data && size > 0) | 201 | if (data && size > 0) |
| 200 | memcpy(data, channel->ib->data, size); | 202 | memcpy(data, channel->ib->data, size); |
| 201 | 203 | ||
| 202 | return tegra_ivc_read_advance(channel->ivc); | 204 | err = tegra_ivc_read_advance(channel->ivc); |
| 205 | if (err < 0) | ||
| 206 | return err; | ||
| 207 | |||
| 208 | *ret = channel->ib->code; | ||
| 209 | |||
| 210 | return 0; | ||
| 203 | } | 211 | } |
| 204 | 212 | ||
| 205 | static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, | 213 | static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, |
| 206 | void *data, size_t size) | 214 | void *data, size_t size, int *ret) |
| 207 | { | 215 | { |
| 208 | struct tegra_bpmp *bpmp = channel->bpmp; | 216 | struct tegra_bpmp *bpmp = channel->bpmp; |
| 209 | unsigned long flags; | 217 | unsigned long flags; |
| @@ -217,7 +225,7 @@ static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, | |||
| 217 | } | 225 | } |
| 218 | 226 | ||
| 219 | spin_lock_irqsave(&bpmp->lock, flags); | 227 | spin_lock_irqsave(&bpmp->lock, flags); |
| 220 | err = __tegra_bpmp_channel_read(channel, data, size); | 228 | err = __tegra_bpmp_channel_read(channel, data, size, ret); |
| 221 | clear_bit(index, bpmp->threaded.allocated); | 229 | clear_bit(index, bpmp->threaded.allocated); |
| 222 | spin_unlock_irqrestore(&bpmp->lock, flags); | 230 | spin_unlock_irqrestore(&bpmp->lock, flags); |
| 223 | 231 | ||
| @@ -337,7 +345,8 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp, | |||
| 337 | if (err < 0) | 345 | if (err < 0) |
| 338 | return err; | 346 | return err; |
| 339 | 347 | ||
| 340 | return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size); | 348 | return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size, |
| 349 | &msg->rx.ret); | ||
| 341 | } | 350 | } |
| 342 | EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic); | 351 | EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic); |
| 343 | 352 | ||
| @@ -371,7 +380,8 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp, | |||
| 371 | if (err == 0) | 380 | if (err == 0) |
| 372 | return -ETIMEDOUT; | 381 | return -ETIMEDOUT; |
| 373 | 382 | ||
| 374 | return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size); | 383 | return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size, |
| 384 | &msg->rx.ret); | ||
| 375 | } | 385 | } |
| 376 | EXPORT_SYMBOL_GPL(tegra_bpmp_transfer); | 386 | EXPORT_SYMBOL_GPL(tegra_bpmp_transfer); |
| 377 | 387 | ||
| @@ -387,8 +397,8 @@ static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp, | |||
| 387 | return NULL; | 397 | return NULL; |
| 388 | } | 398 | } |
| 389 | 399 | ||
| 390 | static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, | 400 | void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code, |
| 391 | int code, const void *data, size_t size) | 401 | const void *data, size_t size) |
| 392 | { | 402 | { |
| 393 | unsigned long flags = channel->ib->flags; | 403 | unsigned long flags = channel->ib->flags; |
| 394 | struct tegra_bpmp *bpmp = channel->bpmp; | 404 | struct tegra_bpmp *bpmp = channel->bpmp; |
| @@ -426,6 +436,7 @@ static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, | |||
| 426 | mbox_client_txdone(bpmp->mbox.channel, 0); | 436 | mbox_client_txdone(bpmp->mbox.channel, 0); |
| 427 | } | 437 | } |
| 428 | } | 438 | } |
| 439 | EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return); | ||
| 429 | 440 | ||
| 430 | static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp, | 441 | static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp, |
| 431 | unsigned int mrq, | 442 | unsigned int mrq, |
| @@ -824,6 +835,10 @@ static int tegra_bpmp_probe(struct platform_device *pdev) | |||
| 824 | if (err < 0) | 835 | if (err < 0) |
| 825 | goto free_mrq; | 836 | goto free_mrq; |
| 826 | 837 | ||
| 838 | err = tegra_bpmp_init_debugfs(bpmp); | ||
| 839 | if (err < 0) | ||
| 840 | dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err); | ||
| 841 | |||
| 827 | return 0; | 842 | return 0; |
| 828 | 843 | ||
| 829 | free_mrq: | 844 | free_mrq: |
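With this change tegra_bpmp_transfer() reports transport errors through its return value and the firmware's own MRQ result through msg.rx.ret. A hedged sketch of a caller that checks both; MRQ_PING and its request/response structures are taken from the BPMP ABI as an assumed example:

	struct mrq_ping_request req = { .challenge = 1 };
	struct mrq_ping_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_PING,
		.tx = { .data = &req, .size = sizeof(req) },
		.rx = { .data = &resp, .size = sizeof(resp) },
	};
	int err;

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err < 0)
		return err;		/* IVC/mailbox transport failure */
	if (msg.rx.ret < 0)
		return -EINVAL;		/* firmware rejected the request */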
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 00cfed3c3e1a..23b12d99ddfe 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c | |||
| @@ -439,7 +439,7 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info, | |||
| 439 | /* And we wait for the response. */ | 439 | /* And we wait for the response. */ |
| 440 | timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); | 440 | timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); |
| 441 | if (!wait_for_completion_timeout(&xfer->done, timeout)) { | 441 | if (!wait_for_completion_timeout(&xfer->done, timeout)) { |
| 442 | dev_err(dev, "Mbox timedout in resp(caller: %pF)\n", | 442 | dev_err(dev, "Mbox timedout in resp(caller: %pS)\n", |
| 443 | (void *)_RET_IP_); | 443 | (void *)_RET_IP_); |
| 444 | ret = -ETIMEDOUT; | 444 | ret = -ETIMEDOUT; |
| 445 | } | 445 | } |
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile index 470b93e3858d..929a601d4cd1 100644 --- a/drivers/memory/Makefile +++ b/drivers/memory/Makefile | |||
| @@ -9,6 +9,7 @@ endif | |||
| 9 | obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o | 9 | obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o |
| 10 | obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o | 10 | obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o |
| 11 | obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o | 11 | obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o |
| 12 | obj-$(CONFIG_ARCH_BRCMSTB) += brcmstb_dpfe.o | ||
| 12 | obj-$(CONFIG_TI_AEMIF) += ti-aemif.o | 13 | obj-$(CONFIG_TI_AEMIF) += ti-aemif.o |
| 13 | obj-$(CONFIG_TI_EMIF) += emif.o | 14 | obj-$(CONFIG_TI_EMIF) += emif.o |
| 14 | obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o | 15 | obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o |
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c new file mode 100644 index 000000000000..0a7bdbed3a6f --- /dev/null +++ b/drivers/memory/brcmstb_dpfe.c | |||
| @@ -0,0 +1,722 @@ | |||
| 1 | /* | ||
| 2 | * DDR PHY Front End (DPFE) driver for Broadcom set top box SoCs | ||
| 3 | * | ||
| 4 | * Copyright (c) 2017 Broadcom | ||
| 5 | * | ||
| 6 | * Released under the GPLv2 only. | ||
| 7 | * SPDX-License-Identifier: GPL-2.0 | ||
| 8 | */ | ||
| 9 | |||
| 10 | /* | ||
| 11 | * This driver provides access to the DPFE interface of Broadcom STB SoCs. | ||
| 12 | * The firmware running on the DCPU inside the DDR PHY can provide current | ||
| 13 | * information about the system's RAM, for instance the DRAM refresh rate. | ||
| 14 | * This can be used as an indirect indicator for the DRAM's temperature. | ||
| 15 | * Slower refresh rate means cooler RAM, higher refresh rate means hotter | ||
| 16 | * RAM. | ||
| 17 | * | ||
| 18 | * Throughout the driver, we use readl_relaxed() and writel_relaxed(), which | ||
| 19 | * already contain the appropriate le32_to_cpu()/cpu_to_le32() calls. | ||
| 20 | * | ||
| 21 | * Note regarding the loading of the firmware image: we use be32_to_cpu() | ||
| 22 | * and le32_to_cpu(), so we can support the following four cases: | ||
| 23 | * - LE kernel + LE firmware image (the most common case) | ||
| 24 | * - LE kernel + BE firmware image | ||
| 25 | * - BE kernel + LE firmware image | ||
| 26 | * - BE kernel + BE firmware image | ||
| 27 | * | ||
| 28 | * The DCPU always runs in big endian mode. The firmware image, however, can | ||
| 29 | * be in either format. Also, communication between host CPU and DCPU is | ||
| 30 | * always in little endian. | ||
| 31 | */ | ||
| 32 | |||
| 33 | #include <linux/delay.h> | ||
| 34 | #include <linux/firmware.h> | ||
| 35 | #include <linux/io.h> | ||
| 36 | #include <linux/module.h> | ||
| 37 | #include <linux/of_address.h> | ||
| 38 | #include <linux/platform_device.h> | ||
| 39 | |||
| 40 | #define DRVNAME "brcmstb-dpfe" | ||
| 41 | #define FIRMWARE_NAME "dpfe.bin" | ||
| 42 | |||
| 43 | /* DCPU register offsets */ | ||
| 44 | #define REG_DCPU_RESET 0x0 | ||
| 45 | #define REG_TO_DCPU_MBOX 0x10 | ||
| 46 | #define REG_TO_HOST_MBOX 0x14 | ||
| 47 | |||
| 48 | /* Message RAM */ | ||
| 49 | #define DCPU_MSG_RAM(x) (0x100 + (x) * sizeof(u32)) | ||
| 50 | |||
| 51 | /* DRAM Info Offsets & Masks */ | ||
| 52 | #define DRAM_INFO_INTERVAL 0x0 | ||
| 53 | #define DRAM_INFO_MR4 0x4 | ||
| 54 | #define DRAM_INFO_ERROR 0x8 | ||
| 55 | #define DRAM_INFO_MR4_MASK 0xff | ||
| 56 | |||
| 57 | /* DRAM MR4 Offsets & Masks */ | ||
| 58 | #define DRAM_MR4_REFRESH 0x0 /* Refresh rate */ | ||
| 59 | #define DRAM_MR4_SR_ABORT 0x3 /* Self Refresh Abort */ | ||
| 60 | #define DRAM_MR4_PPRE 0x4 /* Post-package repair entry/exit */ | ||
| 61 | #define DRAM_MR4_TH_OFFS 0x5 /* Thermal Offset; vendor specific */ | ||
| 62 | #define DRAM_MR4_TUF 0x7 /* Temperature Update Flag */ | ||
| 63 | |||
| 64 | #define DRAM_MR4_REFRESH_MASK 0x7 | ||
| 65 | #define DRAM_MR4_SR_ABORT_MASK 0x1 | ||
| 66 | #define DRAM_MR4_PPRE_MASK 0x1 | ||
| 67 | #define DRAM_MR4_TH_OFFS_MASK 0x3 | ||
| 68 | #define DRAM_MR4_TUF_MASK 0x1 | ||
| 69 | |||
| 70 | /* DRAM Vendor Offsets & Masks */ | ||
| 71 | #define DRAM_VENDOR_MR5 0x0 | ||
| 72 | #define DRAM_VENDOR_MR6 0x4 | ||
| 73 | #define DRAM_VENDOR_MR7 0x8 | ||
| 74 | #define DRAM_VENDOR_MR8 0xc | ||
| 75 | #define DRAM_VENDOR_ERROR 0x10 | ||
| 76 | #define DRAM_VENDOR_MASK 0xff | ||
| 77 | |||
| 78 | /* Reset register bits & masks */ | ||
| 79 | #define DCPU_RESET_SHIFT 0x0 | ||
| 80 | #define DCPU_RESET_MASK 0x1 | ||
| 81 | #define DCPU_CLK_DISABLE_SHIFT 0x2 | ||
| 82 | |||
| 83 | /* DCPU return codes */ | ||
| 84 | #define DCPU_RET_ERROR_BIT BIT(31) | ||
| 85 | #define DCPU_RET_SUCCESS 0x1 | ||
| 86 | #define DCPU_RET_ERR_HEADER (DCPU_RET_ERROR_BIT | BIT(0)) | ||
| 87 | #define DCPU_RET_ERR_INVAL (DCPU_RET_ERROR_BIT | BIT(1)) | ||
| 88 | #define DCPU_RET_ERR_CHKSUM (DCPU_RET_ERROR_BIT | BIT(2)) | ||
| 89 | #define DCPU_RET_ERR_COMMAND (DCPU_RET_ERROR_BIT | BIT(3)) | ||
| 90 | /* This error code is not firmware defined and only used in the driver. */ | ||
| 91 | #define DCPU_RET_ERR_TIMEDOUT (DCPU_RET_ERROR_BIT | BIT(4)) | ||
| 92 | |||
| 93 | /* Firmware magic */ | ||
| 94 | #define DPFE_BE_MAGIC 0xfe1010fe | ||
| 95 | #define DPFE_LE_MAGIC 0xfe0101fe | ||
| 96 | |||
| 97 | /* Error codes */ | ||
| 98 | #define ERR_INVALID_MAGIC -1 | ||
| 99 | #define ERR_INVALID_SIZE -2 | ||
| 100 | #define ERR_INVALID_CHKSUM -3 | ||
| 101 | |||
| 102 | /* Message types */ | ||
| 103 | #define DPFE_MSG_TYPE_COMMAND 1 | ||
| 104 | #define DPFE_MSG_TYPE_RESPONSE 2 | ||
| 105 | |||
| 106 | #define DELAY_LOOP_MAX 200000 | ||
| 107 | |||
| 108 | enum dpfe_msg_fields { | ||
| 109 | MSG_HEADER, | ||
| 110 | MSG_COMMAND, | ||
| 111 | MSG_ARG_COUNT, | ||
| 112 | MSG_ARG0, | ||
| 113 | MSG_CHKSUM, | ||
| 114 | MSG_FIELD_MAX /* Last entry */ | ||
| 115 | }; | ||
| 116 | |||
| 117 | enum dpfe_commands { | ||
| 118 | DPFE_CMD_GET_INFO, | ||
| 119 | DPFE_CMD_GET_REFRESH, | ||
| 120 | DPFE_CMD_GET_VENDOR, | ||
| 121 | DPFE_CMD_MAX /* Last entry */ | ||
| 122 | }; | ||
| 123 | |||
| 124 | struct dpfe_msg { | ||
| 125 | u32 header; | ||
| 126 | u32 command; | ||
| 127 | u32 arg_count; | ||
| 128 | u32 arg0; | ||
| 129 | u32 chksum; /* This is the sum of all other entries. */ | ||
| 130 | }; | ||
| 131 | |||
| 132 | /* | ||
| 133 | * Format of the binary firmware file: | ||
| 134 | * | ||
| 135 | * entry | ||
| 136 | * 0 header | ||
| 137 | * value: 0xfe0101fe <== little endian | ||
| 138 | * 0xfe1010fe <== big endian | ||
| 139 | * 1 sequence: | ||
| 140 | * [31:16] total segments on this build | ||
| 141 | * [15:0] this segment sequence. | ||
| 142 | * 2 FW version | ||
| 143 | * 3 IMEM byte size | ||
| 144 | * 4 DMEM byte size | ||
| 145 | * IMEM | ||
| 146 | * DMEM | ||
| 147 | * last checksum ==> sum of everything | ||
| 148 | */ | ||
| 149 | struct dpfe_firmware_header { | ||
| 150 | u32 magic; | ||
| 151 | u32 sequence; | ||
| 152 | u32 version; | ||
| 153 | u32 imem_size; | ||
| 154 | u32 dmem_size; | ||
| 155 | }; | ||
| 156 | |||
| 157 | /* Things we only need during initialization. */ | ||
| 158 | struct init_data { | ||
| 159 | unsigned int dmem_len; | ||
| 160 | unsigned int imem_len; | ||
| 161 | unsigned int chksum; | ||
| 162 | bool is_big_endian; | ||
| 163 | }; | ||
| 164 | |||
| 165 | /* Things we need for as long as we are active. */ | ||
| 166 | struct private_data { | ||
| 167 | void __iomem *regs; | ||
| 168 | void __iomem *dmem; | ||
| 169 | void __iomem *imem; | ||
| 170 | struct device *dev; | ||
| 171 | unsigned int index; | ||
| 172 | struct mutex lock; | ||
| 173 | }; | ||
| 174 | |||
| 175 | static const char *error_text[] = { | ||
| 176 | "Success", "Header code incorrect", "Unknown command or argument", | ||
| 177 | "Incorrect checksum", "Malformed command", "Timed out", | ||
| 178 | }; | ||
| 179 | |||
| 180 | /* List of supported firmware commands */ | ||
| 181 | static const u32 dpfe_commands[DPFE_CMD_MAX][MSG_FIELD_MAX] = { | ||
| 182 | [DPFE_CMD_GET_INFO] = { | ||
| 183 | [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND, | ||
| 184 | [MSG_COMMAND] = 1, | ||
| 185 | [MSG_ARG_COUNT] = 1, | ||
| 186 | [MSG_ARG0] = 1, | ||
| 187 | [MSG_CHKSUM] = 4, | ||
| 188 | }, | ||
| 189 | [DPFE_CMD_GET_REFRESH] = { | ||
| 190 | [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND, | ||
| 191 | [MSG_COMMAND] = 2, | ||
| 192 | [MSG_ARG_COUNT] = 1, | ||
| 193 | [MSG_ARG0] = 1, | ||
| 194 | [MSG_CHKSUM] = 5, | ||
| 195 | }, | ||
| 196 | [DPFE_CMD_GET_VENDOR] = { | ||
| 197 | [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND, | ||
| 198 | [MSG_COMMAND] = 2, | ||
| 199 | [MSG_ARG_COUNT] = 1, | ||
| 200 | [MSG_ARG0] = 2, | ||
| 201 | [MSG_CHKSUM] = 6, | ||
| 202 | }, | ||
| 203 | }; | ||
| 204 | |||
| 205 | static bool is_dcpu_enabled(void __iomem *regs) | ||
| 206 | { | ||
| 207 | u32 val; | ||
| 208 | |||
| 209 | val = readl_relaxed(regs + REG_DCPU_RESET); | ||
| 210 | |||
| 211 | return !(val & DCPU_RESET_MASK); | ||
| 212 | } | ||
| 213 | |||
| 214 | static void __disable_dcpu(void __iomem *regs) | ||
| 215 | { | ||
| 216 | u32 val; | ||
| 217 | |||
| 218 | if (!is_dcpu_enabled(regs)) | ||
| 219 | return; | ||
| 220 | |||
| 221 | /* Put DCPU in reset if it's running. */ | ||
| 222 | val = readl_relaxed(regs + REG_DCPU_RESET); | ||
| 223 | val |= (1 << DCPU_RESET_SHIFT); | ||
| 224 | writel_relaxed(val, regs + REG_DCPU_RESET); | ||
| 225 | } | ||
| 226 | |||
| 227 | static void __enable_dcpu(void __iomem *regs) | ||
| 228 | { | ||
| 229 | u32 val; | ||
| 230 | |||
| 231 | /* Clear mailbox registers. */ | ||
| 232 | writel_relaxed(0, regs + REG_TO_DCPU_MBOX); | ||
| 233 | writel_relaxed(0, regs + REG_TO_HOST_MBOX); | ||
| 234 | |||
| 235 | /* Disable DCPU clock gating */ | ||
| 236 | val = readl_relaxed(regs + REG_DCPU_RESET); | ||
| 237 | val &= ~(1 << DCPU_CLK_DISABLE_SHIFT); | ||
| 238 | writel_relaxed(val, regs + REG_DCPU_RESET); | ||
| 239 | |||
| 240 | /* Take DCPU out of reset */ | ||
| 241 | val = readl_relaxed(regs + REG_DCPU_RESET); | ||
| 242 | val &= ~(1 << DCPU_RESET_SHIFT); | ||
| 243 | writel_relaxed(val, regs + REG_DCPU_RESET); | ||
| 244 | } | ||
| 245 | |||
| 246 | static unsigned int get_msg_chksum(const u32 msg[]) | ||
| 247 | { | ||
| 248 | unsigned int sum = 0; | ||
| 249 | unsigned int i; | ||
| 250 | |||
| 251 | /* Don't include the last field in the checksum. */ | ||
| 252 | for (i = 0; i < MSG_FIELD_MAX - 1; i++) | ||
| 253 | sum += msg[i]; | ||
| 254 | |||
| 255 | return sum; | ||
| 256 | } | ||
| 257 | |||
| 258 | static int __send_command(struct private_data *priv, unsigned int cmd, | ||
| 259 | u32 result[]) | ||
| 260 | { | ||
| 261 | const u32 *msg = dpfe_commands[cmd]; | ||
| 262 | void __iomem *regs = priv->regs; | ||
| 263 | unsigned int i, chksum; | ||
| 264 | int ret = 0; | ||
| 265 | u32 resp; | ||
| 266 | |||
| 267 | if (cmd >= DPFE_CMD_MAX) | ||
| 268 | return -1; | ||
| 269 | |||
| 270 | mutex_lock(&priv->lock); | ||
| 271 | |||
| 272 | /* Write command and arguments to message area */ | ||
| 273 | for (i = 0; i < MSG_FIELD_MAX; i++) | ||
| 274 | writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i)); | ||
| 275 | |||
| 276 | /* Tell DCPU there is a command waiting */ | ||
| 277 | writel_relaxed(1, regs + REG_TO_DCPU_MBOX); | ||
| 278 | |||
| 279 | /* Wait for DCPU to process the command */ | ||
| 280 | for (i = 0; i < DELAY_LOOP_MAX; i++) { | ||
| 281 | /* Read response code */ | ||
| 282 | resp = readl_relaxed(regs + REG_TO_HOST_MBOX); | ||
| 283 | if (resp > 0) | ||
| 284 | break; | ||
| 285 | udelay(5); | ||
| 286 | } | ||
| 287 | |||
| 288 | if (i == DELAY_LOOP_MAX) { | ||
| 289 | resp = (DCPU_RET_ERR_TIMEDOUT & ~DCPU_RET_ERROR_BIT); | ||
| 290 | ret = -ffs(resp); | ||
| 291 | } else { | ||
| 292 | /* Read response data */ | ||
| 293 | for (i = 0; i < MSG_FIELD_MAX; i++) | ||
| 294 | result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i)); | ||
| 295 | } | ||
| 296 | |||
| 297 | /* Tell DCPU we are done */ | ||
| 298 | writel_relaxed(0, regs + REG_TO_HOST_MBOX); | ||
| 299 | |||
| 300 | mutex_unlock(&priv->lock); | ||
| 301 | |||
| 302 | if (ret) | ||
| 303 | return ret; | ||
| 304 | |||
| 305 | /* Verify response */ | ||
| 306 | chksum = get_msg_chksum(result); | ||
| 307 | if (chksum != result[MSG_CHKSUM]) | ||
| 308 | resp = DCPU_RET_ERR_CHKSUM; | ||
| 309 | |||
| 310 | if (resp != DCPU_RET_SUCCESS) { | ||
| 311 | resp &= ~DCPU_RET_ERROR_BIT; | ||
| 312 | ret = -ffs(resp); | ||
| 313 | } | ||
| 314 | |||
| 315 | return ret; | ||
| 316 | } | ||
| 317 | |||
| 318 | /* Ensure that the firmware file loaded meets all the requirements. */ | ||
| 319 | static int __verify_firmware(struct init_data *init, | ||
| 320 | const struct firmware *fw) | ||
| 321 | { | ||
| 322 | const struct dpfe_firmware_header *header = (void *)fw->data; | ||
| 323 | unsigned int dmem_size, imem_size, total_size; | ||
| 324 | bool is_big_endian = false; | ||
| 325 | const u32 *chksum_ptr; | ||
| 326 | |||
| 327 | if (header->magic == DPFE_BE_MAGIC) | ||
| 328 | is_big_endian = true; | ||
| 329 | else if (header->magic != DPFE_LE_MAGIC) | ||
| 330 | return ERR_INVALID_MAGIC; | ||
| 331 | |||
| 332 | if (is_big_endian) { | ||
| 333 | dmem_size = be32_to_cpu(header->dmem_size); | ||
| 334 | imem_size = be32_to_cpu(header->imem_size); | ||
| 335 | } else { | ||
| 336 | dmem_size = le32_to_cpu(header->dmem_size); | ||
| 337 | imem_size = le32_to_cpu(header->imem_size); | ||
| 338 | } | ||
| 339 | |||
| 340 | /* Data and instruction sections are 32 bit words. */ | ||
| 341 | if ((dmem_size % sizeof(u32)) != 0 || (imem_size % sizeof(u32)) != 0) | ||
| 342 | return ERR_INVALID_SIZE; | ||
| 343 | |||
| 344 | /* | ||
| 345 | * The header + the data section + the instruction section + the | ||
| 346 | * checksum must be equal to the total firmware size. | ||
| 347 | */ | ||
| 348 | total_size = dmem_size + imem_size + sizeof(*header) + | ||
| 349 | sizeof(*chksum_ptr); | ||
| 350 | if (total_size != fw->size) | ||
| 351 | return ERR_INVALID_SIZE; | ||
| 352 | |||
| 353 | /* The checksum comes at the very end. */ | ||
| 354 | chksum_ptr = (void *)fw->data + sizeof(*header) + dmem_size + imem_size; | ||
| 355 | |||
| 356 | init->is_big_endian = is_big_endian; | ||
| 357 | init->dmem_len = dmem_size; | ||
| 358 | init->imem_len = imem_size; | ||
| 359 | init->chksum = (is_big_endian) | ||
| 360 | ? be32_to_cpu(*chksum_ptr) : le32_to_cpu(*chksum_ptr); | ||
| 361 | |||
| 362 | return 0; | ||
| 363 | } | ||
| 364 | |||
| 365 | /* Verify checksum by reading back the firmware from co-processor RAM. */ | ||
| 366 | static int __verify_fw_checksum(struct init_data *init, | ||
| 367 | struct private_data *priv, | ||
| 368 | const struct dpfe_firmware_header *header, | ||
| 369 | u32 checksum) | ||
| 370 | { | ||
| 371 | u32 magic, sequence, version, sum; | ||
| 372 | u32 __iomem *dmem = priv->dmem; | ||
| 373 | u32 __iomem *imem = priv->imem; | ||
| 374 | unsigned int i; | ||
| 375 | |||
| 376 | if (init->is_big_endian) { | ||
| 377 | magic = be32_to_cpu(header->magic); | ||
| 378 | sequence = be32_to_cpu(header->sequence); | ||
| 379 | version = be32_to_cpu(header->version); | ||
| 380 | } else { | ||
| 381 | magic = le32_to_cpu(header->magic); | ||
| 382 | sequence = le32_to_cpu(header->sequence); | ||
| 383 | version = le32_to_cpu(header->version); | ||
| 384 | } | ||
| 385 | |||
| 386 | sum = magic + sequence + version + init->dmem_len + init->imem_len; | ||
| 387 | |||
| 388 | for (i = 0; i < init->dmem_len / sizeof(u32); i++) | ||
| 389 | sum += readl_relaxed(dmem + i); | ||
| 390 | |||
| 391 | for (i = 0; i < init->imem_len / sizeof(u32); i++) | ||
| 392 | sum += readl_relaxed(imem + i); | ||
| 393 | |||
| 394 | return (sum == checksum) ? 0 : -1; | ||
| 395 | } | ||
| 396 | |||
| 397 | static int __write_firmware(u32 __iomem *mem, const u32 *fw, | ||
| 398 | unsigned int size, bool is_big_endian) | ||
| 399 | { | ||
| 400 | unsigned int i; | ||
| 401 | |||
| 402 | /* Convert size to 32-bit words. */ | ||
| 403 | size /= sizeof(u32); | ||
| 404 | |||
| 405 | /* It is recommended to clear the firmware area first. */ | ||
| 406 | for (i = 0; i < size; i++) | ||
| 407 | writel_relaxed(0, mem + i); | ||
| 408 | |||
| 409 | /* Now copy it. */ | ||
| 410 | if (is_big_endian) { | ||
| 411 | for (i = 0; i < size; i++) | ||
| 412 | writel_relaxed(be32_to_cpu(fw[i]), mem + i); | ||
| 413 | } else { | ||
| 414 | for (i = 0; i < size; i++) | ||
| 415 | writel_relaxed(le32_to_cpu(fw[i]), mem + i); | ||
| 416 | } | ||
| 417 | |||
| 418 | return 0; | ||
| 419 | } | ||
| 420 | |||
| 421 | static int brcmstb_dpfe_download_firmware(struct platform_device *pdev, | ||
| 422 | struct init_data *init) | ||
| 423 | { | ||
| 424 | const struct dpfe_firmware_header *header; | ||
| 425 | unsigned int dmem_size, imem_size; | ||
| 426 | struct device *dev = &pdev->dev; | ||
| 427 | bool is_big_endian = false; | ||
| 428 | struct private_data *priv; | ||
| 429 | const struct firmware *fw; | ||
| 430 | const u32 *dmem, *imem; | ||
| 431 | const void *fw_blob; | ||
| 432 | int ret; | ||
| 433 | |||
| 434 | priv = platform_get_drvdata(pdev); | ||
| 435 | |||
| 436 | /* | ||
| 437 | * Skip downloading the firmware if the DCPU is already running and | ||
| 438 | * responding to commands. | ||
| 439 | */ | ||
| 440 | if (is_dcpu_enabled(priv->regs)) { | ||
| 441 | u32 response[MSG_FIELD_MAX]; | ||
| 442 | |||
| 443 | ret = __send_command(priv, DPFE_CMD_GET_INFO, response); | ||
| 444 | if (!ret) | ||
| 445 | return 0; | ||
| 446 | } | ||
| 447 | |||
| 448 | ret = request_firmware(&fw, FIRMWARE_NAME, dev); | ||
| 449 | /* request_firmware() prints its own error messages. */ | ||
| 450 | if (ret) | ||
| 451 | return ret; | ||
| 452 | |||
| 453 | ret = __verify_firmware(init, fw); | ||
| 454 | if (ret) | ||
| 455 | return -EFAULT; | ||
| 456 | |||
| 457 | __disable_dcpu(priv->regs); | ||
| 458 | |||
| 459 | is_big_endian = init->is_big_endian; | ||
| 460 | dmem_size = init->dmem_len; | ||
| 461 | imem_size = init->imem_len; | ||
| 462 | |||
| 463 | /* At the beginning of the firmware blob is a header. */ | ||
| 464 | header = (struct dpfe_firmware_header *)fw->data; | ||
| 465 | /* Void pointer to the beginning of the actual firmware. */ | ||
| 466 | fw_blob = fw->data + sizeof(*header); | ||
| 467 | /* IMEM comes right after the header. */ | ||
| 468 | imem = fw_blob; | ||
| 469 | /* DMEM follows after IMEM. */ | ||
| 470 | dmem = fw_blob + imem_size; | ||
| 471 | |||
| 472 | ret = __write_firmware(priv->dmem, dmem, dmem_size, is_big_endian); | ||
| 473 | if (ret) | ||
| 474 | return ret; | ||
| 475 | ret = __write_firmware(priv->imem, imem, imem_size, is_big_endian); | ||
| 476 | if (ret) | ||
| 477 | return ret; | ||
| 478 | |||
| 479 | ret = __verify_fw_checksum(init, priv, header, init->chksum); | ||
| 480 | if (ret) | ||
| 481 | return ret; | ||
| 482 | |||
| 483 | __enable_dcpu(priv->regs); | ||
| 484 | |||
| 485 | return 0; | ||
| 486 | } | ||
| 487 | |||
| 488 | static ssize_t generic_show(unsigned int command, u32 response[], | ||
| 489 | struct device *dev, char *buf) | ||
| 490 | { | ||
| 491 | struct private_data *priv; | ||
| 492 | int ret; | ||
| 493 | |||
| 494 | priv = dev_get_drvdata(dev); | ||
| 495 | if (!priv) | ||
| 496 | return sprintf(buf, "ERROR: driver private data not set\n"); | ||
| 497 | |||
| 498 | ret = __send_command(priv, command, response); | ||
| 499 | if (ret < 0) | ||
| 500 | return sprintf(buf, "ERROR: %s\n", error_text[-ret]); | ||
| 501 | |||
| 502 | return 0; | ||
| 503 | } | ||
| 504 | |||
| 505 | static ssize_t show_info(struct device *dev, struct device_attribute *devattr, | ||
| 506 | char *buf) | ||
| 507 | { | ||
| 508 | u32 response[MSG_FIELD_MAX]; | ||
| 509 | unsigned int info; | ||
| 510 | int ret; | ||
| 511 | |||
| 512 | ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf); | ||
| 513 | if (ret) | ||
| 514 | return ret; | ||
| 515 | |||
| 516 | info = response[MSG_ARG0]; | ||
| 517 | |||
| 518 | return sprintf(buf, "%u.%u.%u.%u\n", | ||
| 519 | (info >> 24) & 0xff, | ||
| 520 | (info >> 16) & 0xff, | ||
| 521 | (info >> 8) & 0xff, | ||
| 522 | info & 0xff); | ||
| 523 | } | ||
| 524 | |||
| 525 | static ssize_t show_refresh(struct device *dev, | ||
| 526 | struct device_attribute *devattr, char *buf) | ||
| 527 | { | ||
| 528 | u32 response[MSG_FIELD_MAX]; | ||
| 529 | void __iomem *info; | ||
| 530 | struct private_data *priv; | ||
| 531 | unsigned int offset; | ||
| 532 | u8 refresh, sr_abort, ppre, thermal_offs, tuf; | ||
| 533 | u32 mr4; | ||
| 534 | int ret; | ||
| 535 | |||
| 536 | ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf); | ||
| 537 | if (ret) | ||
| 538 | return ret; | ||
| 539 | |||
| 540 | priv = dev_get_drvdata(dev); | ||
| 541 | offset = response[MSG_ARG0]; | ||
| 542 | info = priv->dmem + offset; | ||
| 543 | |||
| 544 | mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK; | ||
| 545 | |||
| 546 | refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK; | ||
| 547 | sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK; | ||
| 548 | ppre = (mr4 >> DRAM_MR4_PPRE) & DRAM_MR4_PPRE_MASK; | ||
| 549 | thermal_offs = (mr4 >> DRAM_MR4_TH_OFFS) & DRAM_MR4_TH_OFFS_MASK; | ||
| 550 | tuf = (mr4 >> DRAM_MR4_TUF) & DRAM_MR4_TUF_MASK; | ||
| 551 | |||
| 552 | return sprintf(buf, "%#x %#x %#x %#x %#x %#x %#x\n", | ||
| 553 | readl_relaxed(info + DRAM_INFO_INTERVAL), | ||
| 554 | refresh, sr_abort, ppre, thermal_offs, tuf, | ||
| 555 | readl_relaxed(info + DRAM_INFO_ERROR)); | ||
| 556 | } | ||
| 557 | |||
| 558 | static ssize_t store_refresh(struct device *dev, struct device_attribute *attr, | ||
| 559 | const char *buf, size_t count) | ||
| 560 | { | ||
| 561 | u32 response[MSG_FIELD_MAX]; | ||
| 562 | struct private_data *priv; | ||
| 563 | void __iomem *info; | ||
| 564 | unsigned int offset; | ||
| 565 | unsigned long val; | ||
| 566 | int ret; | ||
| 567 | |||
| 568 | if (kstrtoul(buf, 0, &val) < 0) | ||
| 569 | return -EINVAL; | ||
| 570 | |||
| 571 | priv = dev_get_drvdata(dev); | ||
| 572 | |||
| 573 | ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response); | ||
| 574 | if (ret) | ||
| 575 | return ret; | ||
| 576 | |||
| 577 | offset = response[MSG_ARG0]; | ||
| 578 | info = priv->dmem + offset; | ||
| 579 | writel_relaxed(val, info + DRAM_INFO_INTERVAL); | ||
| 580 | |||
| 581 | return count; | ||
| 582 | } | ||
| 583 | |||
| 584 | static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr, | ||
| 585 | char *buf) | ||
| 586 | { | ||
| 587 | u32 response[MSG_FIELD_MAX]; | ||
| 588 | struct private_data *priv; | ||
| 589 | void __iomem *info; | ||
| 590 | unsigned int offset; | ||
| 591 | int ret; | ||
| 592 | |||
| 593 | ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf); | ||
| 594 | if (ret) | ||
| 595 | return ret; | ||
| 596 | |||
| 597 | offset = response[MSG_ARG0]; | ||
| 598 | priv = dev_get_drvdata(dev); | ||
| 599 | info = priv->dmem + offset; | ||
| 600 | |||
| 601 | return sprintf(buf, "%#x %#x %#x %#x %#x\n", | ||
| 602 | readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK, | ||
| 603 | readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK, | ||
| 604 | readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK, | ||
| 605 | readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK, | ||
| 606 | readl_relaxed(info + DRAM_VENDOR_ERROR)); | ||
| 607 | } | ||
| 608 | |||
| 609 | static int brcmstb_dpfe_resume(struct platform_device *pdev) | ||
| 610 | { | ||
| 611 | struct init_data init; | ||
| 612 | |||
| 613 | return brcmstb_dpfe_download_firmware(pdev, &init); | ||
| 614 | } | ||
| 615 | |||
| 616 | static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL); | ||
| 617 | static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh); | ||
| 618 | static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL); | ||
| 619 | static struct attribute *dpfe_attrs[] = { | ||
| 620 | &dev_attr_dpfe_info.attr, | ||
| 621 | &dev_attr_dpfe_refresh.attr, | ||
| 622 | &dev_attr_dpfe_vendor.attr, | ||
| 623 | NULL | ||
| 624 | }; | ||
| 625 | ATTRIBUTE_GROUPS(dpfe); | ||
| 626 | |||
| 627 | static int brcmstb_dpfe_probe(struct platform_device *pdev) | ||
| 628 | { | ||
| 629 | struct device *dev = &pdev->dev; | ||
| 630 | struct private_data *priv; | ||
| 631 | struct device *dpfe_dev; | ||
| 632 | struct init_data init; | ||
| 633 | struct resource *res; | ||
| 634 | u32 index; | ||
| 635 | int ret; | ||
| 636 | |||
| 637 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | ||
| 638 | if (!priv) | ||
| 639 | return -ENOMEM; | ||
| 640 | |||
| 641 | mutex_init(&priv->lock); | ||
| 642 | platform_set_drvdata(pdev, priv); | ||
| 643 | |||
| 644 | /* Cell index is optional; default to 0 if not present. */ | ||
| 645 | ret = of_property_read_u32(dev->of_node, "cell-index", &index); | ||
| 646 | if (ret) | ||
| 647 | index = 0; | ||
| 648 | |||
| 649 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-cpu"); | ||
| 650 | priv->regs = devm_ioremap_resource(dev, res); | ||
| 651 | if (IS_ERR(priv->regs)) { | ||
| 652 | dev_err(dev, "couldn't map DCPU registers\n"); | ||
| 653 | return -ENODEV; | ||
| 654 | } | ||
| 655 | |||
| 656 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-dmem"); | ||
| 657 | priv->dmem = devm_ioremap_resource(dev, res); | ||
| 658 | if (IS_ERR(priv->dmem)) { | ||
| 659 | dev_err(dev, "Couldn't map DCPU data memory\n"); | ||
| 660 | return -ENOENT; | ||
| 661 | } | ||
| 662 | |||
| 663 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-imem"); | ||
| 664 | priv->imem = devm_ioremap_resource(dev, res); | ||
| 665 | if (IS_ERR(priv->imem)) { | ||
| 666 | dev_err(dev, "Couldn't map DCPU instruction memory\n"); | ||
| 667 | return -ENOENT; | ||
| 668 | } | ||
| 669 | |||
| 670 | ret = brcmstb_dpfe_download_firmware(pdev, &init); | ||
| 671 | if (ret) | ||
| 672 | goto err; | ||
| 673 | |||
| 674 | dpfe_dev = devm_kzalloc(dev, sizeof(*dpfe_dev), GFP_KERNEL); | ||
| 675 | if (!dpfe_dev) { | ||
| 676 | ret = -ENOMEM; | ||
| 677 | goto err; | ||
| 678 | } | ||
| 679 | |||
| 680 | priv->dev = dpfe_dev; | ||
| 681 | priv->index = index; | ||
| 682 | |||
| 683 | dpfe_dev->parent = dev; | ||
| 684 | dpfe_dev->groups = dpfe_groups; | ||
| 685 | dpfe_dev->of_node = dev->of_node; | ||
| 686 | dev_set_drvdata(dpfe_dev, priv); | ||
| 687 | dev_set_name(dpfe_dev, "dpfe%u", index); | ||
| 688 | |||
| 689 | ret = device_register(dpfe_dev); | ||
| 690 | if (ret) | ||
| 691 | goto err; | ||
| 692 | |||
| 693 | dev_info(dev, "registered.\n"); | ||
| 694 | |||
| 695 | return 0; | ||
| 696 | |||
| 697 | err: | ||
| 698 | dev_err(dev, "failed to initialize -- error %d\n", ret); | ||
| 699 | |||
| 700 | return ret; | ||
| 701 | } | ||
| 702 | |||
| 703 | static const struct of_device_id brcmstb_dpfe_of_match[] = { | ||
| 704 | { .compatible = "brcm,dpfe-cpu", }, | ||
| 705 | {} | ||
| 706 | }; | ||
| 707 | MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match); | ||
| 708 | |||
| 709 | static struct platform_driver brcmstb_dpfe_driver = { | ||
| 710 | .driver = { | ||
| 711 | .name = DRVNAME, | ||
| 712 | .of_match_table = brcmstb_dpfe_of_match, | ||
| 713 | }, | ||
| 714 | .probe = brcmstb_dpfe_probe, | ||
| 715 | .resume = brcmstb_dpfe_resume, | ||
| 716 | }; | ||
| 717 | |||
| 718 | module_platform_driver(brcmstb_dpfe_driver); | ||
| 719 | |||
| 720 | MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>"); | ||
| 721 | MODULE_DESCRIPTION("BRCMSTB DDR PHY Front End Driver"); | ||
| 722 | MODULE_LICENSE("GPL"); | ||
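As a quick sanity check of the mailbox format above: MSG_CHKSUM is simply the sum of the four preceding message words, which is what get_msg_chksum() verifies on responses. For DPFE_CMD_GET_REFRESH the command table gives header 1 (DPFE_MSG_TYPE_COMMAND), command 2, arg_count 1 and arg0 1, so the checksum field is 5. A tiny standalone check of that invariant:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* DPFE_CMD_GET_REFRESH as laid out in dpfe_commands[] above. */
		const uint32_t msg[5] = { 1, 2, 1, 1, 5 };
		uint32_t sum = 0;
		int i;

		for (i = 0; i < 4; i++)		/* the checksum excludes itself */
			sum += msg[i];

		assert(sum == msg[4]);		/* 1 + 2 + 1 + 1 == 5 */
		return 0;
	}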
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 7059bbda2fac..a385a35c7de9 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c | |||
| @@ -1075,11 +1075,33 @@ int gpmc_configure(int cmd, int wval) | |||
| 1075 | } | 1075 | } |
| 1076 | EXPORT_SYMBOL(gpmc_configure); | 1076 | EXPORT_SYMBOL(gpmc_configure); |
| 1077 | 1077 | ||
| 1078 | void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) | 1078 | static bool gpmc_nand_writebuffer_empty(void) |
| 1079 | { | ||
| 1080 | if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS) | ||
| 1081 | return true; | ||
| 1082 | |||
| 1083 | return false; | ||
| 1084 | } | ||
| 1085 | |||
| 1086 | static struct gpmc_nand_ops nand_ops = { | ||
| 1087 | .nand_writebuffer_empty = gpmc_nand_writebuffer_empty, | ||
| 1088 | }; | ||
| 1089 | |||
| 1090 | /** | ||
| 1091 | * gpmc_omap_get_nand_ops - Get the GPMC NAND interface | ||
| 1092 | * @reg: the GPMC NAND register map exclusive for NAND use. | ||
| 1093 | * @cs: GPMC chip select number on which the NAND sits. The | ||
| 1094 | * register map returned will be specific to this chip select. | ||
| 1095 | * | ||
| 1096 | * Returns NULL on error e.g. invalid cs. | ||
| 1097 | */ | ||
| 1098 | struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs) | ||
| 1079 | { | 1099 | { |
| 1080 | int i; | 1100 | int i; |
| 1081 | 1101 | ||
| 1082 | reg->gpmc_status = NULL; /* deprecated */ | 1102 | if (cs >= gpmc_cs_num) |
| 1103 | return NULL; | ||
| 1104 | |||
| 1083 | reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET + | 1105 | reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET + |
| 1084 | GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs; | 1106 | GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs; |
| 1085 | reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET + | 1107 | reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET + |
| @@ -1111,34 +1133,6 @@ void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) | |||
| 1111 | reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 + | 1133 | reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 + |
| 1112 | i * GPMC_BCH_SIZE; | 1134 | i * GPMC_BCH_SIZE; |
| 1113 | } | 1135 | } |
| 1114 | } | ||
| 1115 | |||
| 1116 | static bool gpmc_nand_writebuffer_empty(void) | ||
| 1117 | { | ||
| 1118 | if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS) | ||
| 1119 | return true; | ||
| 1120 | |||
| 1121 | return false; | ||
| 1122 | } | ||
| 1123 | |||
| 1124 | static struct gpmc_nand_ops nand_ops = { | ||
| 1125 | .nand_writebuffer_empty = gpmc_nand_writebuffer_empty, | ||
| 1126 | }; | ||
| 1127 | |||
| 1128 | /** | ||
| 1129 | * gpmc_omap_get_nand_ops - Get the GPMC NAND interface | ||
| 1130 | * @regs: the GPMC NAND register map exclusive for NAND use. | ||
| 1131 | * @cs: GPMC chip select number on which the NAND sits. The | ||
| 1132 | * register map returned will be specific to this chip select. | ||
| 1133 | * | ||
| 1134 | * Returns NULL on error e.g. invalid cs. | ||
| 1135 | */ | ||
| 1136 | struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs) | ||
| 1137 | { | ||
| 1138 | if (cs >= gpmc_cs_num) | ||
| 1139 | return NULL; | ||
| 1140 | |||
| 1141 | gpmc_update_nand_reg(reg, cs); | ||
| 1142 | 1136 | ||
| 1143 | return &nand_ops; | 1137 | return &nand_ops; |
| 1144 | } | 1138 | } |
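After this change gpmc_omap_get_nand_ops() both validates the chip select and fills in the register map in one step. A rough sketch of how a NAND controller driver might consume it (the polling loop is illustrative only; the real OMAP NAND driver's handling differs):

	struct gpmc_nand_regs regs;
	struct gpmc_nand_ops *ops;

	ops = gpmc_omap_get_nand_ops(&regs, cs);
	if (!ops)
		return -ENODEV;		/* cs out of range */

	/* Wait for the GPMC write buffer to drain before issuing commands. */
	while (!ops->nand_writebuffer_empty())
		cpu_relax();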
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 32771c2ced7b..22b75c82e377 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c | |||
| @@ -397,3 +397,29 @@ void of_reserved_mem_device_release(struct device *dev) | |||
| 397 | rmem->ops->device_release(rmem, dev); | 397 | rmem->ops->device_release(rmem, dev); |
| 398 | } | 398 | } |
| 399 | EXPORT_SYMBOL_GPL(of_reserved_mem_device_release); | 399 | EXPORT_SYMBOL_GPL(of_reserved_mem_device_release); |
| 400 | |||
| 401 | /** | ||
| 402 | * of_reserved_mem_lookup() - acquire reserved_mem from a device node | ||
| 403 | * @np: node pointer of the desired reserved-memory region | ||
| 404 | * | ||
| 405 | * This function allows drivers to acquire a reference to the reserved_mem | ||
| 406 | * struct based on a device node handle. | ||
| 407 | * | ||
| 408 | * Returns a reserved_mem reference, or NULL on error. | ||
| 409 | */ | ||
| 410 | struct reserved_mem *of_reserved_mem_lookup(struct device_node *np) | ||
| 411 | { | ||
| 412 | const char *name; | ||
| 413 | int i; | ||
| 414 | |||
| 415 | if (!np->full_name) | ||
| 416 | return NULL; | ||
| 417 | |||
| 418 | name = kbasename(np->full_name); | ||
| 419 | for (i = 0; i < reserved_mem_count; i++) | ||
| 420 | if (!strcmp(reserved_mem[i].name, name)) | ||
| 421 | return &reserved_mem[i]; | ||
| 422 | |||
| 423 | return NULL; | ||
| 424 | } | ||
| 425 | EXPORT_SYMBOL_GPL(of_reserved_mem_lookup); | ||
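A typical consumer of the new accessor resolves a phandle from its own node and then reads the region's placement from the returned structure. A minimal sketch, assuming the conventional "memory-region" property (this is presumably how the qcom,rmtfs-mem driver handled later in this diff uses it):

	struct device_node *node;
	struct reserved_mem *rmem;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node)
		return -EINVAL;

	rmem = of_reserved_mem_lookup(node);
	of_node_put(node);
	if (!rmem)
		return -EINVAL;		/* no matching /reserved-memory entry */

	dev_dbg(dev, "reserved region at %pa, size %pa\n",
		&rmem->base, &rmem->size);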
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index ac15d0e3d27d..b7cf84b29737 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
| @@ -497,6 +497,12 @@ int of_platform_default_populate(struct device_node *root, | |||
| 497 | EXPORT_SYMBOL_GPL(of_platform_default_populate); | 497 | EXPORT_SYMBOL_GPL(of_platform_default_populate); |
| 498 | 498 | ||
| 499 | #ifndef CONFIG_PPC | 499 | #ifndef CONFIG_PPC |
| 500 | static const struct of_device_id reserved_mem_matches[] = { | ||
| 501 | { .compatible = "qcom,rmtfs-mem" }, | ||
| 502 | { .compatible = "ramoops" }, | ||
| 503 | {} | ||
| 504 | }; | ||
| 505 | |||
| 500 | static int __init of_platform_default_populate_init(void) | 506 | static int __init of_platform_default_populate_init(void) |
| 501 | { | 507 | { |
| 502 | struct device_node *node; | 508 | struct device_node *node; |
| @@ -505,15 +511,12 @@ static int __init of_platform_default_populate_init(void) | |||
| 505 | return -ENODEV; | 511 | return -ENODEV; |
| 506 | 512 | ||
| 507 | /* | 513 | /* |
| 508 | * Handle ramoops explicitly, since it is inside /reserved-memory, | 514 | * Handle certain compatibles explicitly, since we don't want to create |
| 509 | * which lacks a "compatible" property. | 515 | * platform_devices for every node in /reserved-memory with a |
| 516 | * "compatible", | ||
| 510 | */ | 517 | */ |
| 511 | node = of_find_node_by_path("/reserved-memory"); | 518 | for_each_matching_node(node, reserved_mem_matches) |
| 512 | if (node) { | 519 | of_platform_device_create(node, NULL, NULL); |
| 513 | node = of_find_compatible_node(node, NULL, "ramoops"); | ||
| 514 | if (node) | ||
| 515 | of_platform_device_create(node, NULL, NULL); | ||
| 516 | } | ||
| 517 | 520 | ||
| 518 | /* Populate everything else. */ | 521 | /* Populate everything else. */ |
| 519 | of_platform_default_populate(NULL, NULL, NULL); | 522 | of_platform_default_populate(NULL, NULL, NULL); |
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index e2baecbb9dd3..7fc77696bb1e 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig | |||
| @@ -28,6 +28,12 @@ config RESET_ATH79 | |||
| 28 | This enables the ATH79 reset controller driver that supports the | 28 | This enables the ATH79 reset controller driver that supports the |
| 29 | AR71xx SoC reset controller. | 29 | AR71xx SoC reset controller. |
| 30 | 30 | ||
| 31 | config RESET_AXS10X | ||
| 32 | bool "AXS10x Reset Driver" if COMPILE_TEST | ||
| 33 | default ARC_PLAT_AXS10X | ||
| 34 | help | ||
| 35 | This enables the reset controller driver for AXS10x. | ||
| 36 | |||
| 31 | config RESET_BERLIN | 37 | config RESET_BERLIN |
| 32 | bool "Berlin Reset Driver" if COMPILE_TEST | 38 | bool "Berlin Reset Driver" if COMPILE_TEST |
| 33 | default ARCH_BERLIN | 39 | default ARCH_BERLIN |
| @@ -75,21 +81,21 @@ config RESET_PISTACHIO | |||
| 75 | help | 81 | help |
| 76 | This enables the reset driver for ImgTec Pistachio SoCs. | 82 | This enables the reset driver for ImgTec Pistachio SoCs. |
| 77 | 83 | ||
| 78 | config RESET_SOCFPGA | 84 | config RESET_SIMPLE |
| 79 | bool "SoCFPGA Reset Driver" if COMPILE_TEST | 85 | bool "Simple Reset Controller Driver" if COMPILE_TEST |
| 80 | default ARCH_SOCFPGA | 86 | default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX |
| 81 | help | 87 | help |
| 82 | This enables the reset controller driver for Altera SoCFPGAs. | 88 | This enables a simple reset controller driver for reset lines that |
| 89 | can be asserted and deasserted by toggling bits in a contiguous, | ||
| 90 | exclusive register space. | ||
| 83 | 91 | ||
| 84 | config RESET_STM32 | 92 | Currently this driver supports Altera SoCFPGAs, the RCC reset |
| 85 | bool "STM32 Reset Driver" if COMPILE_TEST | 93 | controller in STM32 MCUs, Allwinner SoCs, and ZTE's zx2967 family. |
| 86 | default ARCH_STM32 | ||
| 87 | help | ||
| 88 | This enables the RCC reset controller driver for STM32 MCUs. | ||
| 89 | 94 | ||
| 90 | config RESET_SUNXI | 95 | config RESET_SUNXI |
| 91 | bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI | 96 | bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI |
| 92 | default ARCH_SUNXI | 97 | default ARCH_SUNXI |
| 98 | select RESET_SIMPLE | ||
| 93 | help | 99 | help |
| 94 | This enables the reset driver for Allwinner SoCs. | 100 | This enables the reset driver for Allwinner SoCs. |
| 95 | 101 | ||
| @@ -121,12 +127,6 @@ config RESET_UNIPHIER | |||
| 121 | Say Y if you want to control reset signals provided by System Control | 127 | Say Y if you want to control reset signals provided by System Control |
| 122 | block, Media I/O block, Peripheral Block. | 128 | block, Media I/O block, Peripheral Block. |
| 123 | 129 | ||
| 124 | config RESET_ZX2967 | ||
| 125 | bool "ZTE ZX2967 Reset Driver" | ||
| 126 | depends on ARCH_ZX || COMPILE_TEST | ||
| 127 | help | ||
| 128 | This enables the reset controller driver for ZTE's zx2967 family. | ||
| 129 | |||
| 130 | config RESET_ZYNQ | 130 | config RESET_ZYNQ |
| 131 | bool "ZYNQ Reset Driver" if COMPILE_TEST | 131 | bool "ZYNQ Reset Driver" if COMPILE_TEST |
| 132 | default ARCH_ZYNQ | 132 | default ARCH_ZYNQ |
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index c1fd702ac57c..132c24f5ddb5 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile | |||
| @@ -5,6 +5,7 @@ obj-$(CONFIG_ARCH_STI) += sti/ | |||
| 5 | obj-$(CONFIG_ARCH_TEGRA) += tegra/ | 5 | obj-$(CONFIG_ARCH_TEGRA) += tegra/ |
| 6 | obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o | 6 | obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o |
| 7 | obj-$(CONFIG_RESET_ATH79) += reset-ath79.o | 7 | obj-$(CONFIG_RESET_ATH79) += reset-ath79.o |
| 8 | obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o | ||
| 8 | obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o | 9 | obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o |
| 9 | obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o | 10 | obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o |
| 10 | obj-$(CONFIG_RESET_IMX7) += reset-imx7.o | 11 | obj-$(CONFIG_RESET_IMX7) += reset-imx7.o |
| @@ -13,12 +14,10 @@ obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o | |||
| 13 | obj-$(CONFIG_RESET_MESON) += reset-meson.o | 14 | obj-$(CONFIG_RESET_MESON) += reset-meson.o |
| 14 | obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o | 15 | obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o |
| 15 | obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o | 16 | obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o |
| 16 | obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o | 17 | obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o |
| 17 | obj-$(CONFIG_RESET_STM32) += reset-stm32.o | ||
| 18 | obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o | 18 | obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o |
| 19 | obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o | 19 | obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o |
| 20 | obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o | 20 | obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o |
| 21 | obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o | 21 | obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o |
| 22 | obj-$(CONFIG_RESET_ZX2967) += reset-zx2967.o | ||
| 23 | obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o | 22 | obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o |
| 24 | 23 | ||
diff --git a/drivers/reset/reset-axs10x.c b/drivers/reset/reset-axs10x.c new file mode 100644 index 000000000000..afb298e46bd9 --- /dev/null +++ b/drivers/reset/reset-axs10x.c | |||
| @@ -0,0 +1,83 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2017 Synopsys. | ||
| 3 | * | ||
| 4 | * Synopsys AXS10x reset driver. | ||
| 5 | * | ||
| 6 | * This file is licensed under the terms of the GNU General Public | ||
| 7 | * License version 2. This program is licensed "as is" without any | ||
| 8 | * warranty of any kind, whether express or implied. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/io.h> | ||
| 12 | #include <linux/module.h> | ||
| 13 | #include <linux/platform_device.h> | ||
| 14 | #include <linux/reset-controller.h> | ||
| 15 | |||
| 16 | #define to_axs10x_rst(p) container_of((p), struct axs10x_rst, rcdev) | ||
| 17 | |||
| 18 | #define AXS10X_MAX_RESETS 32 | ||
| 19 | |||
| 20 | struct axs10x_rst { | ||
| 21 | void __iomem *regs_rst; | ||
| 22 | spinlock_t lock; | ||
| 23 | struct reset_controller_dev rcdev; | ||
| 24 | }; | ||
| 25 | |||
| 26 | static int axs10x_reset_reset(struct reset_controller_dev *rcdev, | ||
| 27 | unsigned long id) | ||
| 28 | { | ||
| 29 | struct axs10x_rst *rst = to_axs10x_rst(rcdev); | ||
| 30 | unsigned long flags; | ||
| 31 | |||
| 32 | spin_lock_irqsave(&rst->lock, flags); | ||
| 33 | writel(BIT(id), rst->regs_rst); | ||
| 34 | spin_unlock_irqrestore(&rst->lock, flags); | ||
| 35 | |||
| 36 | return 0; | ||
| 37 | } | ||
| 38 | |||
| 39 | static const struct reset_control_ops axs10x_reset_ops = { | ||
| 40 | .reset = axs10x_reset_reset, | ||
| 41 | }; | ||
| 42 | |||
| 43 | static int axs10x_reset_probe(struct platform_device *pdev) | ||
| 44 | { | ||
| 45 | struct axs10x_rst *rst; | ||
| 46 | struct resource *mem; | ||
| 47 | |||
| 48 | rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL); | ||
| 49 | if (!rst) | ||
| 50 | return -ENOMEM; | ||
| 51 | |||
| 52 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 53 | rst->regs_rst = devm_ioremap_resource(&pdev->dev, mem); | ||
| 54 | if (IS_ERR(rst->regs_rst)) | ||
| 55 | return PTR_ERR(rst->regs_rst); | ||
| 56 | |||
| 57 | spin_lock_init(&rst->lock); | ||
| 58 | |||
| 59 | rst->rcdev.owner = THIS_MODULE; | ||
| 60 | rst->rcdev.ops = &axs10x_reset_ops; | ||
| 61 | rst->rcdev.of_node = pdev->dev.of_node; | ||
| 62 | rst->rcdev.nr_resets = AXS10X_MAX_RESETS; | ||
| 63 | |||
| 64 | return devm_reset_controller_register(&pdev->dev, &rst->rcdev); | ||
| 65 | } | ||
| 66 | |||
| 67 | static const struct of_device_id axs10x_reset_dt_match[] = { | ||
| 68 | { .compatible = "snps,axs10x-reset" }, | ||
| 69 | { }, | ||
| 70 | }; | ||
| 71 | |||
| 72 | static struct platform_driver axs10x_reset_driver = { | ||
| 73 | .probe = axs10x_reset_probe, | ||
| 74 | .driver = { | ||
| 75 | .name = "axs10x-reset", | ||
| 76 | .of_match_table = axs10x_reset_dt_match, | ||
| 77 | }, | ||
| 78 | }; | ||
| 79 | builtin_platform_driver(axs10x_reset_driver); | ||
| 80 | |||
| 81 | MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>"); | ||
| 82 | MODULE_DESCRIPTION("Synopsys AXS10x reset driver"); | ||
| 83 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c index a8b915eb8b58..c419a3753d00 100644 --- a/drivers/reset/reset-meson.c +++ b/drivers/reset/reset-meson.c | |||
| @@ -62,13 +62,16 @@ | |||
| 62 | #include <linux/reset-controller.h> | 62 | #include <linux/reset-controller.h> |
| 63 | #include <linux/slab.h> | 63 | #include <linux/slab.h> |
| 64 | #include <linux/types.h> | 64 | #include <linux/types.h> |
| 65 | #include <linux/of_device.h> | ||
| 65 | 66 | ||
| 66 | #define REG_COUNT 8 | 67 | #define REG_COUNT 8 |
| 67 | #define BITS_PER_REG 32 | 68 | #define BITS_PER_REG 32 |
| 69 | #define LEVEL_OFFSET 0x7c | ||
| 68 | 70 | ||
| 69 | struct meson_reset { | 71 | struct meson_reset { |
| 70 | void __iomem *reg_base; | 72 | void __iomem *reg_base; |
| 71 | struct reset_controller_dev rcdev; | 73 | struct reset_controller_dev rcdev; |
| 74 | spinlock_t lock; | ||
| 72 | }; | 75 | }; |
| 73 | 76 | ||
| 74 | static int meson_reset_reset(struct reset_controller_dev *rcdev, | 77 | static int meson_reset_reset(struct reset_controller_dev *rcdev, |
| @@ -80,26 +83,68 @@ static int meson_reset_reset(struct reset_controller_dev *rcdev, | |||
| 80 | unsigned int offset = id % BITS_PER_REG; | 83 | unsigned int offset = id % BITS_PER_REG; |
| 81 | void __iomem *reg_addr = data->reg_base + (bank << 2); | 84 | void __iomem *reg_addr = data->reg_base + (bank << 2); |
| 82 | 85 | ||
| 83 | if (bank >= REG_COUNT) | ||
| 84 | return -EINVAL; | ||
| 85 | |||
| 86 | writel(BIT(offset), reg_addr); | 86 | writel(BIT(offset), reg_addr); |
| 87 | 87 | ||
| 88 | return 0; | 88 | return 0; |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | static const struct reset_control_ops meson_reset_ops = { | 91 | static int meson_reset_level(struct reset_controller_dev *rcdev, |
| 92 | unsigned long id, bool assert) | ||
| 93 | { | ||
| 94 | struct meson_reset *data = | ||
| 95 | container_of(rcdev, struct meson_reset, rcdev); | ||
| 96 | unsigned int bank = id / BITS_PER_REG; | ||
| 97 | unsigned int offset = id % BITS_PER_REG; | ||
| 98 | void __iomem *reg_addr = data->reg_base + LEVEL_OFFSET + (bank << 2); | ||
| 99 | unsigned long flags; | ||
| 100 | u32 reg; | ||
| 101 | |||
| 102 | spin_lock_irqsave(&data->lock, flags); | ||
| 103 | |||
| 104 | reg = readl(reg_addr); | ||
| 105 | if (assert) | ||
| 106 | writel(reg & ~BIT(offset), reg_addr); | ||
| 107 | else | ||
| 108 | writel(reg | BIT(offset), reg_addr); | ||
| 109 | |||
| 110 | spin_unlock_irqrestore(&data->lock, flags); | ||
| 111 | |||
| 112 | return 0; | ||
| 113 | } | ||
| 114 | |||
| 115 | static int meson_reset_assert(struct reset_controller_dev *rcdev, | ||
| 116 | unsigned long id) | ||
| 117 | { | ||
| 118 | return meson_reset_level(rcdev, id, true); | ||
| 119 | } | ||
| 120 | |||
| 121 | static int meson_reset_deassert(struct reset_controller_dev *rcdev, | ||
| 122 | unsigned long id) | ||
| 123 | { | ||
| 124 | return meson_reset_level(rcdev, id, false); | ||
| 125 | } | ||
| 126 | |||
| 127 | static const struct reset_control_ops meson_reset_meson8_ops = { | ||
| 92 | .reset = meson_reset_reset, | 128 | .reset = meson_reset_reset, |
| 93 | }; | 129 | }; |
| 94 | 130 | ||
| 131 | static const struct reset_control_ops meson_reset_gx_ops = { | ||
| 132 | .reset = meson_reset_reset, | ||
| 133 | .assert = meson_reset_assert, | ||
| 134 | .deassert = meson_reset_deassert, | ||
| 135 | }; | ||
| 136 | |||
| 95 | static const struct of_device_id meson_reset_dt_ids[] = { | 137 | static const struct of_device_id meson_reset_dt_ids[] = { |
| 96 | { .compatible = "amlogic,meson8b-reset", }, | 138 | { .compatible = "amlogic,meson8b-reset", |
| 97 | { .compatible = "amlogic,meson-gxbb-reset", }, | 139 | .data = &meson_reset_meson8_ops, }, |
| 140 | { .compatible = "amlogic,meson-gxbb-reset", | ||
| 141 | .data = &meson_reset_gx_ops, }, | ||
| 98 | { /* sentinel */ }, | 142 | { /* sentinel */ }, |
| 99 | }; | 143 | }; |
| 100 | 144 | ||
| 101 | static int meson_reset_probe(struct platform_device *pdev) | 145 | static int meson_reset_probe(struct platform_device *pdev) |
| 102 | { | 146 | { |
| 147 | const struct reset_control_ops *ops; | ||
| 103 | struct meson_reset *data; | 148 | struct meson_reset *data; |
| 104 | struct resource *res; | 149 | struct resource *res; |
| 105 | 150 | ||
| @@ -107,6 +152,10 @@ static int meson_reset_probe(struct platform_device *pdev) | |||
| 107 | if (!data) | 152 | if (!data) |
| 108 | return -ENOMEM; | 153 | return -ENOMEM; |
| 109 | 154 | ||
| 155 | ops = of_device_get_match_data(&pdev->dev); | ||
| 156 | if (!ops) | ||
| 157 | return -EINVAL; | ||
| 158 | |||
| 110 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 159 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 111 | data->reg_base = devm_ioremap_resource(&pdev->dev, res); | 160 | data->reg_base = devm_ioremap_resource(&pdev->dev, res); |
| 112 | if (IS_ERR(data->reg_base)) | 161 | if (IS_ERR(data->reg_base)) |
| @@ -114,9 +163,11 @@ static int meson_reset_probe(struct platform_device *pdev) | |||
| 114 | 163 | ||
| 115 | platform_set_drvdata(pdev, data); | 164 | platform_set_drvdata(pdev, data); |
| 116 | 165 | ||
| 166 | spin_lock_init(&data->lock); | ||
| 167 | |||
| 117 | data->rcdev.owner = THIS_MODULE; | 168 | data->rcdev.owner = THIS_MODULE; |
| 118 | data->rcdev.nr_resets = REG_COUNT * BITS_PER_REG; | 169 | data->rcdev.nr_resets = REG_COUNT * BITS_PER_REG; |
| 119 | data->rcdev.ops = &meson_reset_ops; | 170 | data->rcdev.ops = ops; |
| 120 | data->rcdev.of_node = pdev->dev.of_node; | 171 | data->rcdev.of_node = pdev->dev.of_node; |
| 121 | 172 | ||
| 122 | return devm_reset_controller_register(&pdev->dev, &data->rcdev); | 173 | return devm_reset_controller_register(&pdev->dev, &data->rcdev); |
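On GX the new level registers let a line be held in reset, so consumers can now use assert/deassert in addition to the pulse. A hedged sketch of that pattern; the consumer function and the delay value are illustrative assumptions.

    #include <linux/delay.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/reset.h>

    static int gx_block_hard_reset(struct device *dev)
    {
            struct reset_control *rst;
            int ret;

            rst = devm_reset_control_get_exclusive(dev, NULL);
            if (IS_ERR(rst))
                    return PTR_ERR(rst);

            ret = reset_control_assert(rst);        /* clears the level bit */
            if (ret)
                    return ret;

            usleep_range(10, 20);                   /* arbitrary settle time */

            return reset_control_deassert(rst);     /* sets the level bit again */
    }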
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c new file mode 100644 index 000000000000..2d4f362ef025 --- /dev/null +++ b/drivers/reset/reset-simple.c | |||
| @@ -0,0 +1,186 @@ | |||
| 1 | /* | ||
| 2 | * Simple Reset Controller Driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de> | ||
| 5 | * | ||
| 6 | * Based on Allwinner SoCs Reset Controller driver | ||
| 7 | * | ||
| 8 | * Copyright 2013 Maxime Ripard | ||
| 9 | * | ||
| 10 | * Maxime Ripard <maxime.ripard@free-electrons.com> | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or modify | ||
| 13 | * it under the terms of the GNU General Public License as published by | ||
| 14 | * the Free Software Foundation; either version 2 of the License, or | ||
| 15 | * (at your option) any later version. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/device.h> | ||
| 19 | #include <linux/err.h> | ||
| 20 | #include <linux/io.h> | ||
| 21 | #include <linux/of.h> | ||
| 22 | #include <linux/of_device.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/reset-controller.h> | ||
| 25 | #include <linux/spinlock.h> | ||
| 26 | |||
| 27 | #include "reset-simple.h" | ||
| 28 | |||
| 29 | static inline struct reset_simple_data * | ||
| 30 | to_reset_simple_data(struct reset_controller_dev *rcdev) | ||
| 31 | { | ||
| 32 | return container_of(rcdev, struct reset_simple_data, rcdev); | ||
| 33 | } | ||
| 34 | |||
| 35 | static int reset_simple_update(struct reset_controller_dev *rcdev, | ||
| 36 | unsigned long id, bool assert) | ||
| 37 | { | ||
| 38 | struct reset_simple_data *data = to_reset_simple_data(rcdev); | ||
| 39 | int reg_width = sizeof(u32); | ||
| 40 | int bank = id / (reg_width * BITS_PER_BYTE); | ||
| 41 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
| 42 | unsigned long flags; | ||
| 43 | u32 reg; | ||
| 44 | |||
| 45 | spin_lock_irqsave(&data->lock, flags); | ||
| 46 | |||
| 47 | reg = readl(data->membase + (bank * reg_width)); | ||
| 48 | if (assert ^ data->active_low) | ||
| 49 | reg |= BIT(offset); | ||
| 50 | else | ||
| 51 | reg &= ~BIT(offset); | ||
| 52 | writel(reg, data->membase + (bank * reg_width)); | ||
| 53 | |||
| 54 | spin_unlock_irqrestore(&data->lock, flags); | ||
| 55 | |||
| 56 | return 0; | ||
| 57 | } | ||
| 58 | |||
| 59 | static int reset_simple_assert(struct reset_controller_dev *rcdev, | ||
| 60 | unsigned long id) | ||
| 61 | { | ||
| 62 | return reset_simple_update(rcdev, id, true); | ||
| 63 | } | ||
| 64 | |||
| 65 | static int reset_simple_deassert(struct reset_controller_dev *rcdev, | ||
| 66 | unsigned long id) | ||
| 67 | { | ||
| 68 | return reset_simple_update(rcdev, id, false); | ||
| 69 | } | ||
| 70 | |||
| 71 | static int reset_simple_status(struct reset_controller_dev *rcdev, | ||
| 72 | unsigned long id) | ||
| 73 | { | ||
| 74 | struct reset_simple_data *data = to_reset_simple_data(rcdev); | ||
| 75 | int reg_width = sizeof(u32); | ||
| 76 | int bank = id / (reg_width * BITS_PER_BYTE); | ||
| 77 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
| 78 | u32 reg; | ||
| 79 | |||
| 80 | reg = readl(data->membase + (bank * reg_width)); | ||
| 81 | |||
| 82 | return !(reg & BIT(offset)) ^ !data->status_active_low; | ||
| 83 | } | ||
| 84 | |||
| 85 | const struct reset_control_ops reset_simple_ops = { | ||
| 86 | .assert = reset_simple_assert, | ||
| 87 | .deassert = reset_simple_deassert, | ||
| 88 | .status = reset_simple_status, | ||
| 89 | }; | ||
| 90 | |||
| 91 | /** | ||
| 92 | * struct reset_simple_devdata - simple reset controller properties | ||
| 93 | * @reg_offset: offset between base address and first reset register. | ||
| 94 | * @nr_resets: number of resets. If not set, defaults to the resource size in bits. | ||
| 95 | * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits | ||
| 96 | * are set to assert the reset. | ||
| 97 | * @status_active_low: if true, bits read back as cleared while the reset is | ||
| 98 | * asserted. Otherwise, bits read back as set while the | ||
| 99 | * reset is asserted. | ||
| 100 | */ | ||
| 101 | struct reset_simple_devdata { | ||
| 102 | u32 reg_offset; | ||
| 103 | u32 nr_resets; | ||
| 104 | bool active_low; | ||
| 105 | bool status_active_low; | ||
| 106 | }; | ||
| 107 | |||
| 108 | #define SOCFPGA_NR_BANKS 8 | ||
| 109 | |||
| 110 | static const struct reset_simple_devdata reset_simple_socfpga = { | ||
| 111 | .reg_offset = 0x10, | ||
| 112 | .nr_resets = SOCFPGA_NR_BANKS * 32, | ||
| 113 | .status_active_low = true, | ||
| 114 | }; | ||
| 115 | |||
| 116 | static const struct reset_simple_devdata reset_simple_active_low = { | ||
| 117 | .active_low = true, | ||
| 118 | .status_active_low = true, | ||
| 119 | }; | ||
| 120 | |||
| 121 | static const struct of_device_id reset_simple_dt_ids[] = { | ||
| 122 | { .compatible = "altr,rst-mgr", .data = &reset_simple_socfpga }, | ||
| 123 | { .compatible = "st,stm32-rcc", }, | ||
| 124 | { .compatible = "allwinner,sun6i-a31-clock-reset", | ||
| 125 | .data = &reset_simple_active_low }, | ||
| 126 | { .compatible = "zte,zx296718-reset", | ||
| 127 | .data = &reset_simple_active_low }, | ||
| 128 | { /* sentinel */ }, | ||
| 129 | }; | ||
| 130 | |||
| 131 | static int reset_simple_probe(struct platform_device *pdev) | ||
| 132 | { | ||
| 133 | struct device *dev = &pdev->dev; | ||
| 134 | const struct reset_simple_devdata *devdata; | ||
| 135 | struct reset_simple_data *data; | ||
| 136 | void __iomem *membase; | ||
| 137 | struct resource *res; | ||
| 138 | u32 reg_offset = 0; | ||
| 139 | |||
| 140 | devdata = of_device_get_match_data(dev); | ||
| 141 | |||
| 142 | data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); | ||
| 143 | if (!data) | ||
| 144 | return -ENOMEM; | ||
| 145 | |||
| 146 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 147 | membase = devm_ioremap_resource(dev, res); | ||
| 148 | if (IS_ERR(membase)) | ||
| 149 | return PTR_ERR(membase); | ||
| 150 | |||
| 151 | spin_lock_init(&data->lock); | ||
| 152 | data->membase = membase; | ||
| 153 | data->rcdev.owner = THIS_MODULE; | ||
| 154 | data->rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE; | ||
| 155 | data->rcdev.ops = &reset_simple_ops; | ||
| 156 | data->rcdev.of_node = dev->of_node; | ||
| 157 | |||
| 158 | if (devdata) { | ||
| 159 | reg_offset = devdata->reg_offset; | ||
| 160 | if (devdata->nr_resets) | ||
| 161 | data->rcdev.nr_resets = devdata->nr_resets; | ||
| 162 | data->active_low = devdata->active_low; | ||
| 163 | data->status_active_low = devdata->status_active_low; | ||
| 164 | } | ||
| 165 | |||
| 166 | if (of_device_is_compatible(dev->of_node, "altr,rst-mgr") && | ||
| 167 | of_property_read_u32(dev->of_node, "altr,modrst-offset", | ||
| 168 | ®_offset)) { | ||
| 169 | dev_warn(dev, | ||
| 170 | "missing altr,modrst-offset property, assuming 0x%x!\n", | ||
| 171 | reg_offset); | ||
| 172 | } | ||
| 173 | |||
| 174 | data->membase += reg_offset; | ||
| 175 | |||
| 176 | return devm_reset_controller_register(dev, &data->rcdev); | ||
| 177 | } | ||
| 178 | |||
| 179 | static struct platform_driver reset_simple_driver = { | ||
| 180 | .probe = reset_simple_probe, | ||
| 181 | .driver = { | ||
| 182 | .name = "simple-reset", | ||
| 183 | .of_match_table = reset_simple_dt_ids, | ||
| 184 | }, | ||
| 185 | }; | ||
| 186 | builtin_platform_driver(reset_simple_driver); | ||
diff --git a/drivers/reset/reset-simple.h b/drivers/reset/reset-simple.h new file mode 100644 index 000000000000..8a496022baef --- /dev/null +++ b/drivers/reset/reset-simple.h | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | /* | ||
| 2 | * Simple Reset Controller ops | ||
| 3 | * | ||
| 4 | * Based on Allwinner SoCs Reset Controller driver | ||
| 5 | * | ||
| 6 | * Copyright 2013 Maxime Ripard | ||
| 7 | * | ||
| 8 | * Maxime Ripard <maxime.ripard@free-electrons.com> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License as published by | ||
| 12 | * the Free Software Foundation; either version 2 of the License, or | ||
| 13 | * (at your option) any later version. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #ifndef __RESET_SIMPLE_H__ | ||
| 17 | #define __RESET_SIMPLE_H__ | ||
| 18 | |||
| 19 | #include <linux/io.h> | ||
| 20 | #include <linux/reset-controller.h> | ||
| 21 | #include <linux/spinlock.h> | ||
| 22 | |||
| 23 | /** | ||
| 24 | * struct reset_simple_data - driver data for simple reset controllers | ||
| 25 | * @lock: spinlock to protect registers during read-modify-write cycles | ||
| 26 | * @membase: memory mapped I/O register range | ||
| 27 | * @rcdev: reset controller device base structure | ||
| 28 | * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits | ||
| 29 | * are set to assert the reset. Note that this says nothing about | ||
| 30 | * the voltage level of the actual reset line. | ||
| 31 | * @status_active_low: if true, bits read back as cleared while the reset is | ||
| 32 | * asserted. Otherwise, bits read back as set while the | ||
| 33 | * reset is asserted. | ||
| 34 | */ | ||
| 35 | struct reset_simple_data { | ||
| 36 | spinlock_t lock; | ||
| 37 | void __iomem *membase; | ||
| 38 | struct reset_controller_dev rcdev; | ||
| 39 | bool active_low; | ||
| 40 | bool status_active_low; | ||
| 41 | }; | ||
| 42 | |||
| 43 | extern const struct reset_control_ops reset_simple_ops; | ||
| 44 | |||
| 45 | #endif /* __RESET_SIMPLE_H__ */ | ||
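Other reset drivers can reuse the shared ops by embedding struct reset_simple_data; the reset-sunxi.c conversion below does exactly that for its early-boot path. A hedged sketch of the pattern, where the node setup and the nr_resets value are illustrative, not taken from any real controller:

    #include <linux/init.h>
    #include <linux/io.h>
    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/of_address.h>
    #include <linux/slab.h>

    #include "reset-simple.h"

    static int __init my_early_reset_init(struct device_node *np)
    {
            struct reset_simple_data *data;

            data = kzalloc(sizeof(*data), GFP_KERNEL);
            if (!data)
                    return -ENOMEM;

            data->membase = of_iomap(np, 0);
            if (!data->membase) {
                    kfree(data);
                    return -ENOMEM;
            }

            spin_lock_init(&data->lock);
            data->active_low = true;                /* bits cleared to assert */
            data->rcdev.owner = THIS_MODULE;
            data->rcdev.nr_resets = 32;             /* illustrative */
            data->rcdev.ops = &reset_simple_ops;
            data->rcdev.of_node = np;

            return reset_controller_register(&data->rcdev);
    }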
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c deleted file mode 100644 index 3907bbc9c6cf..000000000000 --- a/drivers/reset/reset-socfpga.c +++ /dev/null | |||
| @@ -1,157 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Socfpga Reset Controller Driver | ||
| 3 | * | ||
| 4 | * Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de> | ||
| 5 | * | ||
| 6 | * based on | ||
| 7 | * Allwinner SoCs Reset Controller driver | ||
| 8 | * | ||
| 9 | * Copyright 2013 Maxime Ripard | ||
| 10 | * | ||
| 11 | * Maxime Ripard <maxime.ripard@free-electrons.com> | ||
| 12 | * | ||
| 13 | * This program is free software; you can redistribute it and/or modify | ||
| 14 | * it under the terms of the GNU General Public License as published by | ||
| 15 | * the Free Software Foundation; either version 2 of the License, or | ||
| 16 | * (at your option) any later version. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include <linux/err.h> | ||
| 20 | #include <linux/io.h> | ||
| 21 | #include <linux/init.h> | ||
| 22 | #include <linux/of.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/reset-controller.h> | ||
| 25 | #include <linux/spinlock.h> | ||
| 26 | #include <linux/types.h> | ||
| 27 | |||
| 28 | #define BANK_INCREMENT 4 | ||
| 29 | #define NR_BANKS 8 | ||
| 30 | |||
| 31 | struct socfpga_reset_data { | ||
| 32 | spinlock_t lock; | ||
| 33 | void __iomem *membase; | ||
| 34 | struct reset_controller_dev rcdev; | ||
| 35 | }; | ||
| 36 | |||
| 37 | static int socfpga_reset_assert(struct reset_controller_dev *rcdev, | ||
| 38 | unsigned long id) | ||
| 39 | { | ||
| 40 | struct socfpga_reset_data *data = container_of(rcdev, | ||
| 41 | struct socfpga_reset_data, | ||
| 42 | rcdev); | ||
| 43 | int reg_width = sizeof(u32); | ||
| 44 | int bank = id / (reg_width * BITS_PER_BYTE); | ||
| 45 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
| 46 | unsigned long flags; | ||
| 47 | u32 reg; | ||
| 48 | |||
| 49 | spin_lock_irqsave(&data->lock, flags); | ||
| 50 | |||
| 51 | reg = readl(data->membase + (bank * BANK_INCREMENT)); | ||
| 52 | writel(reg | BIT(offset), data->membase + (bank * BANK_INCREMENT)); | ||
| 53 | spin_unlock_irqrestore(&data->lock, flags); | ||
| 54 | |||
| 55 | return 0; | ||
| 56 | } | ||
| 57 | |||
| 58 | static int socfpga_reset_deassert(struct reset_controller_dev *rcdev, | ||
| 59 | unsigned long id) | ||
| 60 | { | ||
| 61 | struct socfpga_reset_data *data = container_of(rcdev, | ||
| 62 | struct socfpga_reset_data, | ||
| 63 | rcdev); | ||
| 64 | |||
| 65 | int reg_width = sizeof(u32); | ||
| 66 | int bank = id / (reg_width * BITS_PER_BYTE); | ||
| 67 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
| 68 | unsigned long flags; | ||
| 69 | u32 reg; | ||
| 70 | |||
| 71 | spin_lock_irqsave(&data->lock, flags); | ||
| 72 | |||
| 73 | reg = readl(data->membase + (bank * BANK_INCREMENT)); | ||
| 74 | writel(reg & ~BIT(offset), data->membase + (bank * BANK_INCREMENT)); | ||
| 75 | |||
| 76 | spin_unlock_irqrestore(&data->lock, flags); | ||
| 77 | |||
| 78 | return 0; | ||
| 79 | } | ||
| 80 | |||
| 81 | static int socfpga_reset_status(struct reset_controller_dev *rcdev, | ||
| 82 | unsigned long id) | ||
| 83 | { | ||
| 84 | struct socfpga_reset_data *data = container_of(rcdev, | ||
| 85 | struct socfpga_reset_data, rcdev); | ||
| 86 | int reg_width = sizeof(u32); | ||
| 87 | int bank = id / (reg_width * BITS_PER_BYTE); | ||
| 88 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
| 89 | u32 reg; | ||
| 90 | |||
| 91 | reg = readl(data->membase + (bank * BANK_INCREMENT)); | ||
| 92 | |||
| 93 | return !(reg & BIT(offset)); | ||
| 94 | } | ||
| 95 | |||
| 96 | static const struct reset_control_ops socfpga_reset_ops = { | ||
| 97 | .assert = socfpga_reset_assert, | ||
| 98 | .deassert = socfpga_reset_deassert, | ||
| 99 | .status = socfpga_reset_status, | ||
| 100 | }; | ||
| 101 | |||
| 102 | static int socfpga_reset_probe(struct platform_device *pdev) | ||
| 103 | { | ||
| 104 | struct socfpga_reset_data *data; | ||
| 105 | struct resource *res; | ||
| 106 | struct device *dev = &pdev->dev; | ||
| 107 | struct device_node *np = dev->of_node; | ||
| 108 | u32 modrst_offset; | ||
| 109 | |||
| 110 | /* | ||
| 111 | * The binding was mainlined without the required property. | ||
| 112 | * Do not continue, when we encounter an old DT. | ||
| 113 | */ | ||
| 114 | if (!of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) { | ||
| 115 | dev_err(&pdev->dev, "%pOF missing #reset-cells property\n", | ||
| 116 | pdev->dev.of_node); | ||
| 117 | return -EINVAL; | ||
| 118 | } | ||
| 119 | |||
| 120 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); | ||
| 121 | if (!data) | ||
| 122 | return -ENOMEM; | ||
| 123 | |||
| 124 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 125 | data->membase = devm_ioremap_resource(&pdev->dev, res); | ||
| 126 | if (IS_ERR(data->membase)) | ||
| 127 | return PTR_ERR(data->membase); | ||
| 128 | |||
| 129 | if (of_property_read_u32(np, "altr,modrst-offset", &modrst_offset)) { | ||
| 130 | dev_warn(dev, "missing altr,modrst-offset property, assuming 0x10!\n"); | ||
| 131 | modrst_offset = 0x10; | ||
| 132 | } | ||
| 133 | data->membase += modrst_offset; | ||
| 134 | |||
| 135 | spin_lock_init(&data->lock); | ||
| 136 | |||
| 137 | data->rcdev.owner = THIS_MODULE; | ||
| 138 | data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE); | ||
| 139 | data->rcdev.ops = &socfpga_reset_ops; | ||
| 140 | data->rcdev.of_node = pdev->dev.of_node; | ||
| 141 | |||
| 142 | return devm_reset_controller_register(dev, &data->rcdev); | ||
| 143 | } | ||
| 144 | |||
| 145 | static const struct of_device_id socfpga_reset_dt_ids[] = { | ||
| 146 | { .compatible = "altr,rst-mgr", }, | ||
| 147 | { /* sentinel */ }, | ||
| 148 | }; | ||
| 149 | |||
| 150 | static struct platform_driver socfpga_reset_driver = { | ||
| 151 | .probe = socfpga_reset_probe, | ||
| 152 | .driver = { | ||
| 153 | .name = "socfpga-reset", | ||
| 154 | .of_match_table = socfpga_reset_dt_ids, | ||
| 155 | }, | ||
| 156 | }; | ||
| 157 | builtin_platform_driver(socfpga_reset_driver); | ||
diff --git a/drivers/reset/reset-stm32.c b/drivers/reset/reset-stm32.c deleted file mode 100644 index 3a7c8527e66a..000000000000 --- a/drivers/reset/reset-stm32.c +++ /dev/null | |||
| @@ -1,108 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) Maxime Coquelin 2015 | ||
| 3 | * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com> | ||
| 4 | * License terms: GNU General Public License (GPL), version 2 | ||
| 5 | * | ||
| 6 | * Heavily based on sunxi driver from Maxime Ripard. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/err.h> | ||
| 10 | #include <linux/io.h> | ||
| 11 | #include <linux/of.h> | ||
| 12 | #include <linux/of_address.h> | ||
| 13 | #include <linux/platform_device.h> | ||
| 14 | #include <linux/reset-controller.h> | ||
| 15 | #include <linux/slab.h> | ||
| 16 | #include <linux/spinlock.h> | ||
| 17 | #include <linux/types.h> | ||
| 18 | |||
| 19 | struct stm32_reset_data { | ||
| 20 | spinlock_t lock; | ||
| 21 | void __iomem *membase; | ||
| 22 | struct reset_controller_dev rcdev; | ||
| 23 | }; | ||
| 24 | |||
| 25 | static int stm32_reset_assert(struct reset_controller_dev *rcdev, | ||
| 26 | unsigned long id) | ||
| 27 | { | ||
| 28 | struct stm32_reset_data *data = container_of(rcdev, | ||
| 29 | struct stm32_reset_data, | ||
| 30 | rcdev); | ||
| 31 | int bank = id / BITS_PER_LONG; | ||
| 32 | int offset = id % BITS_PER_LONG; | ||
| 33 | unsigned long flags; | ||
| 34 | u32 reg; | ||
| 35 | |||
| 36 | spin_lock_irqsave(&data->lock, flags); | ||
| 37 | |||
| 38 | reg = readl(data->membase + (bank * 4)); | ||
| 39 | writel(reg | BIT(offset), data->membase + (bank * 4)); | ||
| 40 | |||
| 41 | spin_unlock_irqrestore(&data->lock, flags); | ||
| 42 | |||
| 43 | return 0; | ||
| 44 | } | ||
| 45 | |||
| 46 | static int stm32_reset_deassert(struct reset_controller_dev *rcdev, | ||
| 47 | unsigned long id) | ||
| 48 | { | ||
| 49 | struct stm32_reset_data *data = container_of(rcdev, | ||
| 50 | struct stm32_reset_data, | ||
| 51 | rcdev); | ||
| 52 | int bank = id / BITS_PER_LONG; | ||
| 53 | int offset = id % BITS_PER_LONG; | ||
| 54 | unsigned long flags; | ||
| 55 | u32 reg; | ||
| 56 | |||
| 57 | spin_lock_irqsave(&data->lock, flags); | ||
| 58 | |||
| 59 | reg = readl(data->membase + (bank * 4)); | ||
| 60 | writel(reg & ~BIT(offset), data->membase + (bank * 4)); | ||
| 61 | |||
| 62 | spin_unlock_irqrestore(&data->lock, flags); | ||
| 63 | |||
| 64 | return 0; | ||
| 65 | } | ||
| 66 | |||
| 67 | static const struct reset_control_ops stm32_reset_ops = { | ||
| 68 | .assert = stm32_reset_assert, | ||
| 69 | .deassert = stm32_reset_deassert, | ||
| 70 | }; | ||
| 71 | |||
| 72 | static const struct of_device_id stm32_reset_dt_ids[] = { | ||
| 73 | { .compatible = "st,stm32-rcc", }, | ||
| 74 | { /* sentinel */ }, | ||
| 75 | }; | ||
| 76 | |||
| 77 | static int stm32_reset_probe(struct platform_device *pdev) | ||
| 78 | { | ||
| 79 | struct stm32_reset_data *data; | ||
| 80 | struct resource *res; | ||
| 81 | |||
| 82 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); | ||
| 83 | if (!data) | ||
| 84 | return -ENOMEM; | ||
| 85 | |||
| 86 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 87 | data->membase = devm_ioremap_resource(&pdev->dev, res); | ||
| 88 | if (IS_ERR(data->membase)) | ||
| 89 | return PTR_ERR(data->membase); | ||
| 90 | |||
| 91 | spin_lock_init(&data->lock); | ||
| 92 | |||
| 93 | data->rcdev.owner = THIS_MODULE; | ||
| 94 | data->rcdev.nr_resets = resource_size(res) * 8; | ||
| 95 | data->rcdev.ops = &stm32_reset_ops; | ||
| 96 | data->rcdev.of_node = pdev->dev.of_node; | ||
| 97 | |||
| 98 | return devm_reset_controller_register(&pdev->dev, &data->rcdev); | ||
| 99 | } | ||
| 100 | |||
| 101 | static struct platform_driver stm32_reset_driver = { | ||
| 102 | .probe = stm32_reset_probe, | ||
| 103 | .driver = { | ||
| 104 | .name = "stm32-rcc-reset", | ||
| 105 | .of_match_table = stm32_reset_dt_ids, | ||
| 106 | }, | ||
| 107 | }; | ||
| 108 | builtin_platform_driver(stm32_reset_driver); | ||
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c index 2c7dd1fd08df..db9a1a75523f 100644 --- a/drivers/reset/reset-sunxi.c +++ b/drivers/reset/reset-sunxi.c | |||
| @@ -22,64 +22,11 @@ | |||
| 22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
| 23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
| 24 | 24 | ||
| 25 | struct sunxi_reset_data { | 25 | #include "reset-simple.h" |
| 26 | spinlock_t lock; | ||
| 27 | void __iomem *membase; | ||
| 28 | struct reset_controller_dev rcdev; | ||
| 29 | }; | ||
| 30 | |||
| 31 | static int sunxi_reset_assert(struct reset_controller_dev *rcdev, | ||
| 32 | unsigned long id) | ||
| 33 | { | ||
| 34 | struct sunxi_reset_data *data = container_of(rcdev, | ||
| 35 | struct sunxi_reset_data, | ||
| 36 | rcdev); | ||
| 37 | int reg_width = sizeof(u32); | ||
| 38 | int bank = id / (reg_width * BITS_PER_BYTE); | ||
| 39 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
| 40 | unsigned long flags; | ||
| 41 | u32 reg; | ||
| 42 | |||
| 43 | spin_lock_irqsave(&data->lock, flags); | ||
| 44 | |||
| 45 | reg = readl(data->membase + (bank * reg_width)); | ||
| 46 | writel(reg & ~BIT(offset), data->membase + (bank * reg_width)); | ||
| 47 | |||
| 48 | spin_unlock_irqrestore(&data->lock, flags); | ||
| 49 | |||
| 50 | return 0; | ||
| 51 | } | ||
| 52 | |||
| 53 | static int sunxi_reset_deassert(struct reset_controller_dev *rcdev, | ||
| 54 | unsigned long id) | ||
| 55 | { | ||
| 56 | struct sunxi_reset_data *data = container_of(rcdev, | ||
| 57 | struct sunxi_reset_data, | ||
| 58 | rcdev); | ||
| 59 | int reg_width = sizeof(u32); | ||
| 60 | int bank = id / (reg_width * BITS_PER_BYTE); | ||
| 61 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
| 62 | unsigned long flags; | ||
| 63 | u32 reg; | ||
| 64 | |||
| 65 | spin_lock_irqsave(&data->lock, flags); | ||
| 66 | |||
| 67 | reg = readl(data->membase + (bank * reg_width)); | ||
| 68 | writel(reg | BIT(offset), data->membase + (bank * reg_width)); | ||
| 69 | |||
| 70 | spin_unlock_irqrestore(&data->lock, flags); | ||
| 71 | |||
| 72 | return 0; | ||
| 73 | } | ||
| 74 | |||
| 75 | static const struct reset_control_ops sunxi_reset_ops = { | ||
| 76 | .assert = sunxi_reset_assert, | ||
| 77 | .deassert = sunxi_reset_deassert, | ||
| 78 | }; | ||
| 79 | 26 | ||
| 80 | static int sunxi_reset_init(struct device_node *np) | 27 | static int sunxi_reset_init(struct device_node *np) |
| 81 | { | 28 | { |
| 82 | struct sunxi_reset_data *data; | 29 | struct reset_simple_data *data; |
| 83 | struct resource res; | 30 | struct resource res; |
| 84 | resource_size_t size; | 31 | resource_size_t size; |
| 85 | int ret; | 32 | int ret; |
| @@ -108,8 +55,9 @@ static int sunxi_reset_init(struct device_node *np) | |||
| 108 | 55 | ||
| 109 | data->rcdev.owner = THIS_MODULE; | 56 | data->rcdev.owner = THIS_MODULE; |
| 110 | data->rcdev.nr_resets = size * 8; | 57 | data->rcdev.nr_resets = size * 8; |
| 111 | data->rcdev.ops = &sunxi_reset_ops; | 58 | data->rcdev.ops = &reset_simple_ops; |
| 112 | data->rcdev.of_node = np; | 59 | data->rcdev.of_node = np; |
| 60 | data->active_low = true; | ||
| 113 | 61 | ||
| 114 | return reset_controller_register(&data->rcdev); | 62 | return reset_controller_register(&data->rcdev); |
| 115 | 63 | ||
| @@ -122,6 +70,8 @@ err_alloc: | |||
| 122 | * These are the reset controller we need to initialize early on in | 70 | * These are the reset controller we need to initialize early on in |
| 123 | * our system, before we can even think of using a regular device | 71 | * our system, before we can even think of using a regular device |
| 124 | * driver for it. | 72 | * driver for it. |
| 73 | * The controllers that we can register through the regular device | ||
| 74 | * model are handled by the simple reset driver directly. | ||
| 125 | */ | 75 | */ |
| 126 | static const struct of_device_id sunxi_early_reset_dt_ids[] __initconst = { | 76 | static const struct of_device_id sunxi_early_reset_dt_ids[] __initconst = { |
| 127 | { .compatible = "allwinner,sun6i-a31-ahb1-reset", }, | 77 | { .compatible = "allwinner,sun6i-a31-ahb1-reset", }, |
| @@ -135,45 +85,3 @@ void __init sun6i_reset_init(void) | |||
| 135 | for_each_matching_node(np, sunxi_early_reset_dt_ids) | 85 | for_each_matching_node(np, sunxi_early_reset_dt_ids) |
| 136 | sunxi_reset_init(np); | 86 | sunxi_reset_init(np); |
| 137 | } | 87 | } |
| 138 | |||
| 139 | /* | ||
| 140 | * And these are the controllers we can register through the regular | ||
| 141 | * device model. | ||
| 142 | */ | ||
| 143 | static const struct of_device_id sunxi_reset_dt_ids[] = { | ||
| 144 | { .compatible = "allwinner,sun6i-a31-clock-reset", }, | ||
| 145 | { /* sentinel */ }, | ||
| 146 | }; | ||
| 147 | |||
| 148 | static int sunxi_reset_probe(struct platform_device *pdev) | ||
| 149 | { | ||
| 150 | struct sunxi_reset_data *data; | ||
| 151 | struct resource *res; | ||
| 152 | |||
| 153 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); | ||
| 154 | if (!data) | ||
| 155 | return -ENOMEM; | ||
| 156 | |||
| 157 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 158 | data->membase = devm_ioremap_resource(&pdev->dev, res); | ||
| 159 | if (IS_ERR(data->membase)) | ||
| 160 | return PTR_ERR(data->membase); | ||
| 161 | |||
| 162 | spin_lock_init(&data->lock); | ||
| 163 | |||
| 164 | data->rcdev.owner = THIS_MODULE; | ||
| 165 | data->rcdev.nr_resets = resource_size(res) * 8; | ||
| 166 | data->rcdev.ops = &sunxi_reset_ops; | ||
| 167 | data->rcdev.of_node = pdev->dev.of_node; | ||
| 168 | |||
| 169 | return devm_reset_controller_register(&pdev->dev, &data->rcdev); | ||
| 170 | } | ||
| 171 | |||
| 172 | static struct platform_driver sunxi_reset_driver = { | ||
| 173 | .probe = sunxi_reset_probe, | ||
| 174 | .driver = { | ||
| 175 | .name = "sunxi-reset", | ||
| 176 | .of_match_table = sunxi_reset_dt_ids, | ||
| 177 | }, | ||
| 178 | }; | ||
| 179 | builtin_platform_driver(sunxi_reset_driver); | ||
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c index bda2dd196ae5..e8bb023ff15e 100644 --- a/drivers/reset/reset-uniphier.c +++ b/drivers/reset/reset-uniphier.c | |||
| @@ -58,6 +58,7 @@ static const struct uniphier_reset_data uniphier_ld4_sys_reset_data[] = { | |||
| 58 | 58 | ||
| 59 | static const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = { | 59 | static const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = { |
| 60 | UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */ | 60 | UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */ |
| 61 | UNIPHIER_RESETX(6, 0x2000, 12), /* Ether */ | ||
| 61 | UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, MIO, RLE) */ | 62 | UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, MIO, RLE) */ |
| 62 | UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (Ether, SATA, USB3) */ | 63 | UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (Ether, SATA, USB3) */ |
| 63 | UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */ | 64 | UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */ |
| @@ -76,6 +77,7 @@ static const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = { | |||
| 76 | 77 | ||
| 77 | static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = { | 78 | static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = { |
| 78 | UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */ | 79 | UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */ |
| 80 | UNIPHIER_RESETX(6, 0x2000, 12), /* Ether */ | ||
| 79 | UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, RLE) */ | 81 | UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, RLE) */ |
| 80 | UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */ | 82 | UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */ |
| 81 | UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */ | 83 | UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */ |
| @@ -92,6 +94,7 @@ static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = { | |||
| 92 | static const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = { | 94 | static const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = { |
| 93 | UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */ | 95 | UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */ |
| 94 | UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ | 96 | UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ |
| 97 | UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */ | ||
| 95 | UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC, MIO) */ | 98 | UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC, MIO) */ |
| 96 | UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */ | 99 | UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */ |
| 97 | UNIPHIER_RESETX(41, 0x2008, 1), /* EVEA */ | 100 | UNIPHIER_RESETX(41, 0x2008, 1), /* EVEA */ |
| @@ -102,6 +105,7 @@ static const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = { | |||
| 102 | static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = { | 105 | static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = { |
| 103 | UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */ | 106 | UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */ |
| 104 | UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ | 107 | UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ |
| 108 | UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */ | ||
| 105 | UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */ | 109 | UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */ |
| 106 | UNIPHIER_RESETX(12, 0x200c, 5), /* GIO (PCIe, USB3) */ | 110 | UNIPHIER_RESETX(12, 0x200c, 5), /* GIO (PCIe, USB3) */ |
| 107 | UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */ | 111 | UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */ |
| @@ -114,6 +118,20 @@ static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = { | |||
| 114 | UNIPHIER_RESET_END, | 118 | UNIPHIER_RESET_END, |
| 115 | }; | 119 | }; |
| 116 | 120 | ||
| 121 | static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = { | ||
| 122 | UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */ | ||
| 123 | UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ | ||
| 124 | UNIPHIER_RESETX(8, 0x200c, 12), /* STDMAC */ | ||
| 125 | UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link (GIO0) */ | ||
| 126 | UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link (GIO1) */ | ||
| 127 | UNIPHIER_RESETX(16, 0x200c, 16), /* USB30-PHY0 */ | ||
| 128 | UNIPHIER_RESETX(17, 0x200c, 18), /* USB30-PHY1 */ | ||
| 129 | UNIPHIER_RESETX(18, 0x200c, 20), /* USB30-PHY2 */ | ||
| 130 | UNIPHIER_RESETX(20, 0x200c, 17), /* USB31-PHY0 */ | ||
| 131 | UNIPHIER_RESETX(21, 0x200c, 19), /* USB31-PHY1 */ | ||
| 132 | UNIPHIER_RESET_END, | ||
| 133 | }; | ||
| 134 | |||
| 117 | /* Media I/O reset data */ | 135 | /* Media I/O reset data */ |
| 118 | #define UNIPHIER_MIO_RESET_SD(id, ch) \ | 136 | #define UNIPHIER_MIO_RESET_SD(id, ch) \ |
| 119 | UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 0) | 137 | UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 0) |
| @@ -359,6 +377,10 @@ static const struct of_device_id uniphier_reset_match[] = { | |||
| 359 | .compatible = "socionext,uniphier-ld20-reset", | 377 | .compatible = "socionext,uniphier-ld20-reset", |
| 360 | .data = uniphier_ld20_sys_reset_data, | 378 | .data = uniphier_ld20_sys_reset_data, |
| 361 | }, | 379 | }, |
| 380 | { | ||
| 381 | .compatible = "socionext,uniphier-pxs3-reset", | ||
| 382 | .data = uniphier_pxs3_sys_reset_data, | ||
| 383 | }, | ||
| 362 | /* Media I/O reset, SD reset */ | 384 | /* Media I/O reset, SD reset */ |
| 363 | { | 385 | { |
| 364 | .compatible = "socionext,uniphier-ld4-mio-reset", | 386 | .compatible = "socionext,uniphier-ld4-mio-reset", |
| @@ -392,6 +414,10 @@ static const struct of_device_id uniphier_reset_match[] = { | |||
| 392 | .compatible = "socionext,uniphier-ld20-sd-reset", | 414 | .compatible = "socionext,uniphier-ld20-sd-reset", |
| 393 | .data = uniphier_pro5_sd_reset_data, | 415 | .data = uniphier_pro5_sd_reset_data, |
| 394 | }, | 416 | }, |
| 417 | { | ||
| 418 | .compatible = "socionext,uniphier-pxs3-sd-reset", | ||
| 419 | .data = uniphier_pro5_sd_reset_data, | ||
| 420 | }, | ||
| 395 | /* Peripheral reset */ | 421 | /* Peripheral reset */ |
| 396 | { | 422 | { |
| 397 | .compatible = "socionext,uniphier-ld4-peri-reset", | 423 | .compatible = "socionext,uniphier-ld4-peri-reset", |
| @@ -421,6 +447,10 @@ static const struct of_device_id uniphier_reset_match[] = { | |||
| 421 | .compatible = "socionext,uniphier-ld20-peri-reset", | 447 | .compatible = "socionext,uniphier-ld20-peri-reset", |
| 422 | .data = uniphier_pro4_peri_reset_data, | 448 | .data = uniphier_pro4_peri_reset_data, |
| 423 | }, | 449 | }, |
| 450 | { | ||
| 451 | .compatible = "socionext,uniphier-pxs3-peri-reset", | ||
| 452 | .data = uniphier_pro4_peri_reset_data, | ||
| 453 | }, | ||
| 424 | /* Analog signal amplifiers reset */ | 454 | /* Analog signal amplifiers reset */ |
| 425 | { | 455 | { |
| 426 | .compatible = "socionext,uniphier-ld11-adamv-reset", | 456 | .compatible = "socionext,uniphier-ld11-adamv-reset", |
diff --git a/drivers/reset/reset-zx2967.c b/drivers/reset/reset-zx2967.c deleted file mode 100644 index 4f319f7753d4..000000000000 --- a/drivers/reset/reset-zx2967.c +++ /dev/null | |||
| @@ -1,99 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * ZTE's zx2967 family reset controller driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2017 ZTE Ltd. | ||
| 5 | * | ||
| 6 | * Author: Baoyou Xie <baoyou.xie@linaro.org> | ||
| 7 | * | ||
| 8 | * License terms: GNU General Public License (GPL) version 2 | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/of_address.h> | ||
| 12 | #include <linux/platform_device.h> | ||
| 13 | #include <linux/reset-controller.h> | ||
| 14 | |||
| 15 | struct zx2967_reset { | ||
| 16 | void __iomem *reg_base; | ||
| 17 | spinlock_t lock; | ||
| 18 | struct reset_controller_dev rcdev; | ||
| 19 | }; | ||
| 20 | |||
| 21 | static int zx2967_reset_act(struct reset_controller_dev *rcdev, | ||
| 22 | unsigned long id, bool assert) | ||
| 23 | { | ||
| 24 | struct zx2967_reset *reset = NULL; | ||
| 25 | int bank = id / 32; | ||
| 26 | int offset = id % 32; | ||
| 27 | u32 reg; | ||
| 28 | unsigned long flags; | ||
| 29 | |||
| 30 | reset = container_of(rcdev, struct zx2967_reset, rcdev); | ||
| 31 | |||
| 32 | spin_lock_irqsave(&reset->lock, flags); | ||
| 33 | |||
| 34 | reg = readl_relaxed(reset->reg_base + (bank * 4)); | ||
| 35 | if (assert) | ||
| 36 | reg &= ~BIT(offset); | ||
| 37 | else | ||
| 38 | reg |= BIT(offset); | ||
| 39 | writel_relaxed(reg, reset->reg_base + (bank * 4)); | ||
| 40 | |||
| 41 | spin_unlock_irqrestore(&reset->lock, flags); | ||
| 42 | |||
| 43 | return 0; | ||
| 44 | } | ||
| 45 | |||
| 46 | static int zx2967_reset_assert(struct reset_controller_dev *rcdev, | ||
| 47 | unsigned long id) | ||
| 48 | { | ||
| 49 | return zx2967_reset_act(rcdev, id, true); | ||
| 50 | } | ||
| 51 | |||
| 52 | static int zx2967_reset_deassert(struct reset_controller_dev *rcdev, | ||
| 53 | unsigned long id) | ||
| 54 | { | ||
| 55 | return zx2967_reset_act(rcdev, id, false); | ||
| 56 | } | ||
| 57 | |||
| 58 | static const struct reset_control_ops zx2967_reset_ops = { | ||
| 59 | .assert = zx2967_reset_assert, | ||
| 60 | .deassert = zx2967_reset_deassert, | ||
| 61 | }; | ||
| 62 | |||
| 63 | static int zx2967_reset_probe(struct platform_device *pdev) | ||
| 64 | { | ||
| 65 | struct zx2967_reset *reset; | ||
| 66 | struct resource *res; | ||
| 67 | |||
| 68 | reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL); | ||
| 69 | if (!reset) | ||
| 70 | return -ENOMEM; | ||
| 71 | |||
| 72 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 73 | reset->reg_base = devm_ioremap_resource(&pdev->dev, res); | ||
| 74 | if (IS_ERR(reset->reg_base)) | ||
| 75 | return PTR_ERR(reset->reg_base); | ||
| 76 | |||
| 77 | spin_lock_init(&reset->lock); | ||
| 78 | |||
| 79 | reset->rcdev.owner = THIS_MODULE; | ||
| 80 | reset->rcdev.nr_resets = resource_size(res) * 8; | ||
| 81 | reset->rcdev.ops = &zx2967_reset_ops; | ||
| 82 | reset->rcdev.of_node = pdev->dev.of_node; | ||
| 83 | |||
| 84 | return devm_reset_controller_register(&pdev->dev, &reset->rcdev); | ||
| 85 | } | ||
| 86 | |||
| 87 | static const struct of_device_id zx2967_reset_dt_ids[] = { | ||
| 88 | { .compatible = "zte,zx296718-reset", }, | ||
| 89 | {}, | ||
| 90 | }; | ||
| 91 | |||
| 92 | static struct platform_driver zx2967_reset_driver = { | ||
| 93 | .probe = zx2967_reset_probe, | ||
| 94 | .driver = { | ||
| 95 | .name = "zx2967-reset", | ||
| 96 | .of_match_table = zx2967_reset_dt_ids, | ||
| 97 | }, | ||
| 98 | }; | ||
| 99 | builtin_platform_driver(zx2967_reset_driver); | ||
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 36dec140ea0d..deecb16e7256 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile | |||
| @@ -11,7 +11,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/ | |||
| 11 | obj-y += fsl/ | 11 | obj-y += fsl/ |
| 12 | obj-$(CONFIG_ARCH_MXC) += imx/ | 12 | obj-$(CONFIG_ARCH_MXC) += imx/ |
| 13 | obj-$(CONFIG_SOC_XWAY) += lantiq/ | 13 | obj-$(CONFIG_SOC_XWAY) += lantiq/ |
| 14 | obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ | 14 | obj-y += mediatek/ |
| 15 | obj-$(CONFIG_ARCH_MESON) += amlogic/ | 15 | obj-$(CONFIG_ARCH_MESON) += amlogic/ |
| 16 | obj-$(CONFIG_ARCH_QCOM) += qcom/ | 16 | obj-$(CONFIG_ARCH_QCOM) += qcom/ |
| 17 | obj-y += renesas/ | 17 | obj-y += renesas/ |
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig index 22acf064531f..b04f6e4aedbc 100644 --- a/drivers/soc/amlogic/Kconfig +++ b/drivers/soc/amlogic/Kconfig | |||
| @@ -9,4 +9,25 @@ config MESON_GX_SOCINFO | |||
| 9 | Say yes to support decoding of Amlogic Meson GX SoC family | 9 | Say yes to support decoding of Amlogic Meson GX SoC family |
| 10 | information about the type, package and version. | 10 | information about the type, package and version. |
| 11 | 11 | ||
| 12 | config MESON_GX_PM_DOMAINS | ||
| 13 | bool "Amlogic Meson GX Power Domains driver" | ||
| 14 | depends on ARCH_MESON || COMPILE_TEST | ||
| 15 | depends on PM && OF | ||
| 16 | default ARCH_MESON | ||
| 17 | select PM_GENERIC_DOMAINS | ||
| 18 | select PM_GENERIC_DOMAINS_OF | ||
| 19 | help | ||
| 20 | Say yes to expose Amlogic Meson GX Power Domains as | ||
| 21 | Generic Power Domains. | ||
| 22 | |||
| 23 | config MESON_MX_SOCINFO | ||
| 24 | bool "Amlogic Meson MX SoC Information driver" | ||
| 25 | depends on ARCH_MESON || COMPILE_TEST | ||
| 26 | default ARCH_MESON | ||
| 27 | select SOC_BUS | ||
| 28 | help | ||
| 29 | Say yes to support decoding of Amlogic Meson6, Meson8, | ||
| 30 | Meson8b and Meson8m2 SoC family information about the type | ||
| 31 | and version. | ||
| 32 | |||
| 12 | endmenu | 33 | endmenu |
diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile index 3e85fc462c21..8fa321893928 100644 --- a/drivers/soc/amlogic/Makefile +++ b/drivers/soc/amlogic/Makefile | |||
| @@ -1 +1,3 @@ | |||
| 1 | obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o | 1 | obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o |
| 2 | obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o | ||
| 3 | obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o | ||
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c new file mode 100644 index 000000000000..2bdeebc48901 --- /dev/null +++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c | |||
| @@ -0,0 +1,243 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2017 BayLibre, SAS | ||
| 3 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
| 4 | * | ||
| 5 | * SPDX-License-Identifier: GPL-2.0+ | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/of_address.h> | ||
| 9 | #include <linux/platform_device.h> | ||
| 10 | #include <linux/pm_domain.h> | ||
| 11 | #include <linux/bitfield.h> | ||
| 12 | #include <linux/regmap.h> | ||
| 13 | #include <linux/mfd/syscon.h> | ||
| 14 | #include <linux/reset.h> | ||
| 15 | #include <linux/clk.h> | ||
| 16 | |||
| 17 | /* AO Offsets */ | ||
| 18 | |||
| 19 | #define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2) | ||
| 20 | |||
| 21 | #define GEN_PWR_VPU_HDMI BIT(8) | ||
| 22 | #define GEN_PWR_VPU_HDMI_ISO BIT(9) | ||
| 23 | |||
| 24 | /* HHI Offsets */ | ||
| 25 | |||
| 26 | #define HHI_MEM_PD_REG0 (0x40 << 2) | ||
| 27 | #define HHI_VPU_MEM_PD_REG0 (0x41 << 2) | ||
| 28 | #define HHI_VPU_MEM_PD_REG1 (0x42 << 2) | ||
| 29 | |||
| 30 | struct meson_gx_pwrc_vpu { | ||
| 31 | struct generic_pm_domain genpd; | ||
| 32 | struct regmap *regmap_ao; | ||
| 33 | struct regmap *regmap_hhi; | ||
| 34 | struct reset_control *rstc; | ||
| 35 | struct clk *vpu_clk; | ||
| 36 | struct clk *vapb_clk; | ||
| 37 | }; | ||
| 38 | |||
| 39 | static inline | ||
| 40 | struct meson_gx_pwrc_vpu *genpd_to_pd(struct generic_pm_domain *d) | ||
| 41 | { | ||
| 42 | return container_of(d, struct meson_gx_pwrc_vpu, genpd); | ||
| 43 | } | ||
| 44 | |||
| 45 | static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd) | ||
| 46 | { | ||
| 47 | struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); | ||
| 48 | int i; | ||
| 49 | |||
| 50 | regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, | ||
| 51 | GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO); | ||
| 52 | udelay(20); | ||
| 53 | |||
| 54 | /* Power Down Memories */ | ||
| 55 | for (i = 0; i < 32; i += 2) { | ||
| 56 | regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, | ||
| 57 | 0x2 << i, 0x3 << i); | ||
| 58 | udelay(5); | ||
| 59 | } | ||
| 60 | for (i = 0; i < 32; i += 2) { | ||
| 61 | regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, | ||
| 62 | 0x2 << i, 0x3 << i); | ||
| 63 | udelay(5); | ||
| 64 | } | ||
| 65 | for (i = 8; i < 16; i++) { | ||
| 66 | regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, | ||
| 67 | BIT(i), BIT(i)); | ||
| 68 | udelay(5); | ||
| 69 | } | ||
| 70 | udelay(20); | ||
| 71 | |||
| 72 | regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, | ||
| 73 | GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI); | ||
| 74 | |||
| 75 | msleep(20); | ||
| 76 | |||
| 77 | clk_disable_unprepare(pd->vpu_clk); | ||
| 78 | clk_disable_unprepare(pd->vapb_clk); | ||
| 79 | |||
| 80 | return 0; | ||
| 81 | } | ||
| 82 | |||
| 83 | static int meson_gx_pwrc_vpu_setup_clk(struct meson_gx_pwrc_vpu *pd) | ||
| 84 | { | ||
| 85 | int ret; | ||
| 86 | |||
| 87 | ret = clk_prepare_enable(pd->vpu_clk); | ||
| 88 | if (ret) | ||
| 89 | return ret; | ||
| 90 | |||
| 91 | ret = clk_prepare_enable(pd->vapb_clk); | ||
| 92 | if (ret) | ||
| 93 | clk_disable_unprepare(pd->vpu_clk); | ||
| 94 | |||
| 95 | return ret; | ||
| 96 | } | ||
| 97 | |||
| 98 | static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd) | ||
| 99 | { | ||
| 100 | struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); | ||
| 101 | int ret; | ||
| 102 | int i; | ||
| 103 | |||
| 104 | regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, | ||
| 105 | GEN_PWR_VPU_HDMI, 0); | ||
| 106 | udelay(20); | ||
| 107 | |||
| 108 | /* Power Up Memories */ | ||
| 109 | for (i = 0; i < 32; i += 2) { | ||
| 110 | regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, | ||
| 111 | 0x2 << i, 0); | ||
| 112 | udelay(5); | ||
| 113 | } | ||
| 114 | |||
| 115 | for (i = 0; i < 32; i += 2) { | ||
| 116 | regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, | ||
| 117 | 0x2 << i, 0); | ||
| 118 | udelay(5); | ||
| 119 | } | ||
| 120 | |||
| 121 | for (i = 8; i < 16; i++) { | ||
| 122 | regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, | ||
| 123 | BIT(i), 0); | ||
| 124 | udelay(5); | ||
| 125 | } | ||
| 126 | udelay(20); | ||
| 127 | |||
| 128 | ret = reset_control_assert(pd->rstc); | ||
| 129 | if (ret) | ||
| 130 | return ret; | ||
| 131 | |||
| 132 | regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, | ||
| 133 | GEN_PWR_VPU_HDMI_ISO, 0); | ||
| 134 | |||
| 135 | ret = reset_control_deassert(pd->rstc); | ||
| 136 | if (ret) | ||
| 137 | return ret; | ||
| 138 | |||
| 139 | ret = meson_gx_pwrc_vpu_setup_clk(pd); | ||
| 140 | if (ret) | ||
| 141 | return ret; | ||
| 142 | |||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 146 | static bool meson_gx_pwrc_vpu_get_power(struct meson_gx_pwrc_vpu *pd) | ||
| 147 | { | ||
| 148 | u32 reg; | ||
| 149 | |||
| 150 | regmap_read(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, &reg); | ||
| 151 | |||
| 152 | return (reg & GEN_PWR_VPU_HDMI); | ||
| 153 | } | ||
| 154 | |||
| 155 | static struct meson_gx_pwrc_vpu vpu_hdmi_pd = { | ||
| 156 | .genpd = { | ||
| 157 | .name = "vpu_hdmi", | ||
| 158 | .power_off = meson_gx_pwrc_vpu_power_off, | ||
| 159 | .power_on = meson_gx_pwrc_vpu_power_on, | ||
| 160 | }, | ||
| 161 | }; | ||
| 162 | |||
| 163 | static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev) | ||
| 164 | { | ||
| 165 | struct regmap *regmap_ao, *regmap_hhi; | ||
| 166 | struct reset_control *rstc; | ||
| 167 | struct clk *vpu_clk; | ||
| 168 | struct clk *vapb_clk; | ||
| 169 | bool powered_off; | ||
| 170 | int ret; | ||
| 171 | |||
| 172 | regmap_ao = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node)); | ||
| 173 | if (IS_ERR(regmap_ao)) { | ||
| 174 | dev_err(&pdev->dev, "failed to get regmap\n"); | ||
| 175 | return PTR_ERR(regmap_ao); | ||
| 176 | } | ||
| 177 | |||
| 178 | regmap_hhi = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, | ||
| 179 | "amlogic,hhi-sysctrl"); | ||
| 180 | if (IS_ERR(regmap_hhi)) { | ||
| 181 | dev_err(&pdev->dev, "failed to get HHI regmap\n"); | ||
| 182 | return PTR_ERR(regmap_hhi); | ||
| 183 | } | ||
| 184 | |||
| 185 | rstc = devm_reset_control_array_get(&pdev->dev, false, false); | ||
| 186 | if (IS_ERR(rstc)) { | ||
| 187 | dev_err(&pdev->dev, "failed to get reset lines\n"); | ||
| 188 | return PTR_ERR(rstc); | ||
| 189 | } | ||
| 190 | |||
| 191 | vpu_clk = devm_clk_get(&pdev->dev, "vpu"); | ||
| 192 | if (IS_ERR(vpu_clk)) { | ||
| 193 | dev_err(&pdev->dev, "vpu clock request failed\n"); | ||
| 194 | return PTR_ERR(vpu_clk); | ||
| 195 | } | ||
| 196 | |||
| 197 | vapb_clk = devm_clk_get(&pdev->dev, "vapb"); | ||
| 198 | if (IS_ERR(vapb_clk)) { | ||
| 199 | dev_err(&pdev->dev, "vapb clock request failed\n"); | ||
| 200 | return PTR_ERR(vapb_clk); | ||
| 201 | } | ||
| 202 | |||
| 203 | vpu_hdmi_pd.regmap_ao = regmap_ao; | ||
| 204 | vpu_hdmi_pd.regmap_hhi = regmap_hhi; | ||
| 205 | vpu_hdmi_pd.rstc = rstc; | ||
| 206 | vpu_hdmi_pd.vpu_clk = vpu_clk; | ||
| 207 | vpu_hdmi_pd.vapb_clk = vapb_clk; | ||
| 208 | |||
| 209 | powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd); | ||
| 210 | |||
| 211 | /* If already powered, sync the clock states */ | ||
| 212 | if (!powered_off) { | ||
| 213 | ret = meson_gx_pwrc_vpu_setup_clk(&vpu_hdmi_pd); | ||
| 214 | if (ret) | ||
| 215 | return ret; | ||
| 216 | } | ||
| 217 | |||
| 218 | pm_genpd_init(&vpu_hdmi_pd.genpd, &pm_domain_always_on_gov, | ||
| 219 | powered_off); | ||
| 220 | |||
| 221 | return of_genpd_add_provider_simple(pdev->dev.of_node, | ||
| 222 | &vpu_hdmi_pd.genpd); | ||
| 223 | } | ||
| 224 | |||
| 225 | static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev) | ||
| 226 | { | ||
| 227 | meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd); | ||
| 228 | } | ||
| 229 | |||
| 230 | static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = { | ||
| 231 | { .compatible = "amlogic,meson-gx-pwrc-vpu" }, | ||
| 232 | { /* sentinel */ } | ||
| 233 | }; | ||
| 234 | |||
| 235 | static struct platform_driver meson_gx_pwrc_vpu_driver = { | ||
| 236 | .probe = meson_gx_pwrc_vpu_probe, | ||
| 237 | .shutdown = meson_gx_pwrc_vpu_shutdown, | ||
| 238 | .driver = { | ||
| 239 | .name = "meson_gx_pwrc_vpu", | ||
| 240 | .of_match_table = meson_gx_pwrc_vpu_match_table, | ||
| 241 | }, | ||
| 242 | }; | ||
| 243 | builtin_platform_driver(meson_gx_pwrc_vpu_driver); | ||
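The VPU power-domain handlers above funnel every hardware access through regmap_update_bits(), so only the bits covered by the mask argument ever change; unrelated bits in AO_RTI_GEN_PWR_SLEEP0 and the HHI memory power-down registers are preserved. A minimal plain-C sketch of that read-modify-write contract (the helper, register variable and values below are local to this illustration, not part of the driver):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a memory-mapped register; in the driver this sits behind a regmap. */
static uint32_t fake_sleep0;

/*
 * Same contract as regmap_update_bits(map, reg, mask, val): bits outside
 * 'mask' are preserved, bits inside 'mask' take their value from 'val'.
 */
static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	const uint32_t iso = 1u << 9;	/* GEN_PWR_VPU_HDMI_ISO */
	const uint32_t pwr = 1u << 8;	/* GEN_PWR_VPU_HDMI */

	update_bits(&fake_sleep0, iso, iso);	/* assert isolation, as in power_off() */
	update_bits(&fake_sleep0, pwr, 0);	/* clear the power bit, as in power_on() */
	printf("AO_RTI_GEN_PWR_SLEEP0 model = 0x%08x\n", fake_sleep0);	/* 0x00000200 */
	return 0;
}
```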
diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c new file mode 100644 index 000000000000..7bfff5ff22a2 --- /dev/null +++ b/drivers/soc/amlogic/meson-mx-socinfo.c | |||
| @@ -0,0 +1,175 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com> | ||
| 3 | * | ||
| 4 | * SPDX-License-Identifier: GPL-2.0+ | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/io.h> | ||
| 8 | #include <linux/of.h> | ||
| 9 | #include <linux/of_address.h> | ||
| 10 | #include <linux/of_platform.h> | ||
| 11 | #include <linux/platform_device.h> | ||
| 12 | #include <linux/slab.h> | ||
| 13 | #include <linux/sys_soc.h> | ||
| 14 | #include <linux/bitfield.h> | ||
| 15 | #include <linux/regmap.h> | ||
| 16 | #include <linux/mfd/syscon.h> | ||
| 17 | |||
| 18 | #define MESON_SOCINFO_MAJOR_VER_MESON6 0x16 | ||
| 19 | #define MESON_SOCINFO_MAJOR_VER_MESON8 0x19 | ||
| 20 | #define MESON_SOCINFO_MAJOR_VER_MESON8B 0x1b | ||
| 21 | |||
| 22 | #define MESON_MX_ASSIST_HW_REV 0x14c | ||
| 23 | |||
| 24 | #define MESON_MX_ANALOG_TOP_METAL_REVISION 0x0 | ||
| 25 | |||
| 26 | #define MESON_MX_BOOTROM_MISC_VER 0x4 | ||
| 27 | |||
| 28 | static const char *meson_mx_socinfo_revision(unsigned int major_ver, | ||
| 29 | unsigned int misc_ver, | ||
| 30 | unsigned int metal_rev) | ||
| 31 | { | ||
| 32 | unsigned int minor_ver; | ||
| 33 | |||
| 34 | switch (major_ver) { | ||
| 35 | case MESON_SOCINFO_MAJOR_VER_MESON6: | ||
| 36 | minor_ver = 0xa; | ||
| 37 | break; | ||
| 38 | |||
| 39 | case MESON_SOCINFO_MAJOR_VER_MESON8: | ||
| 40 | if (metal_rev == 0x11111112) | ||
| 41 | major_ver = 0x1d; | ||
| 42 | |||
| 43 | if (metal_rev == 0x11111111 || metal_rev == 0x11111112) | ||
| 44 | minor_ver = 0xa; | ||
| 45 | else if (metal_rev == 0x11111113) | ||
| 46 | minor_ver = 0xb; | ||
| 47 | else if (metal_rev == 0x11111133) | ||
| 48 | minor_ver = 0xc; | ||
| 49 | else | ||
| 50 | minor_ver = 0xd; | ||
| 51 | |||
| 52 | break; | ||
| 53 | |||
| 54 | case MESON_SOCINFO_MAJOR_VER_MESON8B: | ||
| 55 | if (metal_rev == 0x11111111) | ||
| 56 | minor_ver = 0xa; | ||
| 57 | else | ||
| 58 | minor_ver = 0xb; | ||
| 59 | |||
| 60 | break; | ||
| 61 | |||
| 62 | default: | ||
| 63 | minor_ver = 0x0; | ||
| 64 | break; | ||
| 65 | } | ||
| 66 | |||
| 67 | return kasprintf(GFP_KERNEL, "Rev%X (%x - 0:%X)", minor_ver, major_ver, | ||
| 68 | misc_ver); | ||
| 69 | } | ||
| 70 | |||
| 71 | static const char *meson_mx_socinfo_soc_id(unsigned int major_ver, | ||
| 72 | unsigned int metal_rev) | ||
| 73 | { | ||
| 74 | const char *soc_id; | ||
| 75 | |||
| 76 | switch (major_ver) { | ||
| 77 | case MESON_SOCINFO_MAJOR_VER_MESON6: | ||
| 78 | soc_id = "Meson6 (AML8726-MX)"; | ||
| 79 | break; | ||
| 80 | |||
| 81 | case MESON_SOCINFO_MAJOR_VER_MESON8: | ||
| 82 | if (metal_rev == 0x11111112) | ||
| 83 | soc_id = "Meson8m2 (S812)"; | ||
| 84 | else | ||
| 85 | soc_id = "Meson8 (S802)"; | ||
| 86 | |||
| 87 | break; | ||
| 88 | |||
| 89 | case MESON_SOCINFO_MAJOR_VER_MESON8B: | ||
| 90 | soc_id = "Meson8b (S805)"; | ||
| 91 | break; | ||
| 92 | |||
| 93 | default: | ||
| 94 | soc_id = "Unknown"; | ||
| 95 | break; | ||
| 96 | } | ||
| 97 | |||
| 98 | return kstrdup_const(soc_id, GFP_KERNEL); | ||
| 99 | } | ||
| 100 | |||
| 101 | static const struct of_device_id meson_mx_socinfo_analog_top_ids[] = { | ||
| 102 | { .compatible = "amlogic,meson8-analog-top", }, | ||
| 103 | { .compatible = "amlogic,meson8b-analog-top", }, | ||
| 104 | { /* sentinel */ } | ||
| 105 | }; | ||
| 106 | |||
| 107 | int __init meson_mx_socinfo_init(void) | ||
| 108 | { | ||
| 109 | struct soc_device_attribute *soc_dev_attr; | ||
| 110 | struct soc_device *soc_dev; | ||
| 111 | struct device_node *np; | ||
| 112 | struct regmap *assist_regmap, *bootrom_regmap, *analog_top_regmap; | ||
| 113 | unsigned int major_ver, misc_ver, metal_rev = 0; | ||
| 114 | int ret; | ||
| 115 | |||
| 116 | assist_regmap = | ||
| 117 | syscon_regmap_lookup_by_compatible("amlogic,meson-mx-assist"); | ||
| 118 | if (IS_ERR(assist_regmap)) | ||
| 119 | return PTR_ERR(assist_regmap); | ||
| 120 | |||
| 121 | bootrom_regmap = | ||
| 122 | syscon_regmap_lookup_by_compatible("amlogic,meson-mx-bootrom"); | ||
| 123 | if (IS_ERR(bootrom_regmap)) | ||
| 124 | return PTR_ERR(bootrom_regmap); | ||
| 125 | |||
| 126 | np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids); | ||
| 127 | if (np) { | ||
| 128 | analog_top_regmap = syscon_node_to_regmap(np); | ||
| 129 | if (IS_ERR(analog_top_regmap)) | ||
| 130 | return PTR_ERR(analog_top_regmap); | ||
| 131 | |||
| 132 | ret = regmap_read(analog_top_regmap, | ||
| 133 | MESON_MX_ANALOG_TOP_METAL_REVISION, | ||
| 134 | &metal_rev); | ||
| 135 | if (ret) | ||
| 136 | return ret; | ||
| 137 | } | ||
| 138 | |||
| 139 | ret = regmap_read(assist_regmap, MESON_MX_ASSIST_HW_REV, &major_ver); | ||
| 140 | if (ret < 0) | ||
| 141 | return ret; | ||
| 142 | |||
| 143 | ret = regmap_read(bootrom_regmap, MESON_MX_BOOTROM_MISC_VER, | ||
| 144 | &misc_ver); | ||
| 145 | if (ret < 0) | ||
| 146 | return ret; | ||
| 147 | |||
| 148 | soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); | ||
| 149 | if (!soc_dev_attr) | ||
| 150 | return -ENODEV; | ||
| 151 | |||
| 152 | soc_dev_attr->family = "Amlogic Meson"; | ||
| 153 | |||
| 154 | np = of_find_node_by_path("/"); | ||
| 155 | of_property_read_string(np, "model", &soc_dev_attr->machine); | ||
| 156 | of_node_put(np); | ||
| 157 | |||
| 158 | soc_dev_attr->revision = meson_mx_socinfo_revision(major_ver, misc_ver, | ||
| 159 | metal_rev); | ||
| 160 | soc_dev_attr->soc_id = meson_mx_socinfo_soc_id(major_ver, metal_rev); | ||
| 161 | |||
| 162 | soc_dev = soc_device_register(soc_dev_attr); | ||
| 163 | if (IS_ERR(soc_dev)) { | ||
| 164 | kfree_const(soc_dev_attr->revision); | ||
| 165 | kfree_const(soc_dev_attr->soc_id); | ||
| 166 | kfree(soc_dev_attr); | ||
| 167 | return PTR_ERR(soc_dev); | ||
| 168 | } | ||
| 169 | |||
| 170 | dev_info(soc_device_to_device(soc_dev), "Amlogic %s %s detected\n", | ||
| 171 | soc_dev_attr->soc_id, soc_dev_attr->revision); | ||
| 172 | |||
| 173 | return 0; | ||
| 174 | } | ||
| 175 | device_initcall(meson_mx_socinfo_init); | ||
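For reference, meson_mx_socinfo_revision() assembles its string with the "Rev%X (%x - 0:%X)" format shown above. A small self-contained sketch of what that yields for a Meson8 with metal revision 0x11111111 (the misc_ver value is invented for illustration):

```c
#include <stdio.h>

int main(void)
{
	unsigned int major_ver = 0x19;	/* MESON_SOCINFO_MAJOR_VER_MESON8 */
	unsigned int misc_ver  = 0x0b;	/* invented bootrom MISC_VER readout */
	unsigned int minor_ver = 0x0a;	/* metal_rev 0x11111111 maps to minor 0xa */
	char buf[32];

	/* Same format string the driver passes to kasprintf() */
	snprintf(buf, sizeof(buf), "Rev%X (%x - 0:%X)",
		 minor_ver, major_ver, misc_ver);
	printf("%s\n", buf);	/* prints: RevA (19 - 0:B) */
	return 0;
}
```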
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c index c1363c83c352..4dd03b099c89 100644 --- a/drivers/soc/atmel/soc.c +++ b/drivers/soc/atmel/soc.c | |||
| @@ -72,6 +72,8 @@ static const struct at91_soc __initconst socs[] = { | |||
| 72 | "sama5d21", "sama5d2"), | 72 | "sama5d21", "sama5d2"), |
| 73 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D22CU_EXID_MATCH, | 73 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D22CU_EXID_MATCH, |
| 74 | "sama5d22", "sama5d2"), | 74 | "sama5d22", "sama5d2"), |
| 75 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D225C_D1M_EXID_MATCH, | ||
| 76 | "sama5d225c 16MiB SiP", "sama5d2"), | ||
| 75 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D23CU_EXID_MATCH, | 77 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D23CU_EXID_MATCH, |
| 76 | "sama5d23", "sama5d2"), | 78 | "sama5d23", "sama5d2"), |
| 77 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CX_EXID_MATCH, | 79 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CX_EXID_MATCH, |
| @@ -84,10 +86,16 @@ static const struct at91_soc __initconst socs[] = { | |||
| 84 | "sama5d27", "sama5d2"), | 86 | "sama5d27", "sama5d2"), |
| 85 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CN_EXID_MATCH, | 87 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CN_EXID_MATCH, |
| 86 | "sama5d27", "sama5d2"), | 88 | "sama5d27", "sama5d2"), |
| 89 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_D1G_EXID_MATCH, | ||
| 90 | "sama5d27c 128MiB SiP", "sama5d2"), | ||
| 91 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_D5M_EXID_MATCH, | ||
| 92 | "sama5d27c 64MiB SiP", "sama5d2"), | ||
| 87 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CU_EXID_MATCH, | 93 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CU_EXID_MATCH, |
| 88 | "sama5d28", "sama5d2"), | 94 | "sama5d28", "sama5d2"), |
| 89 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CN_EXID_MATCH, | 95 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CN_EXID_MATCH, |
| 90 | "sama5d28", "sama5d2"), | 96 | "sama5d28", "sama5d2"), |
| 97 | AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28C_D1G_EXID_MATCH, | ||
| 98 | "sama5d28c 128MiB SiP", "sama5d2"), | ||
| 91 | AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH, | 99 | AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH, |
| 92 | "sama5d31", "sama5d3"), | 100 | "sama5d31", "sama5d3"), |
| 93 | AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH, | 101 | AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH, |
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h index a90bd5b0ef8f..94cd5d1ab502 100644 --- a/drivers/soc/atmel/soc.h +++ b/drivers/soc/atmel/soc.h | |||
| @@ -64,14 +64,18 @@ at91_soc_init(const struct at91_soc *socs); | |||
| 64 | 64 | ||
| 65 | #define SAMA5D2_CIDR_MATCH 0x0a5c08c0 | 65 | #define SAMA5D2_CIDR_MATCH 0x0a5c08c0 |
| 66 | #define SAMA5D21CU_EXID_MATCH 0x0000005a | 66 | #define SAMA5D21CU_EXID_MATCH 0x0000005a |
| 67 | #define SAMA5D225C_D1M_EXID_MATCH 0x00000053 | ||
| 67 | #define SAMA5D22CU_EXID_MATCH 0x00000059 | 68 | #define SAMA5D22CU_EXID_MATCH 0x00000059 |
| 68 | #define SAMA5D22CN_EXID_MATCH 0x00000069 | 69 | #define SAMA5D22CN_EXID_MATCH 0x00000069 |
| 69 | #define SAMA5D23CU_EXID_MATCH 0x00000058 | 70 | #define SAMA5D23CU_EXID_MATCH 0x00000058 |
| 70 | #define SAMA5D24CX_EXID_MATCH 0x00000004 | 71 | #define SAMA5D24CX_EXID_MATCH 0x00000004 |
| 71 | #define SAMA5D24CU_EXID_MATCH 0x00000014 | 72 | #define SAMA5D24CU_EXID_MATCH 0x00000014 |
| 72 | #define SAMA5D26CU_EXID_MATCH 0x00000012 | 73 | #define SAMA5D26CU_EXID_MATCH 0x00000012 |
| 74 | #define SAMA5D27C_D1G_EXID_MATCH 0x00000033 | ||
| 75 | #define SAMA5D27C_D5M_EXID_MATCH 0x00000032 | ||
| 73 | #define SAMA5D27CU_EXID_MATCH 0x00000011 | 76 | #define SAMA5D27CU_EXID_MATCH 0x00000011 |
| 74 | #define SAMA5D27CN_EXID_MATCH 0x00000021 | 77 | #define SAMA5D27CN_EXID_MATCH 0x00000021 |
| 78 | #define SAMA5D28C_D1G_EXID_MATCH 0x00000013 | ||
| 75 | #define SAMA5D28CU_EXID_MATCH 0x00000010 | 79 | #define SAMA5D28CU_EXID_MATCH 0x00000010 |
| 76 | #define SAMA5D28CN_EXID_MATCH 0x00000020 | 80 | #define SAMA5D28CN_EXID_MATCH 0x00000020 |
| 77 | 81 | ||
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig index 49f1e2a75d61..055a845ed979 100644 --- a/drivers/soc/bcm/Kconfig +++ b/drivers/soc/bcm/Kconfig | |||
| @@ -20,4 +20,6 @@ config SOC_BRCMSTB | |||
| 20 | 20 | ||
| 21 | If unsure, say N. | 21 | If unsure, say N. |
| 22 | 22 | ||
| 23 | source "drivers/soc/bcm/brcmstb/Kconfig" | ||
| 24 | |||
| 23 | endmenu | 25 | endmenu |
diff --git a/drivers/soc/bcm/brcmstb/Kconfig b/drivers/soc/bcm/brcmstb/Kconfig new file mode 100644 index 000000000000..d36f6e03c1a6 --- /dev/null +++ b/drivers/soc/bcm/brcmstb/Kconfig | |||
| @@ -0,0 +1,10 @@ | |||
| 1 | if SOC_BRCMSTB | ||
| 2 | |||
| 3 | config BRCMSTB_PM | ||
| 4 | bool "Support suspend/resume for STB platforms" | ||
| 5 | default y | ||
| 6 | depends on PM | ||
| 7 | depends on ARCH_BRCMSTB || BMIPS_GENERIC | ||
| 8 | select ARM_CPU_SUSPEND if ARM | ||
| 9 | |||
| 10 | endif # SOC_BRCMSTB | ||
diff --git a/drivers/soc/bcm/brcmstb/Makefile b/drivers/soc/bcm/brcmstb/Makefile index 9120b2715d3e..01687c26535b 100644 --- a/drivers/soc/bcm/brcmstb/Makefile +++ b/drivers/soc/bcm/brcmstb/Makefile | |||
| @@ -1 +1,2 @@ | |||
| 1 | obj-y += common.o biuctrl.o | 1 | obj-y += common.o biuctrl.o |
| 2 | obj-$(CONFIG_BRCMSTB_PM) += pm/ | ||
diff --git a/drivers/soc/bcm/brcmstb/pm/Makefile b/drivers/soc/bcm/brcmstb/pm/Makefile new file mode 100644 index 000000000000..08bbd244ef11 --- /dev/null +++ b/drivers/soc/bcm/brcmstb/pm/Makefile | |||
| @@ -0,0 +1,3 @@ | |||
| 1 | obj-$(CONFIG_ARM) += s2-arm.o pm-arm.o | ||
| 2 | AFLAGS_s2-arm.o := -march=armv7-a | ||
| 3 | obj-$(CONFIG_BMIPS_GENERIC) += s2-mips.o s3-mips.o pm-mips.o | ||
diff --git a/drivers/soc/bcm/brcmstb/pm/aon_defs.h b/drivers/soc/bcm/brcmstb/pm/aon_defs.h new file mode 100644 index 000000000000..fb936abd847d --- /dev/null +++ b/drivers/soc/bcm/brcmstb/pm/aon_defs.h | |||
| @@ -0,0 +1,113 @@ | |||
| 1 | /* | ||
| 2 | * Always ON (AON) register interface between bootloader and Linux | ||
| 3 | * | ||
| 4 | * Copyright © 2014-2017 Broadcom | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #ifndef __BRCMSTB_AON_DEFS_H__ | ||
| 17 | #define __BRCMSTB_AON_DEFS_H__ | ||
| 18 | |||
| 19 | #include <linux/compiler.h> | ||
| 20 | |||
| 21 | /* Magic number in upper 16-bits */ | ||
| 22 | #define BRCMSTB_S3_MAGIC_MASK 0xffff0000 | ||
| 23 | #define BRCMSTB_S3_MAGIC_SHORT 0x5AFE0000 | ||
| 24 | |||
| 25 | enum { | ||
| 26 | /* Restore random key for AES memory verification (off = fixed key) */ | ||
| 27 | S3_FLAG_LOAD_RANDKEY = (1 << 0), | ||
| 28 | |||
| 29 | /* Scratch buffer page table is present */ | ||
| 30 | S3_FLAG_SCRATCH_BUFFER_TABLE = (1 << 1), | ||
| 31 | |||
| 32 | /* Skip all memory verification */ | ||
| 33 | S3_FLAG_NO_MEM_VERIFY = (1 << 2), | ||
| 34 | |||
| 35 | /* | ||
| 36 | * Modification of this bit reserved for bootloader only. | ||
| 37 | * 1=PSCI started Linux, 0=Direct jump to Linux. | ||
| 38 | */ | ||
| 39 | S3_FLAG_PSCI_BOOT = (1 << 3), | ||
| 40 | |||
| 41 | /* | ||
| 42 | * Modification of this bit reserved for bootloader only. | ||
| 43 | * 1=64 bit boot, 0=32 bit boot. | ||
| 44 | */ | ||
| 45 | S3_FLAG_BOOTED64 = (1 << 4), | ||
| 46 | }; | ||
| 47 | |||
| 48 | #define BRCMSTB_HASH_LEN (128 / 8) /* 128-bit hash */ | ||
| 49 | |||
| 50 | #define AON_REG_MAGIC_FLAGS 0x00 | ||
| 51 | #define AON_REG_CONTROL_LOW 0x04 | ||
| 52 | #define AON_REG_CONTROL_HIGH 0x08 | ||
| 53 | #define AON_REG_S3_HASH 0x0c /* hash of S3 params */ | ||
| 54 | #define AON_REG_CONTROL_HASH_LEN 0x1c | ||
| 55 | #define AON_REG_PANIC 0x20 | ||
| 56 | |||
| 57 | #define BRCMSTB_S3_MAGIC 0x5AFEB007 | ||
| 58 | #define BRCMSTB_PANIC_MAGIC 0x512E115E | ||
| 59 | #define BOOTLOADER_SCRATCH_SIZE 64 | ||
| 60 | #define BRCMSTB_DTU_STATE_MAP_ENTRIES (8*1024) | ||
| 61 | #define BRCMSTB_DTU_CONFIG_ENTRIES (512) | ||
| 62 | #define BRCMSTB_DTU_COUNT (2) | ||
| 63 | |||
| 64 | #define IMAGE_DESCRIPTORS_BUFSIZE (2 * 1024) | ||
| 65 | #define S3_BOOTLOADER_RESERVED (S3_FLAG_PSCI_BOOT | S3_FLAG_BOOTED64) | ||
| 66 | |||
| 67 | struct brcmstb_bootloader_dtu_table { | ||
| 68 | uint32_t dtu_state_map[BRCMSTB_DTU_STATE_MAP_ENTRIES]; | ||
| 69 | uint32_t dtu_config[BRCMSTB_DTU_CONFIG_ENTRIES]; | ||
| 70 | }; | ||
| 71 | |||
| 72 | /* | ||
| 73 | * Bootloader utilizes a custom parameter block left in DRAM for handling S3 | ||
| 74 | * warm resume | ||
| 75 | */ | ||
| 76 | struct brcmstb_s3_params { | ||
| 77 | /* scratch memory for bootloader */ | ||
| 78 | uint8_t scratch[BOOTLOADER_SCRATCH_SIZE]; | ||
| 79 | |||
| 80 | uint32_t magic; /* BRCMSTB_S3_MAGIC */ | ||
| 81 | uint64_t reentry; /* PA */ | ||
| 82 | |||
| 83 | /* descriptors */ | ||
| 84 | uint32_t hash[BRCMSTB_HASH_LEN / 4]; | ||
| 85 | |||
| 86 | /* | ||
| 87 | * If 0, then ignore this parameter (there is only one set of | ||
| 88 | * descriptors) | ||
| 89 | * | ||
| 90 | * If non-0, then a second set of descriptors is stored at: | ||
| 91 | * | ||
| 92 | * descriptors + desc_offset_2 | ||
| 93 | * | ||
| 94 | * The MAC result of both descriptors is XOR'd and stored in @hash | ||
| 95 | */ | ||
| 96 | uint32_t desc_offset_2; | ||
| 97 | |||
| 98 | /* | ||
| 99 | * (Physical) address of a brcmstb_bootloader_scratch_table, for | ||
| 100 | * providing a large DRAM buffer to the bootloader | ||
| 101 | */ | ||
| 102 | uint64_t buffer_table; | ||
| 103 | |||
| 104 | uint32_t spare[70]; | ||
| 105 | |||
| 106 | uint8_t descriptors[IMAGE_DESCRIPTORS_BUFSIZE]; | ||
| 107 | /* | ||
| 108 | * Must be last member of struct. See brcmstb_pm_s3_finish() for reason. | ||
| 109 | */ | ||
| 110 | struct brcmstb_bootloader_dtu_table dtu[BRCMSTB_DTU_COUNT]; | ||
| 111 | } __packed; | ||
| 112 | |||
| 113 | #endif /* __BRCMSTB_AON_DEFS_H__ */ | ||
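The "must be last member" note on the dtu field matters because brcmstb_pm_s3_finish() (in pm-arm.c below) clears the parameter block with memset(params, 0, sizeof(*params) - sizeof(params->dtu)), which only preserves the DTU data if it really is the trailing member. A reduced compile-time sketch of that layout assumption, using a simplified stand-in struct rather than the real brcmstb_s3_params:

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-in with the same shape: header fields first, DTU table last. */
struct s3_params_sketch {
	uint32_t magic;
	uint64_t reentry;
	uint32_t dtu[16];	/* must remain the trailing member */
};

/* If 'dtu' really is last, everything before it spans sizeof(struct) - sizeof(dtu). */
static_assert(offsetof(struct s3_params_sketch, dtu) ==
	      sizeof(struct s3_params_sketch) -
	      sizeof(((struct s3_params_sketch *)0)->dtu),
	      "dtu must be the trailing member");

int main(void)
{
	struct s3_params_sketch p = { .dtu = { [0] = 0xdeadbeef } };

	/* Clear everything except the DTU area, mirroring the S3 finish path. */
	memset(&p, 0, sizeof(p) - sizeof(p.dtu));
	return p.dtu[0] == 0xdeadbeef ? 0 : 1;
}
```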
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c new file mode 100644 index 000000000000..dcf8c8065508 --- /dev/null +++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c | |||
| @@ -0,0 +1,822 @@ | |||
| 1 | /* | ||
| 2 | * ARM-specific support for Broadcom STB S2/S3/S5 power management | ||
| 3 | * | ||
| 4 | * S2: clock gate CPUs and as many peripherals as possible | ||
| 5 | * S3: power off all of the chip except the Always ON (AON) island; keep DDR in | ||
| 6 | * self-refresh | ||
| 7 | * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we | ||
| 8 | * treat this mode like a soft power-off, with wakeup allowed from AON | ||
| 9 | * | ||
| 10 | * Copyright © 2014-2017 Broadcom | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or modify | ||
| 13 | * it under the terms of the GNU General Public License version 2 as | ||
| 14 | * published by the Free Software Foundation. | ||
| 15 | * | ||
| 16 | * This program is distributed in the hope that it will be useful, | ||
| 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | * GNU General Public License for more details. | ||
| 20 | */ | ||
| 21 | |||
| 22 | #define pr_fmt(fmt) "brcmstb-pm: " fmt | ||
| 23 | |||
| 24 | #include <linux/bitops.h> | ||
| 25 | #include <linux/compiler.h> | ||
| 26 | #include <linux/delay.h> | ||
| 27 | #include <linux/dma-mapping.h> | ||
| 28 | #include <linux/err.h> | ||
| 29 | #include <linux/init.h> | ||
| 30 | #include <linux/io.h> | ||
| 31 | #include <linux/ioport.h> | ||
| 32 | #include <linux/kconfig.h> | ||
| 33 | #include <linux/kernel.h> | ||
| 34 | #include <linux/memblock.h> | ||
| 35 | #include <linux/module.h> | ||
| 36 | #include <linux/notifier.h> | ||
| 37 | #include <linux/of.h> | ||
| 38 | #include <linux/of_address.h> | ||
| 39 | #include <linux/platform_device.h> | ||
| 40 | #include <linux/pm.h> | ||
| 41 | #include <linux/printk.h> | ||
| 42 | #include <linux/proc_fs.h> | ||
| 43 | #include <linux/sizes.h> | ||
| 44 | #include <linux/slab.h> | ||
| 45 | #include <linux/sort.h> | ||
| 46 | #include <linux/suspend.h> | ||
| 47 | #include <linux/types.h> | ||
| 48 | #include <linux/uaccess.h> | ||
| 49 | #include <linux/soc/brcmstb/brcmstb.h> | ||
| 50 | |||
| 51 | #include <asm/fncpy.h> | ||
| 52 | #include <asm/setup.h> | ||
| 53 | #include <asm/suspend.h> | ||
| 54 | |||
| 55 | #include "pm.h" | ||
| 56 | #include "aon_defs.h" | ||
| 57 | |||
| 58 | #define SHIMPHY_DDR_PAD_CNTRL 0x8c | ||
| 59 | |||
| 60 | /* Method #0 */ | ||
| 61 | #define SHIMPHY_PAD_PLL_SEQUENCE BIT(8) | ||
| 62 | #define SHIMPHY_PAD_GATE_PLL_S3 BIT(9) | ||
| 63 | |||
| 64 | /* Method #1 */ | ||
| 65 | #define PWRDWN_SEQ_NO_SEQUENCING 0 | ||
| 66 | #define PWRDWN_SEQ_HOLD_CHANNEL 1 | ||
| 67 | #define PWRDWN_SEQ_RESET_PLL 2 | ||
| 68 | #define PWRDWN_SEQ_POWERDOWN_PLL 3 | ||
| 69 | |||
| 70 | #define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK 0x00f00000 | ||
| 71 | #define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT 20 | ||
| 72 | |||
| 73 | #define DDR_FORCE_CKE_RST_N BIT(3) | ||
| 74 | #define DDR_PHY_RST_N BIT(2) | ||
| 75 | #define DDR_PHY_CKE BIT(1) | ||
| 76 | |||
| 77 | #define DDR_PHY_NO_CHANNEL 0xffffffff | ||
| 78 | |||
| 79 | #define MAX_NUM_MEMC 3 | ||
| 80 | |||
| 81 | struct brcmstb_memc { | ||
| 82 | void __iomem *ddr_phy_base; | ||
| 83 | void __iomem *ddr_shimphy_base; | ||
| 84 | void __iomem *ddr_ctrl; | ||
| 85 | }; | ||
| 86 | |||
| 87 | struct brcmstb_pm_control { | ||
| 88 | void __iomem *aon_ctrl_base; | ||
| 89 | void __iomem *aon_sram; | ||
| 90 | struct brcmstb_memc memcs[MAX_NUM_MEMC]; | ||
| 91 | |||
| 92 | void __iomem *boot_sram; | ||
| 93 | size_t boot_sram_len; | ||
| 94 | |||
| 95 | bool support_warm_boot; | ||
| 96 | size_t pll_status_offset; | ||
| 97 | int num_memc; | ||
| 98 | |||
| 99 | struct brcmstb_s3_params *s3_params; | ||
| 100 | dma_addr_t s3_params_pa; | ||
| 101 | int s3entry_method; | ||
| 102 | u32 warm_boot_offset; | ||
| 103 | u32 phy_a_standby_ctrl_offs; | ||
| 104 | u32 phy_b_standby_ctrl_offs; | ||
| 105 | bool needs_ddr_pad; | ||
| 106 | struct platform_device *pdev; | ||
| 107 | }; | ||
| 108 | |||
| 109 | enum bsp_initiate_command { | ||
| 110 | BSP_CLOCK_STOP = 0x00, | ||
| 111 | BSP_GEN_RANDOM_KEY = 0x4A, | ||
| 112 | BSP_RESTORE_RANDOM_KEY = 0x55, | ||
| 113 | BSP_GEN_FIXED_KEY = 0x63, | ||
| 114 | }; | ||
| 115 | |||
| 116 | #define PM_INITIATE 0x01 | ||
| 117 | #define PM_INITIATE_SUCCESS 0x00 | ||
| 118 | #define PM_INITIATE_FAIL 0xfe | ||
| 119 | |||
| 120 | static struct brcmstb_pm_control ctrl; | ||
| 121 | |||
| 122 | static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base, | ||
| 123 | void __iomem *ddr_phy_pll_status); | ||
| 124 | |||
| 125 | static int brcmstb_init_sram(struct device_node *dn) | ||
| 126 | { | ||
| 127 | void __iomem *sram; | ||
| 128 | struct resource res; | ||
| 129 | int ret; | ||
| 130 | |||
| 131 | ret = of_address_to_resource(dn, 0, &res); | ||
| 132 | if (ret) | ||
| 133 | return ret; | ||
| 134 | |||
| 135 | /* Uncached, executable remapping of SRAM */ | ||
| 136 | sram = __arm_ioremap_exec(res.start, resource_size(&res), false); | ||
| 137 | if (!sram) | ||
| 138 | return -ENOMEM; | ||
| 139 | |||
| 140 | ctrl.boot_sram = sram; | ||
| 141 | ctrl.boot_sram_len = resource_size(&res); | ||
| 142 | |||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 146 | static const struct of_device_id sram_dt_ids[] = { | ||
| 147 | { .compatible = "mmio-sram" }, | ||
| 148 | { /* sentinel */ } | ||
| 149 | }; | ||
| 150 | |||
| 151 | static int do_bsp_initiate_command(enum bsp_initiate_command cmd) | ||
| 152 | { | ||
| 153 | void __iomem *base = ctrl.aon_ctrl_base; | ||
| 154 | int ret; | ||
| 155 | int timeo = 1000 * 1000; /* 1 second */ | ||
| 156 | |||
| 157 | writel_relaxed(0, base + AON_CTRL_PM_INITIATE); | ||
| 158 | (void)readl_relaxed(base + AON_CTRL_PM_INITIATE); | ||
| 159 | |||
| 160 | /* Go! */ | ||
| 161 | writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE); | ||
| 162 | |||
| 163 | /* | ||
| 164 | * If firmware doesn't support the 'ack', then just assume it's done | ||
| 165 | * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP. | ||
| 166 | */ | ||
| 167 | if (of_machine_is_compatible("brcm,bcm74371a0")) { | ||
| 168 | (void)readl_relaxed(base + AON_CTRL_PM_INITIATE); | ||
| 169 | mdelay(10); | ||
| 170 | return 0; | ||
| 171 | } | ||
| 172 | |||
| 173 | for (;;) { | ||
| 174 | ret = readl_relaxed(base + AON_CTRL_PM_INITIATE); | ||
| 175 | if (!(ret & PM_INITIATE)) | ||
| 176 | break; | ||
| 177 | if (timeo <= 0) { | ||
| 178 | pr_err("error: timeout waiting for BSP (%x)\n", ret); | ||
| 179 | break; | ||
| 180 | } | ||
| 181 | timeo -= 50; | ||
| 182 | udelay(50); | ||
| 183 | } | ||
| 184 | |||
| 185 | return (ret & 0xff) != PM_INITIATE_SUCCESS; | ||
| 186 | } | ||
| 187 | |||
| 188 | static int brcmstb_pm_handshake(void) | ||
| 189 | { | ||
| 190 | void __iomem *base = ctrl.aon_ctrl_base; | ||
| 191 | u32 tmp; | ||
| 192 | int ret; | ||
| 193 | |||
| 194 | /* BSP power handshake, v1 */ | ||
| 195 | tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS); | ||
| 196 | tmp &= ~1UL; | ||
| 197 | writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS); | ||
| 198 | (void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS); | ||
| 199 | |||
| 200 | ret = do_bsp_initiate_command(BSP_CLOCK_STOP); | ||
| 201 | if (ret) | ||
| 202 | pr_err("BSP handshake failed\n"); | ||
| 203 | |||
| 204 | /* | ||
| 205 | * HACK: BSP may have internal race on the CLOCK_STOP command. | ||
| 206 | * Avoid touching the BSP for a few milliseconds. | ||
| 207 | */ | ||
| 208 | mdelay(3); | ||
| 209 | |||
| 210 | return ret; | ||
| 211 | } | ||
| 212 | |||
| 213 | static inline void shimphy_set(u32 value, u32 mask) | ||
| 214 | { | ||
| 215 | int i; | ||
| 216 | |||
| 217 | if (!ctrl.needs_ddr_pad) | ||
| 218 | return; | ||
| 219 | |||
| 220 | for (i = 0; i < ctrl.num_memc; i++) { | ||
| 221 | u32 tmp; | ||
| 222 | |||
| 223 | tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base + | ||
| 224 | SHIMPHY_DDR_PAD_CNTRL); | ||
| 225 | tmp = value | (tmp & mask); | ||
| 226 | writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base + | ||
| 227 | SHIMPHY_DDR_PAD_CNTRL); | ||
| 228 | } | ||
| 229 | wmb(); /* Complete sequence in order. */ | ||
| 230 | } | ||
| 231 | |||
| 232 | static inline void ddr_ctrl_set(bool warmboot) | ||
| 233 | { | ||
| 234 | int i; | ||
| 235 | |||
| 236 | for (i = 0; i < ctrl.num_memc; i++) { | ||
| 237 | u32 tmp; | ||
| 238 | |||
| 239 | tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl + | ||
| 240 | ctrl.warm_boot_offset); | ||
| 241 | if (warmboot) | ||
| 242 | tmp |= 1; | ||
| 243 | else | ||
| 244 | tmp &= ~1; /* Cold boot */ | ||
| 245 | writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl + | ||
| 246 | ctrl.warm_boot_offset); | ||
| 247 | } | ||
| 248 | /* Complete sequence in order */ | ||
| 249 | wmb(); | ||
| 250 | } | ||
| 251 | |||
| 252 | static inline void s3entry_method0(void) | ||
| 253 | { | ||
| 254 | shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE, | ||
| 255 | 0xffffffff); | ||
| 256 | } | ||
| 257 | |||
| 258 | static inline void s3entry_method1(void) | ||
| 259 | { | ||
| 260 | /* | ||
| 261 | * S3 Entry Sequence | ||
| 262 | * ----------------- | ||
| 263 | * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3 | ||
| 264 | * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1 | ||
| 265 | */ | ||
| 266 | shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL << | ||
| 267 | SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT), | ||
| 268 | ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK); | ||
| 269 | |||
| 270 | ddr_ctrl_set(true); | ||
| 271 | } | ||
| 272 | |||
| 273 | static inline void s5entry_method1(void) | ||
| 274 | { | ||
| 275 | int i; | ||
| 276 | |||
| 277 | /* | ||
| 278 | * S5 Entry Sequence | ||
| 279 | * ----------------- | ||
| 280 | * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3 | ||
| 281 | * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0 | ||
| 282 | * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0 | ||
| 283 | * DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0 | ||
| 284 | */ | ||
| 285 | shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL << | ||
| 286 | SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT), | ||
| 287 | ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK); | ||
| 288 | |||
| 289 | ddr_ctrl_set(false); | ||
| 290 | |||
| 291 | for (i = 0; i < ctrl.num_memc; i++) { | ||
| 292 | u32 tmp; | ||
| 293 | |||
| 294 | /* Step 3: Channel A (RST_N = CKE = 0) */ | ||
| 295 | tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base + | ||
| 296 | ctrl.phy_a_standby_ctrl_offs); | ||
| 297 | tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE); | ||
| 298 | writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base + | ||
| 299 | ctrl.phy_a_standby_ctrl_offs); | ||
| 300 | |||
| 301 | /* Step 3: Channel B? */ | ||
| 302 | if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) { | ||
| 303 | tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base + | ||
| 304 | ctrl.phy_b_standby_ctrl_offs); | ||
| 305 | tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE); | ||
| 306 | writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base + | ||
| 307 | ctrl.phy_b_standby_ctrl_offs); | ||
| 308 | } | ||
| 309 | } | ||
| 310 | /* Must complete */ | ||
| 311 | wmb(); | ||
| 312 | } | ||
| 313 | |||
| 314 | /* | ||
| 315 | * Run a Power Management State Machine (PMSM) shutdown command and put the CPU | ||
| 316 | * into a low-power mode | ||
| 317 | */ | ||
| 318 | static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite) | ||
| 319 | { | ||
| 320 | void __iomem *base = ctrl.aon_ctrl_base; | ||
| 321 | |||
| 322 | if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG)) | ||
| 323 | s5entry_method1(); | ||
| 324 | |||
| 325 | /* pm_start_pwrdn transition 0->1 */ | ||
| 326 | writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL); | ||
| 327 | |||
| 328 | if (!onewrite) { | ||
| 329 | (void)readl_relaxed(base + AON_CTRL_PM_CTRL); | ||
| 330 | |||
| 331 | writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL); | ||
| 332 | (void)readl_relaxed(base + AON_CTRL_PM_CTRL); | ||
| 333 | } | ||
| 334 | wfi(); | ||
| 335 | } | ||
| 336 | |||
| 337 | /* Support S5 cold boot out of "poweroff" */ | ||
| 338 | static void brcmstb_pm_poweroff(void) | ||
| 339 | { | ||
| 340 | brcmstb_pm_handshake(); | ||
| 341 | |||
| 342 | /* Clear magic S3 warm-boot value */ | ||
| 343 | writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS); | ||
| 344 | (void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS); | ||
| 345 | |||
| 346 | /* Skip wait-for-interrupt signal; just use a countdown */ | ||
| 347 | writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT); | ||
| 348 | (void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT); | ||
| 349 | |||
| 350 | if (ctrl.s3entry_method == 1) { | ||
| 351 | shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL << | ||
| 352 | SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT), | ||
| 353 | ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK); | ||
| 354 | ddr_ctrl_set(false); | ||
| 355 | brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true); | ||
| 356 | return; /* We should never actually get here */ | ||
| 357 | } | ||
| 358 | |||
| 359 | brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false); | ||
| 360 | } | ||
| 361 | |||
| 362 | static void *brcmstb_pm_copy_to_sram(void *fn, size_t len) | ||
| 363 | { | ||
| 364 | unsigned int size = ALIGN(len, FNCPY_ALIGN); | ||
| 365 | |||
| 366 | if (ctrl.boot_sram_len < size) { | ||
| 367 | pr_err("standby code will not fit in SRAM\n"); | ||
| 368 | return NULL; | ||
| 369 | } | ||
| 370 | |||
| 371 | return fncpy(ctrl.boot_sram, fn, size); | ||
| 372 | } | ||
| 373 | |||
| 374 | /* | ||
| 375 | * S2 suspend/resume picks up where we left off, so we must execute carefully | ||
| 376 | * from SRAM, in order to allow DDR to come back up safely before we continue. | ||
| 377 | */ | ||
| 378 | static int brcmstb_pm_s2(void) | ||
| 379 | { | ||
| 380 | /* A previous S3 may have left settings that are hazardous to S2, so reset them. */ | ||
| 381 | if (ctrl.s3entry_method == 1) { | ||
| 382 | shimphy_set((PWRDWN_SEQ_NO_SEQUENCING << | ||
| 383 | SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT), | ||
| 384 | ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK); | ||
| 385 | ddr_ctrl_set(false); | ||
| 386 | } | ||
| 387 | |||
| 388 | brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2, | ||
| 389 | brcmstb_pm_do_s2_sz); | ||
| 390 | if (!brcmstb_pm_do_s2_sram) | ||
| 391 | return -EINVAL; | ||
| 392 | |||
| 393 | return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base, | ||
| 394 | ctrl.memcs[0].ddr_phy_base + | ||
| 395 | ctrl.pll_status_offset); | ||
| 396 | } | ||
| 397 | |||
| 398 | /* | ||
| 399 | * This function is called on a new stack, so don't allow inlining (which will | ||
| 400 | * generate stack references on the old stack). It cannot be made static because | ||
| 401 | * it is referenced by name from the inline assembly in brcmstb_pm_do_s3(). | ||
| 402 | */ | ||
| 403 | noinline int brcmstb_pm_s3_finish(void) | ||
| 404 | { | ||
| 405 | struct brcmstb_s3_params *params = ctrl.s3_params; | ||
| 406 | dma_addr_t params_pa = ctrl.s3_params_pa; | ||
| 407 | phys_addr_t reentry = virt_to_phys(&cpu_resume); | ||
| 408 | enum bsp_initiate_command cmd; | ||
| 409 | u32 flags; | ||
| 410 | |||
| 411 | /* | ||
| 412 | * Clear parameter structure, but not DTU area, which has already been | ||
| 413 | * filled in. We know DTU is at the end, so we can just subtract its | ||
| 414 | * size. | ||
| 415 | */ | ||
| 416 | memset(params, 0, sizeof(*params) - sizeof(params->dtu)); | ||
| 417 | |||
| 418 | flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS); | ||
| 419 | |||
| 420 | flags &= S3_BOOTLOADER_RESERVED; | ||
| 421 | flags |= S3_FLAG_NO_MEM_VERIFY; | ||
| 422 | flags |= S3_FLAG_LOAD_RANDKEY; | ||
| 423 | |||
| 424 | /* Load random / fixed key */ | ||
| 425 | if (flags & S3_FLAG_LOAD_RANDKEY) | ||
| 426 | cmd = BSP_GEN_RANDOM_KEY; | ||
| 427 | else | ||
| 428 | cmd = BSP_GEN_FIXED_KEY; | ||
| 429 | if (do_bsp_initiate_command(cmd)) { | ||
| 430 | pr_info("key loading failed\n"); | ||
| 431 | return -EIO; | ||
| 432 | } | ||
| 433 | |||
| 434 | params->magic = BRCMSTB_S3_MAGIC; | ||
| 435 | params->reentry = reentry; | ||
| 436 | |||
| 437 | /* No more writes to DRAM */ | ||
| 438 | flush_cache_all(); | ||
| 439 | |||
| 440 | flags |= BRCMSTB_S3_MAGIC_SHORT; | ||
| 441 | |||
| 442 | writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS); | ||
| 443 | writel_relaxed(lower_32_bits(params_pa), | ||
| 444 | ctrl.aon_sram + AON_REG_CONTROL_LOW); | ||
| 445 | writel_relaxed(upper_32_bits(params_pa), | ||
| 446 | ctrl.aon_sram + AON_REG_CONTROL_HIGH); | ||
| 447 | |||
| 448 | switch (ctrl.s3entry_method) { | ||
| 449 | case 0: | ||
| 450 | s3entry_method0(); | ||
| 451 | brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false); | ||
| 452 | break; | ||
| 453 | case 1: | ||
| 454 | s3entry_method1(); | ||
| 455 | brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true); | ||
| 456 | break; | ||
| 457 | default: | ||
| 458 | return -EINVAL; | ||
| 459 | } | ||
| 460 | |||
| 461 | /* Must have been interrupted from wfi()? */ | ||
| 462 | return -EINTR; | ||
| 463 | } | ||
| 464 | |||
| 465 | static int brcmstb_pm_do_s3(unsigned long sp) | ||
| 466 | { | ||
| 467 | unsigned long save_sp; | ||
| 468 | int ret; | ||
| 469 | |||
| 470 | asm volatile ( | ||
| 471 | "mov %[save], sp\n" | ||
| 472 | "mov sp, %[new]\n" | ||
| 473 | "bl brcmstb_pm_s3_finish\n" | ||
| 474 | "mov %[ret], r0\n" | ||
| 475 | "mov %[new], sp\n" | ||
| 476 | "mov sp, %[save]\n" | ||
| 477 | : [save] "=&r" (save_sp), [ret] "=&r" (ret) | ||
| 478 | : [new] "r" (sp) | ||
| 479 | ); | ||
| 480 | |||
| 481 | return ret; | ||
| 482 | } | ||
| 483 | |||
| 484 | static int brcmstb_pm_s3(void) | ||
| 485 | { | ||
| 486 | void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len; | ||
| 487 | |||
| 488 | return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3); | ||
| 489 | } | ||
| 490 | |||
| 491 | static int brcmstb_pm_standby(bool deep_standby) | ||
| 492 | { | ||
| 493 | int ret; | ||
| 494 | |||
| 495 | if (brcmstb_pm_handshake()) | ||
| 496 | return -EIO; | ||
| 497 | |||
| 498 | if (deep_standby) | ||
| 499 | ret = brcmstb_pm_s3(); | ||
| 500 | else | ||
| 501 | ret = brcmstb_pm_s2(); | ||
| 502 | if (ret) | ||
| 503 | pr_err("%s: standby failed\n", __func__); | ||
| 504 | |||
| 505 | return ret; | ||
| 506 | } | ||
| 507 | |||
| 508 | static int brcmstb_pm_enter(suspend_state_t state) | ||
| 509 | { | ||
| 510 | int ret = -EINVAL; | ||
| 511 | |||
| 512 | switch (state) { | ||
| 513 | case PM_SUSPEND_STANDBY: | ||
| 514 | ret = brcmstb_pm_standby(false); | ||
| 515 | break; | ||
| 516 | case PM_SUSPEND_MEM: | ||
| 517 | ret = brcmstb_pm_standby(true); | ||
| 518 | break; | ||
| 519 | } | ||
| 520 | |||
| 521 | return ret; | ||
| 522 | } | ||
| 523 | |||
| 524 | static int brcmstb_pm_valid(suspend_state_t state) | ||
| 525 | { | ||
| 526 | switch (state) { | ||
| 527 | case PM_SUSPEND_STANDBY: | ||
| 528 | return true; | ||
| 529 | case PM_SUSPEND_MEM: | ||
| 530 | return ctrl.support_warm_boot; | ||
| 531 | default: | ||
| 532 | return false; | ||
| 533 | } | ||
| 534 | } | ||
| 535 | |||
| 536 | static const struct platform_suspend_ops brcmstb_pm_ops = { | ||
| 537 | .enter = brcmstb_pm_enter, | ||
| 538 | .valid = brcmstb_pm_valid, | ||
| 539 | }; | ||
| 540 | |||
| 541 | static const struct of_device_id aon_ctrl_dt_ids[] = { | ||
| 542 | { .compatible = "brcm,brcmstb-aon-ctrl" }, | ||
| 543 | {} | ||
| 544 | }; | ||
| 545 | |||
| 546 | struct ddr_phy_ofdata { | ||
| 547 | bool supports_warm_boot; | ||
| 548 | size_t pll_status_offset; | ||
| 549 | int s3entry_method; | ||
| 550 | u32 warm_boot_offset; | ||
| 551 | u32 phy_a_standby_ctrl_offs; | ||
| 552 | u32 phy_b_standby_ctrl_offs; | ||
| 553 | }; | ||
| 554 | |||
| 555 | static struct ddr_phy_ofdata ddr_phy_71_1 = { | ||
| 556 | .supports_warm_boot = true, | ||
| 557 | .pll_status_offset = 0x0c, | ||
| 558 | .s3entry_method = 1, | ||
| 559 | .warm_boot_offset = 0x2c, | ||
| 560 | .phy_a_standby_ctrl_offs = 0x198, | ||
| 561 | .phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL | ||
| 562 | }; | ||
| 563 | |||
| 564 | static struct ddr_phy_ofdata ddr_phy_72_0 = { | ||
| 565 | .supports_warm_boot = true, | ||
| 566 | .pll_status_offset = 0x10, | ||
| 567 | .s3entry_method = 1, | ||
| 568 | .warm_boot_offset = 0x40, | ||
| 569 | .phy_a_standby_ctrl_offs = 0x2a4, | ||
| 570 | .phy_b_standby_ctrl_offs = 0x8a4 | ||
| 571 | }; | ||
| 572 | |||
| 573 | static struct ddr_phy_ofdata ddr_phy_225_1 = { | ||
| 574 | .supports_warm_boot = false, | ||
| 575 | .pll_status_offset = 0x4, | ||
| 576 | .s3entry_method = 0 | ||
| 577 | }; | ||
| 578 | |||
| 579 | static struct ddr_phy_ofdata ddr_phy_240_1 = { | ||
| 580 | .supports_warm_boot = true, | ||
| 581 | .pll_status_offset = 0x4, | ||
| 582 | .s3entry_method = 0 | ||
| 583 | }; | ||
| 584 | |||
| 585 | static const struct of_device_id ddr_phy_dt_ids[] = { | ||
| 586 | { | ||
| 587 | .compatible = "brcm,brcmstb-ddr-phy-v71.1", | ||
| 588 | .data = &ddr_phy_71_1, | ||
| 589 | }, | ||
| 590 | { | ||
| 591 | .compatible = "brcm,brcmstb-ddr-phy-v72.0", | ||
| 592 | .data = &ddr_phy_72_0, | ||
| 593 | }, | ||
| 594 | { | ||
| 595 | .compatible = "brcm,brcmstb-ddr-phy-v225.1", | ||
| 596 | .data = &ddr_phy_225_1, | ||
| 597 | }, | ||
| 598 | { | ||
| 599 | .compatible = "brcm,brcmstb-ddr-phy-v240.1", | ||
| 600 | .data = &ddr_phy_240_1, | ||
| 601 | }, | ||
| 602 | { | ||
| 603 | /* Same as v240.1, for the registers we care about */ | ||
| 604 | .compatible = "brcm,brcmstb-ddr-phy-v240.2", | ||
| 605 | .data = &ddr_phy_240_1, | ||
| 606 | }, | ||
| 607 | {} | ||
| 608 | }; | ||
| 609 | |||
| 610 | struct ddr_seq_ofdata { | ||
| 611 | bool needs_ddr_pad; | ||
| 612 | u32 warm_boot_offset; | ||
| 613 | }; | ||
| 614 | |||
| 615 | static const struct ddr_seq_ofdata ddr_seq_b22 = { | ||
| 616 | .needs_ddr_pad = false, | ||
| 617 | .warm_boot_offset = 0x2c, | ||
| 618 | }; | ||
| 619 | |||
| 620 | static const struct ddr_seq_ofdata ddr_seq = { | ||
| 621 | .needs_ddr_pad = true, | ||
| 622 | }; | ||
| 623 | |||
| 624 | static const struct of_device_id ddr_shimphy_dt_ids[] = { | ||
| 625 | { .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" }, | ||
| 626 | {} | ||
| 627 | }; | ||
| 628 | |||
| 629 | static const struct of_device_id brcmstb_memc_of_match[] = { | ||
| 630 | { | ||
| 631 | .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2", | ||
| 632 | .data = &ddr_seq_b22, | ||
| 633 | }, | ||
| 634 | { | ||
| 635 | .compatible = "brcm,brcmstb-memc-ddr", | ||
| 636 | .data = &ddr_seq, | ||
| 637 | }, | ||
| 638 | {}, | ||
| 639 | }; | ||
| 640 | |||
| 641 | static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches, | ||
| 642 | int index, const void **ofdata) | ||
| 643 | { | ||
| 644 | struct device_node *dn; | ||
| 645 | const struct of_device_id *match; | ||
| 646 | |||
| 647 | dn = of_find_matching_node_and_match(NULL, matches, &match); | ||
| 648 | if (!dn) | ||
| 649 | return ERR_PTR(-EINVAL); | ||
| 650 | |||
| 651 | if (ofdata) | ||
| 652 | *ofdata = match->data; | ||
| 653 | |||
| 654 | return of_io_request_and_map(dn, index, dn->full_name); | ||
| 655 | } | ||
| 656 | |||
| 657 | static int brcmstb_pm_panic_notify(struct notifier_block *nb, | ||
| 658 | unsigned long action, void *data) | ||
| 659 | { | ||
| 660 | writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC); | ||
| 661 | |||
| 662 | return NOTIFY_DONE; | ||
| 663 | } | ||
| 664 | |||
| 665 | static struct notifier_block brcmstb_pm_panic_nb = { | ||
| 666 | .notifier_call = brcmstb_pm_panic_notify, | ||
| 667 | }; | ||
| 668 | |||
| 669 | static int brcmstb_pm_probe(struct platform_device *pdev) | ||
| 670 | { | ||
| 671 | const struct ddr_phy_ofdata *ddr_phy_data; | ||
| 672 | const struct ddr_seq_ofdata *ddr_seq_data; | ||
| 673 | const struct of_device_id *of_id = NULL; | ||
| 674 | struct device_node *dn; | ||
| 675 | void __iomem *base; | ||
| 676 | int ret, i; | ||
| 677 | |||
| 678 | /* AON ctrl registers */ | ||
| 679 | base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL); | ||
| 680 | if (IS_ERR(base)) { | ||
| 681 | pr_err("error mapping AON_CTRL\n"); | ||
| 682 | return PTR_ERR(base); | ||
| 683 | } | ||
| 684 | ctrl.aon_ctrl_base = base; | ||
| 685 | |||
| 686 | /* AON SRAM registers */ | ||
| 687 | base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL); | ||
| 688 | if (IS_ERR(base)) { | ||
| 689 | /* Assume standard offset */ | ||
| 690 | ctrl.aon_sram = ctrl.aon_ctrl_base + | ||
| 691 | AON_CTRL_SYSTEM_DATA_RAM_OFS; | ||
| 692 | } else { | ||
| 693 | ctrl.aon_sram = base; | ||
| 694 | } | ||
| 695 | |||
| 696 | writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC); | ||
| 697 | |||
| 698 | /* DDR PHY registers */ | ||
| 699 | base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0, | ||
| 700 | (const void **)&ddr_phy_data); | ||
| 701 | if (IS_ERR(base)) { | ||
| 702 | pr_err("error mapping DDR PHY\n"); | ||
| 703 | return PTR_ERR(base); | ||
| 704 | } | ||
| 705 | ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot; | ||
| 706 | ctrl.pll_status_offset = ddr_phy_data->pll_status_offset; | ||
| 707 | /* Only need DDR PHY 0 for now? */ | ||
| 708 | ctrl.memcs[0].ddr_phy_base = base; | ||
| 709 | ctrl.s3entry_method = ddr_phy_data->s3entry_method; | ||
| 710 | ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs; | ||
| 711 | ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs; | ||
| 712 | /* | ||
| 713 | * Slightly gross to use the PHY version to get a memc | ||
| 714 | * offset, but that is the only versioned thing so far | ||
| 715 | * that we can test for. | ||
| 716 | */ | ||
| 717 | ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset; | ||
| 718 | |||
| 719 | /* DDR SHIM-PHY registers */ | ||
| 720 | for_each_matching_node(dn, ddr_shimphy_dt_ids) { | ||
| 721 | i = ctrl.num_memc; | ||
| 722 | if (i >= MAX_NUM_MEMC) { | ||
| 723 | pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC); | ||
| 724 | break; | ||
| 725 | } | ||
| 726 | |||
| 727 | base = of_io_request_and_map(dn, 0, dn->full_name); | ||
| 728 | if (IS_ERR(base)) { | ||
| 729 | if (!ctrl.support_warm_boot) | ||
| 730 | break; | ||
| 731 | |||
| 732 | pr_err("error mapping DDR SHIMPHY %d\n", i); | ||
| 733 | return PTR_ERR(base); | ||
| 734 | } | ||
| 735 | ctrl.memcs[i].ddr_shimphy_base = base; | ||
| 736 | ctrl.num_memc++; | ||
| 737 | } | ||
| 738 | |||
| 739 | /* Sequencer DRAM Param and Control Registers */ | ||
| 740 | i = 0; | ||
| 741 | for_each_matching_node(dn, brcmstb_memc_of_match) { | ||
| 742 | base = of_iomap(dn, 0); | ||
| 743 | if (!base) { | ||
| 744 | pr_err("error mapping DDR Sequencer %d\n", i); | ||
| 745 | return -ENOMEM; | ||
| 746 | } | ||
| 747 | |||
| 748 | of_id = of_match_node(brcmstb_memc_of_match, dn); | ||
| 749 | if (!of_id) { | ||
| 750 | iounmap(base); | ||
| 751 | return -EINVAL; | ||
| 752 | } | ||
| 753 | |||
| 754 | ddr_seq_data = of_id->data; | ||
| 755 | ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad; | ||
| 756 | /* Adjust warm boot offset based on the DDR sequencer */ | ||
| 757 | if (ddr_seq_data->warm_boot_offset) | ||
| 758 | ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset; | ||
| 759 | |||
| 760 | ctrl.memcs[i].ddr_ctrl = base; | ||
| 761 | i++; | ||
| 762 | } | ||
| 763 | |||
| 764 | pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n", | ||
| 765 | ctrl.support_warm_boot, ctrl.s3entry_method, | ||
| 766 | ctrl.warm_boot_offset); | ||
| 767 | |||
| 768 | dn = of_find_matching_node(NULL, sram_dt_ids); | ||
| 769 | if (!dn) { | ||
| 770 | pr_err("SRAM not found\n"); | ||
| 771 | return -EINVAL; | ||
| 772 | } | ||
| 773 | |||
| 774 | ret = brcmstb_init_sram(dn); | ||
| 775 | if (ret) { | ||
| 776 | pr_err("error setting up SRAM for PM\n"); | ||
| 777 | return ret; | ||
| 778 | } | ||
| 779 | |||
| 780 | ctrl.pdev = pdev; | ||
| 781 | |||
| 782 | ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL); | ||
| 783 | if (!ctrl.s3_params) | ||
| 784 | return -ENOMEM; | ||
| 785 | ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params, | ||
| 786 | sizeof(*ctrl.s3_params), | ||
| 787 | DMA_TO_DEVICE); | ||
| 788 | if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) { | ||
| 789 | pr_err("error mapping DMA memory\n"); | ||
| 790 | ret = -ENOMEM; | ||
| 791 | goto out; | ||
| 792 | } | ||
| 793 | |||
| 794 | atomic_notifier_chain_register(&panic_notifier_list, | ||
| 795 | &brcmstb_pm_panic_nb); | ||
| 796 | |||
| 797 | pm_power_off = brcmstb_pm_poweroff; | ||
| 798 | suspend_set_ops(&brcmstb_pm_ops); | ||
| 799 | |||
| 800 | return 0; | ||
| 801 | |||
| 802 | out: | ||
| 803 | kfree(ctrl.s3_params); | ||
| 804 | |||
| 805 | pr_warn("PM: initialization failed with code %d\n", ret); | ||
| 806 | |||
| 807 | return ret; | ||
| 808 | } | ||
| 809 | |||
| 810 | static struct platform_driver brcmstb_pm_driver = { | ||
| 811 | .driver = { | ||
| 812 | .name = "brcmstb-pm", | ||
| 813 | .of_match_table = aon_ctrl_dt_ids, | ||
| 814 | }, | ||
| 815 | }; | ||
| 816 | |||
| 817 | static int __init brcmstb_pm_init(void) | ||
| 818 | { | ||
| 819 | return platform_driver_probe(&brcmstb_pm_driver, | ||
| 820 | brcmstb_pm_probe); | ||
| 821 | } | ||
| 822 | module_init(brcmstb_pm_init); | ||
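One detail of brcmstb_pm_s3_finish() worth spelling out: the word written to AON_REG_MAGIC_FLAGS carries BRCMSTB_S3_MAGIC_SHORT in its upper 16 bits and the S3 flags in the lower bits, with only the bootloader-owned bits of the previous value preserved. A standalone sketch of that composition (constants copied from aon_defs.h above; the starting flags value is invented):

```c
#include <stdint.h>
#include <stdio.h>

#define S3_FLAG_LOAD_RANDKEY	(1u << 0)
#define S3_FLAG_NO_MEM_VERIFY	(1u << 2)
#define S3_FLAG_PSCI_BOOT	(1u << 3)
#define S3_FLAG_BOOTED64	(1u << 4)
#define S3_BOOTLOADER_RESERVED	(S3_FLAG_PSCI_BOOT | S3_FLAG_BOOTED64)
#define BRCMSTB_S3_MAGIC_SHORT	0x5AFE0000u

int main(void)
{
	/* Invented starting point: bootloader left PSCI_BOOT set from the last boot. */
	uint32_t flags = S3_FLAG_PSCI_BOOT;

	flags &= S3_BOOTLOADER_RESERVED;	/* keep only the bootloader-owned bits */
	flags |= S3_FLAG_NO_MEM_VERIFY | S3_FLAG_LOAD_RANDKEY;
	flags |= BRCMSTB_S3_MAGIC_SHORT;	/* magic lives in the upper 16 bits */

	printf("AON_REG_MAGIC_FLAGS = 0x%08x\n", flags);	/* 0x5afe000d */
	return 0;
}
```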
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-mips.c b/drivers/soc/bcm/brcmstb/pm/pm-mips.c new file mode 100644 index 000000000000..9300b5f62e56 --- /dev/null +++ b/drivers/soc/bcm/brcmstb/pm/pm-mips.c | |||
| @@ -0,0 +1,461 @@ | |||
| 1 | /* | ||
| 2 | * MIPS-specific support for Broadcom STB S2/S3/S5 power management | ||
| 3 | * | ||
| 4 | * Copyright (C) 2016-2017 Broadcom | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/printk.h> | ||
| 18 | #include <linux/io.h> | ||
| 19 | #include <linux/of.h> | ||
| 20 | #include <linux/of_address.h> | ||
| 21 | #include <linux/delay.h> | ||
| 22 | #include <linux/suspend.h> | ||
| 23 | #include <asm/bmips.h> | ||
| 24 | #include <asm/tlbflush.h> | ||
| 25 | |||
| 26 | #include "pm.h" | ||
| 27 | |||
| 28 | #define S2_NUM_PARAMS 6 | ||
| 29 | #define MAX_NUM_MEMC 3 | ||
| 30 | |||
| 31 | /* S3 constants */ | ||
| 32 | #define MAX_GP_REGS 16 | ||
| 33 | #define MAX_CP0_REGS 32 | ||
| 34 | #define NUM_MEMC_CLIENTS 128 | ||
| 35 | #define AON_CTRL_RAM_SIZE 128 | ||
| 36 | #define BRCMSTB_S3_MAGIC 0x5AFEB007 | ||
| 37 | |||
| 38 | #define CLEAR_RESET_MASK 0x01 | ||
| 39 | |||
| 40 | /* Index each CP0 register that needs to be saved */ | ||
| 41 | #define CONTEXT 0 | ||
| 42 | #define USER_LOCAL 1 | ||
| 43 | #define PGMK 2 | ||
| 44 | #define HWRENA 3 | ||
| 45 | #define COMPARE 4 | ||
| 46 | #define STATUS 5 | ||
| 47 | #define CONFIG 6 | ||
| 48 | #define MODE 7 | ||
| 49 | #define EDSP 8 | ||
| 50 | #define BOOT_VEC 9 | ||
| 51 | #define EBASE 10 | ||
| 52 | |||
| 53 | struct brcmstb_memc { | ||
| 54 | void __iomem *ddr_phy_base; | ||
| 55 | void __iomem *arb_base; | ||
| 56 | }; | ||
| 57 | |||
| 58 | struct brcmstb_pm_control { | ||
| 59 | void __iomem *aon_ctrl_base; | ||
| 60 | void __iomem *aon_sram_base; | ||
| 61 | void __iomem *timers_base; | ||
| 62 | struct brcmstb_memc memcs[MAX_NUM_MEMC]; | ||
| 63 | int num_memc; | ||
| 64 | }; | ||
| 65 | |||
| 66 | struct brcm_pm_s3_context { | ||
| 67 | u32 cp0_regs[MAX_CP0_REGS]; | ||
| 68 | u32 memc0_rts[NUM_MEMC_CLIENTS]; | ||
| 69 | u32 sc_boot_vec; | ||
| 70 | }; | ||
| 71 | |||
| 72 | struct brcmstb_mem_transfer; | ||
| 73 | |||
| 74 | struct brcmstb_mem_transfer { | ||
| 75 | struct brcmstb_mem_transfer *next; | ||
| 76 | void *src; | ||
| 77 | void *dst; | ||
| 78 | dma_addr_t pa_src; | ||
| 79 | dma_addr_t pa_dst; | ||
| 80 | u32 len; | ||
| 81 | u8 key; | ||
| 82 | u8 mode; | ||
| 83 | u8 src_remapped; | ||
| 84 | u8 dst_remapped; | ||
| 85 | u8 src_dst_remapped; | ||
| 86 | }; | ||
| 87 | |||
| 88 | #define AON_SAVE_SRAM(base, idx, val) \ | ||
| 89 | __raw_writel(val, base + (idx << 2)) | ||
| 90 | |||
| 91 | /* Used for saving registers in asm */ | ||
| 92 | u32 gp_regs[MAX_GP_REGS]; | ||
| 93 | |||
| 94 | #define BSP_CLOCK_STOP 0x00 | ||
| 95 | #define PM_INITIATE 0x01 | ||
| 96 | |||
| 97 | static struct brcmstb_pm_control ctrl; | ||
| 98 | |||
| 99 | static void brcm_pm_save_cp0_context(struct brcm_pm_s3_context *ctx) | ||
| 100 | { | ||
| 101 | /* Generic MIPS */ | ||
| 102 | ctx->cp0_regs[CONTEXT] = read_c0_context(); | ||
| 103 | ctx->cp0_regs[USER_LOCAL] = read_c0_userlocal(); | ||
| 104 | ctx->cp0_regs[PGMK] = read_c0_pagemask(); | ||
| 105 | ctx->cp0_regs[HWRENA] = read_c0_cache(); | ||
| 106 | ctx->cp0_regs[COMPARE] = read_c0_compare(); | ||
| 107 | ctx->cp0_regs[STATUS] = read_c0_status(); | ||
| 108 | |||
| 109 | /* Broadcom specific */ | ||
| 110 | ctx->cp0_regs[CONFIG] = read_c0_brcm_config(); | ||
| 111 | ctx->cp0_regs[MODE] = read_c0_brcm_mode(); | ||
| 112 | ctx->cp0_regs[EDSP] = read_c0_brcm_edsp(); | ||
| 113 | ctx->cp0_regs[BOOT_VEC] = read_c0_brcm_bootvec(); | ||
| 114 | ctx->cp0_regs[EBASE] = read_c0_ebase(); | ||
| 115 | |||
| 116 | ctx->sc_boot_vec = bmips_read_zscm_reg(0xa0); | ||
| 117 | } | ||
| 118 | |||
| 119 | static void brcm_pm_restore_cp0_context(struct brcm_pm_s3_context *ctx) | ||
| 120 | { | ||
| 121 | /* Restore cp0 state */ | ||
| 122 | bmips_write_zscm_reg(0xa0, ctx->sc_boot_vec); | ||
| 123 | |||
| 124 | /* Generic MIPS */ | ||
| 125 | write_c0_context(ctx->cp0_regs[CONTEXT]); | ||
| 126 | write_c0_userlocal(ctx->cp0_regs[USER_LOCAL]); | ||
| 127 | write_c0_pagemask(ctx->cp0_regs[PGMK]); | ||
| 128 | write_c0_cache(ctx->cp0_regs[HWRENA]); | ||
| 129 | write_c0_compare(ctx->cp0_regs[COMPARE]); | ||
| 130 | write_c0_status(ctx->cp0_regs[STATUS]); | ||
| 131 | |||
| 132 | /* Broadcom specific */ | ||
| 133 | write_c0_brcm_config(ctx->cp0_regs[CONFIG]); | ||
| 134 | write_c0_brcm_mode(ctx->cp0_regs[MODE]); | ||
| 135 | write_c0_brcm_edsp(ctx->cp0_regs[EDSP]); | ||
| 136 | write_c0_brcm_bootvec(ctx->cp0_regs[BOOT_VEC]); | ||
| 137 | write_c0_ebase(ctx->cp0_regs[EBASE]); | ||
| 138 | } | ||
| 139 | |||
| 140 | static void brcmstb_pm_handshake(void) | ||
| 141 | { | ||
| 142 | void __iomem *base = ctrl.aon_ctrl_base; | ||
| 143 | u32 tmp; | ||
| 144 | |||
| 145 | /* BSP power handshake, v1 */ | ||
| 146 | tmp = __raw_readl(base + AON_CTRL_HOST_MISC_CMDS); | ||
| 147 | tmp &= ~1UL; | ||
| 148 | __raw_writel(tmp, base + AON_CTRL_HOST_MISC_CMDS); | ||
| 149 | (void)__raw_readl(base + AON_CTRL_HOST_MISC_CMDS); | ||
| 150 | |||
| 151 | __raw_writel(0, base + AON_CTRL_PM_INITIATE); | ||
| 152 | (void)__raw_readl(base + AON_CTRL_PM_INITIATE); | ||
| 153 | __raw_writel(BSP_CLOCK_STOP | PM_INITIATE, | ||
| 154 | base + AON_CTRL_PM_INITIATE); | ||
| 155 | /* | ||
| 156 | * HACK: BSP may have internal race on the CLOCK_STOP command. | ||
| 157 | * Avoid touching the BSP for a few milliseconds. | ||
| 158 | */ | ||
| 159 | mdelay(3); | ||
| 160 | } | ||
| 161 | |||
| 162 | static void brcmstb_pm_s5(void) | ||
| 163 | { | ||
| 164 | void __iomem *base = ctrl.aon_ctrl_base; | ||
| 165 | |||
| 166 | brcmstb_pm_handshake(); | ||
| 167 | |||
| 168 | /* Clear magic s3 warm-boot value */ | ||
| 169 | AON_SAVE_SRAM(ctrl.aon_sram_base, 0, 0); | ||
| 170 | |||
| 171 | /* Set the countdown */ | ||
| 172 | __raw_writel(0x10, base + AON_CTRL_PM_CPU_WAIT_COUNT); | ||
| 173 | (void)__raw_readl(base + AON_CTRL_PM_CPU_WAIT_COUNT); | ||
| 174 | |||
| 175 | /* Prepare for S5 cold boot */ | ||
| 176 | __raw_writel(PM_COLD_CONFIG, base + AON_CTRL_PM_CTRL); | ||
| 177 | (void)__raw_readl(base + AON_CTRL_PM_CTRL); | ||
| 178 | |||
| 179 | __raw_writel((PM_COLD_CONFIG | PM_PWR_DOWN), base + | ||
| 180 | AON_CTRL_PM_CTRL); | ||
| 181 | (void)__raw_readl(base + AON_CTRL_PM_CTRL); | ||
| 182 | |||
| 183 | __asm__ __volatile__( | ||
| 184 | " wait\n" | ||
| 185 | : : : "memory"); | ||
| 186 | } | ||
| 187 | |||
| 188 | static int brcmstb_pm_s3(void) | ||
| 189 | { | ||
| 190 | struct brcm_pm_s3_context s3_context; | ||
| 191 | void __iomem *memc_arb_base; | ||
| 192 | unsigned long flags; | ||
| 193 | u32 tmp; | ||
| 194 | int i; | ||
| 195 | |||
| 196 | /* Prepare for s3 */ | ||
| 197 | AON_SAVE_SRAM(ctrl.aon_sram_base, 0, BRCMSTB_S3_MAGIC); | ||
| 198 | AON_SAVE_SRAM(ctrl.aon_sram_base, 1, (u32)&s3_reentry); | ||
| 199 | AON_SAVE_SRAM(ctrl.aon_sram_base, 2, 0); | ||
| 200 | |||
| 201 | /* Clear RESET_HISTORY */ | ||
| 202 | tmp = __raw_readl(ctrl.aon_ctrl_base + AON_CTRL_RESET_CTRL); | ||
| 203 | tmp &= ~CLEAR_RESET_MASK; | ||
| 204 | __raw_writel(tmp, ctrl.aon_ctrl_base + AON_CTRL_RESET_CTRL); | ||
| 205 | |||
| 206 | local_irq_save(flags); | ||
| 207 | |||
| 208 | /* Inhibit DDR_RSTb pulse for both MEMCs */ | ||
| 209 | for (i = 0; i < ctrl.num_memc; i++) { | ||
| 210 | tmp = __raw_readl(ctrl.memcs[i].ddr_phy_base + | ||
| 211 | DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL); | ||
| 212 | |||
| 213 | tmp &= ~0x0f; | ||
| 214 | __raw_writel(tmp, ctrl.memcs[i].ddr_phy_base + | ||
| 215 | DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL); | ||
| 216 | tmp |= (0x05 | BIT(5)); | ||
| 217 | __raw_writel(tmp, ctrl.memcs[i].ddr_phy_base + | ||
| 218 | DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL); | ||
| 219 | } | ||
| 220 | |||
| 221 | /* Save CP0 context */ | ||
| 222 | brcm_pm_save_cp0_context(&s3_context); | ||
| 223 | |||
| 224 | /* Save RTS (skip debug register) */ | ||
| 225 | memc_arb_base = ctrl.memcs[0].arb_base + 4; | ||
| 226 | for (i = 0; i < NUM_MEMC_CLIENTS; i++) { | ||
| 227 | s3_context.memc0_rts[i] = __raw_readl(memc_arb_base); | ||
| 228 | memc_arb_base += 4; | ||
| 229 | } | ||
| 230 | |||
| 231 | /* Save I/O context */ | ||
| 232 | local_flush_tlb_all(); | ||
| 233 | _dma_cache_wback_inv(0, ~0); | ||
| 234 | |||
| 235 | brcm_pm_do_s3(ctrl.aon_ctrl_base, current_cpu_data.dcache.linesz); | ||
| 236 | |||
| 237 | /* CPU reconfiguration */ | ||
| 238 | local_flush_tlb_all(); | ||
| 239 | bmips_cpu_setup(); | ||
| 240 | cpumask_clear(&bmips_booted_mask); | ||
| 241 | |||
| 242 | /* Restore RTS (skip debug register) */ | ||
| 243 | memc_arb_base = ctrl.memcs[0].arb_base + 4; | ||
| 244 | for (i = 0; i < NUM_MEMC_CLIENTS; i++) { | ||
| 245 | __raw_writel(s3_context.memc0_rts[i], memc_arb_base); | ||
| 246 | memc_arb_base += 4; | ||
| 247 | } | ||
| 248 | |||
| 249 | /* Restore CP0 context */ | ||
| 250 | brcm_pm_restore_cp0_context(&s3_context); | ||
| 251 | |||
| 252 | local_irq_restore(flags); | ||
| 253 | |||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | |||
| 257 | static int brcmstb_pm_s2(void) | ||
| 258 | { | ||
| 259 | /* | ||
| 260 | * We need to pass 6 arguments to an assembly function. Let's avoid the | ||
| 261 | * stack and pass arguments in an explicit array of 4-byte words. The assembly | ||
| 262 | * code assumes all arguments are 4 bytes and arguments are ordered | ||
| 263 | * like so: | ||
| 264 | * | ||
| 265 | * 0: AON_CTRL base register | ||
| 266 | * 1: DDR_PHY base register | ||
| 267 | * 2: TIMERS base register | ||
| 268 | * 3: I-Cache line size | ||
| 269 | * 4: Restart vector address | ||
| 270 | * 5: Restart vector size | ||
| 271 | */ | ||
| 272 | u32 s2_params[6]; | ||
| 273 | |||
| 274 | /* Prepare s2 parameters */ | ||
| 275 | s2_params[0] = (u32)ctrl.aon_ctrl_base; | ||
| 276 | s2_params[1] = (u32)ctrl.memcs[0].ddr_phy_base; | ||
| 277 | s2_params[2] = (u32)ctrl.timers_base; | ||
| 278 | s2_params[3] = (u32)current_cpu_data.icache.linesz; | ||
| 279 | s2_params[4] = (u32)BMIPS_WARM_RESTART_VEC; | ||
| 280 | s2_params[5] = (u32)(bmips_smp_int_vec_end - | ||
| 281 | bmips_smp_int_vec); | ||
| 282 | |||
| 283 | /* Drop to standby */ | ||
| 284 | brcm_pm_do_s2(s2_params); | ||
| 285 | |||
| 286 | return 0; | ||
| 287 | } | ||
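The slot layout that brcmstb_pm_s2() and the brcm_pm_do_s2 assembly agree on can also be read as a table of indices; the hypothetical enum below is illustrative only (the driver itself uses bare array indices), and each slot matches one of the lw offsets 0..20 in s2-mips.S:

	/* Hypothetical names for the s2_params[] slots (sketch, not part of the driver). */
	enum brcm_pm_s2_param {
		S2_PARAM_AON_CTRL_BASE  = 0,	/* loaded into s0 by brcm_pm_do_s2 */
		S2_PARAM_DDR_PHY_BASE   = 1,	/* s1 */
		S2_PARAM_TIMERS_BASE    = 2,	/* s2 */
		S2_PARAM_ICACHE_LINESZ  = 3,	/* s3 */
		S2_PARAM_RESTART_VEC    = 4,	/* s4 */
		S2_PARAM_RESTART_VEC_SZ = 5,	/* s5 */
	};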
| 288 | |||
| 289 | static int brcmstb_pm_standby(bool deep_standby) | ||
| 290 | { | ||
| 291 | brcmstb_pm_handshake(); | ||
| 292 | |||
| 293 | /* Send IRQs to BMIPS_WARM_RESTART_VEC */ | ||
| 294 | clear_c0_cause(CAUSEF_IV); | ||
| 295 | irq_disable_hazard(); | ||
| 296 | set_c0_status(ST0_BEV); | ||
| 297 | irq_disable_hazard(); | ||
| 298 | |||
| 299 | if (deep_standby) | ||
| 300 | brcmstb_pm_s3(); | ||
| 301 | else | ||
| 302 | brcmstb_pm_s2(); | ||
| 303 | |||
| 304 | /* Send IRQs to normal runtime vectors */ | ||
| 305 | clear_c0_status(ST0_BEV); | ||
| 306 | irq_disable_hazard(); | ||
| 307 | set_c0_cause(CAUSEF_IV); | ||
| 308 | irq_disable_hazard(); | ||
| 309 | |||
| 310 | return 0; | ||
| 311 | } | ||
| 312 | |||
| 313 | static int brcmstb_pm_enter(suspend_state_t state) | ||
| 314 | { | ||
| 315 | int ret = -EINVAL; | ||
| 316 | |||
| 317 | switch (state) { | ||
| 318 | case PM_SUSPEND_STANDBY: | ||
| 319 | ret = brcmstb_pm_standby(false); | ||
| 320 | break; | ||
| 321 | case PM_SUSPEND_MEM: | ||
| 322 | ret = brcmstb_pm_standby(true); | ||
| 323 | break; | ||
| 324 | } | ||
| 325 | |||
| 326 | return ret; | ||
| 327 | } | ||
| 328 | |||
| 329 | static int brcmstb_pm_valid(suspend_state_t state) | ||
| 330 | { | ||
| 331 | switch (state) { | ||
| 332 | case PM_SUSPEND_STANDBY: | ||
| 333 | return true; | ||
| 334 | case PM_SUSPEND_MEM: | ||
| 335 | return true; | ||
| 336 | default: | ||
| 337 | return false; | ||
| 338 | } | ||
| 339 | } | ||
| 340 | |||
| 341 | static const struct platform_suspend_ops brcmstb_pm_ops = { | ||
| 342 | .enter = brcmstb_pm_enter, | ||
| 343 | .valid = brcmstb_pm_valid, | ||
| 344 | }; | ||
| 345 | |||
| 346 | static const struct of_device_id aon_ctrl_dt_ids[] = { | ||
| 347 | { .compatible = "brcm,brcmstb-aon-ctrl" }, | ||
| 348 | { /* sentinel */ } | ||
| 349 | }; | ||
| 350 | |||
| 351 | static const struct of_device_id ddr_phy_dt_ids[] = { | ||
| 352 | { .compatible = "brcm,brcmstb-ddr-phy" }, | ||
| 353 | { /* sentinel */ } | ||
| 354 | }; | ||
| 355 | |||
| 356 | static const struct of_device_id arb_dt_ids[] = { | ||
| 357 | { .compatible = "brcm,brcmstb-memc-arb" }, | ||
| 358 | { /* sentinel */ } | ||
| 359 | }; | ||
| 360 | |||
| 361 | static const struct of_device_id timers_ids[] = { | ||
| 362 | { .compatible = "brcm,brcmstb-timers" }, | ||
| 363 | { /* sentinel */ } | ||
| 364 | }; | ||
| 365 | |||
| 366 | static inline void __iomem *brcmstb_ioremap_node(struct device_node *dn, | ||
| 367 | int index) | ||
| 368 | { | ||
| 369 | return of_io_request_and_map(dn, index, dn->full_name); | ||
| 370 | } | ||
| 371 | |||
| 372 | static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches, | ||
| 373 | int index, const void **ofdata) | ||
| 374 | { | ||
| 375 | struct device_node *dn; | ||
| 376 | const struct of_device_id *match; | ||
| 377 | |||
| 378 | dn = of_find_matching_node_and_match(NULL, matches, &match); | ||
| 379 | if (!dn) | ||
| 380 | return ERR_PTR(-EINVAL); | ||
| 381 | |||
| 382 | if (ofdata) | ||
| 383 | *ofdata = match->data; | ||
| 384 | |||
| 385 | return brcmstb_ioremap_node(dn, index); | ||
| 386 | } | ||
| 387 | |||
| 388 | static int brcmstb_pm_init(void) | ||
| 389 | { | ||
| 390 | struct device_node *dn; | ||
| 391 | void __iomem *base; | ||
| 392 | int i; | ||
| 393 | |||
| 394 | /* AON ctrl registers */ | ||
| 395 | base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL); | ||
| 396 | if (IS_ERR(base)) { | ||
| 397 | pr_err("error mapping AON_CTRL\n"); | ||
| 398 | goto aon_err; | ||
| 399 | } | ||
| 400 | ctrl.aon_ctrl_base = base; | ||
| 401 | |||
| 402 | /* AON SRAM registers */ | ||
| 403 | base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL); | ||
| 404 | if (IS_ERR(base)) { | ||
| 405 | pr_err("error mapping AON_SRAM\n"); | ||
| 406 | goto sram_err; | ||
| 407 | } | ||
| 408 | ctrl.aon_sram_base = base; | ||
| 409 | |||
| 410 | ctrl.num_memc = 0; | ||
| 411 | /* Map MEMC DDR PHY registers */ | ||
| 412 | for_each_matching_node(dn, ddr_phy_dt_ids) { | ||
| 413 | i = ctrl.num_memc; | ||
| 414 | if (i >= MAX_NUM_MEMC) { | ||
| 415 | pr_warn("Too many MEMCs (max %d)\n", MAX_NUM_MEMC); | ||
| 416 | break; | ||
| 417 | } | ||
| 418 | base = brcmstb_ioremap_node(dn, 0); | ||
| 419 | if (IS_ERR(base)) | ||
| 420 | goto ddr_err; | ||
| 421 | |||
| 422 | ctrl.memcs[i].ddr_phy_base = base; | ||
| 423 | ctrl.num_memc++; | ||
| 424 | } | ||
| 425 | |||
| 426 | /* MEMC ARB registers */ | ||
| 427 | base = brcmstb_ioremap_match(arb_dt_ids, 0, NULL); | ||
| 428 | if (IS_ERR(base)) { | ||
| 429 | pr_err("error mapping MEMC ARB\n"); | ||
| 430 | goto ddr_err; | ||
| 431 | } | ||
| 432 | ctrl.memcs[0].arb_base = base; | ||
| 433 | |||
| 434 | /* Timer registers */ | ||
| 435 | base = brcmstb_ioremap_match(timers_ids, 0, NULL); | ||
| 436 | if (IS_ERR(base)) { | ||
| 437 | pr_err("error mapping timers\n"); | ||
| 438 | goto tmr_err; | ||
| 439 | } | ||
| 440 | ctrl.timers_base = base; | ||
| 441 | |||
| 442 | /* S3 cold boot, aka S5 */ | ||
| 443 | pm_power_off = brcmstb_pm_s5; | ||
| 444 | |||
| 445 | suspend_set_ops(&brcmstb_pm_ops); | ||
| 446 | |||
| 447 | return 0; | ||
| 448 | |||
| 449 | tmr_err: | ||
| 450 | iounmap(ctrl.memcs[0].arb_base); | ||
| 451 | ddr_err: | ||
| 452 | for (i = 0; i < ctrl.num_memc; i++) | ||
| 453 | iounmap(ctrl.memcs[i].ddr_phy_base); | ||
| 454 | |||
| 455 | iounmap(ctrl.aon_sram_base); | ||
| 456 | sram_err: | ||
| 457 | iounmap(ctrl.aon_ctrl_base); | ||
| 458 | aon_err: | ||
| 459 | return PTR_ERR(base); | ||
| 460 | } | ||
| 461 | arch_initcall(brcmstb_pm_init); | ||
diff --git a/drivers/soc/bcm/brcmstb/pm/pm.h b/drivers/soc/bcm/brcmstb/pm/pm.h new file mode 100644 index 000000000000..b7d35ac70e60 --- /dev/null +++ b/drivers/soc/bcm/brcmstb/pm/pm.h | |||
| @@ -0,0 +1,89 @@ | |||
| 1 | /* | ||
| 2 | * Definitions for Broadcom STB power management / Always ON (AON) block | ||
| 3 | * | ||
| 4 | * Copyright © 2016-2017 Broadcom | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #ifndef __BRCMSTB_PM_H__ | ||
| 17 | #define __BRCMSTB_PM_H__ | ||
| 18 | |||
| 19 | #define AON_CTRL_RESET_CTRL 0x00 | ||
| 20 | #define AON_CTRL_PM_CTRL 0x04 | ||
| 21 | #define AON_CTRL_PM_STATUS 0x08 | ||
| 22 | #define AON_CTRL_PM_CPU_WAIT_COUNT 0x10 | ||
| 23 | #define AON_CTRL_PM_INITIATE 0x88 | ||
| 24 | #define AON_CTRL_HOST_MISC_CMDS 0x8c | ||
| 25 | #define AON_CTRL_SYSTEM_DATA_RAM_OFS 0x200 | ||
| 26 | |||
| 27 | /* MIPS PM constants */ | ||
| 28 | /* MEMC0 offsets */ | ||
| 29 | #define DDR40_PHY_CONTROL_REGS_0_PLL_STATUS 0x10 | ||
| 30 | #define DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL 0xa4 | ||
| 31 | |||
| 32 | /* TIMER offsets */ | ||
| 33 | #define TIMER_TIMER1_CTRL 0x0c | ||
| 34 | #define TIMER_TIMER1_STAT 0x1c | ||
| 35 | |||
| 36 | /* TIMER defines */ | ||
| 37 | #define RESET_TIMER 0x0 | ||
| 38 | #define START_TIMER 0xbfffffff | ||
| 39 | #define TIMER_MASK 0x3fffffff | ||
| 40 | |||
| 41 | /* PM_CTRL bitfield (Method #0) */ | ||
| 42 | #define PM_FAST_PWRDOWN (1 << 6) | ||
| 43 | #define PM_WARM_BOOT (1 << 5) | ||
| 44 | #define PM_DEEP_STANDBY (1 << 4) | ||
| 45 | #define PM_CPU_PWR (1 << 3) | ||
| 46 | #define PM_USE_CPU_RDY (1 << 2) | ||
| 47 | #define PM_PLL_PWRDOWN (1 << 1) | ||
| 48 | #define PM_PWR_DOWN (1 << 0) | ||
| 49 | |||
| 50 | /* PM_CTRL bitfield (Method #1) */ | ||
| 51 | #define PM_DPHY_STANDBY_CLEAR (1 << 20) | ||
| 52 | #define PM_MIN_S3_WIDTH_TIMER_BYPASS (1 << 7) | ||
| 53 | |||
| 54 | #define PM_S2_COMMAND (PM_PLL_PWRDOWN | PM_USE_CPU_RDY | PM_PWR_DOWN) | ||
| 55 | |||
| 56 | /* Method 0 bitmasks */ | ||
| 57 | #define PM_COLD_CONFIG (PM_PLL_PWRDOWN | PM_DEEP_STANDBY) | ||
| 58 | #define PM_WARM_CONFIG (PM_COLD_CONFIG | PM_USE_CPU_RDY | PM_WARM_BOOT) | ||
| 59 | |||
| 60 | /* Method 1 bitmask */ | ||
| 61 | #define M1_PM_WARM_CONFIG (PM_DPHY_STANDBY_CLEAR | \ | ||
| 62 | PM_MIN_S3_WIDTH_TIMER_BYPASS | \ | ||
| 63 | PM_WARM_BOOT | PM_DEEP_STANDBY | \ | ||
| 64 | PM_PLL_PWRDOWN | PM_PWR_DOWN) | ||
| 65 | |||
| 66 | #define M1_PM_COLD_CONFIG (PM_DPHY_STANDBY_CLEAR | \ | ||
| 67 | PM_MIN_S3_WIDTH_TIMER_BYPASS | \ | ||
| 68 | PM_DEEP_STANDBY | \ | ||
| 69 | PM_PLL_PWRDOWN | PM_PWR_DOWN) | ||
| 70 | |||
| 71 | #ifndef __ASSEMBLY__ | ||
| 72 | |||
| 73 | #ifndef CONFIG_MIPS | ||
| 74 | extern const unsigned long brcmstb_pm_do_s2_sz; | ||
| 75 | extern asmlinkage int brcmstb_pm_do_s2(void __iomem *aon_ctrl_base, | ||
| 76 | void __iomem *ddr_phy_pll_status); | ||
| 77 | #else | ||
| 78 | /* s2 asm */ | ||
| 79 | extern asmlinkage int brcm_pm_do_s2(u32 *s2_params); | ||
| 80 | |||
| 81 | /* s3 asm */ | ||
| 82 | extern asmlinkage int brcm_pm_do_s3(void __iomem *aon_ctrl_base, | ||
| 83 | int dcache_linesz); | ||
| 84 | extern int s3_reentry; | ||
| 85 | #endif /* CONFIG_MIPS */ | ||
| 86 | |||
| 87 | #endif | ||
| 88 | |||
| 89 | #endif /* __BRCMSTB_PM_H__ */ | ||
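It is sometimes handy to know the raw Method #0 command words these masks produce, since those are the values written to AON_CTRL_PM_CTRL in the S2/S3/S5 paths above; a small sanity sketch (hypothetical helper, derived purely from the bit definitions in this header):

	#include <linux/build_bug.h>

	static inline void brcmstb_pm_check_method0_words(void)	/* illustrative only */
	{
		BUILD_BUG_ON(PM_S2_COMMAND != 0x07);			/* S2 standby */
		BUILD_BUG_ON((PM_WARM_CONFIG | PM_PWR_DOWN) != 0x37);	/* S3 deep standby */
		BUILD_BUG_ON((PM_COLD_CONFIG | PM_PWR_DOWN) != 0x13);	/* S5 power-off */
	}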
diff --git a/drivers/soc/bcm/brcmstb/pm/s2-arm.S b/drivers/soc/bcm/brcmstb/pm/s2-arm.S new file mode 100644 index 000000000000..1d472d564638 --- /dev/null +++ b/drivers/soc/bcm/brcmstb/pm/s2-arm.S | |||
| @@ -0,0 +1,76 @@ | |||
| 1 | /* | ||
| 2 | * Copyright © 2014-2017 Broadcom | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/linkage.h> | ||
| 15 | #include <asm/assembler.h> | ||
| 16 | |||
| 17 | #include "pm.h" | ||
| 18 | |||
| 19 | .text | ||
| 20 | .align 3 | ||
| 21 | |||
| 22 | #define AON_CTRL_REG r10 | ||
| 23 | #define DDR_PHY_STATUS_REG r11 | ||
| 24 | |||
| 25 | /* | ||
| 26 | * r0: AON_CTRL base address | ||
| 27 | * r1: DDR PHY PLL status register address | ||
| 28 | */ | ||
| 29 | ENTRY(brcmstb_pm_do_s2) | ||
| 30 | stmfd sp!, {r4-r11, lr} | ||
| 31 | mov AON_CTRL_REG, r0 | ||
| 32 | mov DDR_PHY_STATUS_REG, r1 | ||
| 33 | |||
| 34 | /* Flush memory transactions */ | ||
| 35 | dsb | ||
| 36 | |||
| 37 | /* Cache DDR_PHY_STATUS_REG translation */ | ||
| 38 | ldr r0, [DDR_PHY_STATUS_REG] | ||
| 39 | |||
| 40 | /* Power-down request */ | ||
| 41 | ldr r0, =PM_S2_COMMAND | ||
| 42 | ldr r1, =0 | ||
| 43 | str r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL] | ||
| 44 | ldr r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL] | ||
| 45 | str r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL] | ||
| 46 | ldr r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL] | ||
| 47 | |||
| 48 | /* Wait for interrupt */ | ||
| 49 | wfi | ||
| 50 | nop | ||
| 51 | |||
| 52 | /* Bring MEMC back up */ | ||
| 53 | 1: ldr r0, [DDR_PHY_STATUS_REG] | ||
| 54 | ands r0, #1 | ||
| 55 | beq 1b | ||
| 56 | |||
| 57 | /* Power-up handshake */ | ||
| 58 | ldr r0, =1 | ||
| 59 | str r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS] | ||
| 60 | ldr r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS] | ||
| 61 | |||
| 62 | ldr r0, =0 | ||
| 63 | str r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL] | ||
| 64 | ldr r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL] | ||
| 65 | |||
| 66 | /* Return to caller */ | ||
| 67 | ldr r0, =0 | ||
| 68 | ldmfd sp!, {r4-r11, pc} | ||
| 69 | |||
| 70 | ENDPROC(brcmstb_pm_do_s2) | ||
| 71 | |||
| 72 | /* Place literal pool here */ | ||
| 73 | .ltorg | ||
| 74 | |||
| 75 | ENTRY(brcmstb_pm_do_s2_sz) | ||
| 76 | .word . - brcmstb_pm_do_s2 | ||
diff --git a/drivers/soc/bcm/brcmstb/pm/s2-mips.S b/drivers/soc/bcm/brcmstb/pm/s2-mips.S new file mode 100644 index 000000000000..27a14bc46043 --- /dev/null +++ b/drivers/soc/bcm/brcmstb/pm/s2-mips.S | |||
| @@ -0,0 +1,200 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2016 Broadcom Corporation | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <asm/asm.h> | ||
| 15 | #include <asm/regdef.h> | ||
| 16 | #include <asm/mipsregs.h> | ||
| 17 | #include <asm/stackframe.h> | ||
| 18 | |||
| 19 | #include "pm.h" | ||
| 20 | |||
| 21 | .text | ||
| 22 | .set noreorder | ||
| 23 | .align 5 | ||
| 24 | |||
| 25 | /* | ||
| 26 | * a0: u32 params array | ||
| 27 | */ | ||
| 28 | LEAF(brcm_pm_do_s2) | ||
| 29 | |||
| 30 | subu sp, 64 | ||
| 31 | sw ra, 0(sp) | ||
| 32 | sw s0, 4(sp) | ||
| 33 | sw s1, 8(sp) | ||
| 34 | sw s2, 12(sp) | ||
| 35 | sw s3, 16(sp) | ||
| 36 | sw s4, 20(sp) | ||
| 37 | sw s5, 24(sp) | ||
| 38 | sw s6, 28(sp) | ||
| 39 | sw s7, 32(sp) | ||
| 40 | |||
| 41 | /* | ||
| 42 | * Dereference the params array | ||
| 43 | * s0: AON_CTRL base register | ||
| 44 | * s1: DDR_PHY base register | ||
| 45 | * s2: TIMERS base register | ||
| 46 | * s3: I-Cache line size | ||
| 47 | * s4: Restart vector address | ||
| 48 | * s5: Restart vector size | ||
| 49 | */ | ||
| 50 | move t0, a0 | ||
| 51 | |||
| 52 | lw s0, 0(t0) | ||
| 53 | lw s1, 4(t0) | ||
| 54 | lw s2, 8(t0) | ||
| 55 | lw s3, 12(t0) | ||
| 56 | lw s4, 16(t0) | ||
| 57 | lw s5, 20(t0) | ||
| 58 | |||
| 59 | /* Lock this asm section into the I-cache */ | ||
| 60 | addiu t1, s3, -1 | ||
| 61 | not t1 | ||
| 62 | |||
| 63 | la t0, brcm_pm_do_s2 | ||
| 64 | and t0, t1 | ||
| 65 | |||
| 66 | la t2, asm_end | ||
| 67 | and t2, t1 | ||
| 68 | |||
| 69 | 1: cache 0x1c, 0(t0) | ||
| 70 | bne t0, t2, 1b | ||
| 71 | addu t0, s3 | ||
| 72 | |||
| 73 | /* Lock the interrupt vector into the I-cache */ | ||
| 74 | move t0, zero | ||
| 75 | |||
| 76 | 2: move t1, s4 | ||
| 77 | cache 0x1c, 0(t1) | ||
| 78 | addu t1, s3 | ||
| 79 | addu t0, s3 | ||
| 80 | ble t0, s5, 2b | ||
| 81 | nop | ||
| 82 | |||
| 83 | sync | ||
| 84 | |||
| 85 | /* Power down request */ | ||
| 86 | li t0, PM_S2_COMMAND | ||
| 87 | sw zero, AON_CTRL_PM_CTRL(s0) | ||
| 88 | lw zero, AON_CTRL_PM_CTRL(s0) | ||
| 89 | sw t0, AON_CTRL_PM_CTRL(s0) | ||
| 90 | lw t0, AON_CTRL_PM_CTRL(s0) | ||
| 91 | |||
| 92 | /* Enable CP0 interrupt 2 and wait for interrupt */ | ||
| 93 | mfc0 t0, CP0_STATUS | ||
| 94 | /* Save cp0 sr for restoring later */ | ||
| 95 | move s6, t0 | ||
| 96 | |||
| 97 | li t1, ~(ST0_IM | ST0_IE) | ||
| 98 | and t0, t1 | ||
| 99 | ori t0, STATUSF_IP2 | ||
| 100 | mtc0 t0, CP0_STATUS | ||
| 101 | nop | ||
| 102 | nop | ||
| 103 | nop | ||
| 104 | ori t0, ST0_IE | ||
| 105 | mtc0 t0, CP0_STATUS | ||
| 106 | |||
| 107 | /* Wait for interrupt */ | ||
| 108 | wait | ||
| 109 | nop | ||
| 110 | |||
| 111 | /* Wait for memc0 */ | ||
| 112 | 1: lw t0, DDR40_PHY_CONTROL_REGS_0_PLL_STATUS(s1) | ||
| 113 | andi t0, 1 | ||
| 114 | beqz t0, 1b | ||
| 115 | nop | ||
| 116 | |||
| 117 | /* 1ms delay needed for stable recovery */ | ||
| 118 | /* Use TIMER1 to count 1 ms */ | ||
| 119 | li t0, RESET_TIMER | ||
| 120 | sw t0, TIMER_TIMER1_CTRL(s2) | ||
| 121 | lw t0, TIMER_TIMER1_CTRL(s2) | ||
| 122 | |||
| 123 | li t0, START_TIMER | ||
| 124 | sw t0, TIMER_TIMER1_CTRL(s2) | ||
| 125 | lw t0, TIMER_TIMER1_CTRL(s2) | ||
| 126 | |||
| 127 | /* Prepare delay */ | ||
| 128 | li t0, TIMER_MASK | ||
| 129 | lw t1, TIMER_TIMER1_STAT(s2) | ||
| 130 | and t1, t0 | ||
| 131 | /* 1ms delay */ | ||
| 132 | addi t1, 27000 | ||
| 133 | |||
| 134 | /* Wait for the timer value to exceed t1 */ | ||
| 135 | 1: lw t0, TIMER_TIMER1_STAT(s2) | ||
| 136 | sgtu t2, t1, t0 | ||
| 137 | bnez t2, 1b | ||
| 138 | nop | ||
| 139 | |||
| 140 | /* Power back up */ | ||
| 141 | li t1, 1 | ||
| 142 | sw t1, AON_CTRL_HOST_MISC_CMDS(s0) | ||
| 143 | lw t1, AON_CTRL_HOST_MISC_CMDS(s0) | ||
| 144 | |||
| 145 | sw zero, AON_CTRL_PM_CTRL(s0) | ||
| 146 | lw zero, AON_CTRL_PM_CTRL(s0) | ||
| 147 | |||
| 148 | /* Unlock I-cache */ | ||
| 149 | addiu t1, s3, -1 | ||
| 150 | not t1 | ||
| 151 | |||
| 152 | la t0, brcm_pm_do_s2 | ||
| 153 | and t0, t1 | ||
| 154 | |||
| 155 | la t2, asm_end | ||
| 156 | and t2, t1 | ||
| 157 | |||
| 158 | 1: cache 0x00, 0(t0) | ||
| 159 | bne t0, t2, 1b | ||
| 160 | addu t0, s3 | ||
| 161 | |||
| 162 | /* Unlock interrupt vector */ | ||
| 163 | move t0, zero | ||
| 164 | |||
| 165 | 2: move t1, s4 | ||
| 166 | cache 0x00, 0(t1) | ||
| 167 | addu t1, s3 | ||
| 168 | addu t0, s3 | ||
| 169 | ble t0, s5, 2b | ||
| 170 | nop | ||
| 171 | |||
| 172 | /* Restore cp0 sr */ | ||
| 173 | sync | ||
| 174 | nop | ||
| 175 | mtc0 s6, CP0_STATUS | ||
| 176 | nop | ||
| 177 | |||
| 178 | /* Set return value to success */ | ||
| 179 | li v0, 0 | ||
| 180 | |||
| 181 | /* Return to caller */ | ||
| 182 | lw s7, 32(sp) | ||
| 183 | lw s6, 28(sp) | ||
| 184 | lw s5, 24(sp) | ||
| 185 | lw s4, 20(sp) | ||
| 186 | lw s3, 16(sp) | ||
| 187 | lw s2, 12(sp) | ||
| 188 | lw s1, 8(sp) | ||
| 189 | lw s0, 4(sp) | ||
| 190 | lw ra, 0(sp) | ||
| 191 | addiu sp, 64 | ||
| 192 | |||
| 193 | jr ra | ||
| 194 | nop | ||
| 195 | END(brcm_pm_do_s2) | ||
| 196 | |||
| 197 | .globl asm_end | ||
| 198 | asm_end: | ||
| 199 | nop | ||
| 200 | |||
diff --git a/drivers/soc/bcm/brcmstb/pm/s3-mips.S b/drivers/soc/bcm/brcmstb/pm/s3-mips.S new file mode 100644 index 000000000000..1242308a8868 --- /dev/null +++ b/drivers/soc/bcm/brcmstb/pm/s3-mips.S | |||
| @@ -0,0 +1,146 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2016 Broadcom Corporation | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <asm/asm.h> | ||
| 15 | #include <asm/regdef.h> | ||
| 16 | #include <asm/mipsregs.h> | ||
| 17 | #include <asm/bmips.h> | ||
| 18 | |||
| 19 | #include "pm.h" | ||
| 20 | |||
| 21 | .text | ||
| 22 | .set noreorder | ||
| 23 | .align 5 | ||
| 24 | .global s3_reentry | ||
| 25 | |||
| 26 | /* | ||
| 27 | * a0: AON_CTRL base register | ||
| 28 | * a1: D-Cache line size | ||
| 29 | */ | ||
| 30 | LEAF(brcm_pm_do_s3) | ||
| 31 | |||
| 32 | /* Get the address of s3_context */ | ||
| 33 | la t0, gp_regs | ||
| 34 | sw ra, 0(t0) | ||
| 35 | sw s0, 4(t0) | ||
| 36 | sw s1, 8(t0) | ||
| 37 | sw s2, 12(t0) | ||
| 38 | sw s3, 16(t0) | ||
| 39 | sw s4, 20(t0) | ||
| 40 | sw s5, 24(t0) | ||
| 41 | sw s6, 28(t0) | ||
| 42 | sw s7, 32(t0) | ||
| 43 | sw gp, 36(t0) | ||
| 44 | sw sp, 40(t0) | ||
| 45 | sw fp, 44(t0) | ||
| 46 | |||
| 47 | /* Save CP0 Status */ | ||
| 48 | mfc0 t1, CP0_STATUS | ||
| 49 | sw t1, 48(t0) | ||
| 50 | |||
| 51 | /* Write-back gp registers - cache will be gone */ | ||
| 52 | addiu t1, a1, -1 | ||
| 53 | not t1 | ||
| 54 | and t0, t1 | ||
| 55 | |||
| 56 | /* Flush at least 64 bytes */ | ||
| 57 | addiu t2, t0, 64 | ||
| 58 | and t2, t1 | ||
| 59 | |||
| 60 | 1: cache 0x17, 0(t0) | ||
| 61 | bne t0, t2, 1b | ||
| 62 | addu t0, a1 | ||
| 63 | |||
| 64 | /* Drop to deep standby */ | ||
| 65 | li t1, PM_WARM_CONFIG | ||
| 66 | sw zero, AON_CTRL_PM_CTRL(a0) | ||
| 67 | lw zero, AON_CTRL_PM_CTRL(a0) | ||
| 68 | sw t1, AON_CTRL_PM_CTRL(a0) | ||
| 69 | lw t1, AON_CTRL_PM_CTRL(a0) | ||
| 70 | |||
| 71 | li t1, (PM_WARM_CONFIG | PM_PWR_DOWN) | ||
| 72 | sw t1, AON_CTRL_PM_CTRL(a0) | ||
| 73 | lw t1, AON_CTRL_PM_CTRL(a0) | ||
| 74 | |||
| 75 | /* Enable CP0 interrupt 2 and wait for interrupt */ | ||
| 76 | mfc0 t0, CP0_STATUS | ||
| 77 | |||
| 78 | li t1, ~(ST0_IM | ST0_IE) | ||
| 79 | and t0, t1 | ||
| 80 | ori t0, STATUSF_IP2 | ||
| 81 | mtc0 t0, CP0_STATUS | ||
| 82 | nop | ||
| 83 | nop | ||
| 84 | nop | ||
| 85 | ori t0, ST0_IE | ||
| 86 | mtc0 t0, CP0_STATUS | ||
| 87 | |||
| 88 | /* Wait for interrupt */ | ||
| 89 | wait | ||
| 90 | nop | ||
| 91 | |||
| 92 | s3_reentry: | ||
| 93 | |||
| 94 | /* Clear call/return stack */ | ||
| 95 | li t0, (0x06 << 16) | ||
| 96 | mtc0 t0, $22, 2 | ||
| 97 | ssnop | ||
| 98 | ssnop | ||
| 99 | ssnop | ||
| 100 | |||
| 101 | /* Clear jump target buffer */ | ||
| 102 | li t0, (0x04 << 16) | ||
| 103 | mtc0 t0, $22, 2 | ||
| 104 | ssnop | ||
| 105 | ssnop | ||
| 106 | ssnop | ||
| 107 | |||
| 108 | sync | ||
| 109 | nop | ||
| 110 | |||
| 111 | /* Set up MMU defaults */ | ||
| 112 | mtc0 zero, CP0_WIRED | ||
| 113 | mtc0 zero, CP0_ENTRYHI | ||
| 114 | li k0, PM_DEFAULT_MASK | ||
| 115 | mtc0 k0, CP0_PAGEMASK | ||
| 116 | |||
| 117 | li sp, BMIPS_WARM_RESTART_VEC | ||
| 118 | la k0, plat_wired_tlb_setup | ||
| 119 | jalr k0 | ||
| 120 | nop | ||
| 121 | |||
| 122 | /* Restore general purpose registers */ | ||
| 123 | la t0, gp_regs | ||
| 124 | lw fp, 44(t0) | ||
| 125 | lw sp, 40(t0) | ||
| 126 | lw gp, 36(t0) | ||
| 127 | lw s7, 32(t0) | ||
| 128 | lw s6, 28(t0) | ||
| 129 | lw s5, 24(t0) | ||
| 130 | lw s4, 20(t0) | ||
| 131 | lw s3, 16(t0) | ||
| 132 | lw s2, 12(t0) | ||
| 133 | lw s1, 8(t0) | ||
| 134 | lw s0, 4(t0) | ||
| 135 | lw ra, 0(t0) | ||
| 136 | |||
| 137 | /* Restore CP0 status */ | ||
| 138 | lw t1, 48(t0) | ||
| 139 | mtc0 t1, CP0_STATUS | ||
| 140 | |||
| 141 | /* Return to caller */ | ||
| 142 | li v0, 0 | ||
| 143 | jr ra | ||
| 144 | nop | ||
| 145 | |||
| 146 | END(brcm_pm_do_s3) | ||
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index 6af7a11f09a5..d89a6a80c8ef 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c | |||
| @@ -213,6 +213,7 @@ static const struct of_device_id fsl_guts_of_match[] = { | |||
| 213 | { .compatible = "fsl,ls1021a-dcfg", }, | 213 | { .compatible = "fsl,ls1021a-dcfg", }, |
| 214 | { .compatible = "fsl,ls1043a-dcfg", }, | 214 | { .compatible = "fsl,ls1043a-dcfg", }, |
| 215 | { .compatible = "fsl,ls2080a-dcfg", }, | 215 | { .compatible = "fsl,ls2080a-dcfg", }, |
| 216 | { .compatible = "fsl,ls1088a-dcfg", }, | ||
| 216 | {} | 217 | {} |
| 217 | }; | 218 | }; |
| 218 | MODULE_DEVICE_TABLE(of, fsl_guts_of_match); | 219 | MODULE_DEVICE_TABLE(of, fsl_guts_of_match); |
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig index 757033c0586c..fb4e6bf0a0c4 100644 --- a/drivers/soc/fsl/qbman/Kconfig +++ b/drivers/soc/fsl/qbman/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | menuconfig FSL_DPAA | 1 | menuconfig FSL_DPAA |
| 2 | bool "Freescale DPAA 1.x support" | 2 | bool "Freescale DPAA 1.x support" |
| 3 | depends on FSL_SOC_BOOKE | 3 | depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE) |
| 4 | select GENERIC_ALLOCATOR | 4 | select GENERIC_ALLOCATOR |
| 5 | help | 5 | help |
| 6 | The Freescale Data Path Acceleration Architecture (DPAA) is a set of | 6 | The Freescale Data Path Acceleration Architecture (DPAA) is a set of |
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile index 363982b83ab5..811312ad526f 100644 --- a/drivers/soc/fsl/qbman/Makefile +++ b/drivers/soc/fsl/qbman/Makefile | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
| 2 | obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \ | 2 | obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \ |
| 3 | bman_portal.o qman_portal.o \ | 3 | bman_portal.o qman_portal.o \ |
| 4 | bman.o qman.o | 4 | bman.o qman.o dpaa_sys.o |
| 5 | 5 | ||
| 6 | obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o | 6 | obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o |
| 7 | bman-test-y = bman_test.o | 7 | bman-test-y = bman_test.o |
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c index a3d6d7cfa929..f9485cedc648 100644 --- a/drivers/soc/fsl/qbman/bman.c +++ b/drivers/soc/fsl/qbman/bman.c | |||
| @@ -35,6 +35,27 @@ | |||
| 35 | 35 | ||
| 36 | /* Portal register assists */ | 36 | /* Portal register assists */ |
| 37 | 37 | ||
| 38 | #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) | ||
| 39 | /* Cache-inhibited register offsets */ | ||
| 40 | #define BM_REG_RCR_PI_CINH 0x3000 | ||
| 41 | #define BM_REG_RCR_CI_CINH 0x3100 | ||
| 42 | #define BM_REG_RCR_ITR 0x3200 | ||
| 43 | #define BM_REG_CFG 0x3300 | ||
| 44 | #define BM_REG_SCN(n) (0x3400 + ((n) << 6)) | ||
| 45 | #define BM_REG_ISR 0x3e00 | ||
| 46 | #define BM_REG_IER 0x3e40 | ||
| 47 | #define BM_REG_ISDR 0x3e80 | ||
| 48 | #define BM_REG_IIR 0x3ec0 | ||
| 49 | |||
| 50 | /* Cache-enabled register offsets */ | ||
| 51 | #define BM_CL_CR 0x0000 | ||
| 52 | #define BM_CL_RR0 0x0100 | ||
| 53 | #define BM_CL_RR1 0x0140 | ||
| 54 | #define BM_CL_RCR 0x1000 | ||
| 55 | #define BM_CL_RCR_PI_CENA 0x3000 | ||
| 56 | #define BM_CL_RCR_CI_CENA 0x3100 | ||
| 57 | |||
| 58 | #else | ||
| 38 | /* Cache-inhibited register offsets */ | 59 | /* Cache-inhibited register offsets */ |
| 39 | #define BM_REG_RCR_PI_CINH 0x0000 | 60 | #define BM_REG_RCR_PI_CINH 0x0000 |
| 40 | #define BM_REG_RCR_CI_CINH 0x0004 | 61 | #define BM_REG_RCR_CI_CINH 0x0004 |
| @@ -53,6 +74,7 @@ | |||
| 53 | #define BM_CL_RCR 0x1000 | 74 | #define BM_CL_RCR 0x1000 |
| 54 | #define BM_CL_RCR_PI_CENA 0x3000 | 75 | #define BM_CL_RCR_PI_CENA 0x3000 |
| 55 | #define BM_CL_RCR_CI_CENA 0x3100 | 76 | #define BM_CL_RCR_CI_CENA 0x3100 |
| 77 | #endif | ||
| 56 | 78 | ||
| 57 | /* | 79 | /* |
| 58 | * Portal modes. | 80 | * Portal modes. |
| @@ -154,7 +176,8 @@ struct bm_mc { | |||
| 154 | }; | 176 | }; |
| 155 | 177 | ||
| 156 | struct bm_addr { | 178 | struct bm_addr { |
| 157 | void __iomem *ce; /* cache-enabled */ | 179 | void *ce; /* cache-enabled */ |
| 180 | __be32 *ce_be; /* Same as above but for direct access */ | ||
| 158 | void __iomem *ci; /* cache-inhibited */ | 181 | void __iomem *ci; /* cache-inhibited */ |
| 159 | }; | 182 | }; |
| 160 | 183 | ||
| @@ -167,12 +190,12 @@ struct bm_portal { | |||
| 167 | /* Cache-inhibited register access. */ | 190 | /* Cache-inhibited register access. */ |
| 168 | static inline u32 bm_in(struct bm_portal *p, u32 offset) | 191 | static inline u32 bm_in(struct bm_portal *p, u32 offset) |
| 169 | { | 192 | { |
| 170 | return be32_to_cpu(__raw_readl(p->addr.ci + offset)); | 193 | return ioread32be(p->addr.ci + offset); |
| 171 | } | 194 | } |
| 172 | 195 | ||
| 173 | static inline void bm_out(struct bm_portal *p, u32 offset, u32 val) | 196 | static inline void bm_out(struct bm_portal *p, u32 offset, u32 val) |
| 174 | { | 197 | { |
| 175 | __raw_writel(cpu_to_be32(val), p->addr.ci + offset); | 198 | iowrite32be(val, p->addr.ci + offset); |
| 176 | } | 199 | } |
| 177 | 200 | ||
| 178 | /* Cache Enabled Portal Access */ | 201 | /* Cache Enabled Portal Access */ |
| @@ -188,7 +211,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset) | |||
| 188 | 211 | ||
| 189 | static inline u32 bm_ce_in(struct bm_portal *p, u32 offset) | 212 | static inline u32 bm_ce_in(struct bm_portal *p, u32 offset) |
| 190 | { | 213 | { |
| 191 | return be32_to_cpu(__raw_readl(p->addr.ce + offset)); | 214 | return be32_to_cpu(*(p->addr.ce_be + (offset/4))); |
| 192 | } | 215 | } |
| 193 | 216 | ||
| 194 | struct bman_portal { | 217 | struct bman_portal { |
| @@ -408,7 +431,7 @@ static int bm_mc_init(struct bm_portal *portal) | |||
| 408 | 431 | ||
| 409 | mc->cr = portal->addr.ce + BM_CL_CR; | 432 | mc->cr = portal->addr.ce + BM_CL_CR; |
| 410 | mc->rr = portal->addr.ce + BM_CL_RR0; | 433 | mc->rr = portal->addr.ce + BM_CL_RR0; |
| 411 | mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ? | 434 | mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ? |
| 412 | 0 : 1; | 435 | 0 : 1; |
| 413 | mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0; | 436 | mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0; |
| 414 | #ifdef CONFIG_FSL_DPAA_CHECKING | 437 | #ifdef CONFIG_FSL_DPAA_CHECKING |
| @@ -466,7 +489,7 @@ static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal) | |||
| 466 | * its command is submitted and completed. This includes the valid-bit, | 489 | * its command is submitted and completed. This includes the valid-bit, |
| 467 | * in case you were wondering... | 490 | * in case you were wondering... |
| 468 | */ | 491 | */ |
| 469 | if (!__raw_readb(&rr->verb)) { | 492 | if (!rr->verb) { |
| 470 | dpaa_invalidate_touch_ro(rr); | 493 | dpaa_invalidate_touch_ro(rr); |
| 471 | return NULL; | 494 | return NULL; |
| 472 | } | 495 | } |
| @@ -512,8 +535,9 @@ static int bman_create_portal(struct bman_portal *portal, | |||
| 512 | * config, everything that follows depends on it and "config" is more | 535 | * config, everything that follows depends on it and "config" is more |
| 513 | * for (de)reference... | 536 | * for (de)reference... |
| 514 | */ | 537 | */ |
| 515 | p->addr.ce = c->addr_virt[DPAA_PORTAL_CE]; | 538 | p->addr.ce = c->addr_virt_ce; |
| 516 | p->addr.ci = c->addr_virt[DPAA_PORTAL_CI]; | 539 | p->addr.ce_be = c->addr_virt_ce; |
| 540 | p->addr.ci = c->addr_virt_ci; | ||
| 517 | if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) { | 541 | if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) { |
| 518 | dev_err(c->dev, "RCR initialisation failed\n"); | 542 | dev_err(c->dev, "RCR initialisation failed\n"); |
| 519 | goto fail_rcr; | 543 | goto fail_rcr; |
| @@ -607,7 +631,7 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits) | |||
| 607 | unsigned long irqflags; | 631 | unsigned long irqflags; |
| 608 | 632 | ||
| 609 | local_irq_save(irqflags); | 633 | local_irq_save(irqflags); |
| 610 | set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources); | 634 | p->irq_sources |= bits & BM_PIRQ_VISIBLE; |
| 611 | bm_out(&p->p, BM_REG_IER, p->irq_sources); | 635 | bm_out(&p->p, BM_REG_IER, p->irq_sources); |
| 612 | local_irq_restore(irqflags); | 636 | local_irq_restore(irqflags); |
| 613 | return 0; | 637 | return 0; |
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c index eaa9585c7347..05c42235dd41 100644 --- a/drivers/soc/fsl/qbman/bman_ccsr.c +++ b/drivers/soc/fsl/qbman/bman_ccsr.c | |||
| @@ -201,6 +201,21 @@ static int fsl_bman_probe(struct platform_device *pdev) | |||
| 201 | return -ENODEV; | 201 | return -ENODEV; |
| 202 | } | 202 | } |
| 203 | 203 | ||
| 204 | /* | ||
| 205 | * If FBPR memory wasn't defined using the qbman compatible string | ||
| 206 | * try using the of_reserved_mem_device method | ||
| 207 | */ | ||
| 208 | if (!fbpr_a) { | ||
| 209 | ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz); | ||
| 210 | if (ret) { | ||
| 211 | dev_err(dev, "qbman_init_private_mem() failed 0x%x\n", | ||
| 212 | ret); | ||
| 213 | return -ENODEV; | ||
| 214 | } | ||
| 215 | } | ||
| 216 | |||
| 217 | dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz); | ||
| 218 | |||
| 204 | bm_set_memory(fbpr_a, fbpr_sz); | 219 | bm_set_memory(fbpr_a, fbpr_sz); |
| 205 | 220 | ||
| 206 | err_irq = platform_get_irq(pdev, 0); | 221 | err_irq = platform_get_irq(pdev, 0); |
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c index 39b39c8f1399..2f71f7df3465 100644 --- a/drivers/soc/fsl/qbman/bman_portal.c +++ b/drivers/soc/fsl/qbman/bman_portal.c | |||
| @@ -91,7 +91,6 @@ static int bman_portal_probe(struct platform_device *pdev) | |||
| 91 | struct device_node *node = dev->of_node; | 91 | struct device_node *node = dev->of_node; |
| 92 | struct bm_portal_config *pcfg; | 92 | struct bm_portal_config *pcfg; |
| 93 | struct resource *addr_phys[2]; | 93 | struct resource *addr_phys[2]; |
| 94 | void __iomem *va; | ||
| 95 | int irq, cpu; | 94 | int irq, cpu; |
| 96 | 95 | ||
| 97 | pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); | 96 | pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); |
| @@ -123,23 +122,21 @@ static int bman_portal_probe(struct platform_device *pdev) | |||
| 123 | } | 122 | } |
| 124 | pcfg->irq = irq; | 123 | pcfg->irq = irq; |
| 125 | 124 | ||
| 126 | va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); | 125 | pcfg->addr_virt_ce = memremap(addr_phys[0]->start, |
| 127 | if (!va) { | 126 | resource_size(addr_phys[0]), |
| 128 | dev_err(dev, "ioremap::CE failed\n"); | 127 | QBMAN_MEMREMAP_ATTR); |
| 128 | if (!pcfg->addr_virt_ce) { | ||
| 129 | dev_err(dev, "memremap::CE failed\n"); | ||
| 129 | goto err_ioremap1; | 130 | goto err_ioremap1; |
| 130 | } | 131 | } |
| 131 | 132 | ||
| 132 | pcfg->addr_virt[DPAA_PORTAL_CE] = va; | 133 | pcfg->addr_virt_ci = ioremap(addr_phys[1]->start, |
| 133 | 134 | resource_size(addr_phys[1])); | |
| 134 | va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), | 135 | if (!pcfg->addr_virt_ci) { |
| 135 | _PAGE_GUARDED | _PAGE_NO_CACHE); | ||
| 136 | if (!va) { | ||
| 137 | dev_err(dev, "ioremap::CI failed\n"); | 136 | dev_err(dev, "ioremap::CI failed\n"); |
| 138 | goto err_ioremap2; | 137 | goto err_ioremap2; |
| 139 | } | 138 | } |
| 140 | 139 | ||
| 141 | pcfg->addr_virt[DPAA_PORTAL_CI] = va; | ||
| 142 | |||
| 143 | spin_lock(&bman_lock); | 140 | spin_lock(&bman_lock); |
| 144 | cpu = cpumask_next_zero(-1, &portal_cpus); | 141 | cpu = cpumask_next_zero(-1, &portal_cpus); |
| 145 | if (cpu >= nr_cpu_ids) { | 142 | if (cpu >= nr_cpu_ids) { |
| @@ -164,9 +161,9 @@ static int bman_portal_probe(struct platform_device *pdev) | |||
| 164 | return 0; | 161 | return 0; |
| 165 | 162 | ||
| 166 | err_portal_init: | 163 | err_portal_init: |
| 167 | iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]); | 164 | iounmap(pcfg->addr_virt_ci); |
| 168 | err_ioremap2: | 165 | err_ioremap2: |
| 169 | iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); | 166 | memunmap(pcfg->addr_virt_ce); |
| 170 | err_ioremap1: | 167 | err_ioremap1: |
| 171 | return -ENXIO; | 168 | return -ENXIO; |
| 172 | } | 169 | } |
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h index f6896a2f6d90..751ce90383b7 100644 --- a/drivers/soc/fsl/qbman/bman_priv.h +++ b/drivers/soc/fsl/qbman/bman_priv.h | |||
| @@ -46,11 +46,9 @@ extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */ | |||
| 46 | extern struct gen_pool *bm_bpalloc; | 46 | extern struct gen_pool *bm_bpalloc; |
| 47 | 47 | ||
| 48 | struct bm_portal_config { | 48 | struct bm_portal_config { |
| 49 | /* | 49 | /* Portal addresses */ |
| 50 | * Corenet portal addresses; | 50 | void *addr_virt_ce; |
| 51 | * [0]==cache-enabled, [1]==cache-inhibited. | 51 | void __iomem *addr_virt_ci; |
| 52 | */ | ||
| 53 | void __iomem *addr_virt[2]; | ||
| 54 | /* Allow these to be joined in lists */ | 52 | /* Allow these to be joined in lists */ |
| 55 | struct list_head list; | 53 | struct list_head list; |
| 56 | struct device *dev; | 54 | struct device *dev; |
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c new file mode 100644 index 000000000000..9436aa83ff1b --- /dev/null +++ b/drivers/soc/fsl/qbman/dpaa_sys.c | |||
| @@ -0,0 +1,78 @@ | |||
| 1 | /* Copyright 2017 NXP Semiconductor, Inc. | ||
| 2 | * | ||
| 3 | * Redistribution and use in source and binary forms, with or without | ||
| 4 | * modification, are permitted provided that the following conditions are met: | ||
| 5 | * * Redistributions of source code must retain the above copyright | ||
| 6 | * notice, this list of conditions and the following disclaimer. | ||
| 7 | * * Redistributions in binary form must reproduce the above copyright | ||
| 8 | * notice, this list of conditions and the following disclaimer in the | ||
| 9 | * documentation and/or other materials provided with the distribution. | ||
| 10 | * * Neither the name of NXP Semiconductor nor the | ||
| 11 | * names of its contributors may be used to endorse or promote products | ||
| 12 | * derived from this software without specific prior written permission. | ||
| 13 | * | ||
| 14 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
| 15 | * GNU General Public License ("GPL") as published by the Free Software | ||
| 16 | * Foundation, either version 2 of that License or (at your option) any | ||
| 17 | * later version. | ||
| 18 | * | ||
| 19 | * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY | ||
| 20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
| 21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
| 22 | * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY | ||
| 23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
| 24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
| 26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
| 28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 29 | */ | ||
| 30 | |||
| 31 | #include <linux/dma-mapping.h> | ||
| 32 | #include "dpaa_sys.h" | ||
| 33 | |||
| 34 | /* | ||
| 35 | * Initialize a device's private memory region | ||
| 36 | */ | ||
| 37 | int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr, | ||
| 38 | size_t *size) | ||
| 39 | { | ||
| 40 | int ret; | ||
| 41 | struct device_node *mem_node; | ||
| 42 | u64 size64; | ||
| 43 | |||
| 44 | ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx); | ||
| 45 | if (ret) { | ||
| 46 | dev_err(dev, | ||
| 47 | "of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n", | ||
| 48 | idx, ret); | ||
| 49 | return -ENODEV; | ||
| 50 | } | ||
| 51 | mem_node = of_parse_phandle(dev->of_node, "memory-region", 0); | ||
| 52 | if (mem_node) { | ||
| 53 | ret = of_property_read_u64(mem_node, "size", &size64); | ||
| 54 | if (ret) { | ||
| 55 | dev_err(dev, "of_address_to_resource fails 0x%x\n", | ||
| 56 | ret); | ||
| 57 | return -ENODEV; | ||
| 58 | } | ||
| 59 | *size = size64; | ||
| 60 | } else { | ||
| 61 | dev_err(dev, "No memory-region found for index %d\n", idx); | ||
| 62 | return -ENODEV; | ||
| 63 | } | ||
| 64 | |||
| 65 | if (!dma_zalloc_coherent(dev, *size, addr, 0)) { | ||
| 66 | dev_err(dev, "DMA Alloc memory failed\n"); | ||
| 67 | return -ENODEV; | ||
| 68 | } | ||
| 69 | |||
| 70 | /* | ||
| 71 | * Disassociate the reserved memory area from the device | ||
| 72 | * because a device can only have one DMA memory area. This | ||
| 73 | * should be fine since the memory is allocated and initialized | ||
| 74 | * and only ever accessed by the QBMan device from now on | ||
| 75 | */ | ||
| 76 | of_reserved_mem_device_release(dev); | ||
| 77 | return 0; | ||
| 78 | } | ||
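qbman_init_private_mem() is the fallback used by the BMan/QMan CCSR probe code when the legacy fsl,bman-fbpr / fsl,qman-fqd / fsl,qman-pfdr reserved-memory compatibles are not present; a condensed sketch of that usage, modelled on the bman_ccsr.c hunk earlier in this series (error handling trimmed):

	/* Sketch only: fall back to the memory-region phandle at index 0. */
	if (!fbpr_a) {
		ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
		if (ret)
			return -ENODEV;
	}
	bm_set_memory(fbpr_a, fbpr_sz);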
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h index 2ce394aa4c95..9f379000da85 100644 --- a/drivers/soc/fsl/qbman/dpaa_sys.h +++ b/drivers/soc/fsl/qbman/dpaa_sys.h | |||
| @@ -44,23 +44,21 @@ | |||
| 44 | #include <linux/prefetch.h> | 44 | #include <linux/prefetch.h> |
| 45 | #include <linux/genalloc.h> | 45 | #include <linux/genalloc.h> |
| 46 | #include <asm/cacheflush.h> | 46 | #include <asm/cacheflush.h> |
| 47 | #include <linux/io.h> | ||
| 48 | #include <linux/delay.h> | ||
| 47 | 49 | ||
| 48 | /* For 2-element tables related to cache-inhibited and cache-enabled mappings */ | 50 | /* For 2-element tables related to cache-inhibited and cache-enabled mappings */ |
| 49 | #define DPAA_PORTAL_CE 0 | 51 | #define DPAA_PORTAL_CE 0 |
| 50 | #define DPAA_PORTAL_CI 1 | 52 | #define DPAA_PORTAL_CI 1 |
| 51 | 53 | ||
| 52 | #if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64) | ||
| 53 | #error "Unsupported Cacheline Size" | ||
| 54 | #endif | ||
| 55 | |||
| 56 | static inline void dpaa_flush(void *p) | 54 | static inline void dpaa_flush(void *p) |
| 57 | { | 55 | { |
| 56 | /* | ||
| 57 | * Only PPC needs to flush the cache currently - on ARM the mapping | ||
| 58 | * is non-cacheable | ||
| 59 | */ | ||
| 58 | #ifdef CONFIG_PPC | 60 | #ifdef CONFIG_PPC |
| 59 | flush_dcache_range((unsigned long)p, (unsigned long)p+64); | 61 | flush_dcache_range((unsigned long)p, (unsigned long)p+64); |
| 60 | #elif defined(CONFIG_ARM32) | ||
| 61 | __cpuc_flush_dcache_area(p, 64); | ||
| 62 | #elif defined(CONFIG_ARM64) | ||
| 63 | __flush_dcache_area(p, 64); | ||
| 64 | #endif | 62 | #endif |
| 65 | } | 63 | } |
| 66 | 64 | ||
| @@ -102,4 +100,15 @@ static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last) | |||
| 102 | /* Offset applied to genalloc pools due to zero being an error return */ | 100 | /* Offset applied to genalloc pools due to zero being an error return */ |
| 103 | #define DPAA_GENALLOC_OFF 0x80000000 | 101 | #define DPAA_GENALLOC_OFF 0x80000000 |
| 104 | 102 | ||
| 103 | /* Initialize the device's private memory region */ | ||
| 104 | int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr, | ||
| 105 | size_t *size); | ||
| 106 | |||
| 107 | /* memremap() attributes for different platforms */ | ||
| 108 | #ifdef CONFIG_PPC | ||
| 109 | #define QBMAN_MEMREMAP_ATTR MEMREMAP_WB | ||
| 110 | #else | ||
| 111 | #define QBMAN_MEMREMAP_ATTR MEMREMAP_WC | ||
| 112 | #endif | ||
| 113 | |||
| 105 | #endif /* __DPAA_SYS_H */ | 114 | #endif /* __DPAA_SYS_H */ |
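QBMAN_MEMREMAP_ATTR captures the portability point of this series: the cache-enabled portal area keeps a write-back (cacheable) mapping on PPC but becomes write-combining on ARM/ARM64, which is also why dpaa_flush() above only needs to act on PPC. A condensed sketch of how the portal probe consumes it, matching the bman_portal.c change earlier in this diff:

	/* Cache-enabled area: normal-memory semantics, attribute chosen per platform. */
	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
				      resource_size(addr_phys[0]),
				      QBMAN_MEMREMAP_ATTR);

	/* Cache-inhibited area: always an uncached ioremap(). */
	pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
				     resource_size(addr_phys[1]));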
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 18eefc3f1abe..e4f5bb056fd2 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c | |||
| @@ -41,6 +41,43 @@ | |||
| 41 | 41 | ||
| 42 | /* Portal register assists */ | 42 | /* Portal register assists */ |
| 43 | 43 | ||
| 44 | #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) | ||
| 45 | /* Cache-inhibited register offsets */ | ||
| 46 | #define QM_REG_EQCR_PI_CINH 0x3000 | ||
| 47 | #define QM_REG_EQCR_CI_CINH 0x3040 | ||
| 48 | #define QM_REG_EQCR_ITR 0x3080 | ||
| 49 | #define QM_REG_DQRR_PI_CINH 0x3100 | ||
| 50 | #define QM_REG_DQRR_CI_CINH 0x3140 | ||
| 51 | #define QM_REG_DQRR_ITR 0x3180 | ||
| 52 | #define QM_REG_DQRR_DCAP 0x31C0 | ||
| 53 | #define QM_REG_DQRR_SDQCR 0x3200 | ||
| 54 | #define QM_REG_DQRR_VDQCR 0x3240 | ||
| 55 | #define QM_REG_DQRR_PDQCR 0x3280 | ||
| 56 | #define QM_REG_MR_PI_CINH 0x3300 | ||
| 57 | #define QM_REG_MR_CI_CINH 0x3340 | ||
| 58 | #define QM_REG_MR_ITR 0x3380 | ||
| 59 | #define QM_REG_CFG 0x3500 | ||
| 60 | #define QM_REG_ISR 0x3600 | ||
| 61 | #define QM_REG_IER 0x3640 | ||
| 62 | #define QM_REG_ISDR 0x3680 | ||
| 63 | #define QM_REG_IIR 0x36C0 | ||
| 64 | #define QM_REG_ITPR 0x3740 | ||
| 65 | |||
| 66 | /* Cache-enabled register offsets */ | ||
| 67 | #define QM_CL_EQCR 0x0000 | ||
| 68 | #define QM_CL_DQRR 0x1000 | ||
| 69 | #define QM_CL_MR 0x2000 | ||
| 70 | #define QM_CL_EQCR_PI_CENA 0x3000 | ||
| 71 | #define QM_CL_EQCR_CI_CENA 0x3040 | ||
| 72 | #define QM_CL_DQRR_PI_CENA 0x3100 | ||
| 73 | #define QM_CL_DQRR_CI_CENA 0x3140 | ||
| 74 | #define QM_CL_MR_PI_CENA 0x3300 | ||
| 75 | #define QM_CL_MR_CI_CENA 0x3340 | ||
| 76 | #define QM_CL_CR 0x3800 | ||
| 77 | #define QM_CL_RR0 0x3900 | ||
| 78 | #define QM_CL_RR1 0x3940 | ||
| 79 | |||
| 80 | #else | ||
| 44 | /* Cache-inhibited register offsets */ | 81 | /* Cache-inhibited register offsets */ |
| 45 | #define QM_REG_EQCR_PI_CINH 0x0000 | 82 | #define QM_REG_EQCR_PI_CINH 0x0000 |
| 46 | #define QM_REG_EQCR_CI_CINH 0x0004 | 83 | #define QM_REG_EQCR_CI_CINH 0x0004 |
| @@ -75,6 +112,7 @@ | |||
| 75 | #define QM_CL_CR 0x3800 | 112 | #define QM_CL_CR 0x3800 |
| 76 | #define QM_CL_RR0 0x3900 | 113 | #define QM_CL_RR0 0x3900 |
| 77 | #define QM_CL_RR1 0x3940 | 114 | #define QM_CL_RR1 0x3940 |
| 115 | #endif | ||
| 78 | 116 | ||
| 79 | /* | 117 | /* |
| 80 | * BTW, the drivers (and h/w programming model) already obtain the required | 118 | * BTW, the drivers (and h/w programming model) already obtain the required |
| @@ -300,7 +338,8 @@ struct qm_mc { | |||
| 300 | }; | 338 | }; |
| 301 | 339 | ||
| 302 | struct qm_addr { | 340 | struct qm_addr { |
| 303 | void __iomem *ce; /* cache-enabled */ | 341 | void *ce; /* cache-enabled */ |
| 342 | __be32 *ce_be; /* same value as above but for direct access */ | ||
| 304 | void __iomem *ci; /* cache-inhibited */ | 343 | void __iomem *ci; /* cache-inhibited */ |
| 305 | }; | 344 | }; |
| 306 | 345 | ||
| @@ -321,12 +360,12 @@ struct qm_portal { | |||
| 321 | /* Cache-inhibited register access. */ | 360 | /* Cache-inhibited register access. */ |
| 322 | static inline u32 qm_in(struct qm_portal *p, u32 offset) | 361 | static inline u32 qm_in(struct qm_portal *p, u32 offset) |
| 323 | { | 362 | { |
| 324 | return be32_to_cpu(__raw_readl(p->addr.ci + offset)); | 363 | return ioread32be(p->addr.ci + offset); |
| 325 | } | 364 | } |
| 326 | 365 | ||
| 327 | static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) | 366 | static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) |
| 328 | { | 367 | { |
| 329 | __raw_writel(cpu_to_be32(val), p->addr.ci + offset); | 368 | iowrite32be(val, p->addr.ci + offset); |
| 330 | } | 369 | } |
| 331 | 370 | ||
| 332 | /* Cache Enabled Portal Access */ | 371 | /* Cache Enabled Portal Access */ |
| @@ -342,7 +381,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) | |||
| 342 | 381 | ||
| 343 | static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) | 382 | static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) |
| 344 | { | 383 | { |
| 345 | return be32_to_cpu(__raw_readl(p->addr.ce + offset)); | 384 | return be32_to_cpu(*(p->addr.ce_be + (offset/4))); |
| 346 | } | 385 | } |
| 347 | 386 | ||
| 348 | /* --- EQCR API --- */ | 387 | /* --- EQCR API --- */ |
| @@ -646,11 +685,7 @@ static inline void qm_dqrr_pvb_update(struct qm_portal *portal) | |||
| 646 | */ | 685 | */ |
| 647 | dpaa_invalidate_touch_ro(res); | 686 | dpaa_invalidate_touch_ro(res); |
| 648 | #endif | 687 | #endif |
| 649 | /* | 688 | if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) { |
| 650 | * when accessing 'verb', use __raw_readb() to ensure that compiler | ||
| 651 | * inlining doesn't try to optimise out "excess reads". | ||
| 652 | */ | ||
| 653 | if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) { | ||
| 654 | dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); | 689 | dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); |
| 655 | if (!dqrr->pi) | 690 | if (!dqrr->pi) |
| 656 | dqrr->vbit ^= QM_DQRR_VERB_VBIT; | 691 | dqrr->vbit ^= QM_DQRR_VERB_VBIT; |
| @@ -777,11 +812,8 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal) | |||
| 777 | union qm_mr_entry *res = qm_cl(mr->ring, mr->pi); | 812 | union qm_mr_entry *res = qm_cl(mr->ring, mr->pi); |
| 778 | 813 | ||
| 779 | DPAA_ASSERT(mr->pmode == qm_mr_pvb); | 814 | DPAA_ASSERT(mr->pmode == qm_mr_pvb); |
| 780 | /* | 815 | |
| 781 | * when accessing 'verb', use __raw_readb() to ensure that compiler | 816 | if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) { |
| 782 | * inlining doesn't try to optimise out "excess reads". | ||
| 783 | */ | ||
| 784 | if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) { | ||
| 785 | mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); | 817 | mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); |
| 786 | if (!mr->pi) | 818 | if (!mr->pi) |
| 787 | mr->vbit ^= QM_MR_VERB_VBIT; | 819 | mr->vbit ^= QM_MR_VERB_VBIT; |
| @@ -822,7 +854,7 @@ static inline int qm_mc_init(struct qm_portal *portal) | |||
| 822 | 854 | ||
| 823 | mc->cr = portal->addr.ce + QM_CL_CR; | 855 | mc->cr = portal->addr.ce + QM_CL_CR; |
| 824 | mc->rr = portal->addr.ce + QM_CL_RR0; | 856 | mc->rr = portal->addr.ce + QM_CL_RR0; |
| 825 | mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT) | 857 | mc->rridx = (mc->cr->_ncw_verb & QM_MCC_VERB_VBIT) |
| 826 | ? 0 : 1; | 858 | ? 0 : 1; |
| 827 | mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0; | 859 | mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0; |
| 828 | #ifdef CONFIG_FSL_DPAA_CHECKING | 860 | #ifdef CONFIG_FSL_DPAA_CHECKING |
| @@ -880,7 +912,7 @@ static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal) | |||
| 880 | * its command is submitted and completed. This includes the valid-bit, | 912 | * its command is submitted and completed. This includes the valid-bit, |
| 881 | * in case you were wondering... | 913 | * in case you were wondering... |
| 882 | */ | 914 | */ |
| 883 | if (!__raw_readb(&rr->verb)) { | 915 | if (!rr->verb) { |
| 884 | dpaa_invalidate_touch_ro(rr); | 916 | dpaa_invalidate_touch_ro(rr); |
| 885 | return NULL; | 917 | return NULL; |
| 886 | } | 918 | } |
| @@ -909,12 +941,12 @@ static inline int qm_mc_result_timeout(struct qm_portal *portal, | |||
| 909 | 941 | ||
| 910 | static inline void fq_set(struct qman_fq *fq, u32 mask) | 942 | static inline void fq_set(struct qman_fq *fq, u32 mask) |
| 911 | { | 943 | { |
| 912 | set_bits(mask, &fq->flags); | 944 | fq->flags |= mask; |
| 913 | } | 945 | } |
| 914 | 946 | ||
| 915 | static inline void fq_clear(struct qman_fq *fq, u32 mask) | 947 | static inline void fq_clear(struct qman_fq *fq, u32 mask) |
| 916 | { | 948 | { |
| 917 | clear_bits(mask, &fq->flags); | 949 | fq->flags &= ~mask; |
| 918 | } | 950 | } |
| 919 | 951 | ||
| 920 | static inline int fq_isset(struct qman_fq *fq, u32 mask) | 952 | static inline int fq_isset(struct qman_fq *fq, u32 mask) |
| @@ -1084,11 +1116,7 @@ loop: | |||
| 1084 | * entries well before the ring has been fully consumed, so | 1116 | * entries well before the ring has been fully consumed, so |
| 1085 | * we're being *really* paranoid here. | 1117 | * we're being *really* paranoid here. |
| 1086 | */ | 1118 | */ |
| 1087 | u64 now, then = jiffies; | 1119 | msleep(1); |
| 1088 | |||
| 1089 | do { | ||
| 1090 | now = jiffies; | ||
| 1091 | } while ((then + 10000) > now); | ||
| 1092 | msg = qm_mr_current(p); | 1120 | msg = qm_mr_current(p); |
| 1093 | if (!msg) | 1121 | if (!msg) |
| 1094 | return 0; | 1122 | return 0; |
| @@ -1124,8 +1152,9 @@ static int qman_create_portal(struct qman_portal *portal, | |||
| 1124 | * config, everything that follows depends on it and "config" is more | 1152 | * config, everything that follows depends on it and "config" is more |
| 1125 | * for (de)reference | 1153 | * for (de)reference |
| 1126 | */ | 1154 | */ |
| 1127 | p->addr.ce = c->addr_virt[DPAA_PORTAL_CE]; | 1155 | p->addr.ce = c->addr_virt_ce; |
| 1128 | p->addr.ci = c->addr_virt[DPAA_PORTAL_CI]; | 1156 | p->addr.ce_be = c->addr_virt_ce; |
| 1157 | p->addr.ci = c->addr_virt_ci; | ||
| 1129 | /* | 1158 | /* |
| 1130 | * If CI-stashing is used, the current defaults use a threshold of 3, | 1159 | * If CI-stashing is used, the current defaults use a threshold of 3, |
| 1131 | * and stash with high-than-DQRR priority. | 1160 | * and stash with high-than-DQRR priority. |
| @@ -1566,7 +1595,7 @@ void qman_p_irqsource_add(struct qman_portal *p, u32 bits) | |||
| 1566 | unsigned long irqflags; | 1595 | unsigned long irqflags; |
| 1567 | 1596 | ||
| 1568 | local_irq_save(irqflags); | 1597 | local_irq_save(irqflags); |
| 1569 | set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources); | 1598 | p->irq_sources |= bits & QM_PIRQ_VISIBLE; |
| 1570 | qm_out(&p->p, QM_REG_IER, p->irq_sources); | 1599 | qm_out(&p->p, QM_REG_IER, p->irq_sources); |
| 1571 | local_irq_restore(irqflags); | 1600 | local_irq_restore(irqflags); |
| 1572 | } | 1601 | } |
| @@ -1589,7 +1618,7 @@ void qman_p_irqsource_remove(struct qman_portal *p, u32 bits) | |||
| 1589 | */ | 1618 | */ |
| 1590 | local_irq_save(irqflags); | 1619 | local_irq_save(irqflags); |
| 1591 | bits &= QM_PIRQ_VISIBLE; | 1620 | bits &= QM_PIRQ_VISIBLE; |
| 1592 | clear_bits(bits, &p->irq_sources); | 1621 | p->irq_sources &= ~bits; |
| 1593 | qm_out(&p->p, QM_REG_IER, p->irq_sources); | 1622 | qm_out(&p->p, QM_REG_IER, p->irq_sources); |
| 1594 | ier = qm_in(&p->p, QM_REG_IER); | 1623 | ier = qm_in(&p->p, QM_REG_IER); |
| 1595 | /* | 1624 | /* |
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c index 835ce947ffca..79cba58387a5 100644 --- a/drivers/soc/fsl/qbman/qman_ccsr.c +++ b/drivers/soc/fsl/qbman/qman_ccsr.c | |||
| @@ -401,21 +401,42 @@ static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num) | |||
| 401 | } | 401 | } |
| 402 | 402 | ||
| 403 | /* | 403 | /* |
| 404 | * Ideally we would use the DMA API to turn rmem->base into a DMA address | 404 | * QMan needs two global memory areas initialized at boot time: |
| 405 | * (especially if iommu translations ever get involved). Unfortunately, the | 405 | * 1) FQD: Frame Queue Descriptors used to manage frame queues |
| 406 | * DMA API currently does not allow mapping anything that is not backed with | 406 | * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames |
| 407 | * a struct page. | 407 | * Both areas are reserved using the device tree reserved memory framework |
| 408 | * and the addresses and sizes are initialized when the QMan device is probed | ||
| 408 | */ | 409 | */ |
| 409 | static dma_addr_t fqd_a, pfdr_a; | 410 | static dma_addr_t fqd_a, pfdr_a; |
| 410 | static size_t fqd_sz, pfdr_sz; | 411 | static size_t fqd_sz, pfdr_sz; |
| 411 | 412 | ||
| 413 | #ifdef CONFIG_PPC | ||
| 414 | /* | ||
| 415 | * Support for PPC Device Tree backward compatibility when compatible | ||
| 416 | * string is set to fsl-qman-fqd and fsl-qman-pfdr | ||
| 417 | */ | ||
| 418 | static int zero_priv_mem(phys_addr_t addr, size_t sz) | ||
| 419 | { | ||
| 420 | /* map as cacheable, non-guarded */ | ||
| 421 | void __iomem *tmpp = ioremap_prot(addr, sz, 0); | ||
| 422 | |||
| 423 | if (!tmpp) | ||
| 424 | return -ENOMEM; | ||
| 425 | |||
| 426 | memset_io(tmpp, 0, sz); | ||
| 427 | flush_dcache_range((unsigned long)tmpp, | ||
| 428 | (unsigned long)tmpp + sz); | ||
| 429 | iounmap(tmpp); | ||
| 430 | |||
| 431 | return 0; | ||
| 432 | } | ||
| 433 | |||
| 412 | static int qman_fqd(struct reserved_mem *rmem) | 434 | static int qman_fqd(struct reserved_mem *rmem) |
| 413 | { | 435 | { |
| 414 | fqd_a = rmem->base; | 436 | fqd_a = rmem->base; |
| 415 | fqd_sz = rmem->size; | 437 | fqd_sz = rmem->size; |
| 416 | 438 | ||
| 417 | WARN_ON(!(fqd_a && fqd_sz)); | 439 | WARN_ON(!(fqd_a && fqd_sz)); |
| 418 | |||
| 419 | return 0; | 440 | return 0; |
| 420 | } | 441 | } |
| 421 | RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd); | 442 | RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd); |
| @@ -431,32 +452,13 @@ static int qman_pfdr(struct reserved_mem *rmem) | |||
| 431 | } | 452 | } |
| 432 | RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr); | 453 | RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr); |
| 433 | 454 | ||
| 455 | #endif | ||
| 456 | |||
| 434 | static unsigned int qm_get_fqid_maxcnt(void) | 457 | static unsigned int qm_get_fqid_maxcnt(void) |
| 435 | { | 458 | { |
| 436 | return fqd_sz / 64; | 459 | return fqd_sz / 64; |
| 437 | } | 460 | } |
| 438 | 461 | ||
| 439 | /* | ||
| 440 | * Flush this memory range from data cache so that QMAN originated | ||
| 441 | * transactions for this memory region could be marked non-coherent. | ||
| 442 | */ | ||
| 443 | static int zero_priv_mem(struct device *dev, struct device_node *node, | ||
| 444 | phys_addr_t addr, size_t sz) | ||
| 445 | { | ||
| 446 | /* map as cacheable, non-guarded */ | ||
| 447 | void __iomem *tmpp = ioremap_prot(addr, sz, 0); | ||
| 448 | |||
| 449 | if (!tmpp) | ||
| 450 | return -ENOMEM; | ||
| 451 | |||
| 452 | memset_io(tmpp, 0, sz); | ||
| 453 | flush_dcache_range((unsigned long)tmpp, | ||
| 454 | (unsigned long)tmpp + sz); | ||
| 455 | iounmap(tmpp); | ||
| 456 | |||
| 457 | return 0; | ||
| 458 | } | ||
| 459 | |||
| 460 | static void log_edata_bits(struct device *dev, u32 bit_count) | 462 | static void log_edata_bits(struct device *dev, u32 bit_count) |
| 461 | { | 463 | { |
| 462 | u32 i, j, mask = 0xffffffff; | 464 | u32 i, j, mask = 0xffffffff; |
| @@ -717,6 +719,8 @@ static int fsl_qman_probe(struct platform_device *pdev) | |||
| 717 | qman_ip_rev = QMAN_REV30; | 719 | qman_ip_rev = QMAN_REV30; |
| 718 | else if (major == 3 && minor == 1) | 720 | else if (major == 3 && minor == 1) |
| 719 | qman_ip_rev = QMAN_REV31; | 721 | qman_ip_rev = QMAN_REV31; |
| 722 | else if (major == 3 && minor == 2) | ||
| 723 | qman_ip_rev = QMAN_REV32; | ||
| 720 | else { | 724 | else { |
| 721 | dev_err(dev, "Unknown QMan version\n"); | 725 | dev_err(dev, "Unknown QMan version\n"); |
| 722 | return -ENODEV; | 726 | return -ENODEV; |
| @@ -727,10 +731,41 @@ static int fsl_qman_probe(struct platform_device *pdev) | |||
| 727 | qm_channel_caam = QMAN_CHANNEL_CAAM_REV3; | 731 | qm_channel_caam = QMAN_CHANNEL_CAAM_REV3; |
| 728 | } | 732 | } |
| 729 | 733 | ||
| 730 | ret = zero_priv_mem(dev, node, fqd_a, fqd_sz); | 734 | if (fqd_a) { |
| 731 | WARN_ON(ret); | 735 | #ifdef CONFIG_PPC |
| 732 | if (ret) | 736 | /* |
| 733 | return -ENODEV; | 737 | * For PPC backward DT compatibility |
| 738 | * FQD memory MUST be zero'd by software | ||
| 739 | */ | ||
| 740 | zero_priv_mem(fqd_a, fqd_sz); | ||
| 741 | #else | ||
| 742 | WARN(1, "Unexpected architecture using non shared-dma-mem reservations"); | ||
| 743 | #endif | ||
| 744 | } else { | ||
| 745 | /* | ||
| 746 | * The memory regions are assumed to be laid out as FQD followed by | ||
| 747 | * PFDR; to ensure each allocation comes from the correct region, the | ||
| 748 | * driver initializes and then allocates each piece in that order | ||
| 749 | */ | ||
| 750 | ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz); | ||
| 751 | if (ret) { | ||
| 752 | dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n", | ||
| 753 | ret); | ||
| 754 | return -ENODEV; | ||
| 755 | } | ||
| 756 | } | ||
| 757 | dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz); | ||
| 758 | |||
| 759 | if (!pfdr_a) { | ||
| 760 | /* Setup PFDR memory */ | ||
| 761 | ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz); | ||
| 762 | if (ret) { | ||
| 763 | dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n", | ||
| 764 | ret); | ||
| 765 | return -ENODEV; | ||
| 766 | } | ||
| 767 | } | ||
| 768 | dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz); | ||
| 734 | 769 | ||
| 735 | ret = qman_init_ccsr(dev); | 770 | ret = qman_init_ccsr(dev); |
| 736 | if (ret) { | 771 | if (ret) { |
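
As a point of reference, qm_get_fqid_maxcnt() above divides the FQD reservation size by 64, i.e. each frame queue descriptor is taken to occupy 64 bytes. A hypothetical back-of-the-envelope check (userspace C, illustration only; the 1 MiB size is an assumption, not taken from the driver):

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
            size_t fqd_sz = 1024 * 1024;           /* assumed FQD reservation size */
            unsigned int max_fqid = fqd_sz / 64;   /* mirrors qm_get_fqid_maxcnt() */

            printf("frame queues supported: %u\n", max_fqid);   /* 16384 */
            return 0;
    }
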
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index cbacdf4f98ed..a120002b630e 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c | |||
| @@ -224,7 +224,6 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
| 224 | struct device_node *node = dev->of_node; | 224 | struct device_node *node = dev->of_node; |
| 225 | struct qm_portal_config *pcfg; | 225 | struct qm_portal_config *pcfg; |
| 226 | struct resource *addr_phys[2]; | 226 | struct resource *addr_phys[2]; |
| 227 | void __iomem *va; | ||
| 228 | int irq, cpu, err; | 227 | int irq, cpu, err; |
| 229 | u32 val; | 228 | u32 val; |
| 230 | 229 | ||
| @@ -262,23 +261,21 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
| 262 | } | 261 | } |
| 263 | pcfg->irq = irq; | 262 | pcfg->irq = irq; |
| 264 | 263 | ||
| 265 | va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); | 264 | pcfg->addr_virt_ce = memremap(addr_phys[0]->start, |
| 266 | if (!va) { | 265 | resource_size(addr_phys[0]), |
| 267 | dev_err(dev, "ioremap::CE failed\n"); | 266 | QBMAN_MEMREMAP_ATTR); |
| 267 | if (!pcfg->addr_virt_ce) { | ||
| 268 | dev_err(dev, "memremap::CE failed\n"); | ||
| 268 | goto err_ioremap1; | 269 | goto err_ioremap1; |
| 269 | } | 270 | } |
| 270 | 271 | ||
| 271 | pcfg->addr_virt[DPAA_PORTAL_CE] = va; | 272 | pcfg->addr_virt_ci = ioremap(addr_phys[1]->start, |
| 272 | 273 | resource_size(addr_phys[1])); | |
| 273 | va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), | 274 | if (!pcfg->addr_virt_ci) { |
| 274 | _PAGE_GUARDED | _PAGE_NO_CACHE); | ||
| 275 | if (!va) { | ||
| 276 | dev_err(dev, "ioremap::CI failed\n"); | 275 | dev_err(dev, "ioremap::CI failed\n"); |
| 277 | goto err_ioremap2; | 276 | goto err_ioremap2; |
| 278 | } | 277 | } |
| 279 | 278 | ||
| 280 | pcfg->addr_virt[DPAA_PORTAL_CI] = va; | ||
| 281 | |||
| 282 | pcfg->pools = qm_get_pools_sdqcr(); | 279 | pcfg->pools = qm_get_pools_sdqcr(); |
| 283 | 280 | ||
| 284 | spin_lock(&qman_lock); | 281 | spin_lock(&qman_lock); |
| @@ -310,9 +307,9 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
| 310 | return 0; | 307 | return 0; |
| 311 | 308 | ||
| 312 | err_portal_init: | 309 | err_portal_init: |
| 313 | iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]); | 310 | iounmap(pcfg->addr_virt_ci); |
| 314 | err_ioremap2: | 311 | err_ioremap2: |
| 315 | iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); | 312 | memunmap(pcfg->addr_virt_ce); |
| 316 | err_ioremap1: | 313 | err_ioremap1: |
| 317 | return -ENXIO; | 314 | return -ENXIO; |
| 318 | } | 315 | } |
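
The probe rework above splits the two portal windows across different mapping primitives: the cache-enabled (CE) window becomes a memremap() mapping (QBMAN_MEMREMAP_ATTR is introduced elsewhere in this series and is not shown here), while the cache-inhibited (CI) window keeps ioremap(). Each mapping then needs its own teardown, which is why the error path pairs memunmap() with iounmap(). A condensed, hypothetical helper showing the same pairing:

    #include <linux/io.h>

    /* Sketch only; field names follow the new qm_portal_config layout. */
    static int map_portal_windows(struct qm_portal_config *pcfg,
                                  phys_addr_t ce_start, size_t ce_size,
                                  phys_addr_t ci_start, size_t ci_size)
    {
            pcfg->addr_virt_ce = memremap(ce_start, ce_size, QBMAN_MEMREMAP_ATTR);
            if (!pcfg->addr_virt_ce)
                    return -ENXIO;

            pcfg->addr_virt_ci = ioremap(ci_start, ci_size);
            if (!pcfg->addr_virt_ci) {
                    memunmap(pcfg->addr_virt_ce);  /* memremap() pairs with memunmap() */
                    return -ENXIO;
            }

            return 0;
    }
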
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h index 5fe9faf6232e..75a8f905f8f7 100644 --- a/drivers/soc/fsl/qbman/qman_priv.h +++ b/drivers/soc/fsl/qbman/qman_priv.h | |||
| @@ -28,8 +28,6 @@ | |||
| 28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 | */ | 29 | */ |
| 30 | 30 | ||
| 31 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 32 | |||
| 33 | #include "dpaa_sys.h" | 31 | #include "dpaa_sys.h" |
| 34 | 32 | ||
| 35 | #include <soc/fsl/qman.h> | 33 | #include <soc/fsl/qman.h> |
| @@ -155,11 +153,9 @@ static inline void qman_cgrs_xor(struct qman_cgrs *dest, | |||
| 155 | void qman_init_cgr_all(void); | 153 | void qman_init_cgr_all(void); |
| 156 | 154 | ||
| 157 | struct qm_portal_config { | 155 | struct qm_portal_config { |
| 158 | /* | 156 | /* Portal addresses */ |
| 159 | * Corenet portal addresses; | 157 | void *addr_virt_ce; |
| 160 | * [0]==cache-enabled, [1]==cache-inhibited. | 158 | void __iomem *addr_virt_ci; |
| 161 | */ | ||
| 162 | void __iomem *addr_virt[2]; | ||
| 163 | struct device *dev; | 159 | struct device *dev; |
| 164 | struct iommu_domain *iommu_domain; | 160 | struct iommu_domain *iommu_domain; |
| 165 | /* Allow these to be joined in lists */ | 161 | /* Allow these to be joined in lists */ |
| @@ -187,6 +183,7 @@ struct qm_portal_config { | |||
| 187 | #define QMAN_REV20 0x0200 | 183 | #define QMAN_REV20 0x0200 |
| 188 | #define QMAN_REV30 0x0300 | 184 | #define QMAN_REV30 0x0300 |
| 189 | #define QMAN_REV31 0x0301 | 185 | #define QMAN_REV31 0x0301 |
| 186 | #define QMAN_REV32 0x0302 | ||
| 190 | extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ | 187 | extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ |
| 191 | 188 | ||
| 192 | #define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */ | 189 | #define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */ |
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h index d5f8cb2260dc..41bdbc48cade 100644 --- a/drivers/soc/fsl/qbman/qman_test.h +++ b/drivers/soc/fsl/qbman/qman_test.h | |||
| @@ -30,7 +30,5 @@ | |||
| 30 | 30 | ||
| 31 | #include "qman_priv.h" | 31 | #include "qman_priv.h" |
| 32 | 32 | ||
| 33 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 34 | |||
| 35 | int qman_test_stash(void); | 33 | int qman_test_stash(void); |
| 36 | int qman_test_api(void); | 34 | int qman_test_api(void); |
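
The pr_fmt definitions removed above sat after the #include lines; the usual kernel convention (background, not stated in this commit) is to define pr_fmt before any includes, since linux/printk.h otherwise installs its default definition first. A small illustration of the conventional placement:

    /* Illustration only: define pr_fmt before any includes so that the
     * pr_*() macros pick up the module-name prefix.
     */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/printk.h>

    static void announce(void)
    {
            pr_info("probing\n");   /* logged as "<modname>: probing" */
    }
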
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index 609bb3424c14..a7d0667338f2 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig | |||
| @@ -1,9 +1,11 @@ | |||
| 1 | # | 1 | # |
| 2 | # MediaTek SoC drivers | 2 | # MediaTek SoC drivers |
| 3 | # | 3 | # |
| 4 | menu "MediaTek SoC drivers" | ||
| 5 | depends on ARCH_MEDIATEK || COMPILE_TEST | ||
| 6 | |||
| 4 | config MTK_INFRACFG | 7 | config MTK_INFRACFG |
| 5 | bool "MediaTek INFRACFG Support" | 8 | bool "MediaTek INFRACFG Support" |
| 6 | depends on ARCH_MEDIATEK || COMPILE_TEST | ||
| 7 | select REGMAP | 9 | select REGMAP |
| 8 | help | 10 | help |
| 9 | Say yes here to add support for the MediaTek INFRACFG controller. The | 11 | Say yes here to add support for the MediaTek INFRACFG controller. The |
| @@ -12,7 +14,6 @@ config MTK_INFRACFG | |||
| 12 | 14 | ||
| 13 | config MTK_PMIC_WRAP | 15 | config MTK_PMIC_WRAP |
| 14 | tristate "MediaTek PMIC Wrapper Support" | 16 | tristate "MediaTek PMIC Wrapper Support" |
| 15 | depends on ARCH_MEDIATEK | ||
| 16 | depends on RESET_CONTROLLER | 17 | depends on RESET_CONTROLLER |
| 17 | select REGMAP | 18 | select REGMAP |
| 18 | help | 19 | help |
| @@ -22,7 +23,6 @@ config MTK_PMIC_WRAP | |||
| 22 | 23 | ||
| 23 | config MTK_SCPSYS | 24 | config MTK_SCPSYS |
| 24 | bool "MediaTek SCPSYS Support" | 25 | bool "MediaTek SCPSYS Support" |
| 25 | depends on ARCH_MEDIATEK || COMPILE_TEST | ||
| 26 | default ARCH_MEDIATEK | 26 | default ARCH_MEDIATEK |
| 27 | select REGMAP | 27 | select REGMAP |
| 28 | select MTK_INFRACFG | 28 | select MTK_INFRACFG |
| @@ -30,3 +30,5 @@ config MTK_SCPSYS | |||
| 30 | help | 30 | help |
| 31 | Say yes here to add support for the MediaTek SCPSYS power domain | 31 | Say yes here to add support for the MediaTek SCPSYS power domain |
| 32 | driver. | 32 | driver. |
| 33 | |||
| 34 | endmenu | ||
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index c2048382830f..e9e054a15b7d 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c | |||
| @@ -70,6 +70,12 @@ | |||
| 70 | PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE | \ | 70 | PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE | \ |
| 71 | PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE) | 71 | PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE) |
| 72 | 72 | ||
| 73 | /* Group of bits used to indicate slave capabilities */ | ||
| 74 | #define PWRAP_SLV_CAP_SPI BIT(0) | ||
| 75 | #define PWRAP_SLV_CAP_DUALIO BIT(1) | ||
| 76 | #define PWRAP_SLV_CAP_SECURITY BIT(2) | ||
| 77 | #define HAS_CAP(_c, _x) (((_c) & (_x)) == (_x)) | ||
| 78 | |||
| 73 | /* defines for slave device wrapper registers */ | 79 | /* defines for slave device wrapper registers */ |
| 74 | enum dew_regs { | 80 | enum dew_regs { |
| 75 | PWRAP_DEW_BASE, | 81 | PWRAP_DEW_BASE, |
| @@ -208,6 +214,36 @@ enum pwrap_regs { | |||
| 208 | PWRAP_ADC_RDATA_ADDR1, | 214 | PWRAP_ADC_RDATA_ADDR1, |
| 209 | PWRAP_ADC_RDATA_ADDR2, | 215 | PWRAP_ADC_RDATA_ADDR2, |
| 210 | 216 | ||
| 217 | /* MT7622 only regs */ | ||
| 218 | PWRAP_EINT_STA0_ADR, | ||
| 219 | PWRAP_EINT_STA1_ADR, | ||
| 220 | PWRAP_STA, | ||
| 221 | PWRAP_CLR, | ||
| 222 | PWRAP_DVFS_ADR8, | ||
| 223 | PWRAP_DVFS_WDATA8, | ||
| 224 | PWRAP_DVFS_ADR9, | ||
| 225 | PWRAP_DVFS_WDATA9, | ||
| 226 | PWRAP_DVFS_ADR10, | ||
| 227 | PWRAP_DVFS_WDATA10, | ||
| 228 | PWRAP_DVFS_ADR11, | ||
| 229 | PWRAP_DVFS_WDATA11, | ||
| 230 | PWRAP_DVFS_ADR12, | ||
| 231 | PWRAP_DVFS_WDATA12, | ||
| 232 | PWRAP_DVFS_ADR13, | ||
| 233 | PWRAP_DVFS_WDATA13, | ||
| 234 | PWRAP_DVFS_ADR14, | ||
| 235 | PWRAP_DVFS_WDATA14, | ||
| 236 | PWRAP_DVFS_ADR15, | ||
| 237 | PWRAP_DVFS_WDATA15, | ||
| 238 | PWRAP_EXT_CK, | ||
| 239 | PWRAP_ADC_RDATA_ADDR, | ||
| 240 | PWRAP_GPS_STA, | ||
| 241 | PWRAP_SW_RST, | ||
| 242 | PWRAP_DVFS_STEP_CTRL0, | ||
| 243 | PWRAP_DVFS_STEP_CTRL1, | ||
| 244 | PWRAP_DVFS_STEP_CTRL2, | ||
| 245 | PWRAP_SPI2_CTRL, | ||
| 246 | |||
| 211 | /* MT8135 only regs */ | 247 | /* MT8135 only regs */ |
| 212 | PWRAP_CSHEXT, | 248 | PWRAP_CSHEXT, |
| 213 | PWRAP_EVENT_IN_EN, | 249 | PWRAP_EVENT_IN_EN, |
| @@ -330,6 +366,118 @@ static int mt2701_regs[] = { | |||
| 330 | [PWRAP_ADC_RDATA_ADDR2] = 0x154, | 366 | [PWRAP_ADC_RDATA_ADDR2] = 0x154, |
| 331 | }; | 367 | }; |
| 332 | 368 | ||
| 369 | static int mt7622_regs[] = { | ||
| 370 | [PWRAP_MUX_SEL] = 0x0, | ||
| 371 | [PWRAP_WRAP_EN] = 0x4, | ||
| 372 | [PWRAP_DIO_EN] = 0x8, | ||
| 373 | [PWRAP_SIDLY] = 0xC, | ||
| 374 | [PWRAP_RDDMY] = 0x10, | ||
| 375 | [PWRAP_SI_CK_CON] = 0x14, | ||
| 376 | [PWRAP_CSHEXT_WRITE] = 0x18, | ||
| 377 | [PWRAP_CSHEXT_READ] = 0x1C, | ||
| 378 | [PWRAP_CSLEXT_START] = 0x20, | ||
| 379 | [PWRAP_CSLEXT_END] = 0x24, | ||
| 380 | [PWRAP_STAUPD_PRD] = 0x28, | ||
| 381 | [PWRAP_STAUPD_GRPEN] = 0x2C, | ||
| 382 | [PWRAP_EINT_STA0_ADR] = 0x30, | ||
| 383 | [PWRAP_EINT_STA1_ADR] = 0x34, | ||
| 384 | [PWRAP_STA] = 0x38, | ||
| 385 | [PWRAP_CLR] = 0x3C, | ||
| 386 | [PWRAP_STAUPD_MAN_TRIG] = 0x40, | ||
| 387 | [PWRAP_STAUPD_STA] = 0x44, | ||
| 388 | [PWRAP_WRAP_STA] = 0x48, | ||
| 389 | [PWRAP_HARB_INIT] = 0x4C, | ||
| 390 | [PWRAP_HARB_HPRIO] = 0x50, | ||
| 391 | [PWRAP_HIPRIO_ARB_EN] = 0x54, | ||
| 392 | [PWRAP_HARB_STA0] = 0x58, | ||
| 393 | [PWRAP_HARB_STA1] = 0x5C, | ||
| 394 | [PWRAP_MAN_EN] = 0x60, | ||
| 395 | [PWRAP_MAN_CMD] = 0x64, | ||
| 396 | [PWRAP_MAN_RDATA] = 0x68, | ||
| 397 | [PWRAP_MAN_VLDCLR] = 0x6C, | ||
| 398 | [PWRAP_WACS0_EN] = 0x70, | ||
| 399 | [PWRAP_INIT_DONE0] = 0x74, | ||
| 400 | [PWRAP_WACS0_CMD] = 0x78, | ||
| 401 | [PWRAP_WACS0_RDATA] = 0x7C, | ||
| 402 | [PWRAP_WACS0_VLDCLR] = 0x80, | ||
| 403 | [PWRAP_WACS1_EN] = 0x84, | ||
| 404 | [PWRAP_INIT_DONE1] = 0x88, | ||
| 405 | [PWRAP_WACS1_CMD] = 0x8C, | ||
| 406 | [PWRAP_WACS1_RDATA] = 0x90, | ||
| 407 | [PWRAP_WACS1_VLDCLR] = 0x94, | ||
| 408 | [PWRAP_WACS2_EN] = 0x98, | ||
| 409 | [PWRAP_INIT_DONE2] = 0x9C, | ||
| 410 | [PWRAP_WACS2_CMD] = 0xA0, | ||
| 411 | [PWRAP_WACS2_RDATA] = 0xA4, | ||
| 412 | [PWRAP_WACS2_VLDCLR] = 0xA8, | ||
| 413 | [PWRAP_INT_EN] = 0xAC, | ||
| 414 | [PWRAP_INT_FLG_RAW] = 0xB0, | ||
| 415 | [PWRAP_INT_FLG] = 0xB4, | ||
| 416 | [PWRAP_INT_CLR] = 0xB8, | ||
| 417 | [PWRAP_SIG_ADR] = 0xBC, | ||
| 418 | [PWRAP_SIG_MODE] = 0xC0, | ||
| 419 | [PWRAP_SIG_VALUE] = 0xC4, | ||
| 420 | [PWRAP_SIG_ERRVAL] = 0xC8, | ||
| 421 | [PWRAP_CRC_EN] = 0xCC, | ||
| 422 | [PWRAP_TIMER_EN] = 0xD0, | ||
| 423 | [PWRAP_TIMER_STA] = 0xD4, | ||
| 424 | [PWRAP_WDT_UNIT] = 0xD8, | ||
| 425 | [PWRAP_WDT_SRC_EN] = 0xDC, | ||
| 426 | [PWRAP_WDT_FLG] = 0xE0, | ||
| 427 | [PWRAP_DEBUG_INT_SEL] = 0xE4, | ||
| 428 | [PWRAP_DVFS_ADR0] = 0xE8, | ||
| 429 | [PWRAP_DVFS_WDATA0] = 0xEC, | ||
| 430 | [PWRAP_DVFS_ADR1] = 0xF0, | ||
| 431 | [PWRAP_DVFS_WDATA1] = 0xF4, | ||
| 432 | [PWRAP_DVFS_ADR2] = 0xF8, | ||
| 433 | [PWRAP_DVFS_WDATA2] = 0xFC, | ||
| 434 | [PWRAP_DVFS_ADR3] = 0x100, | ||
| 435 | [PWRAP_DVFS_WDATA3] = 0x104, | ||
| 436 | [PWRAP_DVFS_ADR4] = 0x108, | ||
| 437 | [PWRAP_DVFS_WDATA4] = 0x10C, | ||
| 438 | [PWRAP_DVFS_ADR5] = 0x110, | ||
| 439 | [PWRAP_DVFS_WDATA5] = 0x114, | ||
| 440 | [PWRAP_DVFS_ADR6] = 0x118, | ||
| 441 | [PWRAP_DVFS_WDATA6] = 0x11C, | ||
| 442 | [PWRAP_DVFS_ADR7] = 0x120, | ||
| 443 | [PWRAP_DVFS_WDATA7] = 0x124, | ||
| 444 | [PWRAP_DVFS_ADR8] = 0x128, | ||
| 445 | [PWRAP_DVFS_WDATA8] = 0x12C, | ||
| 446 | [PWRAP_DVFS_ADR9] = 0x130, | ||
| 447 | [PWRAP_DVFS_WDATA9] = 0x134, | ||
| 448 | [PWRAP_DVFS_ADR10] = 0x138, | ||
| 449 | [PWRAP_DVFS_WDATA10] = 0x13C, | ||
| 450 | [PWRAP_DVFS_ADR11] = 0x140, | ||
| 451 | [PWRAP_DVFS_WDATA11] = 0x144, | ||
| 452 | [PWRAP_DVFS_ADR12] = 0x148, | ||
| 453 | [PWRAP_DVFS_WDATA12] = 0x14C, | ||
| 454 | [PWRAP_DVFS_ADR13] = 0x150, | ||
| 455 | [PWRAP_DVFS_WDATA13] = 0x154, | ||
| 456 | [PWRAP_DVFS_ADR14] = 0x158, | ||
| 457 | [PWRAP_DVFS_WDATA14] = 0x15C, | ||
| 458 | [PWRAP_DVFS_ADR15] = 0x160, | ||
| 459 | [PWRAP_DVFS_WDATA15] = 0x164, | ||
| 460 | [PWRAP_SPMINF_STA] = 0x168, | ||
| 461 | [PWRAP_CIPHER_KEY_SEL] = 0x16C, | ||
| 462 | [PWRAP_CIPHER_IV_SEL] = 0x170, | ||
| 463 | [PWRAP_CIPHER_EN] = 0x174, | ||
| 464 | [PWRAP_CIPHER_RDY] = 0x178, | ||
| 465 | [PWRAP_CIPHER_MODE] = 0x17C, | ||
| 466 | [PWRAP_CIPHER_SWRST] = 0x180, | ||
| 467 | [PWRAP_DCM_EN] = 0x184, | ||
| 468 | [PWRAP_DCM_DBC_PRD] = 0x188, | ||
| 469 | [PWRAP_EXT_CK] = 0x18C, | ||
| 470 | [PWRAP_ADC_CMD_ADDR] = 0x190, | ||
| 471 | [PWRAP_PWRAP_ADC_CMD] = 0x194, | ||
| 472 | [PWRAP_ADC_RDATA_ADDR] = 0x198, | ||
| 473 | [PWRAP_GPS_STA] = 0x19C, | ||
| 474 | [PWRAP_SW_RST] = 0x1A0, | ||
| 475 | [PWRAP_DVFS_STEP_CTRL0] = 0x238, | ||
| 476 | [PWRAP_DVFS_STEP_CTRL1] = 0x23C, | ||
| 477 | [PWRAP_DVFS_STEP_CTRL2] = 0x240, | ||
| 478 | [PWRAP_SPI2_CTRL] = 0x244, | ||
| 479 | }; | ||
| 480 | |||
| 333 | static int mt8173_regs[] = { | 481 | static int mt8173_regs[] = { |
| 334 | [PWRAP_MUX_SEL] = 0x0, | 482 | [PWRAP_MUX_SEL] = 0x0, |
| 335 | [PWRAP_WRAP_EN] = 0x4, | 483 | [PWRAP_WRAP_EN] = 0x4, |
| @@ -487,18 +635,31 @@ static int mt8135_regs[] = { | |||
| 487 | 635 | ||
| 488 | enum pmic_type { | 636 | enum pmic_type { |
| 489 | PMIC_MT6323, | 637 | PMIC_MT6323, |
| 638 | PMIC_MT6380, | ||
| 490 | PMIC_MT6397, | 639 | PMIC_MT6397, |
| 491 | }; | 640 | }; |
| 492 | 641 | ||
| 493 | enum pwrap_type { | 642 | enum pwrap_type { |
| 494 | PWRAP_MT2701, | 643 | PWRAP_MT2701, |
| 644 | PWRAP_MT7622, | ||
| 495 | PWRAP_MT8135, | 645 | PWRAP_MT8135, |
| 496 | PWRAP_MT8173, | 646 | PWRAP_MT8173, |
| 497 | }; | 647 | }; |
| 498 | 648 | ||
| 649 | struct pmic_wrapper; | ||
| 499 | struct pwrap_slv_type { | 650 | struct pwrap_slv_type { |
| 500 | const u32 *dew_regs; | 651 | const u32 *dew_regs; |
| 501 | enum pmic_type type; | 652 | enum pmic_type type; |
| 653 | const struct regmap_config *regmap; | ||
| 654 | /* Flags indicating the capability for the target slave */ | ||
| 655 | u32 caps; | ||
| 656 | /* | ||
| 657 | * pwrap operations depend heavily on the PMIC type, so these | ||
| 658 | * pointers add the flexibility to select the right implementation | ||
| 659 | * once the type has been detected from the device tree. | ||
| 660 | */ | ||
| 661 | int (*pwrap_read)(struct pmic_wrapper *wrp, u32 adr, u32 *rdata); | ||
| 662 | int (*pwrap_write)(struct pmic_wrapper *wrp, u32 adr, u32 wdata); | ||
| 502 | }; | 663 | }; |
| 503 | 664 | ||
| 504 | struct pmic_wrapper { | 665 | struct pmic_wrapper { |
| @@ -522,7 +683,7 @@ struct pmic_wrapper_type { | |||
| 522 | u32 int_en_all; | 683 | u32 int_en_all; |
| 523 | u32 spi_w; | 684 | u32 spi_w; |
| 524 | u32 wdt_src; | 685 | u32 wdt_src; |
| 525 | int has_bridge:1; | 686 | unsigned int has_bridge:1; |
| 526 | int (*init_reg_clock)(struct pmic_wrapper *wrp); | 687 | int (*init_reg_clock)(struct pmic_wrapper *wrp); |
| 527 | int (*init_soc_specific)(struct pmic_wrapper *wrp); | 688 | int (*init_soc_specific)(struct pmic_wrapper *wrp); |
| 528 | }; | 689 | }; |
| @@ -593,7 +754,7 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp, | |||
| 593 | } while (1); | 754 | } while (1); |
| 594 | } | 755 | } |
| 595 | 756 | ||
| 596 | static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) | 757 | static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) |
| 597 | { | 758 | { |
| 598 | int ret; | 759 | int ret; |
| 599 | 760 | ||
| @@ -603,14 +764,54 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) | |||
| 603 | return ret; | 764 | return ret; |
| 604 | } | 765 | } |
| 605 | 766 | ||
| 606 | pwrap_writel(wrp, (1 << 31) | ((adr >> 1) << 16) | wdata, | 767 | pwrap_writel(wrp, (adr >> 1) << 16, PWRAP_WACS2_CMD); |
| 607 | PWRAP_WACS2_CMD); | 768 | |
| 769 | ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr); | ||
| 770 | if (ret) | ||
| 771 | return ret; | ||
| 772 | |||
| 773 | *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA)); | ||
| 774 | |||
| 775 | pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); | ||
| 776 | |||
| 777 | return 0; | ||
| 778 | } | ||
| 779 | |||
| 780 | static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) | ||
| 781 | { | ||
| 782 | int ret, msb; | ||
| 783 | |||
| 784 | *rdata = 0; | ||
| 785 | for (msb = 0; msb < 2; msb++) { | ||
| 786 | ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); | ||
| 787 | if (ret) { | ||
| 788 | pwrap_leave_fsm_vldclr(wrp); | ||
| 789 | return ret; | ||
| 790 | } | ||
| 791 | |||
| 792 | pwrap_writel(wrp, ((msb << 30) | (adr << 16)), | ||
| 793 | PWRAP_WACS2_CMD); | ||
| 794 | |||
| 795 | ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr); | ||
| 796 | if (ret) | ||
| 797 | return ret; | ||
| 798 | |||
| 799 | *rdata += (PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, | ||
| 800 | PWRAP_WACS2_RDATA)) << (16 * msb)); | ||
| 801 | |||
| 802 | pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); | ||
| 803 | } | ||
| 608 | 804 | ||
| 609 | return 0; | 805 | return 0; |
| 610 | } | 806 | } |
| 611 | 807 | ||
| 612 | static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) | 808 | static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) |
| 613 | { | 809 | { |
| 810 | return wrp->slave->pwrap_read(wrp, adr, rdata); | ||
| 811 | } | ||
| 812 | |||
| 813 | static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata) | ||
| 814 | { | ||
| 614 | int ret; | 815 | int ret; |
| 615 | 816 | ||
| 616 | ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); | 817 | ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); |
| @@ -619,19 +820,46 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) | |||
| 619 | return ret; | 820 | return ret; |
| 620 | } | 821 | } |
| 621 | 822 | ||
| 622 | pwrap_writel(wrp, (adr >> 1) << 16, PWRAP_WACS2_CMD); | 823 | pwrap_writel(wrp, (1 << 31) | ((adr >> 1) << 16) | wdata, |
| 824 | PWRAP_WACS2_CMD); | ||
| 623 | 825 | ||
| 624 | ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr); | 826 | return 0; |
| 625 | if (ret) | 827 | } |
| 626 | return ret; | ||
| 627 | 828 | ||
| 628 | *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA)); | 829 | static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata) |
| 830 | { | ||
| 831 | int ret, msb, rdata; | ||
| 629 | 832 | ||
| 630 | pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); | 833 | for (msb = 0; msb < 2; msb++) { |
| 834 | ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); | ||
| 835 | if (ret) { | ||
| 836 | pwrap_leave_fsm_vldclr(wrp); | ||
| 837 | return ret; | ||
| 838 | } | ||
| 839 | |||
| 840 | pwrap_writel(wrp, (1 << 31) | (msb << 30) | (adr << 16) | | ||
| 841 | ((wdata >> (msb * 16)) & 0xffff), | ||
| 842 | PWRAP_WACS2_CMD); | ||
| 843 | |||
| 844 | /* | ||
| 845 | * The hardware requires this pwrap_read to synchronize the two | ||
| 846 | * successive 16-bit pwrap_writel operations that compose a | ||
| 847 | * single 32-bit bus write. | ||
| 848 | * Without it, the lower 16-bit half of | ||
| 849 | * the write fails. | ||
| 850 | */ | ||
| 851 | if (!msb) | ||
| 852 | pwrap_read(wrp, adr, &rdata); | ||
| 853 | } | ||
| 631 | 854 | ||
| 632 | return 0; | 855 | return 0; |
| 633 | } | 856 | } |
| 634 | 857 | ||
| 858 | static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) | ||
| 859 | { | ||
| 860 | return wrp->slave->pwrap_write(wrp, adr, wdata); | ||
| 861 | } | ||
| 862 | |||
| 635 | static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata) | 863 | static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata) |
| 636 | { | 864 | { |
| 637 | return pwrap_read(context, adr, rdata); | 865 | return pwrap_read(context, adr, rdata); |
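
All of the new accessors above build the same WACS2 command word, and the layout can be read directly off the pwrap_writel() arguments: bit 31 selects a write, bit 30 selects the high half-word (32-bit slaves only), the register address sits in the upper half starting at bit 16, and the low 16 bits carry write data. A hypothetical helper that packs the word the same way:

    #include <linux/types.h>

    /* Illustrative only; for 16-bit slaves the callers pre-shift the
     * address (adr >> 1), matching the reg_stride of 2 used by
     * pwrap_regmap_config16.
     */
    static u32 wacs2_cmd(bool write, u32 msb, u32 adr, u32 wdata)
    {
            return (write ? (1U << 31) : 0) | (msb << 30) | (adr << 16) |
                   (wdata & 0xffff);
    }
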
| @@ -711,23 +939,75 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp) | |||
| 711 | return 0; | 939 | return 0; |
| 712 | } | 940 | } |
| 713 | 941 | ||
| 714 | static int pwrap_mt8135_init_reg_clock(struct pmic_wrapper *wrp) | 942 | static int pwrap_init_dual_io(struct pmic_wrapper *wrp) |
| 715 | { | 943 | { |
| 716 | pwrap_writel(wrp, 0x4, PWRAP_CSHEXT); | 944 | int ret; |
| 717 | pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE); | 945 | u32 rdata; |
| 718 | pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ); | 946 | |
| 719 | pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START); | 947 | /* Enable dual IO mode */ |
| 720 | pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END); | 948 | pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1); |
| 949 | |||
| 950 | /* Check IDLE & INIT_DONE in advance */ | ||
| 951 | ret = pwrap_wait_for_state(wrp, | ||
| 952 | pwrap_is_fsm_idle_and_sync_idle); | ||
| 953 | if (ret) { | ||
| 954 | dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret); | ||
| 955 | return ret; | ||
| 956 | } | ||
| 957 | |||
| 958 | pwrap_writel(wrp, 1, PWRAP_DIO_EN); | ||
| 959 | |||
| 960 | /* Read Test */ | ||
| 961 | pwrap_read(wrp, | ||
| 962 | wrp->slave->dew_regs[PWRAP_DEW_READ_TEST], &rdata); | ||
| 963 | if (rdata != PWRAP_DEW_READ_TEST_VAL) { | ||
| 964 | dev_err(wrp->dev, | ||
| 965 | "Read failed on DIO mode: 0x%04x!=0x%04x\n", | ||
| 966 | PWRAP_DEW_READ_TEST_VAL, rdata); | ||
| 967 | return -EFAULT; | ||
| 968 | } | ||
| 721 | 969 | ||
| 722 | return 0; | 970 | return 0; |
| 723 | } | 971 | } |
| 724 | 972 | ||
| 725 | static int pwrap_mt8173_init_reg_clock(struct pmic_wrapper *wrp) | 973 | /* |
| 974 | * pwrap_init_chip_select_ext is used to configure CS extension time for each | ||
| 975 | * phase during data transactions on the pwrap bus. | ||
| 976 | */ | ||
| 977 | static void pwrap_init_chip_select_ext(struct pmic_wrapper *wrp, u8 hext_write, | ||
| 978 | u8 hext_read, u8 lext_start, | ||
| 979 | u8 lext_end) | ||
| 726 | { | 980 | { |
| 727 | pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE); | 981 | /* |
| 728 | pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ); | 982 | * After a write or read transaction finishes, CS high time is |
| 729 | pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START); | 983 | * extended to at least xT of BUS CLK, as hext_write and hext_read |
| 730 | pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END); | 984 | * specify respectively. |
| 985 | */ | ||
| 986 | pwrap_writel(wrp, hext_write, PWRAP_CSHEXT_WRITE); | ||
| 987 | pwrap_writel(wrp, hext_read, PWRAP_CSHEXT_READ); | ||
| 988 | |||
| 989 | /* | ||
| 990 | * CS low time after a CSL and before a CSH command is extended to | ||
| 991 | * at least xT of BUS CLK, as lext_start and lext_end specify | ||
| 992 | * respectively. | ||
| 993 | */ | ||
| 994 | pwrap_writel(wrp, lext_start, PWRAP_CSLEXT_START); | ||
| 995 | pwrap_writel(wrp, lext_end, PWRAP_CSLEXT_END); | ||
| 996 | } | ||
| 997 | |||
| 998 | static int pwrap_common_init_reg_clock(struct pmic_wrapper *wrp) | ||
| 999 | { | ||
| 1000 | switch (wrp->master->type) { | ||
| 1001 | case PWRAP_MT8173: | ||
| 1002 | pwrap_init_chip_select_ext(wrp, 0, 4, 2, 2); | ||
| 1003 | break; | ||
| 1004 | case PWRAP_MT8135: | ||
| 1005 | pwrap_writel(wrp, 0x4, PWRAP_CSHEXT); | ||
| 1006 | pwrap_init_chip_select_ext(wrp, 0, 4, 0, 0); | ||
| 1007 | break; | ||
| 1008 | default: | ||
| 1009 | break; | ||
| 1010 | } | ||
| 731 | 1011 | ||
| 732 | return 0; | 1012 | return 0; |
| 733 | } | 1013 | } |
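
pwrap_init_chip_select_ext() above simply fans its four arguments out to the four CS timing registers, so the per-SoC write sequences collapse into one call. For example, the MT8173 values that used to be written one register at a time now become a single call in pwrap_common_init_reg_clock():

    /* Old MT8173 sequence (kept here as a comment for comparison):
     *     pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
     *     pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
     *     pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
     *     pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
     * New equivalent:
     */
    pwrap_init_chip_select_ext(wrp, 0, 4, 2, 2);
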
| @@ -737,20 +1017,16 @@ static int pwrap_mt2701_init_reg_clock(struct pmic_wrapper *wrp) | |||
| 737 | switch (wrp->slave->type) { | 1017 | switch (wrp->slave->type) { |
| 738 | case PMIC_MT6397: | 1018 | case PMIC_MT6397: |
| 739 | pwrap_writel(wrp, 0xc, PWRAP_RDDMY); | 1019 | pwrap_writel(wrp, 0xc, PWRAP_RDDMY); |
| 740 | pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE); | 1020 | pwrap_init_chip_select_ext(wrp, 4, 0, 2, 2); |
| 741 | pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_READ); | ||
| 742 | pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START); | ||
| 743 | pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END); | ||
| 744 | break; | 1021 | break; |
| 745 | 1022 | ||
| 746 | case PMIC_MT6323: | 1023 | case PMIC_MT6323: |
| 747 | pwrap_writel(wrp, 0x8, PWRAP_RDDMY); | 1024 | pwrap_writel(wrp, 0x8, PWRAP_RDDMY); |
| 748 | pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_RDDMY_NO], | 1025 | pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_RDDMY_NO], |
| 749 | 0x8); | 1026 | 0x8); |
| 750 | pwrap_writel(wrp, 0x5, PWRAP_CSHEXT_WRITE); | 1027 | pwrap_init_chip_select_ext(wrp, 5, 0, 2, 2); |
| 751 | pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_READ); | 1028 | break; |
| 752 | pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START); | 1029 | default: |
| 753 | pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END); | ||
| 754 | break; | 1030 | break; |
| 755 | } | 1031 | } |
| 756 | 1032 | ||
| @@ -794,6 +1070,9 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp) | |||
| 794 | case PWRAP_MT8173: | 1070 | case PWRAP_MT8173: |
| 795 | pwrap_writel(wrp, 1, PWRAP_CIPHER_EN); | 1071 | pwrap_writel(wrp, 1, PWRAP_CIPHER_EN); |
| 796 | break; | 1072 | break; |
| 1073 | case PWRAP_MT7622: | ||
| 1074 | pwrap_writel(wrp, 0, PWRAP_CIPHER_EN); | ||
| 1075 | break; | ||
| 797 | } | 1076 | } |
| 798 | 1077 | ||
| 799 | /* Config cipher mode @PMIC */ | 1078 | /* Config cipher mode @PMIC */ |
| @@ -815,6 +1094,8 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp) | |||
| 815 | pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_EN], | 1094 | pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_EN], |
| 816 | 0x1); | 1095 | 0x1); |
| 817 | break; | 1096 | break; |
| 1097 | default: | ||
| 1098 | break; | ||
| 818 | } | 1099 | } |
| 819 | 1100 | ||
| 820 | /* wait for cipher data ready@AP */ | 1101 | /* wait for cipher data ready@AP */ |
| @@ -827,7 +1108,8 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp) | |||
| 827 | /* wait for cipher data ready@PMIC */ | 1108 | /* wait for cipher data ready@PMIC */ |
| 828 | ret = pwrap_wait_for_state(wrp, pwrap_is_pmic_cipher_ready); | 1109 | ret = pwrap_wait_for_state(wrp, pwrap_is_pmic_cipher_ready); |
| 829 | if (ret) { | 1110 | if (ret) { |
| 830 | dev_err(wrp->dev, "timeout waiting for cipher data ready@PMIC\n"); | 1111 | dev_err(wrp->dev, |
| 1112 | "timeout waiting for cipher data ready@PMIC\n"); | ||
| 831 | return ret; | 1113 | return ret; |
| 832 | } | 1114 | } |
| 833 | 1115 | ||
| @@ -854,6 +1136,30 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp) | |||
| 854 | return 0; | 1136 | return 0; |
| 855 | } | 1137 | } |
| 856 | 1138 | ||
| 1139 | static int pwrap_init_security(struct pmic_wrapper *wrp) | ||
| 1140 | { | ||
| 1141 | int ret; | ||
| 1142 | |||
| 1143 | /* Enable encryption */ | ||
| 1144 | ret = pwrap_init_cipher(wrp); | ||
| 1145 | if (ret) | ||
| 1146 | return ret; | ||
| 1147 | |||
| 1148 | /* Signature checking - using CRC */ | ||
| 1149 | if (pwrap_write(wrp, | ||
| 1150 | wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1)) | ||
| 1151 | return -EFAULT; | ||
| 1152 | |||
| 1153 | pwrap_writel(wrp, 0x1, PWRAP_CRC_EN); | ||
| 1154 | pwrap_writel(wrp, 0x0, PWRAP_SIG_MODE); | ||
| 1155 | pwrap_writel(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_VAL], | ||
| 1156 | PWRAP_SIG_ADR); | ||
| 1157 | pwrap_writel(wrp, | ||
| 1158 | wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN); | ||
| 1159 | |||
| 1160 | return 0; | ||
| 1161 | } | ||
| 1162 | |||
| 857 | static int pwrap_mt8135_init_soc_specific(struct pmic_wrapper *wrp) | 1163 | static int pwrap_mt8135_init_soc_specific(struct pmic_wrapper *wrp) |
| 858 | { | 1164 | { |
| 859 | /* enable pwrap events and pwrap bridge in AP side */ | 1165 | /* enable pwrap events and pwrap bridge in AP side */ |
| @@ -911,10 +1217,18 @@ static int pwrap_mt2701_init_soc_specific(struct pmic_wrapper *wrp) | |||
| 911 | return 0; | 1217 | return 0; |
| 912 | } | 1218 | } |
| 913 | 1219 | ||
| 1220 | static int pwrap_mt7622_init_soc_specific(struct pmic_wrapper *wrp) | ||
| 1221 | { | ||
| 1222 | pwrap_writel(wrp, 0, PWRAP_STAUPD_PRD); | ||
| 1223 | /* enable 2wire SPI master */ | ||
| 1224 | pwrap_writel(wrp, 0x8000000, PWRAP_SPI2_CTRL); | ||
| 1225 | |||
| 1226 | return 0; | ||
| 1227 | } | ||
| 1228 | |||
| 914 | static int pwrap_init(struct pmic_wrapper *wrp) | 1229 | static int pwrap_init(struct pmic_wrapper *wrp) |
| 915 | { | 1230 | { |
| 916 | int ret; | 1231 | int ret; |
| 917 | u32 rdata; | ||
| 918 | 1232 | ||
| 919 | reset_control_reset(wrp->rstc); | 1233 | reset_control_reset(wrp->rstc); |
| 920 | if (wrp->rstc_bridge) | 1234 | if (wrp->rstc_bridge) |
| @@ -926,10 +1240,12 @@ static int pwrap_init(struct pmic_wrapper *wrp) | |||
| 926 | pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD); | 1240 | pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD); |
| 927 | } | 1241 | } |
| 928 | 1242 | ||
| 929 | /* Reset SPI slave */ | 1243 | if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SPI)) { |
| 930 | ret = pwrap_reset_spislave(wrp); | 1244 | /* Reset SPI slave */ |
| 931 | if (ret) | 1245 | ret = pwrap_reset_spislave(wrp); |
| 932 | return ret; | 1246 | if (ret) |
| 1247 | return ret; | ||
| 1248 | } | ||
| 933 | 1249 | ||
| 934 | pwrap_writel(wrp, 1, PWRAP_WRAP_EN); | 1250 | pwrap_writel(wrp, 1, PWRAP_WRAP_EN); |
| 935 | 1251 | ||
| @@ -941,45 +1257,26 @@ static int pwrap_init(struct pmic_wrapper *wrp) | |||
| 941 | if (ret) | 1257 | if (ret) |
| 942 | return ret; | 1258 | return ret; |
| 943 | 1259 | ||
| 944 | /* Setup serial input delay */ | 1260 | if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SPI)) { |
| 945 | ret = pwrap_init_sidly(wrp); | 1261 | /* Setup serial input delay */ |
| 946 | if (ret) | 1262 | ret = pwrap_init_sidly(wrp); |
| 947 | return ret; | 1263 | if (ret) |
| 948 | 1264 | return ret; | |
| 949 | /* Enable dual IO mode */ | ||
| 950 | pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1); | ||
| 951 | |||
| 952 | /* Check IDLE & INIT_DONE in advance */ | ||
| 953 | ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle); | ||
| 954 | if (ret) { | ||
| 955 | dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret); | ||
| 956 | return ret; | ||
| 957 | } | 1265 | } |
| 958 | 1266 | ||
| 959 | pwrap_writel(wrp, 1, PWRAP_DIO_EN); | 1267 | if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_DUALIO)) { |
| 960 | 1268 | /* Enable dual I/O mode */ | |
| 961 | /* Read Test */ | 1269 | ret = pwrap_init_dual_io(wrp); |
| 962 | pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_READ_TEST], &rdata); | 1270 | if (ret) |
| 963 | if (rdata != PWRAP_DEW_READ_TEST_VAL) { | 1271 | return ret; |
| 964 | dev_err(wrp->dev, "Read test failed after switch to DIO mode: 0x%04x != 0x%04x\n", | ||
| 965 | PWRAP_DEW_READ_TEST_VAL, rdata); | ||
| 966 | return -EFAULT; | ||
| 967 | } | 1272 | } |
| 968 | 1273 | ||
| 969 | /* Enable encryption */ | 1274 | if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SECURITY)) { |
| 970 | ret = pwrap_init_cipher(wrp); | 1275 | /* Enable security on bus */ |
| 971 | if (ret) | 1276 | ret = pwrap_init_security(wrp); |
| 972 | return ret; | 1277 | if (ret) |
| 973 | 1278 | return ret; | |
| 974 | /* Signature checking - using CRC */ | 1279 | } |
| 975 | if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1)) | ||
| 976 | return -EFAULT; | ||
| 977 | |||
| 978 | pwrap_writel(wrp, 0x1, PWRAP_CRC_EN); | ||
| 979 | pwrap_writel(wrp, 0x0, PWRAP_SIG_MODE); | ||
| 980 | pwrap_writel(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_VAL], | ||
| 981 | PWRAP_SIG_ADR); | ||
| 982 | pwrap_writel(wrp, wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN); | ||
| 983 | 1280 | ||
| 984 | if (wrp->master->type == PWRAP_MT8135) | 1281 | if (wrp->master->type == PWRAP_MT8135) |
| 985 | pwrap_writel(wrp, 0x7, PWRAP_RRARB_EN); | 1282 | pwrap_writel(wrp, 0x7, PWRAP_RRARB_EN); |
| @@ -1023,7 +1320,7 @@ static irqreturn_t pwrap_interrupt(int irqno, void *dev_id) | |||
| 1023 | return IRQ_HANDLED; | 1320 | return IRQ_HANDLED; |
| 1024 | } | 1321 | } |
| 1025 | 1322 | ||
| 1026 | static const struct regmap_config pwrap_regmap_config = { | 1323 | static const struct regmap_config pwrap_regmap_config16 = { |
| 1027 | .reg_bits = 16, | 1324 | .reg_bits = 16, |
| 1028 | .val_bits = 16, | 1325 | .val_bits = 16, |
| 1029 | .reg_stride = 2, | 1326 | .reg_stride = 2, |
| @@ -1032,14 +1329,42 @@ static const struct regmap_config pwrap_regmap_config = { | |||
| 1032 | .max_register = 0xffff, | 1329 | .max_register = 0xffff, |
| 1033 | }; | 1330 | }; |
| 1034 | 1331 | ||
| 1332 | static const struct regmap_config pwrap_regmap_config32 = { | ||
| 1333 | .reg_bits = 32, | ||
| 1334 | .val_bits = 32, | ||
| 1335 | .reg_stride = 4, | ||
| 1336 | .reg_read = pwrap_regmap_read, | ||
| 1337 | .reg_write = pwrap_regmap_write, | ||
| 1338 | .max_register = 0xffff, | ||
| 1339 | }; | ||
| 1340 | |||
| 1035 | static const struct pwrap_slv_type pmic_mt6323 = { | 1341 | static const struct pwrap_slv_type pmic_mt6323 = { |
| 1036 | .dew_regs = mt6323_regs, | 1342 | .dew_regs = mt6323_regs, |
| 1037 | .type = PMIC_MT6323, | 1343 | .type = PMIC_MT6323, |
| 1344 | .regmap = &pwrap_regmap_config16, | ||
| 1345 | .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO | | ||
| 1346 | PWRAP_SLV_CAP_SECURITY, | ||
| 1347 | .pwrap_read = pwrap_read16, | ||
| 1348 | .pwrap_write = pwrap_write16, | ||
| 1349 | }; | ||
| 1350 | |||
| 1351 | static const struct pwrap_slv_type pmic_mt6380 = { | ||
| 1352 | .dew_regs = NULL, | ||
| 1353 | .type = PMIC_MT6380, | ||
| 1354 | .regmap = &pwrap_regmap_config32, | ||
| 1355 | .caps = 0, | ||
| 1356 | .pwrap_read = pwrap_read32, | ||
| 1357 | .pwrap_write = pwrap_write32, | ||
| 1038 | }; | 1358 | }; |
| 1039 | 1359 | ||
| 1040 | static const struct pwrap_slv_type pmic_mt6397 = { | 1360 | static const struct pwrap_slv_type pmic_mt6397 = { |
| 1041 | .dew_regs = mt6397_regs, | 1361 | .dew_regs = mt6397_regs, |
| 1042 | .type = PMIC_MT6397, | 1362 | .type = PMIC_MT6397, |
| 1363 | .regmap = &pwrap_regmap_config16, | ||
| 1364 | .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO | | ||
| 1365 | PWRAP_SLV_CAP_SECURITY, | ||
| 1366 | .pwrap_read = pwrap_read16, | ||
| 1367 | .pwrap_write = pwrap_write16, | ||
| 1043 | }; | 1368 | }; |
| 1044 | 1369 | ||
| 1045 | static const struct of_device_id of_slave_match_tbl[] = { | 1370 | static const struct of_device_id of_slave_match_tbl[] = { |
| @@ -1047,6 +1372,12 @@ static const struct of_device_id of_slave_match_tbl[] = { | |||
| 1047 | .compatible = "mediatek,mt6323", | 1372 | .compatible = "mediatek,mt6323", |
| 1048 | .data = &pmic_mt6323, | 1373 | .data = &pmic_mt6323, |
| 1049 | }, { | 1374 | }, { |
| 1375 | /* The MT6380 PMIC only implements a regulator, so we bind it | ||
| 1376 | * directly instead of using an MFD. | ||
| 1377 | */ | ||
| 1378 | .compatible = "mediatek,mt6380-regulator", | ||
| 1379 | .data = &pmic_mt6380, | ||
| 1380 | }, { | ||
| 1050 | .compatible = "mediatek,mt6397", | 1381 | .compatible = "mediatek,mt6397", |
| 1051 | .data = &pmic_mt6397, | 1382 | .data = &pmic_mt6397, |
| 1052 | }, { | 1383 | }, { |
| @@ -1067,6 +1398,18 @@ static const struct pmic_wrapper_type pwrap_mt2701 = { | |||
| 1067 | .init_soc_specific = pwrap_mt2701_init_soc_specific, | 1398 | .init_soc_specific = pwrap_mt2701_init_soc_specific, |
| 1068 | }; | 1399 | }; |
| 1069 | 1400 | ||
| 1401 | static const struct pmic_wrapper_type pwrap_mt7622 = { | ||
| 1402 | .regs = mt7622_regs, | ||
| 1403 | .type = PWRAP_MT7622, | ||
| 1404 | .arb_en_all = 0xff, | ||
| 1405 | .int_en_all = ~(u32)BIT(31), | ||
| 1406 | .spi_w = PWRAP_MAN_CMD_SPI_WRITE, | ||
| 1407 | .wdt_src = PWRAP_WDT_SRC_MASK_ALL, | ||
| 1408 | .has_bridge = 0, | ||
| 1409 | .init_reg_clock = pwrap_common_init_reg_clock, | ||
| 1410 | .init_soc_specific = pwrap_mt7622_init_soc_specific, | ||
| 1411 | }; | ||
| 1412 | |||
| 1070 | static const struct pmic_wrapper_type pwrap_mt8135 = { | 1413 | static const struct pmic_wrapper_type pwrap_mt8135 = { |
| 1071 | .regs = mt8135_regs, | 1414 | .regs = mt8135_regs, |
| 1072 | .type = PWRAP_MT8135, | 1415 | .type = PWRAP_MT8135, |
| @@ -1075,7 +1418,7 @@ static const struct pmic_wrapper_type pwrap_mt8135 = { | |||
| 1075 | .spi_w = PWRAP_MAN_CMD_SPI_WRITE, | 1418 | .spi_w = PWRAP_MAN_CMD_SPI_WRITE, |
| 1076 | .wdt_src = PWRAP_WDT_SRC_MASK_ALL, | 1419 | .wdt_src = PWRAP_WDT_SRC_MASK_ALL, |
| 1077 | .has_bridge = 1, | 1420 | .has_bridge = 1, |
| 1078 | .init_reg_clock = pwrap_mt8135_init_reg_clock, | 1421 | .init_reg_clock = pwrap_common_init_reg_clock, |
| 1079 | .init_soc_specific = pwrap_mt8135_init_soc_specific, | 1422 | .init_soc_specific = pwrap_mt8135_init_soc_specific, |
| 1080 | }; | 1423 | }; |
| 1081 | 1424 | ||
| @@ -1087,7 +1430,7 @@ static const struct pmic_wrapper_type pwrap_mt8173 = { | |||
| 1087 | .spi_w = PWRAP_MAN_CMD_SPI_WRITE, | 1430 | .spi_w = PWRAP_MAN_CMD_SPI_WRITE, |
| 1088 | .wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD, | 1431 | .wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD, |
| 1089 | .has_bridge = 0, | 1432 | .has_bridge = 0, |
| 1090 | .init_reg_clock = pwrap_mt8173_init_reg_clock, | 1433 | .init_reg_clock = pwrap_common_init_reg_clock, |
| 1091 | .init_soc_specific = pwrap_mt8173_init_soc_specific, | 1434 | .init_soc_specific = pwrap_mt8173_init_soc_specific, |
| 1092 | }; | 1435 | }; |
| 1093 | 1436 | ||
| @@ -1096,6 +1439,9 @@ static const struct of_device_id of_pwrap_match_tbl[] = { | |||
| 1096 | .compatible = "mediatek,mt2701-pwrap", | 1439 | .compatible = "mediatek,mt2701-pwrap", |
| 1097 | .data = &pwrap_mt2701, | 1440 | .data = &pwrap_mt2701, |
| 1098 | }, { | 1441 | }, { |
| 1442 | .compatible = "mediatek,mt7622-pwrap", | ||
| 1443 | .data = &pwrap_mt7622, | ||
| 1444 | }, { | ||
| 1099 | .compatible = "mediatek,mt8135-pwrap", | 1445 | .compatible = "mediatek,mt8135-pwrap", |
| 1100 | .data = &pwrap_mt8135, | 1446 | .data = &pwrap_mt8135, |
| 1101 | }, { | 1447 | }, { |
| @@ -1159,23 +1505,27 @@ static int pwrap_probe(struct platform_device *pdev) | |||
| 1159 | if (IS_ERR(wrp->bridge_base)) | 1505 | if (IS_ERR(wrp->bridge_base)) |
| 1160 | return PTR_ERR(wrp->bridge_base); | 1506 | return PTR_ERR(wrp->bridge_base); |
| 1161 | 1507 | ||
| 1162 | wrp->rstc_bridge = devm_reset_control_get(wrp->dev, "pwrap-bridge"); | 1508 | wrp->rstc_bridge = devm_reset_control_get(wrp->dev, |
| 1509 | "pwrap-bridge"); | ||
| 1163 | if (IS_ERR(wrp->rstc_bridge)) { | 1510 | if (IS_ERR(wrp->rstc_bridge)) { |
| 1164 | ret = PTR_ERR(wrp->rstc_bridge); | 1511 | ret = PTR_ERR(wrp->rstc_bridge); |
| 1165 | dev_dbg(wrp->dev, "cannot get pwrap-bridge reset: %d\n", ret); | 1512 | dev_dbg(wrp->dev, |
| 1513 | "cannot get pwrap-bridge reset: %d\n", ret); | ||
| 1166 | return ret; | 1514 | return ret; |
| 1167 | } | 1515 | } |
| 1168 | } | 1516 | } |
| 1169 | 1517 | ||
| 1170 | wrp->clk_spi = devm_clk_get(wrp->dev, "spi"); | 1518 | wrp->clk_spi = devm_clk_get(wrp->dev, "spi"); |
| 1171 | if (IS_ERR(wrp->clk_spi)) { | 1519 | if (IS_ERR(wrp->clk_spi)) { |
| 1172 | dev_dbg(wrp->dev, "failed to get clock: %ld\n", PTR_ERR(wrp->clk_spi)); | 1520 | dev_dbg(wrp->dev, "failed to get clock: %ld\n", |
| 1521 | PTR_ERR(wrp->clk_spi)); | ||
| 1173 | return PTR_ERR(wrp->clk_spi); | 1522 | return PTR_ERR(wrp->clk_spi); |
| 1174 | } | 1523 | } |
| 1175 | 1524 | ||
| 1176 | wrp->clk_wrap = devm_clk_get(wrp->dev, "wrap"); | 1525 | wrp->clk_wrap = devm_clk_get(wrp->dev, "wrap"); |
| 1177 | if (IS_ERR(wrp->clk_wrap)) { | 1526 | if (IS_ERR(wrp->clk_wrap)) { |
| 1178 | dev_dbg(wrp->dev, "failed to get clock: %ld\n", PTR_ERR(wrp->clk_wrap)); | 1527 | dev_dbg(wrp->dev, "failed to get clock: %ld\n", |
| 1528 | PTR_ERR(wrp->clk_wrap)); | ||
| 1179 | return PTR_ERR(wrp->clk_wrap); | 1529 | return PTR_ERR(wrp->clk_wrap); |
| 1180 | } | 1530 | } |
| 1181 | 1531 | ||
| @@ -1220,12 +1570,13 @@ static int pwrap_probe(struct platform_device *pdev) | |||
| 1220 | pwrap_writel(wrp, wrp->master->int_en_all, PWRAP_INT_EN); | 1570 | pwrap_writel(wrp, wrp->master->int_en_all, PWRAP_INT_EN); |
| 1221 | 1571 | ||
| 1222 | irq = platform_get_irq(pdev, 0); | 1572 | irq = platform_get_irq(pdev, 0); |
| 1223 | ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, IRQF_TRIGGER_HIGH, | 1573 | ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, |
| 1224 | "mt-pmic-pwrap", wrp); | 1574 | IRQF_TRIGGER_HIGH, |
| 1575 | "mt-pmic-pwrap", wrp); | ||
| 1225 | if (ret) | 1576 | if (ret) |
| 1226 | goto err_out2; | 1577 | goto err_out2; |
| 1227 | 1578 | ||
| 1228 | wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, &pwrap_regmap_config); | 1579 | wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regmap); |
| 1229 | if (IS_ERR(wrp->regmap)) { | 1580 | if (IS_ERR(wrp->regmap)) { |
| 1230 | ret = PTR_ERR(wrp->regmap); | 1581 | ret = PTR_ERR(wrp->regmap); |
| 1231 | goto err_out2; | 1582 | goto err_out2; |
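
The slave capability bits introduced above let pwrap_init() skip whole phases per PMIC: HAS_CAP() only reports true when every bit of the requested mask is present, and the MT6380 entry declares caps = 0, so the SPI reset, dual-I/O and security phases are all bypassed for it. A compressed sketch of the gating (not a literal copy of the driver code):

    /* Hypothetical helper condensing the capability checks in pwrap_init(). */
    static int init_slave_features(struct pmic_wrapper *wrp)
    {
            const u32 caps = wrp->slave->caps;  /* SPI|DUALIO|SECURITY for MT6323/MT6397 */
            int ret = 0;

            if (HAS_CAP(caps, PWRAP_SLV_CAP_SPI))
                    ret = pwrap_reset_spislave(wrp);
            if (!ret && HAS_CAP(caps, PWRAP_SLV_CAP_DUALIO))
                    ret = pwrap_init_dual_io(wrp);
            if (!ret && HAS_CAP(caps, PWRAP_SLV_CAP_SECURITY))
                    ret = pwrap_init_security(wrp);

            return ret;   /* with caps == 0 (MT6380) nothing above runs */
    }

Note that because HAS_CAP() masks and compares, HAS_CAP(caps, A | B) is true only when both A and B are set, unlike a plain (caps & (A | B)) test.
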
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index b00bccddcd3b..b81374bb6713 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig | |||
| @@ -35,6 +35,17 @@ config QCOM_PM | |||
| 35 | modes. It interfaces with various system drivers to put the cores in | 35 | modes. It interfaces with various system drivers to put the cores in |
| 36 | low power modes. | 36 | low power modes. |
| 37 | 37 | ||
| 38 | config QCOM_RMTFS_MEM | ||
| 39 | tristate "Qualcomm Remote Filesystem memory driver" | ||
| 40 | depends on ARCH_QCOM | ||
| 41 | help | ||
| 42 | The Qualcomm remote filesystem memory driver is used for allocating | ||
| 43 | and exposing regions of shared memory with remote processors for the | ||
| 44 | purpose of exchanging sector-data between the remote filesystem | ||
| 45 | service and its clients. | ||
| 46 | |||
| 47 | Say y here if you intend to boot the modem remoteproc. | ||
| 48 | |||
| 38 | config QCOM_SMEM | 49 | config QCOM_SMEM |
| 39 | tristate "Qualcomm Shared Memory Manager (SMEM)" | 50 | tristate "Qualcomm Shared Memory Manager (SMEM)" |
| 40 | depends on ARCH_QCOM | 51 | depends on ARCH_QCOM |
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index fab44666b214..40c56f67e94a 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile | |||
| @@ -3,6 +3,7 @@ obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o | |||
| 3 | obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o | 3 | obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o |
| 4 | obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o | 4 | obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o |
| 5 | obj-$(CONFIG_QCOM_PM) += spm.o | 5 | obj-$(CONFIG_QCOM_PM) += spm.o |
| 6 | obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o | ||
| 6 | obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o | 7 | obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o |
| 7 | obj-$(CONFIG_QCOM_SMEM) += smem.o | 8 | obj-$(CONFIG_QCOM_SMEM) += smem.o |
| 8 | obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o | 9 | obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o |
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c new file mode 100644 index 000000000000..ce35ff748adf --- /dev/null +++ b/drivers/soc/qcom/rmtfs_mem.c | |||
| @@ -0,0 +1,269 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2017 Linaro Ltd. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 and | ||
| 6 | * only version 2 as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/cdev.h> | ||
| 16 | #include <linux/err.h> | ||
| 17 | #include <linux/module.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | #include <linux/of.h> | ||
| 20 | #include <linux/of_reserved_mem.h> | ||
| 21 | #include <linux/dma-mapping.h> | ||
| 22 | #include <linux/slab.h> | ||
| 23 | #include <linux/uaccess.h> | ||
| 24 | #include <linux/io.h> | ||
| 25 | #include <linux/qcom_scm.h> | ||
| 26 | |||
| 27 | #define QCOM_RMTFS_MEM_DEV_MAX (MINORMASK + 1) | ||
| 28 | |||
| 29 | static dev_t qcom_rmtfs_mem_major; | ||
| 30 | |||
| 31 | struct qcom_rmtfs_mem { | ||
| 32 | struct device dev; | ||
| 33 | struct cdev cdev; | ||
| 34 | |||
| 35 | void *base; | ||
| 36 | phys_addr_t addr; | ||
| 37 | phys_addr_t size; | ||
| 38 | |||
| 39 | unsigned int client_id; | ||
| 40 | }; | ||
| 41 | |||
| 42 | static ssize_t qcom_rmtfs_mem_show(struct device *dev, | ||
| 43 | struct device_attribute *attr, | ||
| 44 | char *buf); | ||
| 45 | |||
| 46 | static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL); | ||
| 47 | static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL); | ||
| 48 | static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL); | ||
| 49 | |||
| 50 | static ssize_t qcom_rmtfs_mem_show(struct device *dev, | ||
| 51 | struct device_attribute *attr, | ||
| 52 | char *buf) | ||
| 53 | { | ||
| 54 | struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev, | ||
| 55 | struct qcom_rmtfs_mem, | ||
| 56 | dev); | ||
| 57 | |||
| 58 | if (attr == &dev_attr_phys_addr) | ||
| 59 | return sprintf(buf, "%pa\n", &rmtfs_mem->addr); | ||
| 60 | if (attr == &dev_attr_size) | ||
| 61 | return sprintf(buf, "%pa\n", &rmtfs_mem->size); | ||
| 62 | if (attr == &dev_attr_client_id) | ||
| 63 | return sprintf(buf, "%d\n", rmtfs_mem->client_id); | ||
| 64 | |||
| 65 | return -EINVAL; | ||
| 66 | } | ||
| 67 | |||
| 68 | static struct attribute *qcom_rmtfs_mem_attrs[] = { | ||
| 69 | &dev_attr_phys_addr.attr, | ||
| 70 | &dev_attr_size.attr, | ||
| 71 | &dev_attr_client_id.attr, | ||
| 72 | NULL | ||
| 73 | }; | ||
| 74 | ATTRIBUTE_GROUPS(qcom_rmtfs_mem); | ||
| 75 | |||
| 76 | static int qcom_rmtfs_mem_open(struct inode *inode, struct file *filp) | ||
| 77 | { | ||
| 78 | struct qcom_rmtfs_mem *rmtfs_mem = container_of(inode->i_cdev, | ||
| 79 | struct qcom_rmtfs_mem, | ||
| 80 | cdev); | ||
| 81 | |||
| 82 | get_device(&rmtfs_mem->dev); | ||
| 83 | filp->private_data = rmtfs_mem; | ||
| 84 | |||
| 85 | return 0; | ||
| 86 | } | ||
| 87 | static ssize_t qcom_rmtfs_mem_read(struct file *filp, | ||
| 88 | char __user *buf, size_t count, loff_t *f_pos) | ||
| 89 | { | ||
| 90 | struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data; | ||
| 91 | |||
| 92 | if (*f_pos >= rmtfs_mem->size) | ||
| 93 | return 0; | ||
| 94 | |||
| 95 | if (*f_pos + count >= rmtfs_mem->size) | ||
| 96 | count = rmtfs_mem->size - *f_pos; | ||
| 97 | |||
| 98 | if (copy_to_user(buf, rmtfs_mem->base + *f_pos, count)) | ||
| 99 | return -EFAULT; | ||
| 100 | |||
| 101 | *f_pos += count; | ||
| 102 | return count; | ||
| 103 | } | ||
| 104 | |||
| 105 | static ssize_t qcom_rmtfs_mem_write(struct file *filp, | ||
| 106 | const char __user *buf, size_t count, | ||
| 107 | loff_t *f_pos) | ||
| 108 | { | ||
| 109 | struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data; | ||
| 110 | |||
| 111 | if (*f_pos >= rmtfs_mem->size) | ||
| 112 | return 0; | ||
| 113 | |||
| 114 | if (*f_pos + count >= rmtfs_mem->size) | ||
| 115 | count = rmtfs_mem->size - *f_pos; | ||
| 116 | |||
| 117 | if (copy_from_user(rmtfs_mem->base + *f_pos, buf, count)) | ||
| 118 | return -EFAULT; | ||
| 119 | |||
| 120 | *f_pos += count; | ||
| 121 | return count; | ||
| 122 | } | ||
| 123 | |||
| 124 | static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp) | ||
| 125 | { | ||
| 126 | struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data; | ||
| 127 | |||
| 128 | put_device(&rmtfs_mem->dev); | ||
| 129 | |||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | |||
| 133 | static const struct file_operations qcom_rmtfs_mem_fops = { | ||
| 134 | .owner = THIS_MODULE, | ||
| 135 | .open = qcom_rmtfs_mem_open, | ||
| 136 | .read = qcom_rmtfs_mem_read, | ||
| 137 | .write = qcom_rmtfs_mem_write, | ||
| 138 | .release = qcom_rmtfs_mem_release, | ||
| 139 | .llseek = default_llseek, | ||
| 140 | }; | ||
| 141 | |||
| 142 | static void qcom_rmtfs_mem_release_device(struct device *dev) | ||
| 143 | { | ||
| 144 | struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev, | ||
| 145 | struct qcom_rmtfs_mem, | ||
| 146 | dev); | ||
| 147 | |||
| 148 | kfree(rmtfs_mem); | ||
| 149 | } | ||
| 150 | |||
| 151 | static int qcom_rmtfs_mem_probe(struct platform_device *pdev) | ||
| 152 | { | ||
| 153 | struct device_node *node = pdev->dev.of_node; | ||
| 154 | struct reserved_mem *rmem; | ||
| 155 | struct qcom_rmtfs_mem *rmtfs_mem; | ||
| 156 | u32 client_id; | ||
| 157 | int ret; | ||
| 158 | |||
| 159 | rmem = of_reserved_mem_lookup(node); | ||
| 160 | if (!rmem) { | ||
| 161 | dev_err(&pdev->dev, "failed to acquire memory region\n"); | ||
| 162 | return -EINVAL; | ||
| 163 | } | ||
| 164 | |||
| 165 | ret = of_property_read_u32(node, "qcom,client-id", &client_id); | ||
| 166 | if (ret) { | ||
| 167 | dev_err(&pdev->dev, "failed to parse \"qcom,client-id\"\n"); | ||
| 168 | return ret; | ||
| 169 | |||
| 170 | } | ||
| 171 | |||
| 172 | rmtfs_mem = kzalloc(sizeof(*rmtfs_mem), GFP_KERNEL); | ||
| 173 | if (!rmtfs_mem) | ||
| 174 | return -ENOMEM; | ||
| 175 | |||
| 176 | rmtfs_mem->addr = rmem->base; | ||
| 177 | rmtfs_mem->client_id = client_id; | ||
| 178 | rmtfs_mem->size = rmem->size; | ||
| 179 | |||
| 180 | device_initialize(&rmtfs_mem->dev); | ||
| 181 | rmtfs_mem->dev.parent = &pdev->dev; | ||
| 182 | rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups; | ||
| 183 | |||
| 184 | rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr, | ||
| 185 | rmtfs_mem->size, MEMREMAP_WC); | ||
| 186 | if (IS_ERR(rmtfs_mem->base)) { | ||
| 187 | dev_err(&pdev->dev, "failed to remap rmtfs_mem region\n"); | ||
| 188 | ret = PTR_ERR(rmtfs_mem->base); | ||
| 189 | goto put_device; | ||
| 190 | } | ||
| 191 | |||
| 192 | cdev_init(&rmtfs_mem->cdev, &qcom_rmtfs_mem_fops); | ||
| 193 | rmtfs_mem->cdev.owner = THIS_MODULE; | ||
| 194 | |||
| 195 | dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id); | ||
| 196 | rmtfs_mem->dev.id = client_id; | ||
| 197 | rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id); | ||
| 198 | |||
| 199 | ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev); | ||
| 200 | if (ret) { | ||
| 201 | dev_err(&pdev->dev, "failed to add cdev: %d\n", ret); | ||
| 202 | goto put_device; | ||
| 203 | } | ||
| 204 | |||
| 205 | rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device; | ||
| 206 | |||
| 207 | dev_set_drvdata(&pdev->dev, rmtfs_mem); | ||
| 208 | |||
| 209 | return 0; | ||
| 210 | |||
| 211 | put_device: | ||
| 212 | put_device(&rmtfs_mem->dev); | ||
| 213 | |||
| 214 | return ret; | ||
| 215 | } | ||
| 216 | |||
| 217 | static int qcom_rmtfs_mem_remove(struct platform_device *pdev) | ||
| 218 | { | ||
| 219 | struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev); | ||
| 220 | |||
| 221 | cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev); | ||
| 222 | put_device(&rmtfs_mem->dev); | ||
| 223 | |||
| 224 | return 0; | ||
| 225 | } | ||
| 226 | |||
| 227 | static const struct of_device_id qcom_rmtfs_mem_of_match[] = { | ||
| 228 | { .compatible = "qcom,rmtfs-mem" }, | ||
| 229 | {} | ||
| 230 | }; | ||
| 231 | MODULE_DEVICE_TABLE(of, qcom_rmtfs_mem_of_match); | ||
| 232 | |||
| 233 | static struct platform_driver qcom_rmtfs_mem_driver = { | ||
| 234 | .probe = qcom_rmtfs_mem_probe, | ||
| 235 | .remove = qcom_rmtfs_mem_remove, | ||
| 236 | .driver = { | ||
| 237 | .name = "qcom_rmtfs_mem", | ||
| 238 | .of_match_table = qcom_rmtfs_mem_of_match, | ||
| 239 | }, | ||
| 240 | }; | ||
| 241 | |||
| 242 | static int qcom_rmtfs_mem_init(void) | ||
| 243 | { | ||
| 244 | int ret; | ||
| 245 | |||
| 246 | ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0, | ||
| 247 | QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem"); | ||
| 248 | if (ret < 0) { | ||
| 249 | pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n"); | ||
| 250 | return ret; | ||
| 251 | } | ||
| 252 | |||
| 253 | ret = platform_driver_register(&qcom_rmtfs_mem_driver); | ||
| 254 | if (ret < 0) { | ||
| 255 | pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n"); | ||
| 256 | unregister_chrdev_region(qcom_rmtfs_mem_major, | ||
| 257 | QCOM_RMTFS_MEM_DEV_MAX); | ||
| 258 | } | ||
| 259 | |||
| 260 | return ret; | ||
| 261 | } | ||
| 262 | module_init(qcom_rmtfs_mem_init); | ||
| 263 | |||
| 264 | static void qcom_rmtfs_mem_exit(void) | ||
| 265 | { | ||
| 266 | platform_driver_unregister(&qcom_rmtfs_mem_driver); | ||
| 267 | unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX); | ||
| 268 | } | ||
| 269 | module_exit(qcom_rmtfs_mem_exit); | ||
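The read, write and llseek handlers above expose the reserved rmtfs region to userspace as an ordinary character device, bounded by rmtfs_mem->size. A minimal userspace sketch, assuming devtmpfs/udev creates a node matching the dev_set_name() pattern and that a client-id of 1 exists on the target (both assumptions for illustration, not part of this patch):

```c
/* Hypothetical helper: read the first bytes of the rmtfs region through
 * the new char device. The /dev path is assumed from dev_set_name()
 * ("qcom_rmtfs_mem%d") plus a client-id of 1. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	ssize_t n;
	int fd;

	fd = open("/dev/qcom_rmtfs_mem1", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Reads past rmtfs_mem->size are truncated by the handler above,
	 * so a short count near the end of the region is expected. */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("read %zd bytes from the rmtfs region\n", n);

	close(fd);
	return 0;
}
```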
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 18ec52f2078a..0b94d62fad2b 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c | |||
| @@ -52,8 +52,13 @@ | |||
| 52 | * | 52 | * |
| 53 | * Items in the non-cached region are allocated from the start of the partition | 53 | * Items in the non-cached region are allocated from the start of the partition |
| 54 | * while items in the cached region are allocated from the end. The free area | 54 | * while items in the cached region are allocated from the end. The free area |
| 55 | * is hence the region between the cached and non-cached offsets. | 55 | * is hence the region between the cached and non-cached offsets. The header of |
| 56 | * cached items comes after the data. | ||
| 56 | * | 57 | * |
| 58 | * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure | ||
| 59 | * for the global heap. A new global partition is created from the global heap | ||
| 60 | * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is | ||
| 61 | * set by the bootloader. | ||
| 57 | * | 62 | * |
| 58 | * To synchronize allocations in the shared memory heaps a remote spinlock must | 63 | * To synchronize allocations in the shared memory heaps a remote spinlock must |
| 59 | * be held - currently lock number 3 of the sfpb or tcsr is used for this on all | 64 | * be held - currently lock number 3 of the sfpb or tcsr is used for this on all |
| @@ -62,13 +67,13 @@ | |||
| 62 | */ | 67 | */ |
| 63 | 68 | ||
| 64 | /* | 69 | /* |
| 65 | * Item 3 of the global heap contains an array of versions for the various | 70 | * The version member of the smem header contains an array of versions for the |
| 66 | * software components in the SoC. We verify that the boot loader version is | 71 | * various software components in the SoC. We verify that the boot loader |
| 67 | * what the expected version (SMEM_EXPECTED_VERSION) as a sanity check. | 72 | * version is a valid version as a sanity check. |
| 68 | */ | 73 | */ |
| 69 | #define SMEM_ITEM_VERSION 3 | 74 | #define SMEM_MASTER_SBL_VERSION_INDEX 7 |
| 70 | #define SMEM_MASTER_SBL_VERSION_INDEX 7 | 75 | #define SMEM_GLOBAL_HEAP_VERSION 11 |
| 71 | #define SMEM_EXPECTED_VERSION 11 | 76 | #define SMEM_GLOBAL_PART_VERSION 12 |
| 72 | 77 | ||
| 73 | /* | 78 | /* |
| 74 | * The first 8 items are only to be allocated by the boot loader while | 79 | * The first 8 items are only to be allocated by the boot loader while |
| @@ -82,8 +87,11 @@ | |||
| 82 | /* Processor/host identifier for the application processor */ | 87 | /* Processor/host identifier for the application processor */ |
| 83 | #define SMEM_HOST_APPS 0 | 88 | #define SMEM_HOST_APPS 0 |
| 84 | 89 | ||
| 90 | /* Processor/host identifier for the global partition */ | ||
| 91 | #define SMEM_GLOBAL_HOST 0xfffe | ||
| 92 | |||
| 85 | /* Max number of processors/hosts in a system */ | 93 | /* Max number of processors/hosts in a system */ |
| 86 | #define SMEM_HOST_COUNT 9 | 94 | #define SMEM_HOST_COUNT 10 |
| 87 | 95 | ||
| 88 | /** | 96 | /** |
| 89 | * struct smem_proc_comm - proc_comm communication struct (legacy) | 97 | * struct smem_proc_comm - proc_comm communication struct (legacy) |
| @@ -140,6 +148,7 @@ struct smem_header { | |||
| 140 | * @flags: flags for the partition (currently unused) | 148 | * @flags: flags for the partition (currently unused) |
| 141 | * @host0: first processor/host with access to this partition | 149 | * @host0: first processor/host with access to this partition |
| 142 | * @host1: second processor/host with access to this partition | 150 | * @host1: second processor/host with access to this partition |
| 151 | * @cacheline: alignment for "cached" entries | ||
| 143 | * @reserved: reserved entries for later use | 152 | * @reserved: reserved entries for later use |
| 144 | */ | 153 | */ |
| 145 | struct smem_ptable_entry { | 154 | struct smem_ptable_entry { |
| @@ -148,7 +157,8 @@ struct smem_ptable_entry { | |||
| 148 | __le32 flags; | 157 | __le32 flags; |
| 149 | __le16 host0; | 158 | __le16 host0; |
| 150 | __le16 host1; | 159 | __le16 host1; |
| 151 | __le32 reserved[8]; | 160 | __le32 cacheline; |
| 161 | __le32 reserved[7]; | ||
| 152 | }; | 162 | }; |
| 153 | 163 | ||
| 154 | /** | 164 | /** |
| @@ -213,6 +223,24 @@ struct smem_private_entry { | |||
| 213 | #define SMEM_PRIVATE_CANARY 0xa5a5 | 223 | #define SMEM_PRIVATE_CANARY 0xa5a5 |
| 214 | 224 | ||
| 215 | /** | 225 | /** |
| 226 | * struct smem_info - smem region info located after the table of contents | ||
| 227 | * @magic: magic number, must be SMEM_INFO_MAGIC | ||
| 228 | * @size: size of the smem region | ||
| 229 | * @base_addr: base address of the smem region | ||
| 230 | * @reserved: reserved entry, currently unused | ||
| 231 | * @num_items: highest accepted item number | ||
| 232 | */ | ||
| 233 | struct smem_info { | ||
| 234 | u8 magic[4]; | ||
| 235 | __le32 size; | ||
| 236 | __le32 base_addr; | ||
| 237 | __le32 reserved; | ||
| 238 | __le16 num_items; | ||
| 239 | }; | ||
| 240 | |||
| 241 | static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */ | ||
| 242 | |||
| 243 | /** | ||
| 216 | * struct smem_region - representation of a chunk of memory used for smem | 244 | * struct smem_region - representation of a chunk of memory used for smem |
| 217 | * @aux_base: identifier of aux_mem base | 245 | * @aux_base: identifier of aux_mem base |
| 218 | * @virt_base: virtual base address of memory with this aux_mem identifier | 246 | * @virt_base: virtual base address of memory with this aux_mem identifier |
| @@ -228,8 +256,12 @@ struct smem_region { | |||
| 228 | * struct qcom_smem - device data for the smem device | 256 | * struct qcom_smem - device data for the smem device |
| 229 | * @dev: device pointer | 257 | * @dev: device pointer |
| 230 | * @hwlock: reference to a hwspinlock | 258 | * @hwlock: reference to a hwspinlock |
| 259 | * @global_partition: pointer to global partition when in use | ||
| 260 | * @global_cacheline: cacheline size for global partition | ||
| 231 | * @partitions: list of pointers to partitions affecting the current | 261 | * @partitions: list of pointers to partitions affecting the current |
| 232 | * processor/host | 262 | * processor/host |
| 263 | * @cacheline: list of cacheline sizes for each host | ||
| 264 | * @item_count: max accepted item number | ||
| 233 | * @num_regions: number of @regions | 265 | * @num_regions: number of @regions |
| 234 | * @regions: list of the memory regions defining the shared memory | 266 | * @regions: list of the memory regions defining the shared memory |
| 235 | */ | 267 | */ |
| @@ -238,21 +270,33 @@ struct qcom_smem { | |||
| 238 | 270 | ||
| 239 | struct hwspinlock *hwlock; | 271 | struct hwspinlock *hwlock; |
| 240 | 272 | ||
| 273 | struct smem_partition_header *global_partition; | ||
| 274 | size_t global_cacheline; | ||
| 241 | struct smem_partition_header *partitions[SMEM_HOST_COUNT]; | 275 | struct smem_partition_header *partitions[SMEM_HOST_COUNT]; |
| 276 | size_t cacheline[SMEM_HOST_COUNT]; | ||
| 277 | u32 item_count; | ||
| 242 | 278 | ||
| 243 | unsigned num_regions; | 279 | unsigned num_regions; |
| 244 | struct smem_region regions[0]; | 280 | struct smem_region regions[0]; |
| 245 | }; | 281 | }; |
| 246 | 282 | ||
| 247 | static struct smem_private_entry * | 283 | static struct smem_private_entry * |
| 248 | phdr_to_last_private_entry(struct smem_partition_header *phdr) | 284 | phdr_to_last_uncached_entry(struct smem_partition_header *phdr) |
| 249 | { | 285 | { |
| 250 | void *p = phdr; | 286 | void *p = phdr; |
| 251 | 287 | ||
| 252 | return p + le32_to_cpu(phdr->offset_free_uncached); | 288 | return p + le32_to_cpu(phdr->offset_free_uncached); |
| 253 | } | 289 | } |
| 254 | 290 | ||
| 255 | static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr) | 291 | static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr, |
| 292 | size_t cacheline) | ||
| 293 | { | ||
| 294 | void *p = phdr; | ||
| 295 | |||
| 296 | return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*phdr), cacheline); | ||
| 297 | } | ||
| 298 | |||
| 299 | static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr) | ||
| 256 | { | 300 | { |
| 257 | void *p = phdr; | 301 | void *p = phdr; |
| 258 | 302 | ||
| @@ -260,7 +304,7 @@ static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr) | |||
| 260 | } | 304 | } |
| 261 | 305 | ||
| 262 | static struct smem_private_entry * | 306 | static struct smem_private_entry * |
| 263 | phdr_to_first_private_entry(struct smem_partition_header *phdr) | 307 | phdr_to_first_uncached_entry(struct smem_partition_header *phdr) |
| 264 | { | 308 | { |
| 265 | void *p = phdr; | 309 | void *p = phdr; |
| 266 | 310 | ||
| @@ -268,7 +312,7 @@ phdr_to_first_private_entry(struct smem_partition_header *phdr) | |||
| 268 | } | 312 | } |
| 269 | 313 | ||
| 270 | static struct smem_private_entry * | 314 | static struct smem_private_entry * |
| 271 | private_entry_next(struct smem_private_entry *e) | 315 | uncached_entry_next(struct smem_private_entry *e) |
| 272 | { | 316 | { |
| 273 | void *p = e; | 317 | void *p = e; |
| 274 | 318 | ||
| @@ -276,13 +320,28 @@ private_entry_next(struct smem_private_entry *e) | |||
| 276 | le32_to_cpu(e->size); | 320 | le32_to_cpu(e->size); |
| 277 | } | 321 | } |
| 278 | 322 | ||
| 279 | static void *entry_to_item(struct smem_private_entry *e) | 323 | static struct smem_private_entry * |
| 324 | cached_entry_next(struct smem_private_entry *e, size_t cacheline) | ||
| 325 | { | ||
| 326 | void *p = e; | ||
| 327 | |||
| 328 | return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline); | ||
| 329 | } | ||
| 330 | |||
| 331 | static void *uncached_entry_to_item(struct smem_private_entry *e) | ||
| 280 | { | 332 | { |
| 281 | void *p = e; | 333 | void *p = e; |
| 282 | 334 | ||
| 283 | return p + sizeof(*e) + le16_to_cpu(e->padding_hdr); | 335 | return p + sizeof(*e) + le16_to_cpu(e->padding_hdr); |
| 284 | } | 336 | } |
| 285 | 337 | ||
| 338 | static void *cached_entry_to_item(struct smem_private_entry *e) | ||
| 339 | { | ||
| 340 | void *p = e; | ||
| 341 | |||
| 342 | return p - le32_to_cpu(e->size); | ||
| 343 | } | ||
| 344 | |||
| 286 | /* Pointer to the one and only smem handle */ | 345 | /* Pointer to the one and only smem handle */ |
| 287 | static struct qcom_smem *__smem; | 346 | static struct qcom_smem *__smem; |
| 288 | 347 | ||
| @@ -290,32 +349,30 @@ static struct qcom_smem *__smem; | |||
| 290 | #define HWSPINLOCK_TIMEOUT 1000 | 349 | #define HWSPINLOCK_TIMEOUT 1000 |
| 291 | 350 | ||
| 292 | static int qcom_smem_alloc_private(struct qcom_smem *smem, | 351 | static int qcom_smem_alloc_private(struct qcom_smem *smem, |
| 293 | unsigned host, | 352 | struct smem_partition_header *phdr, |
| 294 | unsigned item, | 353 | unsigned item, |
| 295 | size_t size) | 354 | size_t size) |
| 296 | { | 355 | { |
| 297 | struct smem_partition_header *phdr; | ||
| 298 | struct smem_private_entry *hdr, *end; | 356 | struct smem_private_entry *hdr, *end; |
| 299 | size_t alloc_size; | 357 | size_t alloc_size; |
| 300 | void *cached; | 358 | void *cached; |
| 301 | 359 | ||
| 302 | phdr = smem->partitions[host]; | 360 | hdr = phdr_to_first_uncached_entry(phdr); |
| 303 | hdr = phdr_to_first_private_entry(phdr); | 361 | end = phdr_to_last_uncached_entry(phdr); |
| 304 | end = phdr_to_last_private_entry(phdr); | 362 | cached = phdr_to_last_cached_entry(phdr); |
| 305 | cached = phdr_to_first_cached_entry(phdr); | ||
| 306 | 363 | ||
| 307 | while (hdr < end) { | 364 | while (hdr < end) { |
| 308 | if (hdr->canary != SMEM_PRIVATE_CANARY) { | 365 | if (hdr->canary != SMEM_PRIVATE_CANARY) { |
| 309 | dev_err(smem->dev, | 366 | dev_err(smem->dev, |
| 310 | "Found invalid canary in host %d partition\n", | 367 | "Found invalid canary in hosts %d:%d partition\n", |
| 311 | host); | 368 | phdr->host0, phdr->host1); |
| 312 | return -EINVAL; | 369 | return -EINVAL; |
| 313 | } | 370 | } |
| 314 | 371 | ||
| 315 | if (le16_to_cpu(hdr->item) == item) | 372 | if (le16_to_cpu(hdr->item) == item) |
| 316 | return -EEXIST; | 373 | return -EEXIST; |
| 317 | 374 | ||
| 318 | hdr = private_entry_next(hdr); | 375 | hdr = uncached_entry_next(hdr); |
| 319 | } | 376 | } |
| 320 | 377 | ||
| 321 | /* Check that we don't grow into the cached region */ | 378 | /* Check that we don't grow into the cached region */ |
| @@ -346,11 +403,8 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem, | |||
| 346 | unsigned item, | 403 | unsigned item, |
| 347 | size_t size) | 404 | size_t size) |
| 348 | { | 405 | { |
| 349 | struct smem_header *header; | ||
| 350 | struct smem_global_entry *entry; | 406 | struct smem_global_entry *entry; |
| 351 | 407 | struct smem_header *header; | |
| 352 | if (WARN_ON(item >= SMEM_ITEM_COUNT)) | ||
| 353 | return -EINVAL; | ||
| 354 | 408 | ||
| 355 | header = smem->regions[0].virt_base; | 409 | header = smem->regions[0].virt_base; |
| 356 | entry = &header->toc[item]; | 410 | entry = &header->toc[item]; |
| @@ -389,6 +443,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem, | |||
| 389 | */ | 443 | */ |
| 390 | int qcom_smem_alloc(unsigned host, unsigned item, size_t size) | 444 | int qcom_smem_alloc(unsigned host, unsigned item, size_t size) |
| 391 | { | 445 | { |
| 446 | struct smem_partition_header *phdr; | ||
| 392 | unsigned long flags; | 447 | unsigned long flags; |
| 393 | int ret; | 448 | int ret; |
| 394 | 449 | ||
| @@ -401,16 +456,24 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size) | |||
| 401 | return -EINVAL; | 456 | return -EINVAL; |
| 402 | } | 457 | } |
| 403 | 458 | ||
| 459 | if (WARN_ON(item >= __smem->item_count)) | ||
| 460 | return -EINVAL; | ||
| 461 | |||
| 404 | ret = hwspin_lock_timeout_irqsave(__smem->hwlock, | 462 | ret = hwspin_lock_timeout_irqsave(__smem->hwlock, |
| 405 | HWSPINLOCK_TIMEOUT, | 463 | HWSPINLOCK_TIMEOUT, |
| 406 | &flags); | 464 | &flags); |
| 407 | if (ret) | 465 | if (ret) |
| 408 | return ret; | 466 | return ret; |
| 409 | 467 | ||
| 410 | if (host < SMEM_HOST_COUNT && __smem->partitions[host]) | 468 | if (host < SMEM_HOST_COUNT && __smem->partitions[host]) { |
| 411 | ret = qcom_smem_alloc_private(__smem, host, item, size); | 469 | phdr = __smem->partitions[host]; |
| 412 | else | 470 | ret = qcom_smem_alloc_private(__smem, phdr, item, size); |
| 471 | } else if (__smem->global_partition) { | ||
| 472 | phdr = __smem->global_partition; | ||
| 473 | ret = qcom_smem_alloc_private(__smem, phdr, item, size); | ||
| 474 | } else { | ||
| 413 | ret = qcom_smem_alloc_global(__smem, item, size); | 475 | ret = qcom_smem_alloc_global(__smem, item, size); |
| 476 | } | ||
| 414 | 477 | ||
| 415 | hwspin_unlock_irqrestore(__smem->hwlock, &flags); | 478 | hwspin_unlock_irqrestore(__smem->hwlock, &flags); |
| 416 | 479 | ||
| @@ -428,9 +491,6 @@ static void *qcom_smem_get_global(struct qcom_smem *smem, | |||
| 428 | u32 aux_base; | 491 | u32 aux_base; |
| 429 | unsigned i; | 492 | unsigned i; |
| 430 | 493 | ||
| 431 | if (WARN_ON(item >= SMEM_ITEM_COUNT)) | ||
| 432 | return ERR_PTR(-EINVAL); | ||
| 433 | |||
| 434 | header = smem->regions[0].virt_base; | 494 | header = smem->regions[0].virt_base; |
| 435 | entry = &header->toc[item]; | 495 | entry = &header->toc[item]; |
| 436 | if (!entry->allocated) | 496 | if (!entry->allocated) |
| @@ -452,37 +512,58 @@ static void *qcom_smem_get_global(struct qcom_smem *smem, | |||
| 452 | } | 512 | } |
| 453 | 513 | ||
| 454 | static void *qcom_smem_get_private(struct qcom_smem *smem, | 514 | static void *qcom_smem_get_private(struct qcom_smem *smem, |
| 455 | unsigned host, | 515 | struct smem_partition_header *phdr, |
| 516 | size_t cacheline, | ||
| 456 | unsigned item, | 517 | unsigned item, |
| 457 | size_t *size) | 518 | size_t *size) |
| 458 | { | 519 | { |
| 459 | struct smem_partition_header *phdr; | ||
| 460 | struct smem_private_entry *e, *end; | 520 | struct smem_private_entry *e, *end; |
| 461 | 521 | ||
| 462 | phdr = smem->partitions[host]; | 522 | e = phdr_to_first_uncached_entry(phdr); |
| 463 | e = phdr_to_first_private_entry(phdr); | 523 | end = phdr_to_last_uncached_entry(phdr); |
| 464 | end = phdr_to_last_private_entry(phdr); | ||
| 465 | 524 | ||
| 466 | while (e < end) { | 525 | while (e < end) { |
| 467 | if (e->canary != SMEM_PRIVATE_CANARY) { | 526 | if (e->canary != SMEM_PRIVATE_CANARY) |
| 468 | dev_err(smem->dev, | 527 | goto invalid_canary; |
| 469 | "Found invalid canary in host %d partition\n", | 528 | |
| 470 | host); | 529 | if (le16_to_cpu(e->item) == item) { |
| 471 | return ERR_PTR(-EINVAL); | 530 | if (size != NULL) |
| 531 | *size = le32_to_cpu(e->size) - | ||
| 532 | le16_to_cpu(e->padding_data); | ||
| 533 | |||
| 534 | return uncached_entry_to_item(e); | ||
| 472 | } | 535 | } |
| 473 | 536 | ||
| 537 | e = uncached_entry_next(e); | ||
| 538 | } | ||
| 539 | |||
| 540 | /* Item was not found in the uncached list, search the cached list */ | ||
| 541 | |||
| 542 | e = phdr_to_first_cached_entry(phdr, cacheline); | ||
| 543 | end = phdr_to_last_cached_entry(phdr); | ||
| 544 | |||
| 545 | while (e > end) { | ||
| 546 | if (e->canary != SMEM_PRIVATE_CANARY) | ||
| 547 | goto invalid_canary; | ||
| 548 | |||
| 474 | if (le16_to_cpu(e->item) == item) { | 549 | if (le16_to_cpu(e->item) == item) { |
| 475 | if (size != NULL) | 550 | if (size != NULL) |
| 476 | *size = le32_to_cpu(e->size) - | 551 | *size = le32_to_cpu(e->size) - |
| 477 | le16_to_cpu(e->padding_data); | 552 | le16_to_cpu(e->padding_data); |
| 478 | 553 | ||
| 479 | return entry_to_item(e); | 554 | return cached_entry_to_item(e); |
| 480 | } | 555 | } |
| 481 | 556 | ||
| 482 | e = private_entry_next(e); | 557 | e = cached_entry_next(e, cacheline); |
| 483 | } | 558 | } |
| 484 | 559 | ||
| 485 | return ERR_PTR(-ENOENT); | 560 | return ERR_PTR(-ENOENT); |
| 561 | |||
| 562 | invalid_canary: | ||
| 563 | dev_err(smem->dev, "Found invalid canary in hosts %d:%d partition\n", | ||
| 564 | phdr->host0, phdr->host1); | ||
| 565 | |||
| 566 | return ERR_PTR(-EINVAL); | ||
| 486 | } | 567 | } |
| 487 | 568 | ||
| 488 | /** | 569 | /** |
| @@ -496,23 +577,35 @@ static void *qcom_smem_get_private(struct qcom_smem *smem, | |||
| 496 | */ | 577 | */ |
| 497 | void *qcom_smem_get(unsigned host, unsigned item, size_t *size) | 578 | void *qcom_smem_get(unsigned host, unsigned item, size_t *size) |
| 498 | { | 579 | { |
| 580 | struct smem_partition_header *phdr; | ||
| 499 | unsigned long flags; | 581 | unsigned long flags; |
| 582 | size_t cacheln; | ||
| 500 | int ret; | 583 | int ret; |
| 501 | void *ptr = ERR_PTR(-EPROBE_DEFER); | 584 | void *ptr = ERR_PTR(-EPROBE_DEFER); |
| 502 | 585 | ||
| 503 | if (!__smem) | 586 | if (!__smem) |
| 504 | return ptr; | 587 | return ptr; |
| 505 | 588 | ||
| 589 | if (WARN_ON(item >= __smem->item_count)) | ||
| 590 | return ERR_PTR(-EINVAL); | ||
| 591 | |||
| 506 | ret = hwspin_lock_timeout_irqsave(__smem->hwlock, | 592 | ret = hwspin_lock_timeout_irqsave(__smem->hwlock, |
| 507 | HWSPINLOCK_TIMEOUT, | 593 | HWSPINLOCK_TIMEOUT, |
| 508 | &flags); | 594 | &flags); |
| 509 | if (ret) | 595 | if (ret) |
| 510 | return ERR_PTR(ret); | 596 | return ERR_PTR(ret); |
| 511 | 597 | ||
| 512 | if (host < SMEM_HOST_COUNT && __smem->partitions[host]) | 598 | if (host < SMEM_HOST_COUNT && __smem->partitions[host]) { |
| 513 | ptr = qcom_smem_get_private(__smem, host, item, size); | 599 | phdr = __smem->partitions[host]; |
| 514 | else | 600 | cacheln = __smem->cacheline[host]; |
| 601 | ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size); | ||
| 602 | } else if (__smem->global_partition) { | ||
| 603 | phdr = __smem->global_partition; | ||
| 604 | cacheln = __smem->global_cacheline; | ||
| 605 | ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size); | ||
| 606 | } else { | ||
| 515 | ptr = qcom_smem_get_global(__smem, item, size); | 607 | ptr = qcom_smem_get_global(__smem, item, size); |
| 608 | } | ||
| 516 | 609 | ||
| 517 | hwspin_unlock_irqrestore(__smem->hwlock, &flags); | 610 | hwspin_unlock_irqrestore(__smem->hwlock, &flags); |
| 518 | 611 | ||
| @@ -541,6 +634,10 @@ int qcom_smem_get_free_space(unsigned host) | |||
| 541 | phdr = __smem->partitions[host]; | 634 | phdr = __smem->partitions[host]; |
| 542 | ret = le32_to_cpu(phdr->offset_free_cached) - | 635 | ret = le32_to_cpu(phdr->offset_free_cached) - |
| 543 | le32_to_cpu(phdr->offset_free_uncached); | 636 | le32_to_cpu(phdr->offset_free_uncached); |
| 637 | } else if (__smem->global_partition) { | ||
| 638 | phdr = __smem->global_partition; | ||
| 639 | ret = le32_to_cpu(phdr->offset_free_cached) - | ||
| 640 | le32_to_cpu(phdr->offset_free_uncached); | ||
| 544 | } else { | 641 | } else { |
| 545 | header = __smem->regions[0].virt_base; | 642 | header = __smem->regions[0].virt_base; |
| 546 | ret = le32_to_cpu(header->available); | 643 | ret = le32_to_cpu(header->available); |
| @@ -552,44 +649,131 @@ EXPORT_SYMBOL(qcom_smem_get_free_space); | |||
| 552 | 649 | ||
| 553 | static int qcom_smem_get_sbl_version(struct qcom_smem *smem) | 650 | static int qcom_smem_get_sbl_version(struct qcom_smem *smem) |
| 554 | { | 651 | { |
| 652 | struct smem_header *header; | ||
| 555 | __le32 *versions; | 653 | __le32 *versions; |
| 556 | size_t size; | ||
| 557 | 654 | ||
| 558 | versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size); | 655 | header = smem->regions[0].virt_base; |
| 559 | if (IS_ERR(versions)) { | 656 | versions = header->version; |
| 560 | dev_err(smem->dev, "Unable to read the version item\n"); | ||
| 561 | return -ENOENT; | ||
| 562 | } | ||
| 563 | |||
| 564 | if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) { | ||
| 565 | dev_err(smem->dev, "Version item is too small\n"); | ||
| 566 | return -EINVAL; | ||
| 567 | } | ||
| 568 | 657 | ||
| 569 | return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]); | 658 | return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]); |
| 570 | } | 659 | } |
| 571 | 660 | ||
| 572 | static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, | 661 | static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem) |
| 573 | unsigned local_host) | ||
| 574 | { | 662 | { |
| 575 | struct smem_partition_header *header; | ||
| 576 | struct smem_ptable_entry *entry; | ||
| 577 | struct smem_ptable *ptable; | 663 | struct smem_ptable *ptable; |
| 578 | unsigned remote_host; | 664 | u32 version; |
| 579 | u32 version, host0, host1; | ||
| 580 | int i; | ||
| 581 | 665 | ||
| 582 | ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K; | 666 | ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K; |
| 583 | if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic))) | 667 | if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic))) |
| 584 | return 0; | 668 | return ERR_PTR(-ENOENT); |
| 585 | 669 | ||
| 586 | version = le32_to_cpu(ptable->version); | 670 | version = le32_to_cpu(ptable->version); |
| 587 | if (version != 1) { | 671 | if (version != 1) { |
| 588 | dev_err(smem->dev, | 672 | dev_err(smem->dev, |
| 589 | "Unsupported partition header version %d\n", version); | 673 | "Unsupported partition header version %d\n", version); |
| 674 | return ERR_PTR(-EINVAL); | ||
| 675 | } | ||
| 676 | return ptable; | ||
| 677 | } | ||
| 678 | |||
| 679 | static u32 qcom_smem_get_item_count(struct qcom_smem *smem) | ||
| 680 | { | ||
| 681 | struct smem_ptable *ptable; | ||
| 682 | struct smem_info *info; | ||
| 683 | |||
| 684 | ptable = qcom_smem_get_ptable(smem); | ||
| 685 | if (IS_ERR_OR_NULL(ptable)) | ||
| 686 | return SMEM_ITEM_COUNT; | ||
| 687 | |||
| 688 | info = (struct smem_info *)&ptable->entry[ptable->num_entries]; | ||
| 689 | if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic))) | ||
| 690 | return SMEM_ITEM_COUNT; | ||
| 691 | |||
| 692 | return le16_to_cpu(info->num_items); | ||
| 693 | } | ||
| 694 | |||
| 695 | static int qcom_smem_set_global_partition(struct qcom_smem *smem) | ||
| 696 | { | ||
| 697 | struct smem_partition_header *header; | ||
| 698 | struct smem_ptable_entry *entry = NULL; | ||
| 699 | struct smem_ptable *ptable; | ||
| 700 | u32 host0, host1, size; | ||
| 701 | int i; | ||
| 702 | |||
| 703 | ptable = qcom_smem_get_ptable(smem); | ||
| 704 | if (IS_ERR(ptable)) | ||
| 705 | return PTR_ERR(ptable); | ||
| 706 | |||
| 707 | for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { | ||
| 708 | entry = &ptable->entry[i]; | ||
| 709 | host0 = le16_to_cpu(entry->host0); | ||
| 710 | host1 = le16_to_cpu(entry->host1); | ||
| 711 | |||
| 712 | if (host0 == SMEM_GLOBAL_HOST && host0 == host1) | ||
| 713 | break; | ||
| 714 | } | ||
| 715 | |||
| 716 | if (!entry) { | ||
| 717 | dev_err(smem->dev, "Missing entry for global partition\n"); | ||
| 718 | return -EINVAL; | ||
| 719 | } | ||
| 720 | |||
| 721 | if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) { | ||
| 722 | dev_err(smem->dev, "Invalid entry for global partition\n"); | ||
| 723 | return -EINVAL; | ||
| 724 | } | ||
| 725 | |||
| 726 | if (smem->global_partition) { | ||
| 727 | dev_err(smem->dev, "Already found the global partition\n"); | ||
| 728 | return -EINVAL; | ||
| 729 | } | ||
| 730 | |||
| 731 | header = smem->regions[0].virt_base + le32_to_cpu(entry->offset); | ||
| 732 | host0 = le16_to_cpu(header->host0); | ||
| 733 | host1 = le16_to_cpu(header->host1); | ||
| 734 | |||
| 735 | if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) { | ||
| 736 | dev_err(smem->dev, "Global partition has invalid magic\n"); | ||
| 737 | return -EINVAL; | ||
| 738 | } | ||
| 739 | |||
| 740 | if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) { | ||
| 741 | dev_err(smem->dev, "Global partition hosts are invalid\n"); | ||
| 742 | return -EINVAL; | ||
| 743 | } | ||
| 744 | |||
| 745 | if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) { | ||
| 746 | dev_err(smem->dev, "Global partition has invalid size\n"); | ||
| 590 | return -EINVAL; | 747 | return -EINVAL; |
| 591 | } | 748 | } |
| 592 | 749 | ||
| 750 | size = le32_to_cpu(header->offset_free_uncached); | ||
| 751 | if (size > le32_to_cpu(header->size)) { | ||
| 752 | dev_err(smem->dev, | ||
| 753 | "Global partition has invalid free pointer\n"); | ||
| 754 | return -EINVAL; | ||
| 755 | } | ||
| 756 | |||
| 757 | smem->global_partition = header; | ||
| 758 | smem->global_cacheline = le32_to_cpu(entry->cacheline); | ||
| 759 | |||
| 760 | return 0; | ||
| 761 | } | ||
| 762 | |||
| 763 | static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, | ||
| 764 | unsigned int local_host) | ||
| 765 | { | ||
| 766 | struct smem_partition_header *header; | ||
| 767 | struct smem_ptable_entry *entry; | ||
| 768 | struct smem_ptable *ptable; | ||
| 769 | unsigned int remote_host; | ||
| 770 | u32 host0, host1; | ||
| 771 | int i; | ||
| 772 | |||
| 773 | ptable = qcom_smem_get_ptable(smem); | ||
| 774 | if (IS_ERR(ptable)) | ||
| 775 | return PTR_ERR(ptable); | ||
| 776 | |||
| 593 | for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { | 777 | for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { |
| 594 | entry = &ptable->entry[i]; | 778 | entry = &ptable->entry[i]; |
| 595 | host0 = le16_to_cpu(entry->host0); | 779 | host0 = le16_to_cpu(entry->host0); |
| @@ -646,7 +830,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, | |||
| 646 | return -EINVAL; | 830 | return -EINVAL; |
| 647 | } | 831 | } |
| 648 | 832 | ||
| 649 | if (header->size != entry->size) { | 833 | if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) { |
| 650 | dev_err(smem->dev, | 834 | dev_err(smem->dev, |
| 651 | "Partition %d has invalid size\n", i); | 835 | "Partition %d has invalid size\n", i); |
| 652 | return -EINVAL; | 836 | return -EINVAL; |
| @@ -659,6 +843,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, | |||
| 659 | } | 843 | } |
| 660 | 844 | ||
| 661 | smem->partitions[remote_host] = header; | 845 | smem->partitions[remote_host] = header; |
| 846 | smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline); | ||
| 662 | } | 847 | } |
| 663 | 848 | ||
| 664 | return 0; | 849 | return 0; |
| @@ -729,13 +914,23 @@ static int qcom_smem_probe(struct platform_device *pdev) | |||
| 729 | } | 914 | } |
| 730 | 915 | ||
| 731 | version = qcom_smem_get_sbl_version(smem); | 916 | version = qcom_smem_get_sbl_version(smem); |
| 732 | if (version >> 16 != SMEM_EXPECTED_VERSION) { | 917 | switch (version >> 16) { |
| 918 | case SMEM_GLOBAL_PART_VERSION: | ||
| 919 | ret = qcom_smem_set_global_partition(smem); | ||
| 920 | if (ret < 0) | ||
| 921 | return ret; | ||
| 922 | smem->item_count = qcom_smem_get_item_count(smem); | ||
| 923 | break; | ||
| 924 | case SMEM_GLOBAL_HEAP_VERSION: | ||
| 925 | smem->item_count = SMEM_ITEM_COUNT; | ||
| 926 | break; | ||
| 927 | default: | ||
| 733 | dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version); | 928 | dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version); |
| 734 | return -EINVAL; | 929 | return -EINVAL; |
| 735 | } | 930 | } |
| 736 | 931 | ||
| 737 | ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS); | 932 | ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS); |
| 738 | if (ret < 0) | 933 | if (ret < 0 && ret != -ENOENT) |
| 739 | return ret; | 934 | return ret; |
| 740 | 935 | ||
| 741 | hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0); | 936 | hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0); |
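The reworked lookup above stays behind the existing client API: qcom_smem_alloc() and qcom_smem_get() keep their signatures and now route to the per-host partition, the new SMEM_GLOBAL_HOST partition on v12, or the legacy global heap. A minimal in-kernel client sketch, with a made-up host and item number purely for illustration:

```c
/* Illustrative smem client; MY_SMEM_HOST and MY_SMEM_ITEM are invented
 * for this sketch and do not correspond to a real protocol. */
#include <linux/err.h>
#include <linux/types.h>
#include <linux/soc/qcom/smem.h>

#define MY_SMEM_HOST	1	/* hypothetical remote processor id */
#define MY_SMEM_ITEM	12	/* hypothetical smem item number */

static int my_smem_example(void)
{
	size_t size;
	u32 *entry;
	int ret;

	/* Allocate the item once; -EEXIST just means it already exists. */
	ret = qcom_smem_alloc(MY_SMEM_HOST, MY_SMEM_ITEM, sizeof(*entry));
	if (ret < 0 && ret != -EEXIST)
		return ret;

	/* Look it up; after this patch the same call works whether the
	 * item lives in a private partition, the global partition or the
	 * plain global heap. */
	entry = qcom_smem_get(MY_SMEM_HOST, MY_SMEM_ITEM, &size);
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	*entry = 0x1;
	return 0;
}
```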
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 567414cb42ba..09550b1da56d 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig | |||
| @@ -3,7 +3,8 @@ config SOC_RENESAS | |||
| 3 | default y if ARCH_RENESAS | 3 | default y if ARCH_RENESAS |
| 4 | select SOC_BUS | 4 | select SOC_BUS |
| 5 | select RST_RCAR if ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || \ | 5 | select RST_RCAR if ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || \ |
| 6 | ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77995 | 6 | ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77970 || \ |
| 7 | ARCH_R8A77995 | ||
| 7 | select SYSC_R8A7743 if ARCH_R8A7743 | 8 | select SYSC_R8A7743 if ARCH_R8A7743 |
| 8 | select SYSC_R8A7745 if ARCH_R8A7745 | 9 | select SYSC_R8A7745 if ARCH_R8A7745 |
| 9 | select SYSC_R8A7779 if ARCH_R8A7779 | 10 | select SYSC_R8A7779 if ARCH_R8A7779 |
| @@ -13,6 +14,7 @@ config SOC_RENESAS | |||
| 13 | select SYSC_R8A7794 if ARCH_R8A7794 | 14 | select SYSC_R8A7794 if ARCH_R8A7794 |
| 14 | select SYSC_R8A7795 if ARCH_R8A7795 | 15 | select SYSC_R8A7795 if ARCH_R8A7795 |
| 15 | select SYSC_R8A7796 if ARCH_R8A7796 | 16 | select SYSC_R8A7796 if ARCH_R8A7796 |
| 17 | select SYSC_R8A77970 if ARCH_R8A77970 | ||
| 16 | select SYSC_R8A77995 if ARCH_R8A77995 | 18 | select SYSC_R8A77995 if ARCH_R8A77995 |
| 17 | 19 | ||
| 18 | if SOC_RENESAS | 20 | if SOC_RENESAS |
| @@ -54,6 +56,10 @@ config SYSC_R8A7796 | |||
| 54 | bool "R-Car M3-W System Controller support" if COMPILE_TEST | 56 | bool "R-Car M3-W System Controller support" if COMPILE_TEST |
| 55 | select SYSC_RCAR | 57 | select SYSC_RCAR |
| 56 | 58 | ||
| 59 | config SYSC_R8A77970 | ||
| 60 | bool "R-Car V3M System Controller support" if COMPILE_TEST | ||
| 61 | select SYSC_RCAR | ||
| 62 | |||
| 57 | config SYSC_R8A77995 | 63 | config SYSC_R8A77995 |
| 58 | bool "R-Car D3 System Controller support" if COMPILE_TEST | 64 | bool "R-Car D3 System Controller support" if COMPILE_TEST |
| 59 | select SYSC_RCAR | 65 | select SYSC_RCAR |
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile index 763c03d80436..845d62a08ce1 100644 --- a/drivers/soc/renesas/Makefile +++ b/drivers/soc/renesas/Makefile | |||
| @@ -12,6 +12,7 @@ obj-$(CONFIG_SYSC_R8A7792) += r8a7792-sysc.o | |||
| 12 | obj-$(CONFIG_SYSC_R8A7794) += r8a7794-sysc.o | 12 | obj-$(CONFIG_SYSC_R8A7794) += r8a7794-sysc.o |
| 13 | obj-$(CONFIG_SYSC_R8A7795) += r8a7795-sysc.o | 13 | obj-$(CONFIG_SYSC_R8A7795) += r8a7795-sysc.o |
| 14 | obj-$(CONFIG_SYSC_R8A7796) += r8a7796-sysc.o | 14 | obj-$(CONFIG_SYSC_R8A7796) += r8a7796-sysc.o |
| 15 | obj-$(CONFIG_SYSC_R8A77970) += r8a77970-sysc.o | ||
| 15 | obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o | 16 | obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o |
| 16 | 17 | ||
| 17 | # Family | 18 | # Family |
diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c new file mode 100644 index 000000000000..8c614164718e --- /dev/null +++ b/drivers/soc/renesas/r8a77970-sysc.c | |||
| @@ -0,0 +1,39 @@ | |||
| 1 | /* | ||
| 2 | * Renesas R-Car V3M System Controller | ||
| 3 | * | ||
| 4 | * Copyright (C) 2017 Cogent Embedded Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/bug.h> | ||
| 12 | #include <linux/kernel.h> | ||
| 13 | |||
| 14 | #include <dt-bindings/power/r8a77970-sysc.h> | ||
| 15 | |||
| 16 | #include "rcar-sysc.h" | ||
| 17 | |||
| 18 | static const struct rcar_sysc_area r8a77970_areas[] __initconst = { | ||
| 19 | { "always-on", 0, 0, R8A77970_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, | ||
| 20 | { "ca53-scu", 0x140, 0, R8A77970_PD_CA53_SCU, R8A77970_PD_ALWAYS_ON, | ||
| 21 | PD_SCU }, | ||
| 22 | { "ca53-cpu0", 0x200, 0, R8A77970_PD_CA53_CPU0, R8A77970_PD_CA53_SCU, | ||
| 23 | PD_CPU_NOCR }, | ||
| 24 | { "ca53-cpu1", 0x200, 1, R8A77970_PD_CA53_CPU1, R8A77970_PD_CA53_SCU, | ||
| 25 | PD_CPU_NOCR }, | ||
| 26 | { "cr7", 0x240, 0, R8A77970_PD_CR7, R8A77970_PD_ALWAYS_ON }, | ||
| 27 | { "a3ir", 0x180, 0, R8A77970_PD_A3IR, R8A77970_PD_ALWAYS_ON }, | ||
| 28 | { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_ALWAYS_ON }, | ||
| 29 | { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A2IR0 }, | ||
| 30 | { "a2ir2", 0x400, 2, R8A77970_PD_A2IR2, R8A77970_PD_A2IR0 }, | ||
| 31 | { "a2ir3", 0x400, 3, R8A77970_PD_A2IR3, R8A77970_PD_A2IR0 }, | ||
| 32 | { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_ALWAYS_ON }, | ||
| 33 | { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A2SC0 }, | ||
| 34 | }; | ||
| 35 | |||
| 36 | const struct rcar_sysc_info r8a77970_sysc_info __initconst = { | ||
| 37 | .areas = r8a77970_areas, | ||
| 38 | .num_areas = ARRAY_SIZE(r8a77970_areas), | ||
| 39 | }; | ||
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c index baa47014e96b..3316b028f231 100644 --- a/drivers/soc/renesas/rcar-rst.c +++ b/drivers/soc/renesas/rcar-rst.c | |||
| @@ -41,6 +41,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = { | |||
| 41 | /* R-Car Gen3 is handled like R-Car Gen2 */ | 41 | /* R-Car Gen3 is handled like R-Car Gen2 */ |
| 42 | { .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen2 }, | 42 | { .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen2 }, |
| 43 | { .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen2 }, | 43 | { .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen2 }, |
| 44 | { .compatible = "renesas,r8a77970-rst", .data = &rcar_rst_gen2 }, | ||
| 44 | { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen2 }, | 45 | { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen2 }, |
| 45 | { /* sentinel */ } | 46 | { /* sentinel */ } |
| 46 | }; | 47 | }; |
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c index c8406e81640f..55a47e509e49 100644 --- a/drivers/soc/renesas/rcar-sysc.c +++ b/drivers/soc/renesas/rcar-sysc.c | |||
| @@ -284,6 +284,9 @@ static const struct of_device_id rcar_sysc_matches[] = { | |||
| 284 | #ifdef CONFIG_SYSC_R8A7796 | 284 | #ifdef CONFIG_SYSC_R8A7796 |
| 285 | { .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info }, | 285 | { .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info }, |
| 286 | #endif | 286 | #endif |
| 287 | #ifdef CONFIG_SYSC_R8A77970 | ||
| 288 | { .compatible = "renesas,r8a77970-sysc", .data = &r8a77970_sysc_info }, | ||
| 289 | #endif | ||
| 287 | #ifdef CONFIG_SYSC_R8A77995 | 290 | #ifdef CONFIG_SYSC_R8A77995 |
| 288 | { .compatible = "renesas,r8a77995-sysc", .data = &r8a77995_sysc_info }, | 291 | { .compatible = "renesas,r8a77995-sysc", .data = &r8a77995_sysc_info }, |
| 289 | #endif | 292 | #endif |
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h index 2f524922c4d2..9d9daf9eb91b 100644 --- a/drivers/soc/renesas/rcar-sysc.h +++ b/drivers/soc/renesas/rcar-sysc.h | |||
| @@ -58,6 +58,7 @@ extern const struct rcar_sysc_info r8a7792_sysc_info; | |||
| 58 | extern const struct rcar_sysc_info r8a7794_sysc_info; | 58 | extern const struct rcar_sysc_info r8a7794_sysc_info; |
| 59 | extern const struct rcar_sysc_info r8a7795_sysc_info; | 59 | extern const struct rcar_sysc_info r8a7795_sysc_info; |
| 60 | extern const struct rcar_sysc_info r8a7796_sysc_info; | 60 | extern const struct rcar_sysc_info r8a7796_sysc_info; |
| 61 | extern const struct rcar_sysc_info r8a77970_sysc_info; | ||
| 61 | extern const struct rcar_sysc_info r8a77995_sysc_info; | 62 | extern const struct rcar_sysc_info r8a77995_sysc_info; |
| 62 | 63 | ||
| 63 | 64 | ||
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index 90d6b7a4340a..9f4ee2567c72 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c | |||
| @@ -144,6 +144,11 @@ static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = { | |||
| 144 | .id = 0x52, | 144 | .id = 0x52, |
| 145 | }; | 145 | }; |
| 146 | 146 | ||
| 147 | static const struct renesas_soc soc_rcar_v3m __initconst __maybe_unused = { | ||
| 148 | .family = &fam_rcar_gen3, | ||
| 149 | .id = 0x54, | ||
| 150 | }; | ||
| 151 | |||
| 147 | static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = { | 152 | static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = { |
| 148 | .family = &fam_rcar_gen3, | 153 | .family = &fam_rcar_gen3, |
| 149 | .id = 0x58, | 154 | .id = 0x58, |
| @@ -204,6 +209,9 @@ static const struct of_device_id renesas_socs[] __initconst = { | |||
| 204 | #ifdef CONFIG_ARCH_R8A7796 | 209 | #ifdef CONFIG_ARCH_R8A7796 |
| 205 | { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w }, | 210 | { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w }, |
| 206 | #endif | 211 | #endif |
| 212 | #ifdef CONFIG_ARCH_R8A77970 | ||
| 213 | { .compatible = "renesas,r8a77970", .data = &soc_rcar_v3m }, | ||
| 214 | #endif | ||
| 207 | #ifdef CONFIG_ARCH_R8A77995 | 215 | #ifdef CONFIG_ARCH_R8A77995 |
| 208 | { .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 }, | 216 | { .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 }, |
| 209 | #endif | 217 | #endif |
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c index bd4a76f27bc2..938f8ccfcb74 100644 --- a/drivers/soc/samsung/exynos-pmu.c +++ b/drivers/soc/samsung/exynos-pmu.c | |||
| @@ -60,12 +60,6 @@ void exynos_sys_powerdown_conf(enum sys_powerdown mode) | |||
| 60 | 60 | ||
| 61 | if (pmu_data->powerdown_conf_extra) | 61 | if (pmu_data->powerdown_conf_extra) |
| 62 | pmu_data->powerdown_conf_extra(mode); | 62 | pmu_data->powerdown_conf_extra(mode); |
| 63 | |||
| 64 | if (pmu_data->pmu_config_extra) { | ||
| 65 | for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++) | ||
| 66 | pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode], | ||
| 67 | pmu_data->pmu_config_extra[i].offset); | ||
| 68 | } | ||
| 69 | } | 63 | } |
| 70 | 64 | ||
| 71 | /* | 65 | /* |
| @@ -89,9 +83,6 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = { | |||
| 89 | .compatible = "samsung,exynos4210-pmu", | 83 | .compatible = "samsung,exynos4210-pmu", |
| 90 | .data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data), | 84 | .data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data), |
| 91 | }, { | 85 | }, { |
| 92 | .compatible = "samsung,exynos4212-pmu", | ||
| 93 | .data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data), | ||
| 94 | }, { | ||
| 95 | .compatible = "samsung,exynos4412-pmu", | 86 | .compatible = "samsung,exynos4412-pmu", |
| 96 | .data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data), | 87 | .data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data), |
| 97 | }, { | 88 | }, { |
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h index 40d4229abfb5..86b3f2f8966d 100644 --- a/drivers/soc/samsung/exynos-pmu.h +++ b/drivers/soc/samsung/exynos-pmu.h | |||
| @@ -23,7 +23,6 @@ struct exynos_pmu_conf { | |||
| 23 | 23 | ||
| 24 | struct exynos_pmu_data { | 24 | struct exynos_pmu_data { |
| 25 | const struct exynos_pmu_conf *pmu_config; | 25 | const struct exynos_pmu_conf *pmu_config; |
| 26 | const struct exynos_pmu_conf *pmu_config_extra; | ||
| 27 | 26 | ||
| 28 | void (*pmu_init)(void); | 27 | void (*pmu_init)(void); |
| 29 | void (*powerdown_conf)(enum sys_powerdown); | 28 | void (*powerdown_conf)(enum sys_powerdown); |
| @@ -36,7 +35,6 @@ extern void __iomem *pmu_base_addr; | |||
| 36 | /* list of all exported SoC specific data */ | 35 | /* list of all exported SoC specific data */ |
| 37 | extern const struct exynos_pmu_data exynos3250_pmu_data; | 36 | extern const struct exynos_pmu_data exynos3250_pmu_data; |
| 38 | extern const struct exynos_pmu_data exynos4210_pmu_data; | 37 | extern const struct exynos_pmu_data exynos4210_pmu_data; |
| 39 | extern const struct exynos_pmu_data exynos4212_pmu_data; | ||
| 40 | extern const struct exynos_pmu_data exynos4412_pmu_data; | 38 | extern const struct exynos_pmu_data exynos4412_pmu_data; |
| 41 | extern const struct exynos_pmu_data exynos5250_pmu_data; | 39 | extern const struct exynos_pmu_data exynos5250_pmu_data; |
| 42 | extern const struct exynos_pmu_data exynos5420_pmu_data; | 40 | extern const struct exynos_pmu_data exynos5420_pmu_data; |
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c index bc4fa73bed11..5dbfe4e31f4c 100644 --- a/drivers/soc/samsung/exynos4-pmu.c +++ b/drivers/soc/samsung/exynos4-pmu.c | |||
| @@ -90,7 +90,7 @@ static const struct exynos_pmu_conf exynos4210_pmu_config[] = { | |||
| 90 | { PMU_TABLE_END,}, | 90 | { PMU_TABLE_END,}, |
| 91 | }; | 91 | }; |
| 92 | 92 | ||
| 93 | static const struct exynos_pmu_conf exynos4x12_pmu_config[] = { | 93 | static const struct exynos_pmu_conf exynos4412_pmu_config[] = { |
| 94 | { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } }, | 94 | { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } }, |
| 95 | { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } }, | 95 | { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } }, |
| 96 | { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } }, | 96 | { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } }, |
| @@ -195,10 +195,6 @@ static const struct exynos_pmu_conf exynos4x12_pmu_config[] = { | |||
| 195 | { S5P_GPS_ALIVE_LOWPWR, { 0x7, 0x0, 0x0 } }, | 195 | { S5P_GPS_ALIVE_LOWPWR, { 0x7, 0x0, 0x0 } }, |
| 196 | { S5P_CMU_SYSCLK_ISP_LOWPWR, { 0x1, 0x0, 0x0 } }, | 196 | { S5P_CMU_SYSCLK_ISP_LOWPWR, { 0x1, 0x0, 0x0 } }, |
| 197 | { S5P_CMU_SYSCLK_GPS_LOWPWR, { 0x1, 0x0, 0x0 } }, | 197 | { S5P_CMU_SYSCLK_GPS_LOWPWR, { 0x1, 0x0, 0x0 } }, |
| 198 | { PMU_TABLE_END,}, | ||
| 199 | }; | ||
| 200 | |||
| 201 | static const struct exynos_pmu_conf exynos4412_pmu_config[] = { | ||
| 202 | { S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } }, | 198 | { S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } }, |
| 203 | { S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } }, | 199 | { S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } }, |
| 204 | { S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } }, | 200 | { S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } }, |
| @@ -212,11 +208,6 @@ const struct exynos_pmu_data exynos4210_pmu_data = { | |||
| 212 | .pmu_config = exynos4210_pmu_config, | 208 | .pmu_config = exynos4210_pmu_config, |
| 213 | }; | 209 | }; |
| 214 | 210 | ||
| 215 | const struct exynos_pmu_data exynos4212_pmu_data = { | ||
| 216 | .pmu_config = exynos4x12_pmu_config, | ||
| 217 | }; | ||
| 218 | |||
| 219 | const struct exynos_pmu_data exynos4412_pmu_data = { | 211 | const struct exynos_pmu_data exynos4412_pmu_data = { |
| 220 | .pmu_config = exynos4x12_pmu_config, | 212 | .pmu_config = exynos4412_pmu_config, |
| 221 | .pmu_config_extra = exynos4412_pmu_config, | ||
| 222 | }; | 213 | }; |
diff --git a/drivers/soc/tegra/powergate-bpmp.c b/drivers/soc/tegra/powergate-bpmp.c index 8fc356039401..82c7e27cd8bb 100644 --- a/drivers/soc/tegra/powergate-bpmp.c +++ b/drivers/soc/tegra/powergate-bpmp.c | |||
| @@ -42,6 +42,7 @@ static int tegra_bpmp_powergate_set_state(struct tegra_bpmp *bpmp, | |||
| 42 | { | 42 | { |
| 43 | struct mrq_pg_request request; | 43 | struct mrq_pg_request request; |
| 44 | struct tegra_bpmp_message msg; | 44 | struct tegra_bpmp_message msg; |
| 45 | int err; | ||
| 45 | 46 | ||
| 46 | memset(&request, 0, sizeof(request)); | 47 | memset(&request, 0, sizeof(request)); |
| 47 | request.cmd = CMD_PG_SET_STATE; | 48 | request.cmd = CMD_PG_SET_STATE; |
| @@ -53,7 +54,13 @@ static int tegra_bpmp_powergate_set_state(struct tegra_bpmp *bpmp, | |||
| 53 | msg.tx.data = &request; | 54 | msg.tx.data = &request; |
| 54 | msg.tx.size = sizeof(request); | 55 | msg.tx.size = sizeof(request); |
| 55 | 56 | ||
| 56 | return tegra_bpmp_transfer(bpmp, &msg); | 57 | err = tegra_bpmp_transfer(bpmp, &msg); |
| 58 | if (err < 0) | ||
| 59 | return err; | ||
| 60 | else if (msg.rx.ret < 0) | ||
| 61 | return -EINVAL; | ||
| 62 | |||
| 63 | return 0; | ||
| 57 | } | 64 | } |
| 58 | 65 | ||
| 59 | static int tegra_bpmp_powergate_get_state(struct tegra_bpmp *bpmp, | 66 | static int tegra_bpmp_powergate_get_state(struct tegra_bpmp *bpmp, |
| @@ -80,6 +87,8 @@ static int tegra_bpmp_powergate_get_state(struct tegra_bpmp *bpmp, | |||
| 80 | err = tegra_bpmp_transfer(bpmp, &msg); | 87 | err = tegra_bpmp_transfer(bpmp, &msg); |
| 81 | if (err < 0) | 88 | if (err < 0) |
| 82 | return PG_STATE_OFF; | 89 | return PG_STATE_OFF; |
| 90 | else if (msg.rx.ret < 0) | ||
| 91 | return -EINVAL; | ||
| 83 | 92 | ||
| 84 | return response.get_state.state; | 93 | return response.get_state.state; |
| 85 | } | 94 | } |
| @@ -106,6 +115,8 @@ static int tegra_bpmp_powergate_get_max_id(struct tegra_bpmp *bpmp) | |||
| 106 | err = tegra_bpmp_transfer(bpmp, &msg); | 115 | err = tegra_bpmp_transfer(bpmp, &msg); |
| 107 | if (err < 0) | 116 | if (err < 0) |
| 108 | return err; | 117 | return err; |
| 118 | else if (msg.rx.ret < 0) | ||
| 119 | return -EINVAL; | ||
| 109 | 120 | ||
| 110 | return response.get_max_id.max_id; | 121 | return response.get_max_id.max_id; |
| 111 | } | 122 | } |
| @@ -132,7 +143,7 @@ static char *tegra_bpmp_powergate_get_name(struct tegra_bpmp *bpmp, | |||
| 132 | msg.rx.size = sizeof(response); | 143 | msg.rx.size = sizeof(response); |
| 133 | 144 | ||
| 134 | err = tegra_bpmp_transfer(bpmp, &msg); | 145 | err = tegra_bpmp_transfer(bpmp, &msg); |
| 135 | if (err < 0) | 146 | if (err < 0 || msg.rx.ret < 0) |
| 136 | return NULL; | 147 | return NULL; |
| 137 | 148 | ||
| 138 | return kstrdup(response.get_name.name, GFP_KERNEL); | 149 | return kstrdup(response.get_name.name, GFP_KERNEL); |
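The common thread in the powergate changes above is that a BPMP request can fail in two places: the transport (a negative return from tegra_bpmp_transfer()) and the firmware itself (a negative msg.rx.ret), and both are now checked. A condensed sketch of that pattern, modelled on the max-id query touched above (a sketch of the calling convention, not a drop-in replacement):

```c
/* Sketch of the transfer-then-check-rx.ret pattern used above. */
#include <linux/string.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

static int example_pg_get_max_id(struct tegra_bpmp *bpmp)
{
	struct mrq_pg_request request;
	struct mrq_pg_response response;
	struct tegra_bpmp_message msg;
	int err;

	memset(&request, 0, sizeof(request));
	request.cmd = CMD_PG_GET_MAX_ID;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);
	msg.rx.data = &response;
	msg.rx.size = sizeof(response);

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err < 0)
		return err;		/* transport/IPC failure */
	if (msg.rx.ret < 0)
		return -EINVAL;		/* firmware rejected the request */

	return response.get_max_id.max_id;
}
```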
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 195cd08fbc30..610344eb3e03 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile | |||
| @@ -55,7 +55,7 @@ obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o | |||
| 55 | obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o | 55 | obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o |
| 56 | obj-$(CONFIG_ST_THERMAL) += st/ | 56 | obj-$(CONFIG_ST_THERMAL) += st/ |
| 57 | obj-$(CONFIG_QCOM_TSENS) += qcom/ | 57 | obj-$(CONFIG_QCOM_TSENS) += qcom/ |
| 58 | obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/ | 58 | obj-y += tegra/ |
| 59 | obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o | 59 | obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o |
| 60 | obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o | 60 | obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o |
| 61 | obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o | 61 | obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o |
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig index cec586ec7e4b..f8740f7852e3 100644 --- a/drivers/thermal/tegra/Kconfig +++ b/drivers/thermal/tegra/Kconfig | |||
| @@ -10,4 +10,11 @@ config TEGRA_SOCTHERM | |||
| 10 | zones to manage temperatures. This option is also required for the | 10 | zones to manage temperatures. This option is also required for the |
| 11 | emergency thermal reset (thermtrip) feature to function. | 11 | emergency thermal reset (thermtrip) feature to function. |
| 12 | 12 | ||
| 13 | config TEGRA_BPMP_THERMAL | ||
| 14 | tristate "Tegra BPMP thermal sensing" | ||
| 15 | depends on TEGRA_BPMP || COMPILE_TEST | ||
| 16 | help | ||
| 17 | Enable this option to support sensing the system temperature of NVIDIA | ||
| 18 | Tegra systems-on-chip with the BPMP coprocessor (Tegra186). | ||
| 19 | |||
| 13 | endmenu | 20 | endmenu |
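For completeness, a config fragment that builds the new sensor driver might look like the following; the surrounding symbols are illustrative and assume a Tegra186 platform where TEGRA_BPMP is already enabled:

```
CONFIG_THERMAL=y
CONFIG_THERMAL_OF=y
CONFIG_TEGRA_BPMP_THERMAL=y
```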
diff --git a/drivers/thermal/tegra/Makefile b/drivers/thermal/tegra/Makefile index 8a3f221f17c1..0f2b66edf0d2 100644 --- a/drivers/thermal/tegra/Makefile +++ b/drivers/thermal/tegra/Makefile | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
| 2 | obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o | 2 | obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o |
| 3 | obj-$(CONFIG_TEGRA_BPMP_THERMAL) += tegra-bpmp-thermal.o | ||
| 3 | 4 | ||
| 4 | tegra-soctherm-y := soctherm.o soctherm-fuse.o | 5 | tegra-soctherm-y := soctherm.o soctherm-fuse.o |
| 5 | tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124-soctherm.o | 6 | tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124-soctherm.o |
diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c new file mode 100644 index 000000000000..b0980dbca3b3 --- /dev/null +++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c | |||
| @@ -0,0 +1,263 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Author: | ||
| 5 | * Mikko Perttunen <mperttunen@nvidia.com> | ||
| 6 | * Aapo Vienamo <avienamo@nvidia.com> | ||
| 7 | * | ||
| 8 | * This software is licensed under the terms of the GNU General Public | ||
| 9 | * License version 2, as published by the Free Software Foundation, and | ||
| 10 | * may be copied, distributed, and modified under those terms. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include <linux/err.h> | ||
| 20 | #include <linux/module.h> | ||
| 21 | #include <linux/platform_device.h> | ||
| 22 | #include <linux/thermal.h> | ||
| 23 | #include <linux/workqueue.h> | ||
| 24 | |||
| 25 | #include <soc/tegra/bpmp.h> | ||
| 26 | #include <soc/tegra/bpmp-abi.h> | ||
| 27 | |||
| 28 | struct tegra_bpmp_thermal_zone { | ||
| 29 | struct tegra_bpmp_thermal *tegra; | ||
| 30 | struct thermal_zone_device *tzd; | ||
| 31 | struct work_struct tz_device_update_work; | ||
| 32 | unsigned int idx; | ||
| 33 | }; | ||
| 34 | |||
| 35 | struct tegra_bpmp_thermal { | ||
| 36 | struct device *dev; | ||
| 37 | struct tegra_bpmp *bpmp; | ||
| 38 | unsigned int num_zones; | ||
| 39 | struct tegra_bpmp_thermal_zone **zones; | ||
| 40 | }; | ||
| 41 | |||
| 42 | static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp) | ||
| 43 | { | ||
| 44 | struct tegra_bpmp_thermal_zone *zone = data; | ||
| 45 | struct mrq_thermal_host_to_bpmp_request req; | ||
| 46 | union mrq_thermal_bpmp_to_host_response reply; | ||
| 47 | struct tegra_bpmp_message msg; | ||
| 48 | int err; | ||
| 49 | |||
| 50 | memset(&req, 0, sizeof(req)); | ||
| 51 | req.type = CMD_THERMAL_GET_TEMP; | ||
| 52 | req.get_temp.zone = zone->idx; | ||
| 53 | |||
| 54 | memset(&msg, 0, sizeof(msg)); | ||
| 55 | msg.mrq = MRQ_THERMAL; | ||
| 56 | msg.tx.data = &req; | ||
| 57 | msg.tx.size = sizeof(req); | ||
| 58 | msg.rx.data = &reply; | ||
| 59 | msg.rx.size = sizeof(reply); | ||
| 60 | |||
| 61 | err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg); | ||
| 62 | if (err) | ||
| 63 | return err; | ||
| 64 | |||
| 65 | *out_temp = reply.get_temp.temp; | ||
| 66 | |||
| 67 | return 0; | ||
| 68 | } | ||
| 69 | |||
| 70 | static int tegra_bpmp_thermal_set_trips(void *data, int low, int high) | ||
| 71 | { | ||
| 72 | struct tegra_bpmp_thermal_zone *zone = data; | ||
| 73 | struct mrq_thermal_host_to_bpmp_request req; | ||
| 74 | struct tegra_bpmp_message msg; | ||
| 75 | |||
| 76 | memset(&req, 0, sizeof(req)); | ||
| 77 | req.type = CMD_THERMAL_SET_TRIP; | ||
| 78 | req.set_trip.zone = zone->idx; | ||
| 79 | req.set_trip.enabled = true; | ||
| 80 | req.set_trip.low = low; | ||
| 81 | req.set_trip.high = high; | ||
| 82 | |||
| 83 | memset(&msg, 0, sizeof(msg)); | ||
| 84 | msg.mrq = MRQ_THERMAL; | ||
| 85 | msg.tx.data = &req; | ||
| 86 | msg.tx.size = sizeof(req); | ||
| 87 | |||
| 88 | return tegra_bpmp_transfer(zone->tegra->bpmp, &msg); | ||
| 89 | } | ||
| 90 | |||
| 91 | static void tz_device_update_work_fn(struct work_struct *work) | ||
| 92 | { | ||
| 93 | struct tegra_bpmp_thermal_zone *zone; | ||
| 94 | |||
| 95 | zone = container_of(work, struct tegra_bpmp_thermal_zone, | ||
| 96 | tz_device_update_work); | ||
| 97 | |||
| 98 | thermal_zone_device_update(zone->tzd, THERMAL_TRIP_VIOLATED); | ||
| 99 | } | ||
| 100 | |||
| 101 | static void bpmp_mrq_thermal(unsigned int mrq, struct tegra_bpmp_channel *ch, | ||
| 102 | void *data) | ||
| 103 | { | ||
| 104 | struct mrq_thermal_bpmp_to_host_request *req; | ||
| 105 | struct tegra_bpmp_thermal *tegra = data; | ||
| 106 | int i; | ||
| 107 | |||
| 108 | req = (struct mrq_thermal_bpmp_to_host_request *)ch->ib->data; | ||
| 109 | |||
| 110 | if (req->type != CMD_THERMAL_HOST_TRIP_REACHED) { | ||
| 111 | dev_err(tegra->dev, "%s: invalid request type: %d\n", | ||
| 112 | __func__, req->type); | ||
| 113 | tegra_bpmp_mrq_return(ch, -EINVAL, NULL, 0); | ||
| 114 | return; | ||
| 115 | } | ||
| 116 | |||
| 117 | for (i = 0; i < tegra->num_zones; ++i) { | ||
| 118 | if (tegra->zones[i]->idx != req->host_trip_reached.zone) | ||
| 119 | continue; | ||
| 120 | |||
| 121 | schedule_work(&tegra->zones[i]->tz_device_update_work); | ||
| 122 | tegra_bpmp_mrq_return(ch, 0, NULL, 0); | ||
| 123 | return; | ||
| 124 | } | ||
| 125 | |||
| 126 | dev_err(tegra->dev, "%s: invalid thermal zone: %d\n", __func__, | ||
| 127 | req->host_trip_reached.zone); | ||
| 128 | tegra_bpmp_mrq_return(ch, -EINVAL, NULL, 0); | ||
| 129 | } | ||
| 130 | |||
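bpmp_mrq_thermal() handles traffic in the opposite direction: after a window has been programmed with CMD_THERMAL_SET_TRIP, the BPMP raises MRQ_THERMAL toward the host when the temperature leaves that window. The handler only matches the reported zone index against the registered zones and schedules that zone's work item; the thermal_zone_device_update() call (which goes back to the firmware through .get_temp) is left to the workqueue, keeping that work out of the message-handling path, and the request is acknowledged with tegra_bpmp_mrq_return(). A purely illustrative view of the inbound payload the handler dispatches on, mirroring the fields it reads from ch->ib->data (the zone index here is hypothetical):

```c
/*
 * Illustrative only, not code from this patch: the payload shape
 * bpmp_mrq_thermal() expects when the firmware reports a trip crossing.
 */
static void example_trip_reached_payload(void)
{
	struct mrq_thermal_bpmp_to_host_request req;

	memset(&req, 0, sizeof(req));
	req.type = CMD_THERMAL_HOST_TRIP_REACHED;	/* the only accepted type */
	req.host_trip_reached.zone = 2;			/* hypothetical zone index */

	/*
	 * For such a message, the handler finds the tegra_bpmp_thermal_zone
	 * whose ->idx equals req.host_trip_reached.zone, schedules its
	 * tz_device_update_work and returns 0 to the firmware; anything
	 * else is answered with -EINVAL.
	 */
}
```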
| 131 | static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp, | ||
| 132 | int *num_zones) | ||
| 133 | { | ||
| 134 | struct mrq_thermal_host_to_bpmp_request req; | ||
| 135 | union mrq_thermal_bpmp_to_host_response reply; | ||
| 136 | struct tegra_bpmp_message msg; | ||
| 137 | int err; | ||
| 138 | |||
| 139 | memset(&req, 0, sizeof(req)); | ||
| 140 | req.type = CMD_THERMAL_GET_NUM_ZONES; | ||
| 141 | |||
| 142 | memset(&msg, 0, sizeof(msg)); | ||
| 143 | msg.mrq = MRQ_THERMAL; | ||
| 144 | msg.tx.data = &req; | ||
| 145 | msg.tx.size = sizeof(req); | ||
| 146 | msg.rx.data = &reply; | ||
| 147 | msg.rx.size = sizeof(reply); | ||
| 148 | |||
| 149 | err = tegra_bpmp_transfer(bpmp, &msg); | ||
| 150 | if (err) | ||
| 151 | return err; | ||
| 152 | |||
| 153 | *num_zones = reply.get_num_zones.num; | ||
| 154 | |||
| 155 | return 0; | ||
| 156 | } | ||
| 157 | |||
| 158 | static const struct thermal_zone_of_device_ops tegra_bpmp_of_thermal_ops = { | ||
| 159 | .get_temp = tegra_bpmp_thermal_get_temp, | ||
| 160 | .set_trips = tegra_bpmp_thermal_set_trips, | ||
| 161 | }; | ||
| 162 | |||
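The two sensor callbacks collected here are what probe() below binds to each zone through devm_thermal_zone_of_sensor_register(); after that the thermal core owns them, reading temperatures via .get_temp and narrowing the low/high window around the current temperature via .set_trips so the BPMP only has to signal when the temperature leaves it. Values follow the usual thermal-framework convention of millidegrees Celsius. As a minimal sketch (not part of this patch), this is how another piece of kernel code could read such a zone back through the generic API; the zone name "cpu-thermal" is a hypothetical thermal-zones node name used only for illustration:

```c
/*
 * Minimal sketch, assuming a BPMP-backed zone named "cpu-thermal" exists.
 * Neither this function nor the zone name comes from this patch.
 */
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/thermal.h>

static int example_read_bpmp_zone(void)
{
	struct thermal_zone_device *tz;
	int temp, err;

	tz = thermal_zone_get_zone_by_name("cpu-thermal");
	if (IS_ERR(tz))
		return PTR_ERR(tz);

	err = thermal_zone_get_temp(tz, &temp);	/* millidegrees Celsius */
	if (err)
		return err;

	pr_info("BPMP-backed zone reads %d mC\n", temp);
	return 0;
}
```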
| 163 | static int tegra_bpmp_thermal_probe(struct platform_device *pdev) | ||
| 164 | { | ||
| 165 | struct tegra_bpmp *bpmp = dev_get_drvdata(pdev->dev.parent); | ||
| 166 | struct tegra_bpmp_thermal *tegra; | ||
| 167 | struct thermal_zone_device *tzd; | ||
| 168 | unsigned int i, max_num_zones; | ||
| 169 | int err; | ||
| 170 | |||
| 171 | tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL); | ||
| 172 | if (!tegra) | ||
| 173 | return -ENOMEM; | ||
| 174 | |||
| 175 | tegra->dev = &pdev->dev; | ||
| 176 | tegra->bpmp = bpmp; | ||
| 177 | |||
| 178 | err = tegra_bpmp_thermal_get_num_zones(bpmp, &max_num_zones); | ||
| 179 | if (err) { | ||
| 180 | dev_err(&pdev->dev, "failed to get the number of zones: %d\n", | ||
| 181 | err); | ||
| 182 | return err; | ||
| 183 | } | ||
| 184 | |||
| 185 | tegra->zones = devm_kcalloc(&pdev->dev, max_num_zones, | ||
| 186 | sizeof(*tegra->zones), GFP_KERNEL); | ||
| 187 | if (!tegra->zones) | ||
| 188 | return -ENOMEM; | ||
| 189 | |||
| 190 | for (i = 0; i < max_num_zones; ++i) { | ||
| 191 | struct tegra_bpmp_thermal_zone *zone; | ||
| 192 | int temp; | ||
| 193 | |||
| 194 | zone = devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL); | ||
| 195 | if (!zone) | ||
| 196 | return -ENOMEM; | ||
| 197 | |||
| 198 | zone->idx = i; | ||
| 199 | zone->tegra = tegra; | ||
| 200 | |||
| 201 | err = tegra_bpmp_thermal_get_temp(zone, &temp); | ||
| 202 | if (err < 0) { | ||
| 203 | devm_kfree(&pdev->dev, zone); | ||
| 204 | continue; | ||
| 205 | } | ||
| 206 | |||
| 207 | tzd = devm_thermal_zone_of_sensor_register( | ||
| 208 | &pdev->dev, i, zone, &tegra_bpmp_of_thermal_ops); | ||
| 209 | if (IS_ERR(tzd)) { | ||
| 210 | if (PTR_ERR(tzd) == -EPROBE_DEFER) | ||
| 211 | return -EPROBE_DEFER; | ||
| 212 | devm_kfree(&pdev->dev, zone); | ||
| 213 | continue; | ||
| 214 | } | ||
| 215 | |||
| 216 | zone->tzd = tzd; | ||
| 217 | INIT_WORK(&zone->tz_device_update_work, | ||
| 218 | tz_device_update_work_fn); | ||
| 219 | |||
| 220 | tegra->zones[tegra->num_zones++] = zone; | ||
| 221 | } | ||
| 222 | |||
| 223 | err = tegra_bpmp_request_mrq(bpmp, MRQ_THERMAL, bpmp_mrq_thermal, | ||
| 224 | tegra); | ||
| 225 | if (err) { | ||
| 226 | dev_err(&pdev->dev, "failed to register mrq handler: %d\n", | ||
| 227 | err); | ||
| 228 | return err; | ||
| 229 | } | ||
| 230 | |||
| 231 | platform_set_drvdata(pdev, tegra); | ||
| 232 | |||
| 233 | return 0; | ||
| 234 | } | ||
| 235 | |||
| 236 | static int tegra_bpmp_thermal_remove(struct platform_device *pdev) | ||
| 237 | { | ||
| 238 | struct tegra_bpmp_thermal *tegra = platform_get_drvdata(pdev); | ||
| 239 | |||
| 240 | tegra_bpmp_free_mrq(tegra->bpmp, MRQ_THERMAL, tegra); | ||
| 241 | |||
| 242 | return 0; | ||
| 243 | } | ||
| 244 | |||
| 245 | static const struct of_device_id tegra_bpmp_thermal_of_match[] = { | ||
| 246 | { .compatible = "nvidia,tegra186-bpmp-thermal" }, | ||
| 247 | { }, | ||
| 248 | }; | ||
| 249 | MODULE_DEVICE_TABLE(of, tegra_bpmp_thermal_of_match); | ||
| 250 | |||
| 251 | static struct platform_driver tegra_bpmp_thermal_driver = { | ||
| 252 | .probe = tegra_bpmp_thermal_probe, | ||
| 253 | .remove = tegra_bpmp_thermal_remove, | ||
| 254 | .driver = { | ||
| 255 | .name = "tegra-bpmp-thermal", | ||
| 256 | .of_match_table = tegra_bpmp_thermal_of_match, | ||
| 257 | }, | ||
| 258 | }; | ||
| 259 | module_platform_driver(tegra_bpmp_thermal_driver); | ||
| 260 | |||
| 261 | MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>"); | ||
| 262 | MODULE_DESCRIPTION("NVIDIA Tegra BPMP thermal sensor driver"); | ||
| 263 | MODULE_LICENSE("GPL v2"); | ||
