Diffstat (limited to 'drivers')
29 files changed, 1084 insertions, 477 deletions
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3188da3df8da..adceafda9c17 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data); | |||
182 | Power Management | 182 | Power Management |
183 | -------------------------------------------------------------------------- */ | 183 | -------------------------------------------------------------------------- */ |
184 | 184 | ||
185 | static const char *state_string(int state) | ||
186 | { | ||
187 | switch (state) { | ||
188 | case ACPI_STATE_D0: | ||
189 | return "D0"; | ||
190 | case ACPI_STATE_D1: | ||
191 | return "D1"; | ||
192 | case ACPI_STATE_D2: | ||
193 | return "D2"; | ||
194 | case ACPI_STATE_D3_HOT: | ||
195 | return "D3hot"; | ||
196 | case ACPI_STATE_D3_COLD: | ||
197 | return "D3"; | ||
198 | default: | ||
199 | return "(unknown)"; | ||
200 | } | ||
201 | } | ||
202 | |||
185 | static int __acpi_bus_get_power(struct acpi_device *device, int *state) | 203 | static int __acpi_bus_get_power(struct acpi_device *device, int *state) |
186 | { | 204 | { |
187 | int result = 0; | 205 | int result = ACPI_STATE_UNKNOWN; |
188 | acpi_status status = 0; | ||
189 | unsigned long long psc = 0; | ||
190 | 206 | ||
191 | if (!device || !state) | 207 | if (!device || !state) |
192 | return -EINVAL; | 208 | return -EINVAL; |
193 | 209 | ||
194 | *state = ACPI_STATE_UNKNOWN; | 210 | if (!device->flags.power_manageable) { |
195 | |||
196 | if (device->flags.power_manageable) { | ||
197 | /* | ||
198 | * Get the device's power state either directly (via _PSC) or | ||
199 | * indirectly (via power resources). | ||
200 | */ | ||
201 | if (device->power.flags.power_resources) { | ||
202 | result = acpi_power_get_inferred_state(device, state); | ||
203 | if (result) | ||
204 | return result; | ||
205 | } else if (device->power.flags.explicit_get) { | ||
206 | status = acpi_evaluate_integer(device->handle, "_PSC", | ||
207 | NULL, &psc); | ||
208 | if (ACPI_FAILURE(status)) | ||
209 | return -ENODEV; | ||
210 | *state = (int)psc; | ||
211 | } | ||
212 | } else { | ||
213 | /* TBD: Non-recursive algorithm for walking up hierarchy. */ | 211 | /* TBD: Non-recursive algorithm for walking up hierarchy. */ |
214 | *state = device->parent ? | 212 | *state = device->parent ? |
215 | device->parent->power.state : ACPI_STATE_D0; | 213 | device->parent->power.state : ACPI_STATE_D0; |
214 | goto out; | ||
215 | } | ||
216 | |||
217 | /* | ||
218 | * Get the device's power state either directly (via _PSC) or | ||
219 | * indirectly (via power resources). | ||
220 | */ | ||
221 | if (device->power.flags.explicit_get) { | ||
222 | unsigned long long psc; | ||
223 | acpi_status status = acpi_evaluate_integer(device->handle, | ||
224 | "_PSC", NULL, &psc); | ||
225 | if (ACPI_FAILURE(status)) | ||
226 | return -ENODEV; | ||
227 | |||
228 | result = psc; | ||
229 | } | ||
230 | /* The test below covers ACPI_STATE_UNKNOWN too. */ | ||
231 | if (result <= ACPI_STATE_D2) { | ||
232 | ; /* Do nothing. */ | ||
233 | } else if (device->power.flags.power_resources) { | ||
234 | int error = acpi_power_get_inferred_state(device, &result); | ||
235 | if (error) | ||
236 | return error; | ||
237 | } else if (result == ACPI_STATE_D3_HOT) { | ||
238 | result = ACPI_STATE_D3; | ||
216 | } | 239 | } |
240 | *state = result; | ||
217 | 241 | ||
218 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", | 242 | out: |
219 | device->pnp.bus_id, *state)); | 243 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n", |
244 | device->pnp.bus_id, state_string(*state))); | ||
220 | 245 | ||
221 | return 0; | 246 | return 0; |
222 | } | 247 | } |
@@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) | |||
234 | /* Make sure this is a valid target state */ | 259 | /* Make sure this is a valid target state */ |
235 | 260 | ||
236 | if (state == device->power.state) { | 261 | if (state == device->power.state) { |
237 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", | 262 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n", |
238 | state)); | 263 | state_string(state))); |
239 | return 0; | 264 | return 0; |
240 | } | 265 | } |
241 | 266 | ||
242 | if (!device->power.states[state].flags.valid) { | 267 | if (!device->power.states[state].flags.valid) { |
243 | printk(KERN_WARNING PREFIX "Device does not support D%d\n", state); | 268 | printk(KERN_WARNING PREFIX "Device does not support %s\n", |
269 | state_string(state)); | ||
244 | return -ENODEV; | 270 | return -ENODEV; |
245 | } | 271 | } |
246 | if (device->parent && (state < device->parent->power.state)) { | 272 | if (device->parent && (state < device->parent->power.state)) { |
@@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) | |||
294 | end: | 320 | end: |
295 | if (result) | 321 | if (result) |
296 | printk(KERN_WARNING PREFIX | 322 | printk(KERN_WARNING PREFIX |
297 | "Device [%s] failed to transition to D%d\n", | 323 | "Device [%s] failed to transition to %s\n", |
298 | device->pnp.bus_id, state); | 324 | device->pnp.bus_id, state_string(state)); |
299 | else { | 325 | else { |
300 | device->power.state = state; | 326 | device->power.state = state; |
301 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 327 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
302 | "Device [%s] transitioned to D%d\n", | 328 | "Device [%s] transitioned to %s\n", |
303 | device->pnp.bus_id, state)); | 329 | device->pnp.bus_id, state_string(state))); |
304 | } | 330 | } |
305 | 331 | ||
306 | return result; | 332 | return result; |
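The reworked __acpi_bus_get_power() above changes the evaluation order: _PSC is consulted first, power resources only refine results deeper than D2 (a test that also covers ACPI_STATE_UNKNOWN), and D3hot is reported as D3 when no power resources can tell the two apart. A minimal userspace sketch of that decision flow follows; the constants, the fake_dev fields and get_power_state() are illustrative stand-ins for the kernel structures, not the driver's API.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's ACPI_STATE_* constants. */
enum { D0, D1, D2, D3_HOT, D3_COLD, STATE_UNKNOWN = 0xff };
#define D3 D3_COLD              /* here "D3" means the cold state */

struct fake_dev {
	int power_manageable;   /* device->flags.power_manageable */
	int explicit_get;       /* device->power.flags.explicit_get (_PSC present) */
	int power_resources;    /* device->power.flags.power_resources */
	int psc;                /* value a real _PSC evaluation would return */
	int inferred;           /* what acpi_power_get_inferred_state() would infer */
	int parent_state;       /* parent's current power state, D0 if no parent */
};

static const char *state_string(int state)
{
	switch (state) {
	case D0: return "D0";
	case D1: return "D1";
	case D2: return "D2";
	case D3_HOT: return "D3hot";
	case D3_COLD: return "D3";
	default: return "(unknown)";
	}
}

/* Mirrors the decision order of the reworked __acpi_bus_get_power(). */
static int get_power_state(const struct fake_dev *d)
{
	int result = STATE_UNKNOWN;

	if (!d->power_manageable)
		return d->parent_state;        /* inherit from the parent */

	if (d->explicit_get)
		result = d->psc;               /* direct answer from _PSC */

	/* D0..D2 from _PSC is trusted as-is.  Deeper (or unknown) states are
	 * refined via power resources when available; without them, D3hot is
	 * reported as D3, matching the fallback in the patched function. */
	if (result > D2) {
		if (d->power_resources)
			result = d->inferred;
		else if (result == D3_HOT)
			result = D3;
	}
	return result;
}

int main(void)
{
	struct fake_dev dev = { .power_manageable = 1, .explicit_get = 1,
				.psc = D3_HOT, .power_resources = 0 };

	printf("reported state: %s\n", state_string(get_power_state(&dev)));
	return 0;
}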
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 0500f719f63e..dd6d6a3c6780 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state) | |||
631 | * We know a device's inferred power state when all the resources | 631 | * We know a device's inferred power state when all the resources |
632 | * required for a given D-state are 'on'. | 632 | * required for a given D-state are 'on'. |
633 | */ | 633 | */ |
634 | for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) { | 634 | for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { |
635 | list = &device->power.states[i].resources; | 635 | list = &device->power.states[i].resources; |
636 | if (list->count < 1) | 636 | if (list->count < 1) |
637 | continue; | 637 | continue; |
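The one-character change above makes the scan inclusive: with the old exclusive bound the last D-state whose resource list was ever examined was D2, so a state could never be inferred from a D3hot resource list. A trivial sketch of the difference, using stand-in constants rather than the kernel's ACPI_STATE_* values:

#include <stdio.h>

enum { S_D0, S_D1, S_D2, S_D3_HOT };  /* stand-ins for ACPI_STATE_D0..D3_HOT */

int main(void)
{
	int i;

	printf("old bound (i < D3_HOT): ");
	for (i = S_D0; i < S_D3_HOT; i++)
		printf("D%d ", i);             /* D0 D1 D2 - D3hot never examined */

	printf("\nnew bound (i <= D3_HOT): ");
	for (i = S_D0; i <= S_D3_HOT; i++)
		printf("D%d ", i);             /* D0 D1 D2 D3 - index 3 is D3hot */

	printf("\n");
	return 0;
}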
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 85cbfdccc97c..c8a1f3b68110 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void) | |||
1567 | ACPI_BUS_TYPE_POWER_BUTTON, | 1567 | ACPI_BUS_TYPE_POWER_BUTTON, |
1568 | ACPI_STA_DEFAULT, | 1568 | ACPI_STA_DEFAULT, |
1569 | &ops); | 1569 | &ops); |
1570 | device_init_wakeup(&device->dev, true); | ||
1570 | } | 1571 | } |
1571 | 1572 | ||
1572 | if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { | 1573 | if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 74ee4ab577b6..88561029cca8 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -57,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); | |||
57 | MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume."); | 57 | MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume."); |
58 | 58 | ||
59 | static u8 sleep_states[ACPI_S_STATE_COUNT]; | 59 | static u8 sleep_states[ACPI_S_STATE_COUNT]; |
60 | static bool pwr_btn_event_pending; | ||
60 | 61 | ||
61 | static void acpi_sleep_tts_switch(u32 acpi_state) | 62 | static void acpi_sleep_tts_switch(u32 acpi_state) |
62 | { | 63 | { |
@@ -184,6 +185,14 @@ static int acpi_pm_prepare(void) | |||
184 | return error; | 185 | return error; |
185 | } | 186 | } |
186 | 187 | ||
188 | static int find_powerf_dev(struct device *dev, void *data) | ||
189 | { | ||
190 | struct acpi_device *device = to_acpi_device(dev); | ||
191 | const char *hid = acpi_device_hid(device); | ||
192 | |||
193 | return !strcmp(hid, ACPI_BUTTON_HID_POWERF); | ||
194 | } | ||
195 | |||
187 | /** | 196 | /** |
188 | * acpi_pm_finish - Instruct the platform to leave a sleep state. | 197 | * acpi_pm_finish - Instruct the platform to leave a sleep state. |
189 | * | 198 | * |
@@ -192,6 +201,7 @@ static int acpi_pm_prepare(void) | |||
192 | */ | 201 | */ |
193 | static void acpi_pm_finish(void) | 202 | static void acpi_pm_finish(void) |
194 | { | 203 | { |
204 | struct device *pwr_btn_dev; | ||
195 | u32 acpi_state = acpi_target_sleep_state; | 205 | u32 acpi_state = acpi_target_sleep_state; |
196 | 206 | ||
197 | acpi_ec_unblock_transactions(); | 207 | acpi_ec_unblock_transactions(); |
@@ -209,6 +219,23 @@ static void acpi_pm_finish(void) | |||
209 | acpi_set_firmware_waking_vector((acpi_physical_address) 0); | 219 | acpi_set_firmware_waking_vector((acpi_physical_address) 0); |
210 | 220 | ||
211 | acpi_target_sleep_state = ACPI_STATE_S0; | 221 | acpi_target_sleep_state = ACPI_STATE_S0; |
222 | |||
223 | /* If we were woken with the fixed power button, provide a small | ||
224 | * hint to userspace in the form of a wakeup event on the fixed power | ||
225 | * button device (if it can be found). | ||
226 | * | ||
227 | * We delay the event generation til now, as the PM layer requires | ||
228 | * timekeeping to be running before we generate events. */ | ||
229 | if (!pwr_btn_event_pending) | ||
230 | return; | ||
231 | |||
232 | pwr_btn_event_pending = false; | ||
233 | pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL, | ||
234 | find_powerf_dev); | ||
235 | if (pwr_btn_dev) { | ||
236 | pm_wakeup_event(pwr_btn_dev, 0); | ||
237 | put_device(pwr_btn_dev); | ||
238 | } | ||
212 | } | 239 | } |
213 | 240 | ||
214 | /** | 241 | /** |
@@ -298,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state) | |||
298 | /* ACPI 3.0 specs (P62) says that it's the responsibility | 325 | /* ACPI 3.0 specs (P62) says that it's the responsibility |
299 | * of the OSPM to clear the status bit [ implying that the | 326 | * of the OSPM to clear the status bit [ implying that the |
300 | * POWER_BUTTON event should not reach userspace ] | 327 | * POWER_BUTTON event should not reach userspace ] |
328 | * | ||
329 | * However, we do generate a small hint for userspace in the form of | ||
330 | * a wakeup event. We flag this condition for now and generate the | ||
331 | * event later, as we're currently too early in resume to be able to | ||
332 | * generate wakeup events. | ||
301 | */ | 333 | */ |
302 | if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) | 334 | if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) { |
303 | acpi_clear_event(ACPI_EVENT_POWER_BUTTON); | 335 | acpi_event_status pwr_btn_status; |
336 | |||
337 | acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status); | ||
338 | |||
339 | if (pwr_btn_status & ACPI_EVENT_FLAG_SET) { | ||
340 | acpi_clear_event(ACPI_EVENT_POWER_BUTTON); | ||
341 | /* Flag for later */ | ||
342 | pwr_btn_event_pending = true; | ||
343 | } | ||
344 | } | ||
304 | 345 | ||
305 | /* | 346 | /* |
306 | * Disable and clear GPE status before interrupt is enabled. Some GPEs | 347 | * Disable and clear GPE status before interrupt is enabled. Some GPEs |
@@ -730,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p) | |||
730 | * can wake the system. _S0W may be valid, too. | 771 | * can wake the system. _S0W may be valid, too. |
731 | */ | 772 | */ |
732 | if (acpi_target_sleep_state == ACPI_STATE_S0 || | 773 | if (acpi_target_sleep_state == ACPI_STATE_S0 || |
733 | (device_may_wakeup(dev) && | 774 | (device_may_wakeup(dev) && adev->wakeup.flags.valid && |
734 | adev->wakeup.sleep_state <= acpi_target_sleep_state)) { | 775 | adev->wakeup.sleep_state >= acpi_target_sleep_state)) { |
735 | acpi_status status; | 776 | acpi_status status; |
736 | 777 | ||
737 | acpi_method[3] = 'W'; | 778 | acpi_method[3] = 'W'; |
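The sleep.c changes above split the power-button handling across resume: acpi_suspend_enter() only records that the fixed power button's status bit was set (and clears it, per the spec), and acpi_pm_finish() turns that record into a wakeup event once timekeeping is running again. A compressed userspace model of that flag-then-report pattern is below; suspend_enter(), pm_finish() and report_wakeup() are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <stdbool.h>

static bool pwr_btn_event_pending;

/* Stand-in for pm_wakeup_event() on the fixed power button device. */
static void report_wakeup(const char *what)
{
	printf("wakeup event: %s\n", what);
}

/* Models acpi_suspend_enter(): too early in resume to generate events,
 * so just clear the hardware status and remember that it was set. */
static void suspend_enter(bool power_button_status_set)
{
	if (power_button_status_set) {
		/* acpi_clear_event(ACPI_EVENT_POWER_BUTTON) would go here */
		pwr_btn_event_pending = true;
	}
}

/* Models acpi_pm_finish(): timekeeping is running again, so the
 * deferred hint to userspace can finally be generated. */
static void pm_finish(void)
{
	if (!pwr_btn_event_pending)
		return;

	pwr_btn_event_pending = false;
	report_wakeup("fixed power button");
}

int main(void)
{
	suspend_enter(true);   /* woken by the power button */
	pm_finish();
	return 0;
}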
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 8d81a1d32653..dd3e661a124d 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o | |||
6 | obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o | 6 | obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o |
7 | obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o | 7 | obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o |
8 | obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o | 8 | obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o |
9 | obj-$(CONFIG_EM_TIMER_STI) += em_sti.o | ||
9 | obj-$(CONFIG_CLKBLD_I8253) += i8253.o | 10 | obj-$(CONFIG_CLKBLD_I8253) += i8253.o |
10 | obj-$(CONFIG_CLKSRC_MMIO) += mmio.o | 11 | obj-$(CONFIG_CLKSRC_MMIO) += mmio.o |
11 | obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o | 12 | obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o |
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
new file mode 100644
index 000000000000..372051d1bba8
--- /dev/null
+++ b/drivers/clocksource/em_sti.c
@@ -0,0 +1,406 @@ | |||
1 | /* | ||
2 | * Emma Mobile Timer Support - STI | ||
3 | * | ||
4 | * Copyright (C) 2012 Magnus Damm | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/init.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/clk.h> | ||
27 | #include <linux/irq.h> | ||
28 | #include <linux/err.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/clocksource.h> | ||
31 | #include <linux/clockchips.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/module.h> | ||
34 | |||
35 | enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR }; | ||
36 | |||
37 | struct em_sti_priv { | ||
38 | void __iomem *base; | ||
39 | struct clk *clk; | ||
40 | struct platform_device *pdev; | ||
41 | unsigned int active[USER_NR]; | ||
42 | unsigned long rate; | ||
43 | raw_spinlock_t lock; | ||
44 | struct clock_event_device ced; | ||
45 | struct clocksource cs; | ||
46 | }; | ||
47 | |||
48 | #define STI_CONTROL 0x00 | ||
49 | #define STI_COMPA_H 0x10 | ||
50 | #define STI_COMPA_L 0x14 | ||
51 | #define STI_COMPB_H 0x18 | ||
52 | #define STI_COMPB_L 0x1c | ||
53 | #define STI_COUNT_H 0x20 | ||
54 | #define STI_COUNT_L 0x24 | ||
55 | #define STI_COUNT_RAW_H 0x28 | ||
56 | #define STI_COUNT_RAW_L 0x2c | ||
57 | #define STI_SET_H 0x30 | ||
58 | #define STI_SET_L 0x34 | ||
59 | #define STI_INTSTATUS 0x40 | ||
60 | #define STI_INTRAWSTATUS 0x44 | ||
61 | #define STI_INTENSET 0x48 | ||
62 | #define STI_INTENCLR 0x4c | ||
63 | #define STI_INTFFCLR 0x50 | ||
64 | |||
65 | static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs) | ||
66 | { | ||
67 | return ioread32(p->base + offs); | ||
68 | } | ||
69 | |||
70 | static inline void em_sti_write(struct em_sti_priv *p, int offs, | ||
71 | unsigned long value) | ||
72 | { | ||
73 | iowrite32(value, p->base + offs); | ||
74 | } | ||
75 | |||
76 | static int em_sti_enable(struct em_sti_priv *p) | ||
77 | { | ||
78 | int ret; | ||
79 | |||
80 | /* enable clock */ | ||
81 | ret = clk_enable(p->clk); | ||
82 | if (ret) { | ||
83 | dev_err(&p->pdev->dev, "cannot enable clock\n"); | ||
84 | return ret; | ||
85 | } | ||
86 | |||
87 | /* configure channel, periodic mode and maximum timeout */ | ||
88 | p->rate = clk_get_rate(p->clk); | ||
89 | |||
90 | /* reset the counter */ | ||
91 | em_sti_write(p, STI_SET_H, 0x40000000); | ||
92 | em_sti_write(p, STI_SET_L, 0x00000000); | ||
93 | |||
94 | /* mask and clear pending interrupts */ | ||
95 | em_sti_write(p, STI_INTENCLR, 3); | ||
96 | em_sti_write(p, STI_INTFFCLR, 3); | ||
97 | |||
98 | /* enable updates of counter registers */ | ||
99 | em_sti_write(p, STI_CONTROL, 1); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static void em_sti_disable(struct em_sti_priv *p) | ||
105 | { | ||
106 | /* mask interrupts */ | ||
107 | em_sti_write(p, STI_INTENCLR, 3); | ||
108 | |||
109 | /* stop clock */ | ||
110 | clk_disable(p->clk); | ||
111 | } | ||
112 | |||
113 | static cycle_t em_sti_count(struct em_sti_priv *p) | ||
114 | { | ||
115 | cycle_t ticks; | ||
116 | unsigned long flags; | ||
117 | |||
118 | /* the STI hardware buffers the 48-bit count, but to | ||
119 | * break it out into two 32-bit access the registers | ||
120 | * must be accessed in a certain order. | ||
121 | * Always read STI_COUNT_H before STI_COUNT_L. | ||
122 | */ | ||
123 | raw_spin_lock_irqsave(&p->lock, flags); | ||
124 | ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; | ||
125 | ticks |= em_sti_read(p, STI_COUNT_L); | ||
126 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
127 | |||
128 | return ticks; | ||
129 | } | ||
130 | |||
131 | static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next) | ||
132 | { | ||
133 | unsigned long flags; | ||
134 | |||
135 | raw_spin_lock_irqsave(&p->lock, flags); | ||
136 | |||
137 | /* mask compare A interrupt */ | ||
138 | em_sti_write(p, STI_INTENCLR, 1); | ||
139 | |||
140 | /* update compare A value */ | ||
141 | em_sti_write(p, STI_COMPA_H, next >> 32); | ||
142 | em_sti_write(p, STI_COMPA_L, next & 0xffffffff); | ||
143 | |||
144 | /* clear compare A interrupt source */ | ||
145 | em_sti_write(p, STI_INTFFCLR, 1); | ||
146 | |||
147 | /* unmask compare A interrupt */ | ||
148 | em_sti_write(p, STI_INTENSET, 1); | ||
149 | |||
150 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
151 | |||
152 | return next; | ||
153 | } | ||
154 | |||
155 | static irqreturn_t em_sti_interrupt(int irq, void *dev_id) | ||
156 | { | ||
157 | struct em_sti_priv *p = dev_id; | ||
158 | |||
159 | p->ced.event_handler(&p->ced); | ||
160 | return IRQ_HANDLED; | ||
161 | } | ||
162 | |||
163 | static int em_sti_start(struct em_sti_priv *p, unsigned int user) | ||
164 | { | ||
165 | unsigned long flags; | ||
166 | int used_before; | ||
167 | int ret = 0; | ||
168 | |||
169 | raw_spin_lock_irqsave(&p->lock, flags); | ||
170 | used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; | ||
171 | if (!used_before) | ||
172 | ret = em_sti_enable(p); | ||
173 | |||
174 | if (!ret) | ||
175 | p->active[user] = 1; | ||
176 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
177 | |||
178 | return ret; | ||
179 | } | ||
180 | |||
181 | static void em_sti_stop(struct em_sti_priv *p, unsigned int user) | ||
182 | { | ||
183 | unsigned long flags; | ||
184 | int used_before, used_after; | ||
185 | |||
186 | raw_spin_lock_irqsave(&p->lock, flags); | ||
187 | used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; | ||
188 | p->active[user] = 0; | ||
189 | used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; | ||
190 | |||
191 | if (used_before && !used_after) | ||
192 | em_sti_disable(p); | ||
193 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
194 | } | ||
195 | |||
196 | static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs) | ||
197 | { | ||
198 | return container_of(cs, struct em_sti_priv, cs); | ||
199 | } | ||
200 | |||
201 | static cycle_t em_sti_clocksource_read(struct clocksource *cs) | ||
202 | { | ||
203 | return em_sti_count(cs_to_em_sti(cs)); | ||
204 | } | ||
205 | |||
206 | static int em_sti_clocksource_enable(struct clocksource *cs) | ||
207 | { | ||
208 | int ret; | ||
209 | struct em_sti_priv *p = cs_to_em_sti(cs); | ||
210 | |||
211 | ret = em_sti_start(p, USER_CLOCKSOURCE); | ||
212 | if (!ret) | ||
213 | __clocksource_updatefreq_hz(cs, p->rate); | ||
214 | return ret; | ||
215 | } | ||
216 | |||
217 | static void em_sti_clocksource_disable(struct clocksource *cs) | ||
218 | { | ||
219 | em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE); | ||
220 | } | ||
221 | |||
222 | static void em_sti_clocksource_resume(struct clocksource *cs) | ||
223 | { | ||
224 | em_sti_clocksource_enable(cs); | ||
225 | } | ||
226 | |||
227 | static int em_sti_register_clocksource(struct em_sti_priv *p) | ||
228 | { | ||
229 | struct clocksource *cs = &p->cs; | ||
230 | |||
231 | memset(cs, 0, sizeof(*cs)); | ||
232 | cs->name = dev_name(&p->pdev->dev); | ||
233 | cs->rating = 200; | ||
234 | cs->read = em_sti_clocksource_read; | ||
235 | cs->enable = em_sti_clocksource_enable; | ||
236 | cs->disable = em_sti_clocksource_disable; | ||
237 | cs->suspend = em_sti_clocksource_disable; | ||
238 | cs->resume = em_sti_clocksource_resume; | ||
239 | cs->mask = CLOCKSOURCE_MASK(48); | ||
240 | cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; | ||
241 | |||
242 | dev_info(&p->pdev->dev, "used as clock source\n"); | ||
243 | |||
244 | /* Register with dummy 1 Hz value, gets updated in ->enable() */ | ||
245 | clocksource_register_hz(cs, 1); | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced) | ||
250 | { | ||
251 | return container_of(ced, struct em_sti_priv, ced); | ||
252 | } | ||
253 | |||
254 | static void em_sti_clock_event_mode(enum clock_event_mode mode, | ||
255 | struct clock_event_device *ced) | ||
256 | { | ||
257 | struct em_sti_priv *p = ced_to_em_sti(ced); | ||
258 | |||
259 | /* deal with old setting first */ | ||
260 | switch (ced->mode) { | ||
261 | case CLOCK_EVT_MODE_ONESHOT: | ||
262 | em_sti_stop(p, USER_CLOCKEVENT); | ||
263 | break; | ||
264 | default: | ||
265 | break; | ||
266 | } | ||
267 | |||
268 | switch (mode) { | ||
269 | case CLOCK_EVT_MODE_ONESHOT: | ||
270 | dev_info(&p->pdev->dev, "used for oneshot clock events\n"); | ||
271 | em_sti_start(p, USER_CLOCKEVENT); | ||
272 | clockevents_config(&p->ced, p->rate); | ||
273 | break; | ||
274 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
275 | case CLOCK_EVT_MODE_UNUSED: | ||
276 | em_sti_stop(p, USER_CLOCKEVENT); | ||
277 | break; | ||
278 | default: | ||
279 | break; | ||
280 | } | ||
281 | } | ||
282 | |||
283 | static int em_sti_clock_event_next(unsigned long delta, | ||
284 | struct clock_event_device *ced) | ||
285 | { | ||
286 | struct em_sti_priv *p = ced_to_em_sti(ced); | ||
287 | cycle_t next; | ||
288 | int safe; | ||
289 | |||
290 | next = em_sti_set_next(p, em_sti_count(p) + delta); | ||
291 | safe = em_sti_count(p) < (next - 1); | ||
292 | |||
293 | return !safe; | ||
294 | } | ||
295 | |||
296 | static void em_sti_register_clockevent(struct em_sti_priv *p) | ||
297 | { | ||
298 | struct clock_event_device *ced = &p->ced; | ||
299 | |||
300 | memset(ced, 0, sizeof(*ced)); | ||
301 | ced->name = dev_name(&p->pdev->dev); | ||
302 | ced->features = CLOCK_EVT_FEAT_ONESHOT; | ||
303 | ced->rating = 200; | ||
304 | ced->cpumask = cpumask_of(0); | ||
305 | ced->set_next_event = em_sti_clock_event_next; | ||
306 | ced->set_mode = em_sti_clock_event_mode; | ||
307 | |||
308 | dev_info(&p->pdev->dev, "used for clock events\n"); | ||
309 | |||
310 | /* Register with dummy 1 Hz value, gets updated in ->set_mode() */ | ||
311 | clockevents_config_and_register(ced, 1, 2, 0xffffffff); | ||
312 | } | ||
313 | |||
314 | static int __devinit em_sti_probe(struct platform_device *pdev) | ||
315 | { | ||
316 | struct em_sti_priv *p; | ||
317 | struct resource *res; | ||
318 | int irq, ret; | ||
319 | |||
320 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
321 | if (p == NULL) { | ||
322 | dev_err(&pdev->dev, "failed to allocate driver data\n"); | ||
323 | ret = -ENOMEM; | ||
324 | goto err0; | ||
325 | } | ||
326 | |||
327 | p->pdev = pdev; | ||
328 | platform_set_drvdata(pdev, p); | ||
329 | |||
330 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
331 | if (!res) { | ||
332 | dev_err(&pdev->dev, "failed to get I/O memory\n"); | ||
333 | ret = -EINVAL; | ||
334 | goto err0; | ||
335 | } | ||
336 | |||
337 | irq = platform_get_irq(pdev, 0); | ||
338 | if (irq < 0) { | ||
339 | dev_err(&pdev->dev, "failed to get irq\n"); | ||
340 | ret = -EINVAL; | ||
341 | goto err0; | ||
342 | } | ||
343 | |||
344 | /* map memory, let base point to the STI instance */ | ||
345 | p->base = ioremap_nocache(res->start, resource_size(res)); | ||
346 | if (p->base == NULL) { | ||
347 | dev_err(&pdev->dev, "failed to remap I/O memory\n"); | ||
348 | ret = -ENXIO; | ||
349 | goto err0; | ||
350 | } | ||
351 | |||
352 | /* get hold of clock */ | ||
353 | p->clk = clk_get(&pdev->dev, "sclk"); | ||
354 | if (IS_ERR(p->clk)) { | ||
355 | dev_err(&pdev->dev, "cannot get clock\n"); | ||
356 | ret = PTR_ERR(p->clk); | ||
357 | goto err1; | ||
358 | } | ||
359 | |||
360 | if (request_irq(irq, em_sti_interrupt, | ||
361 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, | ||
362 | dev_name(&pdev->dev), p)) { | ||
363 | dev_err(&pdev->dev, "failed to request low IRQ\n"); | ||
364 | ret = -ENOENT; | ||
365 | goto err2; | ||
366 | } | ||
367 | |||
368 | raw_spin_lock_init(&p->lock); | ||
369 | em_sti_register_clockevent(p); | ||
370 | em_sti_register_clocksource(p); | ||
371 | return 0; | ||
372 | |||
373 | err2: | ||
374 | clk_put(p->clk); | ||
375 | err1: | ||
376 | iounmap(p->base); | ||
377 | err0: | ||
378 | kfree(p); | ||
379 | return ret; | ||
380 | } | ||
381 | |||
382 | static int __devexit em_sti_remove(struct platform_device *pdev) | ||
383 | { | ||
384 | return -EBUSY; /* cannot unregister clockevent and clocksource */ | ||
385 | } | ||
386 | |||
387 | static const struct of_device_id em_sti_dt_ids[] __devinitconst = { | ||
388 | { .compatible = "renesas,em-sti", }, | ||
389 | {}, | ||
390 | }; | ||
391 | MODULE_DEVICE_TABLE(of, em_sti_dt_ids); | ||
392 | |||
393 | static struct platform_driver em_sti_device_driver = { | ||
394 | .probe = em_sti_probe, | ||
395 | .remove = __devexit_p(em_sti_remove), | ||
396 | .driver = { | ||
397 | .name = "em_sti", | ||
398 | .of_match_table = em_sti_dt_ids, | ||
399 | } | ||
400 | }; | ||
401 | |||
402 | module_platform_driver(em_sti_device_driver); | ||
403 | |||
404 | MODULE_AUTHOR("Magnus Damm"); | ||
405 | MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver"); | ||
406 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 7bb00448e13d..b6453d0e44ad 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2833,7 +2833,7 @@ static __init void exynos5_gpiolib_init(void) | |||
2833 | } | 2833 | } |
2834 | 2834 | ||
2835 | /* need to set base address for gpc4 */ | 2835 | /* need to set base address for gpc4 */ |
2836 | exonys5_gpios_1[11].base = gpio_base1 + 0x2E0; | 2836 | exynos5_gpios_1[11].base = gpio_base1 + 0x2E0; |
2837 | 2837 | ||
2838 | /* need to set base address for gpx */ | 2838 | /* need to set base address for gpx */ |
2839 | chip = &exynos5_gpios_1[21]; | 2839 | chip = &exynos5_gpios_1[21]; |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 3df4efa11942..3186522a4458 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -460,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
460 | rdev->config.cayman.max_pipes_per_simd = 4; | 460 | rdev->config.cayman.max_pipes_per_simd = 4; |
461 | rdev->config.cayman.max_tile_pipes = 2; | 461 | rdev->config.cayman.max_tile_pipes = 2; |
462 | if ((rdev->pdev->device == 0x9900) || | 462 | if ((rdev->pdev->device == 0x9900) || |
463 | (rdev->pdev->device == 0x9901)) { | 463 | (rdev->pdev->device == 0x9901) || |
464 | (rdev->pdev->device == 0x9905) || | ||
465 | (rdev->pdev->device == 0x9906) || | ||
466 | (rdev->pdev->device == 0x9907) || | ||
467 | (rdev->pdev->device == 0x9908) || | ||
468 | (rdev->pdev->device == 0x9909) || | ||
469 | (rdev->pdev->device == 0x9910) || | ||
470 | (rdev->pdev->device == 0x9917)) { | ||
464 | rdev->config.cayman.max_simds_per_se = 6; | 471 | rdev->config.cayman.max_simds_per_se = 6; |
465 | rdev->config.cayman.max_backends_per_se = 2; | 472 | rdev->config.cayman.max_backends_per_se = 2; |
466 | } else if ((rdev->pdev->device == 0x9903) || | 473 | } else if ((rdev->pdev->device == 0x9903) || |
467 | (rdev->pdev->device == 0x9904)) { | 474 | (rdev->pdev->device == 0x9904) || |
475 | (rdev->pdev->device == 0x990A) || | ||
476 | (rdev->pdev->device == 0x9913) || | ||
477 | (rdev->pdev->device == 0x9918)) { | ||
468 | rdev->config.cayman.max_simds_per_se = 4; | 478 | rdev->config.cayman.max_simds_per_se = 4; |
469 | rdev->config.cayman.max_backends_per_se = 2; | 479 | rdev->config.cayman.max_backends_per_se = 2; |
470 | } else if ((rdev->pdev->device == 0x9990) || | 480 | } else if ((rdev->pdev->device == 0x9919) || |
471 | (rdev->pdev->device == 0x9991)) { | 481 | (rdev->pdev->device == 0x9990) || |
482 | (rdev->pdev->device == 0x9991) || | ||
483 | (rdev->pdev->device == 0x9994) || | ||
484 | (rdev->pdev->device == 0x99A0)) { | ||
472 | rdev->config.cayman.max_simds_per_se = 3; | 485 | rdev->config.cayman.max_simds_per_se = 3; |
473 | rdev->config.cayman.max_backends_per_se = 1; | 486 | rdev->config.cayman.max_backends_per_se = 1; |
474 | } else { | 487 | } else { |
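The ni.c hunk above just grows two if-chains with more PCI device IDs that share the same SIMD/backend counts. Purely as an illustration of the same mapping (not how the driver is actually written), the lookup can also be phrased as a table keyed by device ID; the IDs and counts below are copied from the diff, everything else is hypothetical.

#include <stdio.h>
#include <stddef.h>

struct cayman_cfg {
	unsigned short device;          /* PCI device ID */
	unsigned max_simds_per_se;
	unsigned max_backends_per_se;
};

/* IDs and counts taken from the cayman_gpu_init() chains in the diff. */
static const struct cayman_cfg cfgs[] = {
	{ 0x9900, 6, 2 }, { 0x9901, 6, 2 }, { 0x9905, 6, 2 }, { 0x9906, 6, 2 },
	{ 0x9907, 6, 2 }, { 0x9908, 6, 2 }, { 0x9909, 6, 2 }, { 0x9910, 6, 2 },
	{ 0x9917, 6, 2 },
	{ 0x9903, 4, 2 }, { 0x9904, 4, 2 }, { 0x990A, 4, 2 }, { 0x9913, 4, 2 },
	{ 0x9918, 4, 2 },
	{ 0x9919, 3, 1 }, { 0x9990, 3, 1 }, { 0x9991, 3, 1 }, { 0x9994, 3, 1 },
	{ 0x99A0, 3, 1 },
};

static const struct cayman_cfg *lookup(unsigned short device)
{
	size_t i;

	for (i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++)
		if (cfgs[i].device == device)
			return &cfgs[i];
	return NULL;    /* the chain's final else branch lies outside the hunk */
}

int main(void)
{
	const struct cayman_cfg *c = lookup(0x9917);

	if (c)
		printf("0x%04x: %u SIMDs/SE, %u backends/SE\n",
		       c->device, c->max_simds_per_se, c->max_backends_per_se);
	return 0;
}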
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 45cfcea63507..f30dc95f83b1 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2426,6 +2426,12 @@ int r600_startup(struct radeon_device *rdev) | |||
2426 | if (r) | 2426 | if (r) |
2427 | return r; | 2427 | return r; |
2428 | 2428 | ||
2429 | r = r600_audio_init(rdev); | ||
2430 | if (r) { | ||
2431 | DRM_ERROR("radeon: audio init failed\n"); | ||
2432 | return r; | ||
2433 | } | ||
2434 | |||
2429 | return 0; | 2435 | return 0; |
2430 | } | 2436 | } |
2431 | 2437 | ||
@@ -2462,12 +2468,6 @@ int r600_resume(struct radeon_device *rdev) | |||
2462 | return r; | 2468 | return r; |
2463 | } | 2469 | } |
2464 | 2470 | ||
2465 | r = r600_audio_init(rdev); | ||
2466 | if (r) { | ||
2467 | DRM_ERROR("radeon: audio resume failed\n"); | ||
2468 | return r; | ||
2469 | } | ||
2470 | |||
2471 | return r; | 2471 | return r; |
2472 | } | 2472 | } |
2473 | 2473 | ||
@@ -2577,9 +2577,6 @@ int r600_init(struct radeon_device *rdev) | |||
2577 | rdev->accel_working = false; | 2577 | rdev->accel_working = false; |
2578 | } | 2578 | } |
2579 | 2579 | ||
2580 | r = r600_audio_init(rdev); | ||
2581 | if (r) | ||
2582 | return r; /* TODO error handling */ | ||
2583 | return 0; | 2580 | return 0; |
2584 | } | 2581 | } |
2585 | 2582 | ||
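The r600.c hunks above (and the matching rs600.c, rs690.c and rv770.c hunks further down) drop the r600_audio_init() calls from the init and resume paths and add a single call in *_startup(), which both of those paths run once the engines are up. A toy sketch of that call structure, with empty stand-in functions, illustrates why one call site in startup() covers both cold init and resume:

#include <stdio.h>

/* Stand-in for r600_audio_init(); in the driver it can fail and
 * the error is propagated by startup(). */
static int audio_init(void)
{
	printf("  audio init\n");
	return 0;
}

static int startup(void)
{
	printf("  rings/engines up\n");
	return audio_init();          /* single call site covers both paths */
}

static int init(void)             /* cold boot / module load path */
{
	printf("init:\n");
	return startup();
}

static int resume(void)           /* resume-from-suspend path */
{
	printf("resume:\n");
	return startup();
}

int main(void)
{
	init();
	resume();
	return 0;
}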
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 7c4fa77f018f..7479a5c503e4 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | |||
192 | struct radeon_device *rdev = dev->dev_private; | 192 | struct radeon_device *rdev = dev->dev_private; |
193 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 193 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
194 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 194 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
195 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
195 | int base_rate = 48000; | 196 | int base_rate = 48000; |
196 | 197 | ||
197 | switch (radeon_encoder->encoder_id) { | 198 | switch (radeon_encoder->encoder_id) { |
@@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | |||
217 | WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10); | 218 | WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10); |
218 | WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071); | 219 | WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071); |
219 | 220 | ||
220 | /* Some magic trigger or src sel? */ | 221 | /* Select DTO source */ |
221 | WREG32_P(0x5ac, 0x01, ~0x77); | 222 | WREG32(0x5ac, radeon_crtc->crtc_id); |
222 | } else { | 223 | } else { |
223 | switch (dig->dig_encoder) { | 224 | switch (dig->dig_encoder) { |
224 | case 0: | 225 | case 0: |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 226379e00ac1..969c27529dfe 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -348,7 +348,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod | |||
348 | WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, | 348 | WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, |
349 | HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ | 349 | HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ |
350 | HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ | 350 | HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ |
351 | HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */ | ||
352 | HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */ | 351 | HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */ |
353 | HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ | 352 | HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ |
354 | } | 353 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 85dac33e3cce..fefcca55c1eb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1374,9 +1374,9 @@ struct cayman_asic { | |||
1374 | 1374 | ||
1375 | struct si_asic { | 1375 | struct si_asic { |
1376 | unsigned max_shader_engines; | 1376 | unsigned max_shader_engines; |
1377 | unsigned max_pipes_per_simd; | ||
1378 | unsigned max_tile_pipes; | 1377 | unsigned max_tile_pipes; |
1379 | unsigned max_simds_per_se; | 1378 | unsigned max_cu_per_sh; |
1379 | unsigned max_sh_per_se; | ||
1380 | unsigned max_backends_per_se; | 1380 | unsigned max_backends_per_se; |
1381 | unsigned max_texture_channel_caches; | 1381 | unsigned max_texture_channel_caches; |
1382 | unsigned max_gprs; | 1382 | unsigned max_gprs; |
@@ -1387,7 +1387,6 @@ struct si_asic { | |||
1387 | unsigned sc_hiz_tile_fifo_size; | 1387 | unsigned sc_hiz_tile_fifo_size; |
1388 | unsigned sc_earlyz_tile_fifo_size; | 1388 | unsigned sc_earlyz_tile_fifo_size; |
1389 | 1389 | ||
1390 | unsigned num_shader_engines; | ||
1391 | unsigned num_tile_pipes; | 1390 | unsigned num_tile_pipes; |
1392 | unsigned num_backends_per_se; | 1391 | unsigned num_backends_per_se; |
1393 | unsigned backend_disable_mask_per_asic; | 1392 | unsigned backend_disable_mask_per_asic; |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 79db56e6c2ac..59d44937dd9f 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -476,12 +476,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev, | |||
476 | 476 | ||
477 | mutex_lock(&vm->mutex); | 477 | mutex_lock(&vm->mutex); |
478 | if (last_pfn > vm->last_pfn) { | 478 | if (last_pfn > vm->last_pfn) { |
479 | /* grow va space 32M by 32M */ | 479 | /* release mutex and lock in right order */ |
480 | unsigned align = ((32 << 20) >> 12) - 1; | 480 | mutex_unlock(&vm->mutex); |
481 | radeon_mutex_lock(&rdev->cs_mutex); | 481 | radeon_mutex_lock(&rdev->cs_mutex); |
482 | radeon_vm_unbind_locked(rdev, vm); | 482 | mutex_lock(&vm->mutex); |
483 | /* and check again */ | ||
484 | if (last_pfn > vm->last_pfn) { | ||
485 | /* grow va space 32M by 32M */ | ||
486 | unsigned align = ((32 << 20) >> 12) - 1; | ||
487 | radeon_vm_unbind_locked(rdev, vm); | ||
488 | vm->last_pfn = (last_pfn + align) & ~align; | ||
489 | } | ||
483 | radeon_mutex_unlock(&rdev->cs_mutex); | 490 | radeon_mutex_unlock(&rdev->cs_mutex); |
484 | vm->last_pfn = (last_pfn + align) & ~align; | ||
485 | } | 491 | } |
486 | head = &vm->va; | 492 | head = &vm->va; |
487 | last_offset = 0; | 493 | last_offset = 0; |
@@ -595,8 +601,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, | |||
595 | if (bo_va == NULL) | 601 | if (bo_va == NULL) |
596 | return 0; | 602 | return 0; |
597 | 603 | ||
598 | mutex_lock(&vm->mutex); | ||
599 | radeon_mutex_lock(&rdev->cs_mutex); | 604 | radeon_mutex_lock(&rdev->cs_mutex); |
605 | mutex_lock(&vm->mutex); | ||
600 | radeon_vm_bo_update_pte(rdev, vm, bo, NULL); | 606 | radeon_vm_bo_update_pte(rdev, vm, bo, NULL); |
601 | radeon_mutex_unlock(&rdev->cs_mutex); | 607 | radeon_mutex_unlock(&rdev->cs_mutex); |
602 | list_del(&bo_va->vm_list); | 608 | list_del(&bo_va->vm_list); |
@@ -641,9 +647,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) | |||
641 | struct radeon_bo_va *bo_va, *tmp; | 647 | struct radeon_bo_va *bo_va, *tmp; |
642 | int r; | 648 | int r; |
643 | 649 | ||
644 | mutex_lock(&vm->mutex); | ||
645 | |||
646 | radeon_mutex_lock(&rdev->cs_mutex); | 650 | radeon_mutex_lock(&rdev->cs_mutex); |
651 | mutex_lock(&vm->mutex); | ||
647 | radeon_vm_unbind_locked(rdev, vm); | 652 | radeon_vm_unbind_locked(rdev, vm); |
648 | radeon_mutex_unlock(&rdev->cs_mutex); | 653 | radeon_mutex_unlock(&rdev->cs_mutex); |
649 | 654 | ||
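The radeon_gart.c changes above all enforce the same lock order: take rdev->cs_mutex before vm->mutex. In radeon_vm_bo_add() that means dropping the VM mutex, taking the CS mutex, re-taking the VM mutex, and then re-checking the grow condition, because another thread may have grown the VA space in the window where no lock was held. A generic pthread sketch of that drop/reacquire/recheck pattern follows; the names are illustrative, not the driver's.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t cs_mutex = PTHREAD_MUTEX_INITIALIZER;  /* outer lock */
static pthread_mutex_t vm_mutex = PTHREAD_MUTEX_INITIALIZER;  /* inner lock */
static unsigned long last_pfn;                                /* protected by vm_mutex */

/* Called with vm_mutex held; grows last_pfn while respecting the
 * cs_mutex -> vm_mutex lock order, as radeon_vm_bo_add() now does. */
static void maybe_grow(unsigned long new_last_pfn)
{
	if (new_last_pfn <= last_pfn)
		return;

	/* release and re-lock in the right order */
	pthread_mutex_unlock(&vm_mutex);
	pthread_mutex_lock(&cs_mutex);
	pthread_mutex_lock(&vm_mutex);

	/* and check again: someone may have grown it while nothing was held */
	if (new_last_pfn > last_pfn) {
		unsigned long align = ((32UL << 20) >> 12) - 1;  /* 32M of pages */
		last_pfn = (new_last_pfn + align) & ~align;
	}
	pthread_mutex_unlock(&cs_mutex);
}

int main(void)
{
	pthread_mutex_lock(&vm_mutex);
	maybe_grow(5000);
	printf("last_pfn rounded up to %lu\n", last_pfn);
	pthread_mutex_unlock(&vm_mutex);
	return 0;
}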
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f1016a5820d1..5c58d7d90cb2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
273 | break; | 273 | break; |
274 | case RADEON_INFO_MAX_PIPES: | 274 | case RADEON_INFO_MAX_PIPES: |
275 | if (rdev->family >= CHIP_TAHITI) | 275 | if (rdev->family >= CHIP_TAHITI) |
276 | value = rdev->config.si.max_pipes_per_simd; | 276 | value = rdev->config.si.max_cu_per_sh; |
277 | else if (rdev->family >= CHIP_CAYMAN) | 277 | else if (rdev->family >= CHIP_CAYMAN) |
278 | value = rdev->config.cayman.max_pipes_per_simd; | 278 | value = rdev->config.cayman.max_pipes_per_simd; |
279 | else if (rdev->family >= CHIP_CEDAR) | 279 | else if (rdev->family >= CHIP_CEDAR) |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 25f9eef12c42..e95c5e61d4e2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev) | |||
908 | return r; | 908 | return r; |
909 | } | 909 | } |
910 | 910 | ||
911 | r = r600_audio_init(rdev); | ||
912 | if (r) { | ||
913 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
914 | return r; | ||
915 | } | ||
916 | |||
917 | r = radeon_ib_pool_start(rdev); | 911 | r = radeon_ib_pool_start(rdev); |
918 | if (r) | 912 | if (r) |
919 | return r; | 913 | return r; |
@@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev) | |||
922 | if (r) | 916 | if (r) |
923 | return r; | 917 | return r; |
924 | 918 | ||
919 | r = r600_audio_init(rdev); | ||
920 | if (r) { | ||
921 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
922 | return r; | ||
923 | } | ||
924 | |||
925 | return 0; | 925 | return 0; |
926 | } | 926 | } |
927 | 927 | ||
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3277ddecfe9f..159b6a43fda0 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev) | |||
637 | return r; | 637 | return r; |
638 | } | 638 | } |
639 | 639 | ||
640 | r = r600_audio_init(rdev); | ||
641 | if (r) { | ||
642 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
643 | return r; | ||
644 | } | ||
645 | |||
646 | r = radeon_ib_pool_start(rdev); | 640 | r = radeon_ib_pool_start(rdev); |
647 | if (r) | 641 | if (r) |
648 | return r; | 642 | return r; |
@@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev) | |||
651 | if (r) | 645 | if (r) |
652 | return r; | 646 | return r; |
653 | 647 | ||
648 | r = r600_audio_init(rdev); | ||
649 | if (r) { | ||
650 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
651 | return r; | ||
652 | } | ||
653 | |||
654 | return 0; | 654 | return 0; |
655 | } | 655 | } |
656 | 656 | ||
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 04ddc365a908..4ad0281fdc37 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -956,6 +956,12 @@ static int rv770_startup(struct radeon_device *rdev) | |||
956 | if (r) | 956 | if (r) |
957 | return r; | 957 | return r; |
958 | 958 | ||
959 | r = r600_audio_init(rdev); | ||
960 | if (r) { | ||
961 | DRM_ERROR("radeon: audio init failed\n"); | ||
962 | return r; | ||
963 | } | ||
964 | |||
959 | return 0; | 965 | return 0; |
960 | } | 966 | } |
961 | 967 | ||
@@ -978,12 +984,6 @@ int rv770_resume(struct radeon_device *rdev) | |||
978 | return r; | 984 | return r; |
979 | } | 985 | } |
980 | 986 | ||
981 | r = r600_audio_init(rdev); | ||
982 | if (r) { | ||
983 | dev_err(rdev->dev, "radeon: audio init failed\n"); | ||
984 | return r; | ||
985 | } | ||
986 | |||
987 | return r; | 987 | return r; |
988 | 988 | ||
989 | } | 989 | } |
@@ -1092,12 +1092,6 @@ int rv770_init(struct radeon_device *rdev) | |||
1092 | rdev->accel_working = false; | 1092 | rdev->accel_working = false; |
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | r = r600_audio_init(rdev); | ||
1096 | if (r) { | ||
1097 | dev_err(rdev->dev, "radeon: audio init failed\n"); | ||
1098 | return r; | ||
1099 | } | ||
1100 | |||
1101 | return 0; | 1095 | return 0; |
1102 | } | 1096 | } |
1103 | 1097 | ||
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 549732e56ca9..c7b61f16ecfd 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev) | |||
867 | /* | 867 | /* |
868 | * Core functions | 868 | * Core functions |
869 | */ | 869 | */ |
870 | static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | ||
871 | u32 num_tile_pipes, | ||
872 | u32 num_backends_per_asic, | ||
873 | u32 *backend_disable_mask_per_asic, | ||
874 | u32 num_shader_engines) | ||
875 | { | ||
876 | u32 backend_map = 0; | ||
877 | u32 enabled_backends_mask = 0; | ||
878 | u32 enabled_backends_count = 0; | ||
879 | u32 num_backends_per_se; | ||
880 | u32 cur_pipe; | ||
881 | u32 swizzle_pipe[SI_MAX_PIPES]; | ||
882 | u32 cur_backend = 0; | ||
883 | u32 i; | ||
884 | bool force_no_swizzle; | ||
885 | |||
886 | /* force legal values */ | ||
887 | if (num_tile_pipes < 1) | ||
888 | num_tile_pipes = 1; | ||
889 | if (num_tile_pipes > rdev->config.si.max_tile_pipes) | ||
890 | num_tile_pipes = rdev->config.si.max_tile_pipes; | ||
891 | if (num_shader_engines < 1) | ||
892 | num_shader_engines = 1; | ||
893 | if (num_shader_engines > rdev->config.si.max_shader_engines) | ||
894 | num_shader_engines = rdev->config.si.max_shader_engines; | ||
895 | if (num_backends_per_asic < num_shader_engines) | ||
896 | num_backends_per_asic = num_shader_engines; | ||
897 | if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines)) | ||
898 | num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines; | ||
899 | |||
900 | /* make sure we have the same number of backends per se */ | ||
901 | num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines); | ||
902 | /* set up the number of backends per se */ | ||
903 | num_backends_per_se = num_backends_per_asic / num_shader_engines; | ||
904 | if (num_backends_per_se > rdev->config.si.max_backends_per_se) { | ||
905 | num_backends_per_se = rdev->config.si.max_backends_per_se; | ||
906 | num_backends_per_asic = num_backends_per_se * num_shader_engines; | ||
907 | } | ||
908 | |||
909 | /* create enable mask and count for enabled backends */ | ||
910 | for (i = 0; i < SI_MAX_BACKENDS; ++i) { | ||
911 | if (((*backend_disable_mask_per_asic >> i) & 1) == 0) { | ||
912 | enabled_backends_mask |= (1 << i); | ||
913 | ++enabled_backends_count; | ||
914 | } | ||
915 | if (enabled_backends_count == num_backends_per_asic) | ||
916 | break; | ||
917 | } | ||
918 | |||
919 | /* force the backends mask to match the current number of backends */ | ||
920 | if (enabled_backends_count != num_backends_per_asic) { | ||
921 | u32 this_backend_enabled; | ||
922 | u32 shader_engine; | ||
923 | u32 backend_per_se; | ||
924 | |||
925 | enabled_backends_mask = 0; | ||
926 | enabled_backends_count = 0; | ||
927 | *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK; | ||
928 | for (i = 0; i < SI_MAX_BACKENDS; ++i) { | ||
929 | /* calc the current se */ | ||
930 | shader_engine = i / rdev->config.si.max_backends_per_se; | ||
931 | /* calc the backend per se */ | ||
932 | backend_per_se = i % rdev->config.si.max_backends_per_se; | ||
933 | /* default to not enabled */ | ||
934 | this_backend_enabled = 0; | ||
935 | if ((shader_engine < num_shader_engines) && | ||
936 | (backend_per_se < num_backends_per_se)) | ||
937 | this_backend_enabled = 1; | ||
938 | if (this_backend_enabled) { | ||
939 | enabled_backends_mask |= (1 << i); | ||
940 | *backend_disable_mask_per_asic &= ~(1 << i); | ||
941 | ++enabled_backends_count; | ||
942 | } | ||
943 | } | ||
944 | } | ||
945 | |||
946 | |||
947 | memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES); | ||
948 | switch (rdev->family) { | ||
949 | case CHIP_TAHITI: | ||
950 | case CHIP_PITCAIRN: | ||
951 | case CHIP_VERDE: | ||
952 | force_no_swizzle = true; | ||
953 | break; | ||
954 | default: | ||
955 | force_no_swizzle = false; | ||
956 | break; | ||
957 | } | ||
958 | if (force_no_swizzle) { | ||
959 | bool last_backend_enabled = false; | ||
960 | |||
961 | force_no_swizzle = false; | ||
962 | for (i = 0; i < SI_MAX_BACKENDS; ++i) { | ||
963 | if (((enabled_backends_mask >> i) & 1) == 1) { | ||
964 | if (last_backend_enabled) | ||
965 | force_no_swizzle = true; | ||
966 | last_backend_enabled = true; | ||
967 | } else | ||
968 | last_backend_enabled = false; | ||
969 | } | ||
970 | } | ||
971 | |||
972 | switch (num_tile_pipes) { | ||
973 | case 1: | ||
974 | case 3: | ||
975 | case 5: | ||
976 | case 7: | ||
977 | DRM_ERROR("odd number of pipes!\n"); | ||
978 | break; | ||
979 | case 2: | ||
980 | swizzle_pipe[0] = 0; | ||
981 | swizzle_pipe[1] = 1; | ||
982 | break; | ||
983 | case 4: | ||
984 | if (force_no_swizzle) { | ||
985 | swizzle_pipe[0] = 0; | ||
986 | swizzle_pipe[1] = 1; | ||
987 | swizzle_pipe[2] = 2; | ||
988 | swizzle_pipe[3] = 3; | ||
989 | } else { | ||
990 | swizzle_pipe[0] = 0; | ||
991 | swizzle_pipe[1] = 2; | ||
992 | swizzle_pipe[2] = 1; | ||
993 | swizzle_pipe[3] = 3; | ||
994 | } | ||
995 | break; | ||
996 | case 6: | ||
997 | if (force_no_swizzle) { | ||
998 | swizzle_pipe[0] = 0; | ||
999 | swizzle_pipe[1] = 1; | ||
1000 | swizzle_pipe[2] = 2; | ||
1001 | swizzle_pipe[3] = 3; | ||
1002 | swizzle_pipe[4] = 4; | ||
1003 | swizzle_pipe[5] = 5; | ||
1004 | } else { | ||
1005 | swizzle_pipe[0] = 0; | ||
1006 | swizzle_pipe[1] = 2; | ||
1007 | swizzle_pipe[2] = 4; | ||
1008 | swizzle_pipe[3] = 1; | ||
1009 | swizzle_pipe[4] = 3; | ||
1010 | swizzle_pipe[5] = 5; | ||
1011 | } | ||
1012 | break; | ||
1013 | case 8: | ||
1014 | if (force_no_swizzle) { | ||
1015 | swizzle_pipe[0] = 0; | ||
1016 | swizzle_pipe[1] = 1; | ||
1017 | swizzle_pipe[2] = 2; | ||
1018 | swizzle_pipe[3] = 3; | ||
1019 | swizzle_pipe[4] = 4; | ||
1020 | swizzle_pipe[5] = 5; | ||
1021 | swizzle_pipe[6] = 6; | ||
1022 | swizzle_pipe[7] = 7; | ||
1023 | } else { | ||
1024 | swizzle_pipe[0] = 0; | ||
1025 | swizzle_pipe[1] = 2; | ||
1026 | swizzle_pipe[2] = 4; | ||
1027 | swizzle_pipe[3] = 6; | ||
1028 | swizzle_pipe[4] = 1; | ||
1029 | swizzle_pipe[5] = 3; | ||
1030 | swizzle_pipe[6] = 5; | ||
1031 | swizzle_pipe[7] = 7; | ||
1032 | } | ||
1033 | break; | ||
1034 | } | ||
1035 | |||
1036 | for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { | ||
1037 | while (((1 << cur_backend) & enabled_backends_mask) == 0) | ||
1038 | cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS; | ||
1039 | |||
1040 | backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); | ||
1041 | |||
1042 | cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS; | ||
1043 | } | ||
1044 | |||
1045 | return backend_map; | ||
1046 | } | ||
1047 | |||
1048 | static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev, | ||
1049 | u32 disable_mask_per_se, | ||
1050 | u32 max_disable_mask_per_se, | ||
1051 | u32 num_shader_engines) | ||
1052 | { | ||
1053 | u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se); | ||
1054 | u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se; | ||
1055 | |||
1056 | if (num_shader_engines == 1) | ||
1057 | return disable_mask_per_asic; | ||
1058 | else if (num_shader_engines == 2) | ||
1059 | return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se); | ||
1060 | else | ||
1061 | return 0xffffffff; | ||
1062 | } | ||
1063 | |||
1064 | static void si_tiling_mode_table_init(struct radeon_device *rdev) | 870 | static void si_tiling_mode_table_init(struct radeon_device *rdev) |
1065 | { | 871 | { |
1066 | const u32 num_tile_mode_states = 32; | 872 | const u32 num_tile_mode_states = 32; |
@@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev) | |||
1562 | DRM_ERROR("unknown asic: 0x%x\n", rdev->family); | 1368 | DRM_ERROR("unknown asic: 0x%x\n", rdev->family); |
1563 | } | 1369 | } |
1564 | 1370 | ||
1371 | static void si_select_se_sh(struct radeon_device *rdev, | ||
1372 | u32 se_num, u32 sh_num) | ||
1373 | { | ||
1374 | u32 data = INSTANCE_BROADCAST_WRITES; | ||
1375 | |||
1376 | if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) | ||
1377 | data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; | ||
1378 | else if (se_num == 0xffffffff) | ||
1379 | data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); | ||
1380 | else if (sh_num == 0xffffffff) | ||
1381 | data |= SH_BROADCAST_WRITES | SE_INDEX(se_num); | ||
1382 | else | ||
1383 | data |= SH_INDEX(sh_num) | SE_INDEX(se_num); | ||
1384 | WREG32(GRBM_GFX_INDEX, data); | ||
1385 | } | ||
1386 | |||
1387 | static u32 si_create_bitmask(u32 bit_width) | ||
1388 | { | ||
1389 | u32 i, mask = 0; | ||
1390 | |||
1391 | for (i = 0; i < bit_width; i++) { | ||
1392 | mask <<= 1; | ||
1393 | mask |= 1; | ||
1394 | } | ||
1395 | return mask; | ||
1396 | } | ||
1397 | |||
1398 | static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh) | ||
1399 | { | ||
1400 | u32 data, mask; | ||
1401 | |||
1402 | data = RREG32(CC_GC_SHADER_ARRAY_CONFIG); | ||
1403 | if (data & 1) | ||
1404 | data &= INACTIVE_CUS_MASK; | ||
1405 | else | ||
1406 | data = 0; | ||
1407 | data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG); | ||
1408 | |||
1409 | data >>= INACTIVE_CUS_SHIFT; | ||
1410 | |||
1411 | mask = si_create_bitmask(cu_per_sh); | ||
1412 | |||
1413 | return ~data & mask; | ||
1414 | } | ||
1415 | |||
1416 | static void si_setup_spi(struct radeon_device *rdev, | ||
1417 | u32 se_num, u32 sh_per_se, | ||
1418 | u32 cu_per_sh) | ||
1419 | { | ||
1420 | int i, j, k; | ||
1421 | u32 data, mask, active_cu; | ||
1422 | |||
1423 | for (i = 0; i < se_num; i++) { | ||
1424 | for (j = 0; j < sh_per_se; j++) { | ||
1425 | si_select_se_sh(rdev, i, j); | ||
1426 | data = RREG32(SPI_STATIC_THREAD_MGMT_3); | ||
1427 | active_cu = si_get_cu_enabled(rdev, cu_per_sh); | ||
1428 | |||
1429 | mask = 1; | ||
1430 | for (k = 0; k < 16; k++) { | ||
1431 | mask <<= k; | ||
1432 | if (active_cu & mask) { | ||
1433 | data &= ~mask; | ||
1434 | WREG32(SPI_STATIC_THREAD_MGMT_3, data); | ||
1435 | break; | ||
1436 | } | ||
1437 | } | ||
1438 | } | ||
1439 | } | ||
1440 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | ||
1441 | } | ||
1442 | |||
1443 | static u32 si_get_rb_disabled(struct radeon_device *rdev, | ||
1444 | u32 max_rb_num, u32 se_num, | ||
1445 | u32 sh_per_se) | ||
1446 | { | ||
1447 | u32 data, mask; | ||
1448 | |||
1449 | data = RREG32(CC_RB_BACKEND_DISABLE); | ||
1450 | if (data & 1) | ||
1451 | data &= BACKEND_DISABLE_MASK; | ||
1452 | else | ||
1453 | data = 0; | ||
1454 | data |= RREG32(GC_USER_RB_BACKEND_DISABLE); | ||
1455 | |||
1456 | data >>= BACKEND_DISABLE_SHIFT; | ||
1457 | |||
1458 | mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); | ||
1459 | |||
1460 | return data & mask; | ||
1461 | } | ||
1462 | |||
1463 | static void si_setup_rb(struct radeon_device *rdev, | ||
1464 | u32 se_num, u32 sh_per_se, | ||
1465 | u32 max_rb_num) | ||
1466 | { | ||
1467 | int i, j; | ||
1468 | u32 data, mask; | ||
1469 | u32 disabled_rbs = 0; | ||
1470 | u32 enabled_rbs = 0; | ||
1471 | |||
1472 | for (i = 0; i < se_num; i++) { | ||
1473 | for (j = 0; j < sh_per_se; j++) { | ||
1474 | si_select_se_sh(rdev, i, j); | ||
1475 | data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); | ||
1476 | disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); | ||
1477 | } | ||
1478 | } | ||
1479 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | ||
1480 | |||
1481 | mask = 1; | ||
1482 | for (i = 0; i < max_rb_num; i++) { | ||
1483 | if (!(disabled_rbs & mask)) | ||
1484 | enabled_rbs |= mask; | ||
1485 | mask <<= 1; | ||
1486 | } | ||
1487 | |||
1488 | for (i = 0; i < se_num; i++) { | ||
1489 | si_select_se_sh(rdev, i, 0xffffffff); | ||
1490 | data = 0; | ||
1491 | for (j = 0; j < sh_per_se; j++) { | ||
1492 | switch (enabled_rbs & 3) { | ||
1493 | case 1: | ||
1494 | data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); | ||
1495 | break; | ||
1496 | case 2: | ||
1497 | data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); | ||
1498 | break; | ||
1499 | case 3: | ||
1500 | default: | ||
1501 | data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); | ||
1502 | break; | ||
1503 | } | ||
1504 | enabled_rbs >>= 2; | ||
1505 | } | ||
1506 | WREG32(PA_SC_RASTER_CONFIG, data); | ||
1507 | } | ||
1508 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | ||
1509 | } | ||
1510 | |||
1565 | static void si_gpu_init(struct radeon_device *rdev) | 1511 | static void si_gpu_init(struct radeon_device *rdev) |
1566 | { | 1512 | { |
1567 | u32 cc_rb_backend_disable = 0; | ||
1568 | u32 cc_gc_shader_array_config; | ||
1569 | u32 gb_addr_config = 0; | 1513 | u32 gb_addr_config = 0; |
1570 | u32 mc_shared_chmap, mc_arb_ramcfg; | 1514 | u32 mc_shared_chmap, mc_arb_ramcfg; |
1571 | u32 gb_backend_map; | ||
1572 | u32 cgts_tcc_disable; | ||
1573 | u32 sx_debug_1; | 1515 | u32 sx_debug_1; |
1574 | u32 gc_user_shader_array_config; | ||
1575 | u32 gc_user_rb_backend_disable; | ||
1576 | u32 cgts_user_tcc_disable; | ||
1577 | u32 hdp_host_path_cntl; | 1516 | u32 hdp_host_path_cntl; |
1578 | u32 tmp; | 1517 | u32 tmp; |
1579 | int i, j; | 1518 | int i, j; |
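The helpers added in the hunk above (si_create_bitmask(), si_get_cu_enabled(), si_get_rb_disabled()) follow a common pattern: build an n-bit-wide mask, read a disable bitmap from the hardware (fuse bits OR'd with the user-override register), and invert it to get the units that are actually enabled. A standalone sketch of that arithmetic follows, with a made-up disable bitmap in place of the CC_/GC_USER_ register reads.

#include <stdio.h>

/* Same construction as si_create_bitmask(): a mask with bit_width low bits set. */
static unsigned create_bitmask(unsigned bit_width)
{
	unsigned i, mask = 0;

	for (i = 0; i < bit_width; i++) {
		mask <<= 1;
		mask |= 1;
	}
	return mask;
}

/* Mirrors the shape of si_get_cu_enabled(): the hardware reports which
 * compute units are *disabled*; the enabled set is the complement,
 * clipped to the number of units a shader array can actually have. */
static unsigned get_enabled_units(unsigned disabled_bitmap, unsigned units_per_sh)
{
	return ~disabled_bitmap & create_bitmask(units_per_sh);
}

int main(void)
{
	unsigned disabled = 0x05;   /* made-up: units 0 and 2 fused off */

	printf("mask for 8 units: 0x%02x\n", create_bitmask(8));
	printf("enabled (of 8):   0x%02x\n", get_enabled_units(disabled, 8));
	return 0;
}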
@@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1581 | switch (rdev->family) { | 1520 | switch (rdev->family) { |
1582 | case CHIP_TAHITI: | 1521 | case CHIP_TAHITI: |
1583 | rdev->config.si.max_shader_engines = 2; | 1522 | rdev->config.si.max_shader_engines = 2; |
1584 | rdev->config.si.max_pipes_per_simd = 4; | ||
1585 | rdev->config.si.max_tile_pipes = 12; | 1523 | rdev->config.si.max_tile_pipes = 12; |
1586 | rdev->config.si.max_simds_per_se = 8; | 1524 | rdev->config.si.max_cu_per_sh = 8; |
1525 | rdev->config.si.max_sh_per_se = 2; | ||
1587 | rdev->config.si.max_backends_per_se = 4; | 1526 | rdev->config.si.max_backends_per_se = 4; |
1588 | rdev->config.si.max_texture_channel_caches = 12; | 1527 | rdev->config.si.max_texture_channel_caches = 12; |
1589 | rdev->config.si.max_gprs = 256; | 1528 | rdev->config.si.max_gprs = 256; |
@@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1594 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; | 1533 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; |
1595 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | 1534 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; |
1596 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | 1535 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; |
1536 | gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; | ||
1597 | break; | 1537 | break; |
1598 | case CHIP_PITCAIRN: | 1538 | case CHIP_PITCAIRN: |
1599 | rdev->config.si.max_shader_engines = 2; | 1539 | rdev->config.si.max_shader_engines = 2; |
1600 | rdev->config.si.max_pipes_per_simd = 4; | ||
1601 | rdev->config.si.max_tile_pipes = 8; | 1540 | rdev->config.si.max_tile_pipes = 8; |
1602 | rdev->config.si.max_simds_per_se = 5; | 1541 | rdev->config.si.max_cu_per_sh = 5; |
1542 | rdev->config.si.max_sh_per_se = 2; | ||
1603 | rdev->config.si.max_backends_per_se = 4; | 1543 | rdev->config.si.max_backends_per_se = 4; |
1604 | rdev->config.si.max_texture_channel_caches = 8; | 1544 | rdev->config.si.max_texture_channel_caches = 8; |
1605 | rdev->config.si.max_gprs = 256; | 1545 | rdev->config.si.max_gprs = 256; |
@@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1610 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; | 1550 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; |
1611 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | 1551 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; |
1612 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | 1552 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; |
1553 | gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; | ||
1613 | break; | 1554 | break; |
1614 | case CHIP_VERDE: | 1555 | case CHIP_VERDE: |
1615 | default: | 1556 | default: |
1616 | rdev->config.si.max_shader_engines = 1; | 1557 | rdev->config.si.max_shader_engines = 1; |
1617 | rdev->config.si.max_pipes_per_simd = 4; | ||
1618 | rdev->config.si.max_tile_pipes = 4; | 1558 | rdev->config.si.max_tile_pipes = 4; |
1619 | rdev->config.si.max_simds_per_se = 2; | 1559 | rdev->config.si.max_cu_per_sh = 2; |
1560 | rdev->config.si.max_sh_per_se = 2; | ||
1620 | rdev->config.si.max_backends_per_se = 4; | 1561 | rdev->config.si.max_backends_per_se = 4; |
1621 | rdev->config.si.max_texture_channel_caches = 4; | 1562 | rdev->config.si.max_texture_channel_caches = 4; |
1622 | rdev->config.si.max_gprs = 256; | 1563 | rdev->config.si.max_gprs = 256; |
@@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1627 | rdev->config.si.sc_prim_fifo_size_backend = 0x40; | 1568 | rdev->config.si.sc_prim_fifo_size_backend = 0x40; |
1628 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | 1569 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; |
1629 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | 1570 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; |
1571 | gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; | ||
1630 | break; | 1572 | break; |
1631 | } | 1573 | } |
1632 | 1574 | ||
@@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1648 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | 1590 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); |
1649 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | 1591 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); |
1650 | 1592 | ||
1651 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); | ||
1652 | cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG); | ||
1653 | cgts_tcc_disable = 0xffff0000; | ||
1654 | for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++) | ||
1655 | cgts_tcc_disable &= ~(1 << (16 + i)); | ||
1656 | gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); | ||
1657 | gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG); | ||
1658 | cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); | ||
1659 | |||
1660 | rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines; | ||
1661 | rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; | 1593 | rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; |
1662 | tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; | ||
1663 | rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp); | ||
1664 | tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; | ||
1665 | rdev->config.si.backend_disable_mask_per_asic = | ||
1666 | si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK, | ||
1667 | rdev->config.si.num_shader_engines); | ||
1668 | rdev->config.si.backend_map = | ||
1669 | si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes, | ||
1670 | rdev->config.si.num_backends_per_se * | ||
1671 | rdev->config.si.num_shader_engines, | ||
1672 | &rdev->config.si.backend_disable_mask_per_asic, | ||
1673 | rdev->config.si.num_shader_engines); | ||
1674 | tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT; | ||
1675 | rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp); | ||
1676 | rdev->config.si.mem_max_burst_length_bytes = 256; | 1594 | rdev->config.si.mem_max_burst_length_bytes = 256; |
1677 | tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; | 1595 | tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; |
1678 | rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; | 1596 | rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; |
@@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1683 | rdev->config.si.num_gpus = 1; | 1601 | rdev->config.si.num_gpus = 1; |
1684 | rdev->config.si.multi_gpu_tile_size = 64; | 1602 | rdev->config.si.multi_gpu_tile_size = 64; |
1685 | 1603 | ||
1686 | gb_addr_config = 0; | 1604 | /* fix up row size */ |
1687 | switch (rdev->config.si.num_tile_pipes) { | 1605 | gb_addr_config &= ~ROW_SIZE_MASK; |
1688 | case 1: | ||
1689 | gb_addr_config |= NUM_PIPES(0); | ||
1690 | break; | ||
1691 | case 2: | ||
1692 | gb_addr_config |= NUM_PIPES(1); | ||
1693 | break; | ||
1694 | case 4: | ||
1695 | gb_addr_config |= NUM_PIPES(2); | ||
1696 | break; | ||
1697 | case 8: | ||
1698 | default: | ||
1699 | gb_addr_config |= NUM_PIPES(3); | ||
1700 | break; | ||
1701 | } | ||
1702 | |||
1703 | tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1; | ||
1704 | gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp); | ||
1705 | gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1); | ||
1706 | tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1; | ||
1707 | gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp); | ||
1708 | switch (rdev->config.si.num_gpus) { | ||
1709 | case 1: | ||
1710 | default: | ||
1711 | gb_addr_config |= NUM_GPUS(0); | ||
1712 | break; | ||
1713 | case 2: | ||
1714 | gb_addr_config |= NUM_GPUS(1); | ||
1715 | break; | ||
1716 | case 4: | ||
1717 | gb_addr_config |= NUM_GPUS(2); | ||
1718 | break; | ||
1719 | } | ||
1720 | switch (rdev->config.si.multi_gpu_tile_size) { | ||
1721 | case 16: | ||
1722 | gb_addr_config |= MULTI_GPU_TILE_SIZE(0); | ||
1723 | break; | ||
1724 | case 32: | ||
1725 | default: | ||
1726 | gb_addr_config |= MULTI_GPU_TILE_SIZE(1); | ||
1727 | break; | ||
1728 | case 64: | ||
1729 | gb_addr_config |= MULTI_GPU_TILE_SIZE(2); | ||
1730 | break; | ||
1731 | case 128: | ||
1732 | gb_addr_config |= MULTI_GPU_TILE_SIZE(3); | ||
1733 | break; | ||
1734 | } | ||
1735 | switch (rdev->config.si.mem_row_size_in_kb) { | 1606 | switch (rdev->config.si.mem_row_size_in_kb) { |
1736 | case 1: | 1607 | case 1: |
1737 | default: | 1608 | default: |
@@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1745 | break; | 1616 | break; |
1746 | } | 1617 | } |
1747 | 1618 | ||
1748 | tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; | ||
1749 | rdev->config.si.num_tile_pipes = (1 << tmp); | ||
1750 | tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; | ||
1751 | rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256; | ||
1752 | tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT; | ||
1753 | rdev->config.si.num_shader_engines = tmp + 1; | ||
1754 | tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT; | ||
1755 | rdev->config.si.num_gpus = tmp + 1; | ||
1756 | tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT; | ||
1757 | rdev->config.si.multi_gpu_tile_size = 1 << tmp; | ||
1758 | tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; | ||
1759 | rdev->config.si.mem_row_size_in_kb = 1 << tmp; | ||
1760 | |||
1761 | gb_backend_map = | ||
1762 | si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes, | ||
1763 | rdev->config.si.num_backends_per_se * | ||
1764 | rdev->config.si.num_shader_engines, | ||
1765 | &rdev->config.si.backend_disable_mask_per_asic, | ||
1766 | rdev->config.si.num_shader_engines); | ||
1767 | |||
1768 | /* setup tiling info dword. gb_addr_config is not adequate since it does | 1619 | /* setup tiling info dword. gb_addr_config is not adequate since it does |
1769 | * not have bank info, so create a custom tiling dword. | 1620 | * not have bank info, so create a custom tiling dword. |
1770 | * bits 3:0 num_pipes | 1621 | * bits 3:0 num_pipes |
@@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1789 | rdev->config.si.tile_config |= (3 << 0); | 1640 | rdev->config.si.tile_config |= (3 << 0); |
1790 | break; | 1641 | break; |
1791 | } | 1642 | } |
1792 | rdev->config.si.tile_config |= | 1643 | if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) |
1793 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; | 1644 | rdev->config.si.tile_config |= 1 << 4; |
1645 | else | ||
1646 | rdev->config.si.tile_config |= 0 << 4; | ||
1794 | rdev->config.si.tile_config |= | 1647 | rdev->config.si.tile_config |= |
1795 | ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; | 1648 | ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; |
1796 | rdev->config.si.tile_config |= | 1649 | rdev->config.si.tile_config |= |
1797 | ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; | 1650 | ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; |
1798 | 1651 | ||
1799 | rdev->config.si.backend_map = gb_backend_map; | ||
1800 | WREG32(GB_ADDR_CONFIG, gb_addr_config); | 1652 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
1801 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 1653 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
1802 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 1654 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
1803 | 1655 | ||
1804 | /* primary versions */ | 1656 | si_tiling_mode_table_init(rdev); |
1805 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | ||
1806 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | ||
1807 | WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config); | ||
1808 | |||
1809 | WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); | ||
1810 | 1657 | ||
1811 | /* user versions */ | 1658 | si_setup_rb(rdev, rdev->config.si.max_shader_engines, |
1812 | WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1659 | rdev->config.si.max_sh_per_se, |
1813 | WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1660 | rdev->config.si.max_backends_per_se); |
1814 | WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config); | ||
1815 | 1661 | ||
1816 | WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); | 1662 | si_setup_spi(rdev, rdev->config.si.max_shader_engines, |
1663 | rdev->config.si.max_sh_per_se, | ||
1664 | rdev->config.si.max_cu_per_sh); | ||
1817 | 1665 | ||
1818 | si_tiling_mode_table_init(rdev); | ||
1819 | 1666 | ||
1820 | /* set HW defaults for 3D engine */ | 1667 | /* set HW defaults for 3D engine */ |
1821 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | | 1668 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | |
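For readers following the tile_config assembly a few hunks above: the custom tiling dword packs already-encoded field indices at fixed shifts (pipes in bits 3:0, an extra-bank flag at bit 4, pipe interleave in bits 11:8, row size in bits 15:12, as the shifts in the code imply). A small standalone sketch of that packing, offered only as an illustration of the layout:

	#include <stdint.h>

	/* Pack a tile_config dword from pre-encoded field indices, following the
	 * shifts used in si_gpu_init() above. The arguments are indices/flags,
	 * not raw pipe or bank counts. */
	static uint32_t pack_tile_config(uint32_t pipes_idx, int has_extra_banks,
					 uint32_t pipe_interleave_idx,
					 uint32_t row_size_idx)
	{
		uint32_t tile_config = 0;

		tile_config |= pipes_idx & 0xf;				/* bits 3:0   */
		tile_config |= (has_extra_banks ? 1 : 0) << 4;		/* bit 4      */
		tile_config |= (pipe_interleave_idx & 0xf) << 8;	/* bits 11:8  */
		tile_config |= (row_size_idx & 0xf) << 12;		/* bits 15:12 */

		return tile_config;
	}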
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 53ea2c42dbd6..db4067962868 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
@@ -24,6 +24,11 @@ | |||
24 | #ifndef SI_H | 24 | #ifndef SI_H |
25 | #define SI_H | 25 | #define SI_H |
26 | 26 | ||
27 | #define TAHITI_RB_BITMAP_WIDTH_PER_SH 2 | ||
28 | |||
29 | #define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 | ||
30 | #define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 | ||
31 | |||
27 | #define CG_MULT_THERMAL_STATUS 0x714 | 32 | #define CG_MULT_THERMAL_STATUS 0x714 |
28 | #define ASIC_MAX_TEMP(x) ((x) << 0) | 33 | #define ASIC_MAX_TEMP(x) ((x) << 0) |
29 | #define ASIC_MAX_TEMP_MASK 0x000001ff | 34 | #define ASIC_MAX_TEMP_MASK 0x000001ff |
@@ -408,6 +413,12 @@ | |||
408 | #define SOFT_RESET_IA (1 << 15) | 413 | #define SOFT_RESET_IA (1 << 15) |
409 | 414 | ||
410 | #define GRBM_GFX_INDEX 0x802C | 415 | #define GRBM_GFX_INDEX 0x802C |
416 | #define INSTANCE_INDEX(x) ((x) << 0) | ||
417 | #define SH_INDEX(x) ((x) << 8) | ||
418 | #define SE_INDEX(x) ((x) << 16) | ||
419 | #define SH_BROADCAST_WRITES (1 << 29) | ||
420 | #define INSTANCE_BROADCAST_WRITES (1 << 30) | ||
421 | #define SE_BROADCAST_WRITES (1 << 31) | ||
411 | 422 | ||
412 | #define GRBM_INT_CNTL 0x8060 | 423 | #define GRBM_INT_CNTL 0x8060 |
413 | # define RDERR_INT_ENABLE (1 << 0) | 424 | # define RDERR_INT_ENABLE (1 << 0) |
@@ -480,6 +491,8 @@ | |||
480 | #define VGT_TF_MEMORY_BASE 0x89B8 | 491 | #define VGT_TF_MEMORY_BASE 0x89B8 |
481 | 492 | ||
482 | #define CC_GC_SHADER_ARRAY_CONFIG 0x89bc | 493 | #define CC_GC_SHADER_ARRAY_CONFIG 0x89bc |
494 | #define INACTIVE_CUS_MASK 0xFFFF0000 | ||
495 | #define INACTIVE_CUS_SHIFT 16 | ||
483 | #define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 | 496 | #define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 |
484 | 497 | ||
485 | #define PA_CL_ENHANCE 0x8A14 | 498 | #define PA_CL_ENHANCE 0x8A14 |
@@ -688,6 +701,12 @@ | |||
688 | #define RLC_MC_CNTL 0xC344 | 701 | #define RLC_MC_CNTL 0xC344 |
689 | #define RLC_UCODE_CNTL 0xC348 | 702 | #define RLC_UCODE_CNTL 0xC348 |
690 | 703 | ||
704 | #define PA_SC_RASTER_CONFIG 0x28350 | ||
705 | # define RASTER_CONFIG_RB_MAP_0 0 | ||
706 | # define RASTER_CONFIG_RB_MAP_1 1 | ||
707 | # define RASTER_CONFIG_RB_MAP_2 2 | ||
708 | # define RASTER_CONFIG_RB_MAP_3 3 | ||
709 | |||
691 | #define VGT_EVENT_INITIATOR 0x28a90 | 710 | #define VGT_EVENT_INITIATOR 0x28a90 |
692 | # define SAMPLE_STREAMOUTSTATS1 (1 << 0) | 711 | # define SAMPLE_STREAMOUTSTATS1 (1 << 0) |
693 | # define SAMPLE_STREAMOUTSTATS2 (2 << 0) | 712 | # define SAMPLE_STREAMOUTSTATS2 (2 << 0) |
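The new GRBM_GFX_INDEX field macros above are what si_select_se_sh() presumably programs when si_setup_rb()/si_setup_spi() pass either a specific SE/SH index or 0xffffffff for "all". A hypothetical, standalone sketch of composing such a select value (the helper name is illustrative only; the macro values mirror the sid.h additions):

	#include <stdint.h>

	#define INSTANCE_INDEX(x)		((uint32_t)(x) << 0)
	#define SH_INDEX(x)			((uint32_t)(x) << 8)
	#define SE_INDEX(x)			((uint32_t)(x) << 16)
	#define SH_BROADCAST_WRITES		(1u << 29)
	#define INSTANCE_BROADCAST_WRITES	(1u << 30)
	#define SE_BROADCAST_WRITES		(1u << 31)

	/* Compose a GRBM_GFX_INDEX value: 0xffffffff for se or sh selects the
	 * broadcast bit instead of an index, matching how the si.c code calls
	 * si_select_se_sh(rdev, 0xffffffff, 0xffffffff) to address everything. */
	static uint32_t grbm_gfx_index(uint32_t se, uint32_t sh)
	{
		uint32_t val = INSTANCE_BROADCAST_WRITES;

		val |= (se == 0xffffffff) ? SE_BROADCAST_WRITES : SE_INDEX(se);
		val |= (sh == 0xffffffff) ? SH_BROADCAST_WRITES : SH_INDEX(sh);

		return val;
	}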
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index beb2491db274..a0edd9854218 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig | |||
@@ -37,4 +37,16 @@ config I2C_MUX_PCA954x | |||
37 | This driver can also be built as a module. If so, the module | 37 | This driver can also be built as a module. If so, the module |
38 | will be called i2c-mux-pca954x. | 38 | will be called i2c-mux-pca954x. |
39 | 39 | ||
40 | config I2C_MUX_PINCTRL | ||
41 | tristate "pinctrl-based I2C multiplexer" | ||
42 | depends on PINCTRL | ||
43 | help | ||
44 | If you say yes to this option, support will be included for an I2C | ||
45 | multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing. | ||
46 | This is useful for SoCs whose I2C module's signals can be routed to | ||
47 | different sets of pins at run-time. | ||
48 | |||
49 | This driver can also be built as a module. If so, the module will be | ||
50 | called i2c-mux-pinctrl. | ||
51 | |||
40 | endmenu | 52 | endmenu |
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile index 5826249b29ca..76da8692afff 100644 --- a/drivers/i2c/muxes/Makefile +++ b/drivers/i2c/muxes/Makefile | |||
@@ -4,5 +4,6 @@ | |||
4 | obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o | 4 | obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o |
5 | obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o | 5 | obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o |
6 | obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o | 6 | obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o |
7 | obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o | ||
7 | 8 | ||
8 | ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG | 9 | ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG |
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c new file mode 100644 index 000000000000..46a669763476 --- /dev/null +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * I2C multiplexer using pinctrl API | ||
3 | * | ||
4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/i2c.h> | ||
20 | #include <linux/i2c-mux.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/of_i2c.h> | ||
24 | #include <linux/pinctrl/consumer.h> | ||
25 | #include <linux/i2c-mux-pinctrl.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/slab.h> | ||
28 | |||
29 | struct i2c_mux_pinctrl { | ||
30 | struct device *dev; | ||
31 | struct i2c_mux_pinctrl_platform_data *pdata; | ||
32 | struct pinctrl *pinctrl; | ||
33 | struct pinctrl_state **states; | ||
34 | struct pinctrl_state *state_idle; | ||
35 | struct i2c_adapter *parent; | ||
36 | struct i2c_adapter **busses; | ||
37 | }; | ||
38 | |||
39 | static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data, | ||
40 | u32 chan) | ||
41 | { | ||
42 | struct i2c_mux_pinctrl *mux = data; | ||
43 | |||
44 | return pinctrl_select_state(mux->pinctrl, mux->states[chan]); | ||
45 | } | ||
46 | |||
47 | static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data, | ||
48 | u32 chan) | ||
49 | { | ||
50 | struct i2c_mux_pinctrl *mux = data; | ||
51 | |||
52 | return pinctrl_select_state(mux->pinctrl, mux->state_idle); | ||
53 | } | ||
54 | |||
55 | #ifdef CONFIG_OF | ||
56 | static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, | ||
57 | struct platform_device *pdev) | ||
58 | { | ||
59 | struct device_node *np = pdev->dev.of_node; | ||
60 | int num_names, i, ret; | ||
61 | struct device_node *adapter_np; | ||
62 | struct i2c_adapter *adapter; | ||
63 | |||
64 | if (!np) | ||
65 | return 0; | ||
66 | |||
67 | mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL); | ||
68 | if (!mux->pdata) { | ||
69 | dev_err(mux->dev, | ||
70 | "Cannot allocate i2c_mux_pinctrl_platform_data\n"); | ||
71 | return -ENOMEM; | ||
72 | } | ||
73 | |||
74 | num_names = of_property_count_strings(np, "pinctrl-names"); | ||
75 | if (num_names < 0) { | ||
76 | dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n", | ||
77 | num_names); | ||
78 | return num_names; | ||
79 | } | ||
80 | |||
81 | mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev, | ||
82 | sizeof(*mux->pdata->pinctrl_states) * num_names, | ||
83 | GFP_KERNEL); | ||
84 | if (!mux->pdata->pinctrl_states) { | ||
85 | dev_err(mux->dev, "Cannot allocate pinctrl_states\n"); | ||
86 | return -ENOMEM; | ||
87 | } | ||
88 | |||
89 | for (i = 0; i < num_names; i++) { | ||
90 | ret = of_property_read_string_index(np, "pinctrl-names", i, | ||
91 | &mux->pdata->pinctrl_states[mux->pdata->bus_count]); | ||
92 | if (ret < 0) { | ||
93 | dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n", | ||
94 | ret); | ||
95 | return ret; | ||
96 | } | ||
97 | if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count], | ||
98 | "idle")) { | ||
99 | if (i != num_names - 1) { | ||
100 | dev_err(mux->dev, "idle state must be last\n"); | ||
101 | return -EINVAL; | ||
102 | } | ||
103 | mux->pdata->pinctrl_state_idle = "idle"; | ||
104 | } else { | ||
105 | mux->pdata->bus_count++; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | adapter_np = of_parse_phandle(np, "i2c-parent", 0); | ||
110 | if (!adapter_np) { | ||
111 | dev_err(mux->dev, "Cannot parse i2c-parent\n"); | ||
112 | return -ENODEV; | ||
113 | } | ||
114 | adapter = of_find_i2c_adapter_by_node(adapter_np); | ||
115 | if (!adapter) { | ||
116 | dev_err(mux->dev, "Cannot find parent bus\n"); | ||
117 | return -ENODEV; | ||
118 | } | ||
119 | mux->pdata->parent_bus_num = i2c_adapter_id(adapter); | ||
120 | put_device(&adapter->dev); | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | #else | ||
125 | static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, | ||
126 | struct platform_device *pdev) | ||
127 | { | ||
128 | return 0; | ||
129 | } | ||
130 | #endif | ||
131 | |||
132 | static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev) | ||
133 | { | ||
134 | struct i2c_mux_pinctrl *mux; | ||
135 | int (*deselect)(struct i2c_adapter *, void *, u32); | ||
136 | int i, ret; | ||
137 | |||
138 | mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL); | ||
139 | if (!mux) { | ||
140 | dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n"); | ||
141 | ret = -ENOMEM; | ||
142 | goto err; | ||
143 | } | ||
144 | platform_set_drvdata(pdev, mux); | ||
145 | |||
146 | mux->dev = &pdev->dev; | ||
147 | |||
148 | mux->pdata = pdev->dev.platform_data; | ||
149 | if (!mux->pdata) { | ||
150 | ret = i2c_mux_pinctrl_parse_dt(mux, pdev); | ||
151 | if (ret < 0) | ||
152 | goto err; | ||
153 | } | ||
154 | if (!mux->pdata) { | ||
155 | dev_err(&pdev->dev, "Missing platform data\n"); | ||
156 | ret = -ENODEV; | ||
157 | goto err; | ||
158 | } | ||
159 | |||
160 | mux->states = devm_kzalloc(&pdev->dev, | ||
161 | sizeof(*mux->states) * mux->pdata->bus_count, | ||
162 | GFP_KERNEL); | ||
163 | if (!mux->states) { | ||
164 | dev_err(&pdev->dev, "Cannot allocate states\n"); | ||
165 | ret = -ENOMEM; | ||
166 | goto err; | ||
167 | } | ||
168 | |||
169 | mux->busses = devm_kzalloc(&pdev->dev, | ||
170 | sizeof(*mux->busses) * mux->pdata->bus_count, | ||
171 | GFP_KERNEL); | ||
172 | if (!mux->busses) { | ||
173 | dev_err(&pdev->dev, "Cannot allocate busses\n"); | ||
174 | ret = -ENOMEM; | ||
175 | goto err; | ||
176 | } | ||
177 | |||
178 | mux->pinctrl = devm_pinctrl_get(&pdev->dev); | ||
179 | if (IS_ERR(mux->pinctrl)) { | ||
180 | ret = PTR_ERR(mux->pinctrl); | ||
181 | dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret); | ||
182 | goto err; | ||
183 | } | ||
184 | for (i = 0; i < mux->pdata->bus_count; i++) { | ||
185 | mux->states[i] = pinctrl_lookup_state(mux->pinctrl, | ||
186 | mux->pdata->pinctrl_states[i]); | ||
187 | if (IS_ERR(mux->states[i])) { | ||
188 | ret = PTR_ERR(mux->states[i]); | ||
189 | dev_err(&pdev->dev, | ||
190 | "Cannot look up pinctrl state %s: %d\n", | ||
191 | mux->pdata->pinctrl_states[i], ret); | ||
192 | goto err; | ||
193 | } | ||
194 | } | ||
195 | if (mux->pdata->pinctrl_state_idle) { | ||
196 | mux->state_idle = pinctrl_lookup_state(mux->pinctrl, | ||
197 | mux->pdata->pinctrl_state_idle); | ||
198 | if (IS_ERR(mux->state_idle)) { | ||
199 | ret = PTR_ERR(mux->state_idle); | ||
200 | dev_err(&pdev->dev, | ||
201 | "Cannot look up pinctrl state %s: %d\n", | ||
202 | mux->pdata->pinctrl_state_idle, ret); | ||
203 | goto err; | ||
204 | } | ||
205 | |||
206 | deselect = i2c_mux_pinctrl_deselect; | ||
207 | } else { | ||
208 | deselect = NULL; | ||
209 | } | ||
210 | |||
211 | mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num); | ||
212 | if (!mux->parent) { | ||
213 | dev_err(&pdev->dev, "Parent adapter (%d) not found\n", | ||
214 | mux->pdata->parent_bus_num); | ||
215 | ret = -ENODEV; | ||
216 | goto err; | ||
217 | } | ||
218 | |||
219 | for (i = 0; i < mux->pdata->bus_count; i++) { | ||
220 | u32 bus = mux->pdata->base_bus_num ? | ||
221 | (mux->pdata->base_bus_num + i) : 0; | ||
222 | |||
223 | mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev, | ||
224 | mux, bus, i, | ||
225 | i2c_mux_pinctrl_select, | ||
226 | deselect); | ||
227 | if (!mux->busses[i]) { | ||
228 | ret = -ENODEV; | ||
229 | dev_err(&pdev->dev, "Failed to add adapter %d\n", i); | ||
230 | goto err_del_adapter; | ||
231 | } | ||
232 | } | ||
233 | |||
234 | return 0; | ||
235 | |||
236 | err_del_adapter: | ||
237 | for (; i > 0; i--) | ||
238 | i2c_del_mux_adapter(mux->busses[i - 1]); | ||
239 | i2c_put_adapter(mux->parent); | ||
240 | err: | ||
241 | return ret; | ||
242 | } | ||
243 | |||
244 | static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev) | ||
245 | { | ||
246 | struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev); | ||
247 | int i; | ||
248 | |||
249 | for (i = 0; i < mux->pdata->bus_count; i++) | ||
250 | i2c_del_mux_adapter(mux->busses[i]); | ||
251 | |||
252 | i2c_put_adapter(mux->parent); | ||
253 | |||
254 | return 0; | ||
255 | } | ||
256 | |||
257 | #ifdef CONFIG_OF | ||
258 | static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = { | ||
259 | { .compatible = "i2c-mux-pinctrl", }, | ||
260 | {}, | ||
261 | }; | ||
262 | MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match); | ||
263 | #endif | ||
264 | |||
265 | static struct platform_driver i2c_mux_pinctrl_driver = { | ||
266 | .driver = { | ||
267 | .name = "i2c-mux-pinctrl", | ||
268 | .owner = THIS_MODULE, | ||
269 | .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match), | ||
270 | }, | ||
271 | .probe = i2c_mux_pinctrl_probe, | ||
272 | .remove = __devexit_p(i2c_mux_pinctrl_remove), | ||
273 | }; | ||
274 | module_platform_driver(i2c_mux_pinctrl_driver); | ||
275 | |||
276 | MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver"); | ||
277 | MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>"); | ||
278 | MODULE_LICENSE("GPL v2"); | ||
279 | MODULE_ALIAS("platform:i2c-mux-pinctrl"); | ||
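For non-DT platforms, the probe path above falls back to dev.platform_data; a hypothetical board-file sketch of supplying it, using only the struct and field names visible in the driver (the state names and bus numbers here are made-up examples):

	#include <linux/i2c-mux-pinctrl.h>
	#include <linux/kernel.h>
	#include <linux/platform_device.h>

	/* Two mux positions plus an optional idle state; the strings must match
	 * pinctrl state names defined for this platform device. */
	static const char *board_i2c_mux_states[] = { "i2c-a", "i2c-b" };

	static struct i2c_mux_pinctrl_platform_data board_i2c_mux_pdata = {
		.parent_bus_num		= 0,
		.base_bus_num		= 3,	/* children become i2c-3, i2c-4 */
		.bus_count		= ARRAY_SIZE(board_i2c_mux_states),
		.pinctrl_states		= board_i2c_mux_states,
		.pinctrl_state_idle	= "idle",
	};

	static struct platform_device board_i2c_mux_device = {
		.name	= "i2c-mux-pinctrl",
		.id	= -1,
		.dev	= {
			.platform_data = &board_i2c_mux_pdata,
		},
	};

	/* Registered from the board's init code, e.g.
	 * platform_device_register(&board_i2c_mux_device); */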
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 7d5f56edb8ef..4267789ca995 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c | |||
@@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev) | |||
910 | 910 | ||
911 | static u32 rtc_handler(void *context) | 911 | static u32 rtc_handler(void *context) |
912 | { | 912 | { |
913 | struct device *dev = context; | ||
914 | |||
915 | pm_wakeup_event(dev, 0); | ||
913 | acpi_clear_event(ACPI_EVENT_RTC); | 916 | acpi_clear_event(ACPI_EVENT_RTC); |
914 | acpi_disable_event(ACPI_EVENT_RTC, 0); | 917 | acpi_disable_event(ACPI_EVENT_RTC, 0); |
915 | return ACPI_INTERRUPT_HANDLED; | 918 | return ACPI_INTERRUPT_HANDLED; |
916 | } | 919 | } |
917 | 920 | ||
918 | static inline void rtc_wake_setup(void) | 921 | static inline void rtc_wake_setup(struct device *dev) |
919 | { | 922 | { |
920 | acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); | 923 | acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); |
921 | /* | 924 | /* |
922 | * After the RTC handler is installed, the Fixed_RTC event should | 925 | * After the RTC handler is installed, the Fixed_RTC event should |
923 | * be disabled. Only when the RTC alarm is set will it be enabled. | 926 | * be disabled. Only when the RTC alarm is set will it be enabled. |
@@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev) | |||
950 | if (acpi_disabled) | 953 | if (acpi_disabled) |
951 | return; | 954 | return; |
952 | 955 | ||
953 | rtc_wake_setup(); | 956 | rtc_wake_setup(dev); |
954 | acpi_rtc_info.wake_on = rtc_wake_on; | 957 | acpi_rtc_info.wake_on = rtc_wake_on; |
955 | acpi_rtc_info.wake_off = rtc_wake_off; | 958 | acpi_rtc_info.wake_off = rtc_wake_off; |
956 | 959 | ||
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c index 4e7ef0e6b79c..d46764b5aaba 100644 --- a/drivers/staging/ramster/zcache-main.c +++ b/drivers/staging/ramster/zcache-main.c | |||
@@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) | |||
3002 | return oid; | 3002 | return oid; |
3003 | } | 3003 | } |
3004 | 3004 | ||
3005 | static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | 3005 | static int zcache_frontswap_store(unsigned type, pgoff_t offset, |
3006 | struct page *page) | 3006 | struct page *page) |
3007 | { | 3007 | { |
3008 | u64 ind64 = (u64)offset; | 3008 | u64 ind64 = (u64)offset; |
@@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | |||
3025 | 3025 | ||
3026 | /* returns 0 if the page was successfully gotten from frontswap, -1 if | 3026 | /* returns 0 if the page was successfully gotten from frontswap, -1 if |
3027 | * was not present (should never happen!) */ | 3027 | * was not present (should never happen!) */ |
3028 | static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, | 3028 | static int zcache_frontswap_load(unsigned type, pgoff_t offset, |
3029 | struct page *page) | 3029 | struct page *page) |
3030 | { | 3030 | { |
3031 | u64 ind64 = (u64)offset; | 3031 | u64 ind64 = (u64)offset; |
@@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored) | |||
3080 | } | 3080 | } |
3081 | 3081 | ||
3082 | static struct frontswap_ops zcache_frontswap_ops = { | 3082 | static struct frontswap_ops zcache_frontswap_ops = { |
3083 | .put_page = zcache_frontswap_put_page, | 3083 | .store = zcache_frontswap_store, |
3084 | .get_page = zcache_frontswap_get_page, | 3084 | .load = zcache_frontswap_load, |
3085 | .invalidate_page = zcache_frontswap_flush_page, | 3085 | .invalidate_page = zcache_frontswap_flush_page, |
3086 | .invalidate_area = zcache_frontswap_flush_area, | 3086 | .invalidate_area = zcache_frontswap_flush_area, |
3087 | .init = zcache_frontswap_init | 3087 | .init = zcache_frontswap_init |
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 2734dacacbaf..784c796b9848 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c | |||
@@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1; | |||
1835 | * Swizzling increases objects per swaptype, increasing tmem concurrency | 1835 | * Swizzling increases objects per swaptype, increasing tmem concurrency |
1836 | * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS | 1836 | * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS |
1837 | * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from | 1837 | * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from |
1838 | * frontswap_get_page(), but has side-effects. Hence using 8. | 1838 | * frontswap_load(), but has side-effects. Hence using 8. |
1839 | */ | 1839 | */ |
1840 | #define SWIZ_BITS 8 | 1840 | #define SWIZ_BITS 8 |
1841 | #define SWIZ_MASK ((1 << SWIZ_BITS) - 1) | 1841 | #define SWIZ_MASK ((1 << SWIZ_BITS) - 1) |
@@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) | |||
1849 | return oid; | 1849 | return oid; |
1850 | } | 1850 | } |
1851 | 1851 | ||
1852 | static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | 1852 | static int zcache_frontswap_store(unsigned type, pgoff_t offset, |
1853 | struct page *page) | 1853 | struct page *page) |
1854 | { | 1854 | { |
1855 | u64 ind64 = (u64)offset; | 1855 | u64 ind64 = (u64)offset; |
@@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | |||
1870 | 1870 | ||
1871 | /* returns 0 if the page was successfully gotten from frontswap, -1 if | 1871 | /* returns 0 if the page was successfully gotten from frontswap, -1 if |
1872 | * was not present (should never happen!) */ | 1872 | * was not present (should never happen!) */ |
1873 | static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, | 1873 | static int zcache_frontswap_load(unsigned type, pgoff_t offset, |
1874 | struct page *page) | 1874 | struct page *page) |
1875 | { | 1875 | { |
1876 | u64 ind64 = (u64)offset; | 1876 | u64 ind64 = (u64)offset; |
@@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored) | |||
1919 | } | 1919 | } |
1920 | 1920 | ||
1921 | static struct frontswap_ops zcache_frontswap_ops = { | 1921 | static struct frontswap_ops zcache_frontswap_ops = { |
1922 | .put_page = zcache_frontswap_put_page, | 1922 | .store = zcache_frontswap_store, |
1923 | .get_page = zcache_frontswap_get_page, | 1923 | .load = zcache_frontswap_load, |
1924 | .invalidate_page = zcache_frontswap_flush_page, | 1924 | .invalidate_page = zcache_frontswap_flush_page, |
1925 | .invalidate_area = zcache_frontswap_flush_area, | 1925 | .invalidate_area = zcache_frontswap_flush_area, |
1926 | .init = zcache_frontswap_init | 1926 | .init = zcache_frontswap_init |
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 37c609898f84..7e6136e2ce81 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c | |||
@@ -587,14 +587,14 @@ static void sbp_management_request_logout( | |||
587 | { | 587 | { |
588 | struct sbp_tport *tport = agent->tport; | 588 | struct sbp_tport *tport = agent->tport; |
589 | struct sbp_tpg *tpg = tport->tpg; | 589 | struct sbp_tpg *tpg = tport->tpg; |
590 | int login_id; | 590 | int id; |
591 | struct sbp_login_descriptor *login; | 591 | struct sbp_login_descriptor *login; |
592 | 592 | ||
593 | login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); | 593 | id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); |
594 | 594 | ||
595 | login = sbp_login_find_by_id(tpg, login_id); | 595 | login = sbp_login_find_by_id(tpg, id); |
596 | if (!login) { | 596 | if (!login) { |
597 | pr_warn("cannot find login: %d\n", login_id); | 597 | pr_warn("cannot find login: %d\n", id); |
598 | 598 | ||
599 | req->status.status = cpu_to_be32( | 599 | req->status.status = cpu_to_be32( |
600 | STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | | 600 | STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | |
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 686dba189f8e..9f99d0404908 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice( | |||
133 | ret = PTR_ERR(dev_p); | 133 | ret = PTR_ERR(dev_p); |
134 | goto fail; | 134 | goto fail; |
135 | } | 135 | } |
136 | |||
137 | /* O_DIRECT too? */ | ||
138 | flags = O_RDWR | O_CREAT | O_LARGEFILE; | ||
139 | |||
140 | /* | 136 | /* |
141 | * If fd_buffered_io=1 has not been set explicitly (the default), | 137 | * Use O_DSYNC by default instead of O_SYNC to forgo syncing |
142 | * use O_SYNC to force FILEIO writes to disk. | 138 | * of pure timestamp updates. |
143 | */ | 139 | */ |
144 | if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) | 140 | flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; |
145 | flags |= O_SYNC; | ||
146 | 141 | ||
147 | file = filp_open(dev_p, flags, 0600); | 142 | file = filp_open(dev_p, flags, 0600); |
148 | if (IS_ERR(file)) { | 143 | if (IS_ERR(file)) { |
@@ -380,23 +375,6 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd) | |||
380 | } | 375 | } |
381 | } | 376 | } |
382 | 377 | ||
383 | static void fd_emulate_write_fua(struct se_cmd *cmd) | ||
384 | { | ||
385 | struct se_device *dev = cmd->se_dev; | ||
386 | struct fd_dev *fd_dev = dev->dev_ptr; | ||
387 | loff_t start = cmd->t_task_lba * | ||
388 | dev->se_sub_dev->se_dev_attrib.block_size; | ||
389 | loff_t end = start + cmd->data_length; | ||
390 | int ret; | ||
391 | |||
392 | pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", | ||
393 | cmd->t_task_lba, cmd->data_length); | ||
394 | |||
395 | ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); | ||
396 | if (ret != 0) | ||
397 | pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); | ||
398 | } | ||
399 | |||
400 | static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | 378 | static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, |
401 | u32 sgl_nents, enum dma_data_direction data_direction) | 379 | u32 sgl_nents, enum dma_data_direction data_direction) |
402 | { | 380 | { |
@@ -411,19 +389,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | |||
411 | ret = fd_do_readv(cmd, sgl, sgl_nents); | 389 | ret = fd_do_readv(cmd, sgl, sgl_nents); |
412 | } else { | 390 | } else { |
413 | ret = fd_do_writev(cmd, sgl, sgl_nents); | 391 | ret = fd_do_writev(cmd, sgl, sgl_nents); |
414 | 392 | /* | |
393 | * Perform implict vfs_fsync_range() for fd_do_writev() ops | ||
394 | * for SCSI WRITEs with Forced Unit Access (FUA) set. | ||
395 | * Allow this to happen independent of WCE=0 setting. | ||
396 | */ | ||
415 | if (ret > 0 && | 397 | if (ret > 0 && |
416 | dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && | ||
417 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && | 398 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && |
418 | (cmd->se_cmd_flags & SCF_FUA)) { | 399 | (cmd->se_cmd_flags & SCF_FUA)) { |
419 | /* | 400 | struct fd_dev *fd_dev = dev->dev_ptr; |
420 | * We might need to be a bit smarter here | 401 | loff_t start = cmd->t_task_lba * |
421 | * and return some sense data to let the initiator | 402 | dev->se_sub_dev->se_dev_attrib.block_size; |
422 | * know the FUA WRITE cache sync failed..? | 403 | loff_t end = start + cmd->data_length; |
423 | */ | ||
424 | fd_emulate_write_fua(cmd); | ||
425 | } | ||
426 | 404 | ||
405 | vfs_fsync_range(fd_dev->fd_file, start, end, 1); | ||
406 | } | ||
427 | } | 407 | } |
428 | 408 | ||
429 | if (ret < 0) { | 409 | if (ret < 0) { |
@@ -442,7 +422,6 @@ enum { | |||
442 | static match_table_t tokens = { | 422 | static match_table_t tokens = { |
443 | {Opt_fd_dev_name, "fd_dev_name=%s"}, | 423 | {Opt_fd_dev_name, "fd_dev_name=%s"}, |
444 | {Opt_fd_dev_size, "fd_dev_size=%s"}, | 424 | {Opt_fd_dev_size, "fd_dev_size=%s"}, |
445 | {Opt_fd_buffered_io, "fd_buffered_io=%d"}, | ||
446 | {Opt_err, NULL} | 425 | {Opt_err, NULL} |
447 | }; | 426 | }; |
448 | 427 | ||
@@ -454,7 +433,7 @@ static ssize_t fd_set_configfs_dev_params( | |||
454 | struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; | 433 | struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; |
455 | char *orig, *ptr, *arg_p, *opts; | 434 | char *orig, *ptr, *arg_p, *opts; |
456 | substring_t args[MAX_OPT_ARGS]; | 435 | substring_t args[MAX_OPT_ARGS]; |
457 | int ret = 0, arg, token; | 436 | int ret = 0, token; |
458 | 437 | ||
459 | opts = kstrdup(page, GFP_KERNEL); | 438 | opts = kstrdup(page, GFP_KERNEL); |
460 | if (!opts) | 439 | if (!opts) |
@@ -498,19 +477,6 @@ static ssize_t fd_set_configfs_dev_params( | |||
498 | " bytes\n", fd_dev->fd_dev_size); | 477 | " bytes\n", fd_dev->fd_dev_size); |
499 | fd_dev->fbd_flags |= FBDF_HAS_SIZE; | 478 | fd_dev->fbd_flags |= FBDF_HAS_SIZE; |
500 | break; | 479 | break; |
501 | case Opt_fd_buffered_io: | ||
502 | match_int(args, &arg); | ||
503 | if (arg != 1) { | ||
504 | pr_err("bogus fd_buffered_io=%d value\n", arg); | ||
505 | ret = -EINVAL; | ||
506 | goto out; | ||
507 | } | ||
508 | |||
509 | pr_debug("FILEIO: Using buffered I/O" | ||
510 | " operations for struct fd_dev\n"); | ||
511 | |||
512 | fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; | ||
513 | break; | ||
514 | default: | 480 | default: |
515 | break; | 481 | break; |
516 | } | 482 | } |
@@ -542,10 +508,8 @@ static ssize_t fd_show_configfs_dev_params( | |||
542 | ssize_t bl = 0; | 508 | ssize_t bl = 0; |
543 | 509 | ||
544 | bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); | 510 | bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); |
545 | bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", | 511 | bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n", |
546 | fd_dev->fd_dev_name, fd_dev->fd_dev_size, | 512 | fd_dev->fd_dev_name, fd_dev->fd_dev_size); |
547 | (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ? | ||
548 | "Buffered" : "Synchronous"); | ||
549 | return bl; | 513 | return bl; |
550 | } | 514 | } |
551 | 515 | ||
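To make the FUA path above concrete: the synced byte range is derived directly from the command, start = t_task_lba * block_size and end = start + data_length. A trivial user-space illustration of that arithmetic (the values are made up):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t lba = 100;		/* cmd->t_task_lba */
		uint64_t block_size = 512;	/* se_dev_attrib.block_size */
		uint32_t data_length = 4096;	/* cmd->data_length */

		uint64_t start = lba * block_size;
		uint64_t end = start + data_length;

		/* The driver then issues vfs_fsync_range(fd_file, start, end, 1). */
		printf("start=%llu end=%llu\n",
		       (unsigned long long)start, (unsigned long long)end);
		return 0;
	}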
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index fbd59ef7d8be..70ce7fd7111d 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h | |||
@@ -14,7 +14,6 @@ | |||
14 | 14 | ||
15 | #define FBDF_HAS_PATH 0x01 | 15 | #define FBDF_HAS_PATH 0x01 |
16 | #define FBDF_HAS_SIZE 0x02 | 16 | #define FBDF_HAS_SIZE 0x02 |
17 | #define FDBD_USE_BUFFERED_IO 0x04 | ||
18 | 17 | ||
19 | struct fd_dev { | 18 | struct fd_dev { |
20 | u32 fbd_flags; | 19 | u32 fbd_flags; |
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index dcb79521e6c8..89f264c67420 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c | |||
@@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) | |||
269 | } | 269 | } |
270 | 270 | ||
271 | /* returns 0 if the page was successfully put into frontswap, -1 if not */ | 271 | /* returns 0 if the page was successfully put into frontswap, -1 if not */ |
272 | static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, | 272 | static int tmem_frontswap_store(unsigned type, pgoff_t offset, |
273 | struct page *page) | 273 | struct page *page) |
274 | { | 274 | { |
275 | u64 ind64 = (u64)offset; | 275 | u64 ind64 = (u64)offset; |
@@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, | |||
295 | * returns 0 if the page was successfully gotten from frontswap, -1 if | 295 | * returns 0 if the page was successfully gotten from frontswap, -1 if |
296 | * was not present (should never happen!) | 296 | * was not present (should never happen!) |
297 | */ | 297 | */ |
298 | static int tmem_frontswap_get_page(unsigned type, pgoff_t offset, | 298 | static int tmem_frontswap_load(unsigned type, pgoff_t offset, |
299 | struct page *page) | 299 | struct page *page) |
300 | { | 300 | { |
301 | u64 ind64 = (u64)offset; | 301 | u64 ind64 = (u64)offset; |
@@ -362,8 +362,8 @@ static int __init no_frontswap(char *s) | |||
362 | __setup("nofrontswap", no_frontswap); | 362 | __setup("nofrontswap", no_frontswap); |
363 | 363 | ||
364 | static struct frontswap_ops __initdata tmem_frontswap_ops = { | 364 | static struct frontswap_ops __initdata tmem_frontswap_ops = { |
365 | .put_page = tmem_frontswap_put_page, | 365 | .store = tmem_frontswap_store, |
366 | .get_page = tmem_frontswap_get_page, | 366 | .load = tmem_frontswap_load, |
367 | .invalidate_page = tmem_frontswap_flush_page, | 367 | .invalidate_page = tmem_frontswap_flush_page, |
368 | .invalidate_area = tmem_frontswap_flush_area, | 368 | .invalidate_area = tmem_frontswap_flush_area, |
369 | .init = tmem_frontswap_init | 369 | .init = tmem_frontswap_init |
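Taken together, the three backends above are adapting to the frontswap_ops rename from put_page/get_page to store/load. A minimal sketch of what a backend's ops table looks like after the rename, with stub handlers whose signatures follow the ones shown in these diffs (the invalidate/init signatures are assumed from the existing zcache/tmem handlers):

	#include <linux/frontswap.h>
	#include <linux/types.h>

	/* Stub backend: stores nothing, so store/load both report failure (-1). */
	static int example_frontswap_store(unsigned type, pgoff_t offset,
					   struct page *page)
	{
		return -1;
	}

	static int example_frontswap_load(unsigned type, pgoff_t offset,
					  struct page *page)
	{
		return -1;
	}

	static void example_frontswap_invalidate_page(unsigned type, pgoff_t offset)
	{
	}

	static void example_frontswap_invalidate_area(unsigned type)
	{
	}

	static void example_frontswap_init(unsigned type)
	{
	}

	static struct frontswap_ops example_frontswap_ops = {
		.store = example_frontswap_store,
		.load = example_frontswap_load,
		.invalidate_page = example_frontswap_invalidate_page,
		.invalidate_area = example_frontswap_invalidate_area,
		.init = example_frontswap_init,
	};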