path: root/drivers/macintosh/smu.c
Diffstat (limited to 'drivers/macintosh/smu.c')
-rw-r--r--  drivers/macintosh/smu.c | 1030
1 file changed, 891 insertions(+), 139 deletions(-)
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index fb535737d17d..a85ac18dd21d 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -8,21 +8,15 @@
  */
 
 /*
- * For now, this driver includes:
- * - RTC get & set
- * - reboot & shutdown commands
- * all synchronous with IRQ disabled (ugh)
- *
  * TODO:
- *  rework in a way the PMU driver works, that is asynchronous
- *  with a queue of commands. I'll do that as soon as I have an
- *  SMU based machine at hand. Some more cleanup is needed too,
- *  like maybe fitting it into a platform device, etc...
- *  Also check what's up with cache coherency, and if we really
- *  can't do better than flushing the cache, maybe build a table
- *  of command len/reply len like the PMU driver to only flush
- *  what is actually necessary.
- * --BenH.
+ *  - maybe add timeout to commands ?
+ *  - blocking version of time functions
+ *  - polling version of i2c commands (including timer that works with
+ *    interrupts off)
+ *  - maybe avoid some data copies with i2c by directly using the smu cmd
+ *    buffer and a lower level internal interface
+ *  - understand SMU -> CPU events and implement reception of them via
+ *    the userland interface
  */
 
 #include <linux/config.h>
@@ -36,6 +30,11 @@
 #include <linux/jiffies.h>
 #include <linux/interrupt.h>
 #include <linux/rtc.h>
+#include <linux/completion.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <linux/sysdev.h>
+#include <linux/poll.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
@@ -45,8 +44,13 @@
 #include <asm/smu.h>
 #include <asm/sections.h>
 #include <asm/abs_addr.h>
+#include <asm/uaccess.h>
+#include <asm/of_device.h>
+
+#define VERSION "0.6"
+#define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
 
-#define DEBUG_SMU 1
+#undef DEBUG_SMU
 
 #ifdef DEBUG_SMU
 #define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0)
@@ -57,20 +61,30 @@
 /*
  * This is the command buffer passed to the SMU hardware
  */
+#define SMU_MAX_DATA	254
+
 struct smu_cmd_buf {
 	u8 cmd;
 	u8 length;
-	u8 data[0x0FFE];
+	u8 data[SMU_MAX_DATA];
 };
 
 struct smu_device {
 	spinlock_t lock;
 	struct device_node *of_node;
-	int db_ack;			/* doorbell ack GPIO */
-	int db_req;			/* doorbell req GPIO */
+	struct of_device *of_dev;
+	int doorbell;			/* doorbell gpio */
 	u32 __iomem *db_buf;		/* doorbell buffer */
+	int db_irq;
+	int msg;
+	int msg_irq;
 	struct smu_cmd_buf *cmd_buf;	/* command buffer virtual */
 	u32 cmd_buf_abs;		/* command buffer absolute */
+	struct list_head cmd_list;
+	struct smu_cmd *cmd_cur;	/* pending command */
+	struct list_head cmd_i2c_list;
+	struct smu_i2c_cmd *cmd_i2c_cur;	/* pending i2c command */
+	struct timer_list i2c_timer;
 };
 
 /*
@@ -79,113 +93,243 @@ struct smu_device {
79 */ 93 */
80static struct smu_device *smu; 94static struct smu_device *smu;
81 95
96
82/* 97/*
83 * SMU low level communication stuff 98 * SMU driver low level stuff
84 */ 99 */
85static inline int smu_cmd_stat(struct smu_cmd_buf *cmd_buf, u8 cmd_ack)
86{
87 rmb();
88 return cmd_buf->cmd == cmd_ack && cmd_buf->length != 0;
89}
90 100
91static inline u8 smu_save_ack_cmd(struct smu_cmd_buf *cmd_buf) 101static void smu_start_cmd(void)
92{ 102{
93 return (~cmd_buf->cmd) & 0xff; 103 unsigned long faddr, fend;
94} 104 struct smu_cmd *cmd;
95 105
96static void smu_send_cmd(struct smu_device *dev) 106 if (list_empty(&smu->cmd_list))
97{ 107 return;
98 /* SMU command buf is currently cacheable, we need a physical 108
99 * address. This isn't exactly a DMA mapping here, I suspect 109 /* Fetch first command in queue */
110 cmd = list_entry(smu->cmd_list.next, struct smu_cmd, link);
111 smu->cmd_cur = cmd;
112 list_del(&cmd->link);
113
114 DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd,
115 cmd->data_len);
116 DPRINTK("SMU: data buffer: %02x %02x %02x %02x ...\n",
117 ((u8 *)cmd->data_buf)[0], ((u8 *)cmd->data_buf)[1],
118 ((u8 *)cmd->data_buf)[2], ((u8 *)cmd->data_buf)[3]);
119
120 /* Fill the SMU command buffer */
121 smu->cmd_buf->cmd = cmd->cmd;
122 smu->cmd_buf->length = cmd->data_len;
123 memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len);
124
125 /* Flush command and data to RAM */
126 faddr = (unsigned long)smu->cmd_buf;
127 fend = faddr + smu->cmd_buf->length + 2;
128 flush_inval_dcache_range(faddr, fend);
129
130 /* This isn't exactly a DMA mapping here, I suspect
100 * the SMU is actually communicating with us via i2c to the 131 * the SMU is actually communicating with us via i2c to the
101 * northbridge or the CPU to access RAM. 132 * northbridge or the CPU to access RAM.
102 */ 133 */
103 writel(dev->cmd_buf_abs, dev->db_buf); 134 writel(smu->cmd_buf_abs, smu->db_buf);
104 135
105 /* Ring the SMU doorbell */ 136 /* Ring the SMU doorbell */
106 pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, dev->db_req, 4); 137 pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, smu->doorbell, 4);
107 pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, dev->db_req, 4);
108} 138}
109 139
110static int smu_cmd_done(struct smu_device *dev) 140
141static irqreturn_t smu_db_intr(int irq, void *arg, struct pt_regs *regs)
111{ 142{
112 unsigned long wait = 0; 143 unsigned long flags;
113 int gpio; 144 struct smu_cmd *cmd;
145 void (*done)(struct smu_cmd *cmd, void *misc) = NULL;
146 void *misc = NULL;
147 u8 gpio;
148 int rc = 0;
114 149
115 /* Check the SMU doorbell */ 150 /* SMU completed the command, well, we hope, let's make sure
116 do { 151 * of it
117 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, 152 */
118 NULL, dev->db_ack); 153 spin_lock_irqsave(&smu->lock, flags);
119 if ((gpio & 7) == 7)
120 return 0;
121 udelay(100);
122 } while(++wait < 10000);
123 154
124 printk(KERN_ERR "SMU timeout !\n"); 155 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
125 return -ENXIO; 156 if ((gpio & 7) != 7)
157 return IRQ_HANDLED;
158
159 cmd = smu->cmd_cur;
160 smu->cmd_cur = NULL;
161 if (cmd == NULL)
162 goto bail;
163
164 if (rc == 0) {
165 unsigned long faddr;
166 int reply_len;
167 u8 ack;
168
169 /* CPU might have brought back the cache line, so we need
170 * to flush again before peeking at the SMU response. We
171 * flush the entire buffer for now as we haven't read the
 172	 * reply length (it's only 2 cache lines anyway)
173 */
174 faddr = (unsigned long)smu->cmd_buf;
175 flush_inval_dcache_range(faddr, faddr + 256);
176
177 /* Now check ack */
178 ack = (~cmd->cmd) & 0xff;
179 if (ack != smu->cmd_buf->cmd) {
180 DPRINTK("SMU: incorrect ack, want %x got %x\n",
181 ack, smu->cmd_buf->cmd);
182 rc = -EIO;
183 }
184 reply_len = rc == 0 ? smu->cmd_buf->length : 0;
185 DPRINTK("SMU: reply len: %d\n", reply_len);
186 if (reply_len > cmd->reply_len) {
187 printk(KERN_WARNING "SMU: reply buffer too small,"
188 "got %d bytes for a %d bytes buffer\n",
189 reply_len, cmd->reply_len);
190 reply_len = cmd->reply_len;
191 }
192 cmd->reply_len = reply_len;
193 if (cmd->reply_buf && reply_len)
194 memcpy(cmd->reply_buf, smu->cmd_buf->data, reply_len);
195 }
196
 197	/* Now complete the command. Write status last, as we lose
 198	 * ownership of the command structure as soon as it is no longer 1
199 */
200 done = cmd->done;
201 misc = cmd->misc;
202 mb();
203 cmd->status = rc;
204 bail:
205 /* Start next command if any */
206 smu_start_cmd();
207 spin_unlock_irqrestore(&smu->lock, flags);
208
209 /* Call command completion handler if any */
210 if (done)
211 done(cmd, misc);
212
213 /* It's an edge interrupt, nothing to do */
214 return IRQ_HANDLED;
126} 215}
127 216
128static int smu_do_cmd(struct smu_device *dev) 217
218static irqreturn_t smu_msg_intr(int irq, void *arg, struct pt_regs *regs)
129{ 219{
130 int rc; 220 /* I don't quite know what to do with this one, we seem to never
 131	u8 cmd_ack;	 221	/* I don't quite know what to do with this one, we seem to never
222 * to start getting events that way.
223 */
224
225 printk(KERN_INFO "SMU: message interrupt !\n");
132 226
133 DPRINTK("SMU do_cmd %02x len=%d %02x\n", 227 /* It's an edge interrupt, nothing to do */
134 dev->cmd_buf->cmd, dev->cmd_buf->length, 228 return IRQ_HANDLED;
135 dev->cmd_buf->data[0]); 229}
136 230
137 cmd_ack = smu_save_ack_cmd(dev->cmd_buf);
138 231
139 /* Clear cmd_buf cache lines */ 232/*
140 flush_inval_dcache_range((unsigned long)dev->cmd_buf, 233 * Queued command management.
141 ((unsigned long)dev->cmd_buf) + 234 *
142 sizeof(struct smu_cmd_buf)); 235 */
143 smu_send_cmd(dev);
144 rc = smu_cmd_done(dev);
145 if (rc == 0)
146 rc = smu_cmd_stat(dev->cmd_buf, cmd_ack) ? 0 : -1;
147 236
148 DPRINTK("SMU do_cmd %02x len=%d %02x => %d (%02x)\n", 237int smu_queue_cmd(struct smu_cmd *cmd)
149 dev->cmd_buf->cmd, dev->cmd_buf->length, 238{
150 dev->cmd_buf->data[0], rc, cmd_ack); 239 unsigned long flags;
151 240
152 return rc; 241 if (smu == NULL)
242 return -ENODEV;
243 if (cmd->data_len > SMU_MAX_DATA ||
244 cmd->reply_len > SMU_MAX_DATA)
245 return -EINVAL;
246
247 cmd->status = 1;
248 spin_lock_irqsave(&smu->lock, flags);
249 list_add_tail(&cmd->link, &smu->cmd_list);
250 if (smu->cmd_cur == NULL)
251 smu_start_cmd();
252 spin_unlock_irqrestore(&smu->lock, flags);
253
254 return 0;
153} 255}
256EXPORT_SYMBOL(smu_queue_cmd);
154 257
155/* RTC low level commands */ 258
156static inline int bcd2hex (int n) 259int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
260 unsigned int data_len,
261 void (*done)(struct smu_cmd *cmd, void *misc),
262 void *misc, ...)
157{ 263{
158 return (((n & 0xf0) >> 4) * 10) + (n & 0xf); 264 struct smu_cmd *cmd = &scmd->cmd;
265 va_list list;
266 int i;
267
268 if (data_len > sizeof(scmd->buffer))
269 return -EINVAL;
270
271 memset(scmd, 0, sizeof(*scmd));
272 cmd->cmd = command;
273 cmd->data_len = data_len;
274 cmd->data_buf = scmd->buffer;
275 cmd->reply_len = sizeof(scmd->buffer);
276 cmd->reply_buf = scmd->buffer;
277 cmd->done = done;
278 cmd->misc = misc;
279
280 va_start(list, misc);
281 for (i = 0; i < data_len; ++i)
282 scmd->buffer[i] = (u8)va_arg(list, int);
283 va_end(list);
284
285 return smu_queue_cmd(cmd);
159} 286}
287EXPORT_SYMBOL(smu_queue_simple);
160 288
161static inline int hex2bcd (int n) 289
290void smu_poll(void)
162{ 291{
163 return ((n / 10) << 4) + (n % 10); 292 u8 gpio;
293
294 if (smu == NULL)
295 return;
296
297 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
298 if ((gpio & 7) == 7)
299 smu_db_intr(smu->db_irq, smu, NULL);
164} 300}
301EXPORT_SYMBOL(smu_poll);
302
165 303
166#if 0 304void smu_done_complete(struct smu_cmd *cmd, void *misc)
167static inline void smu_fill_set_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf)
168{ 305{
169 cmd_buf->cmd = 0x8e; 306 struct completion *comp = misc;
170 cmd_buf->length = 8; 307
171 cmd_buf->data[0] = 0x00; 308 complete(comp);
172 memset(cmd_buf->data + 1, 0, 7);
173} 309}
310EXPORT_SYMBOL(smu_done_complete);
311
174 312
175static inline void smu_fill_get_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) 313void smu_spinwait_cmd(struct smu_cmd *cmd)
176{ 314{
177 cmd_buf->cmd = 0x8e; 315 while(cmd->status == 1)
178 cmd_buf->length = 1; 316 smu_poll();
179 cmd_buf->data[0] = 0x01; 317}
318EXPORT_SYMBOL(smu_spinwait_cmd);
319
320
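/* Illustrative sketch (not part of the patch): how a kernel client would be
 * expected to use the queued command API above, assuming the struct
 * smu_simple_cmd / struct smu_cmd declarations this patch relies on in
 * asm/smu.h.  The command is queued asynchronously and the caller sleeps on
 * a completion that smu_done_complete() fires from the doorbell interrupt;
 * smu_spinwait_cmd()/smu_spinwait_simple() would be used instead where
 * sleeping is not possible.
 */
static int example_smu_rtc_read(u8 *buf, int buflen)
{
	struct smu_simple_cmd scmd;
	DECLARE_COMPLETION(comp);
	int rc;

	/* One data byte: the RTC "get date & time" sub-command */
	rc = smu_queue_simple(&scmd, SMU_CMD_RTC_COMMAND, 1,
			      smu_done_complete, &comp,
			      SMU_CMD_RTC_GET_DATETIME);
	if (rc)
		return rc;
	wait_for_completion(&comp);

	if (scmd.cmd.status)
		return scmd.cmd.status;
	if (scmd.cmd.reply_len > buflen)
		return -EOVERFLOW;
	memcpy(buf, scmd.buffer, scmd.cmd.reply_len);
	return scmd.cmd.reply_len;
}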
321/* RTC low level commands */
322static inline int bcd2hex (int n)
323{
324 return (((n & 0xf0) >> 4) * 10) + (n & 0xf);
180} 325}
181 326
182static inline void smu_fill_dis_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) 327
328static inline int hex2bcd (int n)
183{ 329{
184 cmd_buf->cmd = 0x8e; 330 return ((n / 10) << 4) + (n % 10);
185 cmd_buf->length = 1;
186 cmd_buf->data[0] = 0x02;
187} 331}
188#endif 332
189 333
190static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf, 334static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
191 struct rtc_time *time) 335 struct rtc_time *time)
@@ -202,100 +346,96 @@ static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
202 cmd_buf->data[7] = hex2bcd(time->tm_year - 100); 346 cmd_buf->data[7] = hex2bcd(time->tm_year - 100);
203} 347}
204 348
205static inline void smu_fill_get_rtc_cmd(struct smu_cmd_buf *cmd_buf)
206{
207 cmd_buf->cmd = 0x8e;
208 cmd_buf->length = 1;
209 cmd_buf->data[0] = 0x81;
210}
211 349
212static void smu_parse_get_rtc_reply(struct smu_cmd_buf *cmd_buf, 350int smu_get_rtc_time(struct rtc_time *time, int spinwait)
213 struct rtc_time *time)
214{ 351{
215 time->tm_sec = bcd2hex(cmd_buf->data[0]); 352 struct smu_simple_cmd cmd;
216 time->tm_min = bcd2hex(cmd_buf->data[1]);
217 time->tm_hour = bcd2hex(cmd_buf->data[2]);
218 time->tm_wday = bcd2hex(cmd_buf->data[3]);
219 time->tm_mday = bcd2hex(cmd_buf->data[4]);
220 time->tm_mon = bcd2hex(cmd_buf->data[5]) - 1;
221 time->tm_year = bcd2hex(cmd_buf->data[6]) + 100;
222}
223
224int smu_get_rtc_time(struct rtc_time *time)
225{
226 unsigned long flags;
227 int rc; 353 int rc;
228 354
229 if (smu == NULL) 355 if (smu == NULL)
230 return -ENODEV; 356 return -ENODEV;
231 357
232 memset(time, 0, sizeof(struct rtc_time)); 358 memset(time, 0, sizeof(struct rtc_time));
233 spin_lock_irqsave(&smu->lock, flags); 359 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
234 smu_fill_get_rtc_cmd(smu->cmd_buf); 360 SMU_CMD_RTC_GET_DATETIME);
235 rc = smu_do_cmd(smu); 361 if (rc)
236 if (rc == 0) 362 return rc;
237 smu_parse_get_rtc_reply(smu->cmd_buf, time); 363 smu_spinwait_simple(&cmd);
238 spin_unlock_irqrestore(&smu->lock, flags);
239 364
240 return rc; 365 time->tm_sec = bcd2hex(cmd.buffer[0]);
366 time->tm_min = bcd2hex(cmd.buffer[1]);
367 time->tm_hour = bcd2hex(cmd.buffer[2]);
368 time->tm_wday = bcd2hex(cmd.buffer[3]);
369 time->tm_mday = bcd2hex(cmd.buffer[4]);
370 time->tm_mon = bcd2hex(cmd.buffer[5]) - 1;
371 time->tm_year = bcd2hex(cmd.buffer[6]) + 100;
372
373 return 0;
241} 374}
242 375
243int smu_set_rtc_time(struct rtc_time *time) 376
377int smu_set_rtc_time(struct rtc_time *time, int spinwait)
244{ 378{
245 unsigned long flags; 379 struct smu_simple_cmd cmd;
246 int rc; 380 int rc;
247 381
248 if (smu == NULL) 382 if (smu == NULL)
249 return -ENODEV; 383 return -ENODEV;
250 384
251 spin_lock_irqsave(&smu->lock, flags); 385 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 8, NULL, NULL,
252 smu_fill_set_rtc_cmd(smu->cmd_buf, time); 386 SMU_CMD_RTC_SET_DATETIME,
253 rc = smu_do_cmd(smu); 387 hex2bcd(time->tm_sec),
254 spin_unlock_irqrestore(&smu->lock, flags); 388 hex2bcd(time->tm_min),
389 hex2bcd(time->tm_hour),
390 time->tm_wday,
391 hex2bcd(time->tm_mday),
392 hex2bcd(time->tm_mon) + 1,
393 hex2bcd(time->tm_year - 100));
394 if (rc)
395 return rc;
396 smu_spinwait_simple(&cmd);
255 397
256 return rc; 398 return 0;
257} 399}
258 400
401
259void smu_shutdown(void) 402void smu_shutdown(void)
260{ 403{
261 const unsigned char *command = "SHUTDOWN"; 404 struct smu_simple_cmd cmd;
262 unsigned long flags;
263 405
264 if (smu == NULL) 406 if (smu == NULL)
265 return; 407 return;
266 408
267 spin_lock_irqsave(&smu->lock, flags); 409 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 9, NULL, NULL,
268 smu->cmd_buf->cmd = 0xaa; 410 'S', 'H', 'U', 'T', 'D', 'O', 'W', 'N', 0))
269 smu->cmd_buf->length = strlen(command); 411 return;
270 strcpy(smu->cmd_buf->data, command); 412 smu_spinwait_simple(&cmd);
271 smu_do_cmd(smu);
272 for (;;) 413 for (;;)
273 ; 414 ;
274 spin_unlock_irqrestore(&smu->lock, flags);
275} 415}
276 416
417
277void smu_restart(void) 418void smu_restart(void)
278{ 419{
279 const unsigned char *command = "RESTART"; 420 struct smu_simple_cmd cmd;
280 unsigned long flags;
281 421
282 if (smu == NULL) 422 if (smu == NULL)
283 return; 423 return;
284 424
285 spin_lock_irqsave(&smu->lock, flags); 425 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, NULL, NULL,
286 smu->cmd_buf->cmd = 0xaa; 426 'R', 'E', 'S', 'T', 'A', 'R', 'T', 0))
287 smu->cmd_buf->length = strlen(command); 427 return;
288 strcpy(smu->cmd_buf->data, command); 428 smu_spinwait_simple(&cmd);
289 smu_do_cmd(smu);
290 for (;;) 429 for (;;)
291 ; 430 ;
292 spin_unlock_irqrestore(&smu->lock, flags);
293} 431}
294 432
433
295int smu_present(void) 434int smu_present(void)
296{ 435{
297 return smu != NULL; 436 return smu != NULL;
298} 437}
438EXPORT_SYMBOL(smu_present);
299 439
300 440
301int smu_init (void) 441int smu_init (void)
@@ -307,6 +447,8 @@ int smu_init (void)
307 if (np == NULL) 447 if (np == NULL)
308 return -ENODEV; 448 return -ENODEV;
309 449
450 printk(KERN_INFO "SMU driver %s %s\n", VERSION, AUTHOR);
451
310 if (smu_cmdbuf_abs == 0) { 452 if (smu_cmdbuf_abs == 0) {
311 printk(KERN_ERR "SMU: Command buffer not allocated !\n"); 453 printk(KERN_ERR "SMU: Command buffer not allocated !\n");
312 return -EINVAL; 454 return -EINVAL;
@@ -318,7 +460,13 @@ int smu_init (void)
318 memset(smu, 0, sizeof(*smu)); 460 memset(smu, 0, sizeof(*smu));
319 461
320 spin_lock_init(&smu->lock); 462 spin_lock_init(&smu->lock);
463 INIT_LIST_HEAD(&smu->cmd_list);
464 INIT_LIST_HEAD(&smu->cmd_i2c_list);
321 smu->of_node = np; 465 smu->of_node = np;
466 smu->db_irq = NO_IRQ;
467 smu->msg_irq = NO_IRQ;
468 init_timer(&smu->i2c_timer);
469
322 /* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a 470 /* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a
323 * 32 bits value safely 471 * 32 bits value safely
324 */ 472 */
@@ -331,8 +479,8 @@ int smu_init (void)
331 goto fail; 479 goto fail;
332 } 480 }
333 data = (u32 *)get_property(np, "reg", NULL); 481 data = (u32 *)get_property(np, "reg", NULL);
334 of_node_put(np);
335 if (data == NULL) { 482 if (data == NULL) {
483 of_node_put(np);
336 printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n"); 484 printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n");
337 goto fail; 485 goto fail;
338 } 486 }
@@ -341,8 +489,31 @@ int smu_init (void)
341 * and ack. GPIOs are at 0x50, best would be to find that out 489 * and ack. GPIOs are at 0x50, best would be to find that out
342 * in the device-tree though. 490 * in the device-tree though.
343 */ 491 */
344 smu->db_req = 0x50 + *data; 492 smu->doorbell = *data;
345 smu->db_ack = 0x50 + *data; 493 if (smu->doorbell < 0x50)
494 smu->doorbell += 0x50;
495 if (np->n_intrs > 0)
496 smu->db_irq = np->intrs[0].line;
497
498 of_node_put(np);
499
500 /* Now look for the smu-interrupt GPIO */
501 do {
502 np = of_find_node_by_name(NULL, "smu-interrupt");
503 if (np == NULL)
504 break;
505 data = (u32 *)get_property(np, "reg", NULL);
506 if (data == NULL) {
507 of_node_put(np);
508 break;
509 }
510 smu->msg = *data;
511 if (smu->msg < 0x50)
512 smu->msg += 0x50;
513 if (np->n_intrs > 0)
514 smu->msg_irq = np->intrs[0].line;
515 of_node_put(np);
516 } while(0);
346 517
347 /* Doorbell buffer is currently hard-coded, I didn't find a proper 518 /* Doorbell buffer is currently hard-coded, I didn't find a proper
 348	 * device-tree entry giving the address. Best would probably be to use	 519
@@ -362,3 +533,584 @@ int smu_init (void)
362 return -ENXIO; 533 return -ENXIO;
363 534
364} 535}
536
537
538static int smu_late_init(void)
539{
540 if (!smu)
541 return 0;
542
543 /*
544 * Try to request the interrupts
545 */
546
547 if (smu->db_irq != NO_IRQ) {
548 if (request_irq(smu->db_irq, smu_db_intr,
549 SA_SHIRQ, "SMU doorbell", smu) < 0) {
550 printk(KERN_WARNING "SMU: can't "
551 "request interrupt %d\n",
552 smu->db_irq);
553 smu->db_irq = NO_IRQ;
554 }
555 }
556
557 if (smu->msg_irq != NO_IRQ) {
558 if (request_irq(smu->msg_irq, smu_msg_intr,
559 SA_SHIRQ, "SMU message", smu) < 0) {
560 printk(KERN_WARNING "SMU: can't "
561 "request interrupt %d\n",
562 smu->msg_irq);
563 smu->msg_irq = NO_IRQ;
564 }
565 }
566
567 return 0;
568}
569arch_initcall(smu_late_init);
570
571/*
572 * sysfs visibility
573 */
574
575static void smu_expose_childs(void *unused)
576{
577 struct device_node *np;
578
579 for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;) {
580 if (device_is_compatible(np, "smu-i2c")) {
581 char name[32];
582 u32 *reg = (u32 *)get_property(np, "reg", NULL);
583
584 if (reg == NULL)
585 continue;
586 sprintf(name, "smu-i2c-%02x", *reg);
587 of_platform_device_create(np, name, &smu->of_dev->dev);
588 }
589 }
590
591}
592
593static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
594
595static int smu_platform_probe(struct of_device* dev,
596 const struct of_device_id *match)
597{
598 if (!smu)
599 return -ENODEV;
600 smu->of_dev = dev;
601
602 /*
603 * Ok, we are matched, now expose all i2c busses. We have to defer
604 * that unfortunately or it would deadlock inside the device model
605 */
606 schedule_work(&smu_expose_childs_work);
607
608 return 0;
609}
610
611static struct of_device_id smu_platform_match[] =
612{
613 {
614 .type = "smu",
615 },
616 {},
617};
618
619static struct of_platform_driver smu_of_platform_driver =
620{
621 .name = "smu",
622 .match_table = smu_platform_match,
623 .probe = smu_platform_probe,
624};
625
626static int __init smu_init_sysfs(void)
627{
628 int rc;
629
630 /*
631 * Due to sysfs bogosity, a sysdev is not a real device, so
632 * we should in fact create both if we want sysdev semantics
633 * for power management.
634 * For now, we don't power manage machines with an SMU chip,
635 * I'm a bit too far from figuring out how that works with those
636 * new chipsets, but that will come back and bite us
637 */
638 rc = of_register_driver(&smu_of_platform_driver);
639 return 0;
640}
641
642device_initcall(smu_init_sysfs);
643
644struct of_device *smu_get_ofdev(void)
645{
646 if (!smu)
647 return NULL;
648 return smu->of_dev;
649}
650
651EXPORT_SYMBOL_GPL(smu_get_ofdev);
652
653/*
654 * i2c interface
655 */
656
657static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail)
658{
659 void (*done)(struct smu_i2c_cmd *cmd, void *misc) = cmd->done;
660 void *misc = cmd->misc;
661 unsigned long flags;
662
663 /* Check for read case */
664 if (!fail && cmd->read) {
665 if (cmd->pdata[0] < 1)
666 fail = 1;
667 else
668 memcpy(cmd->info.data, &cmd->pdata[1],
669 cmd->info.datalen);
670 }
671
672 DPRINTK("SMU: completing, success: %d\n", !fail);
673
674 /* Update status and mark no pending i2c command with lock
675 * held so nobody comes in while we dequeue an eventual
676 * pending next i2c command
677 */
678 spin_lock_irqsave(&smu->lock, flags);
679 smu->cmd_i2c_cur = NULL;
680 wmb();
681 cmd->status = fail ? -EIO : 0;
682
683 /* Is there another i2c command waiting ? */
684 if (!list_empty(&smu->cmd_i2c_list)) {
685 struct smu_i2c_cmd *newcmd;
686
687 /* Fetch it, new current, remove from list */
688 newcmd = list_entry(smu->cmd_i2c_list.next,
689 struct smu_i2c_cmd, link);
690 smu->cmd_i2c_cur = newcmd;
 691		list_del(&newcmd->link);
 692
 693		/* Queue with low level smu */
 694		list_add_tail(&newcmd->scmd.link, &smu->cmd_list);
695 if (smu->cmd_cur == NULL)
696 smu_start_cmd();
697 }
698 spin_unlock_irqrestore(&smu->lock, flags);
699
700 /* Call command completion handler if any */
701 if (done)
702 done(cmd, misc);
703
704}
705
706
707static void smu_i2c_retry(unsigned long data)
708{
709 struct smu_i2c_cmd *cmd = (struct smu_i2c_cmd *)data;
710
711 DPRINTK("SMU: i2c failure, requeuing...\n");
712
713 /* requeue command simply by resetting reply_len */
714 cmd->pdata[0] = 0xff;
715 cmd->scmd.reply_len = 0x10;
716 smu_queue_cmd(&cmd->scmd);
717}
718
719
720static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc)
721{
722 struct smu_i2c_cmd *cmd = misc;
723 int fail = 0;
724
725 DPRINTK("SMU: i2c compl. stage=%d status=%x pdata[0]=%x rlen: %x\n",
726 cmd->stage, scmd->status, cmd->pdata[0], scmd->reply_len);
727
728 /* Check for possible status */
729 if (scmd->status < 0)
730 fail = 1;
731 else if (cmd->read) {
732 if (cmd->stage == 0)
733 fail = cmd->pdata[0] != 0;
734 else
735 fail = cmd->pdata[0] >= 0x80;
736 } else {
737 fail = cmd->pdata[0] != 0;
738 }
739
740 /* Handle failures by requeuing command, after 5ms interval
741 */
742 if (fail && --cmd->retries > 0) {
743 DPRINTK("SMU: i2c failure, starting timer...\n");
744 smu->i2c_timer.function = smu_i2c_retry;
745 smu->i2c_timer.data = (unsigned long)cmd;
746 smu->i2c_timer.expires = jiffies + msecs_to_jiffies(5);
747 add_timer(&smu->i2c_timer);
748 return;
749 }
750
751 /* If failure or stage 1, command is complete */
752 if (fail || cmd->stage != 0) {
753 smu_i2c_complete_command(cmd, fail);
754 return;
755 }
756
757 DPRINTK("SMU: going to stage 1\n");
758
759 /* Ok, initial command complete, now poll status */
760 scmd->reply_buf = cmd->pdata;
761 scmd->reply_len = 0x10;
762 scmd->data_buf = cmd->pdata;
763 scmd->data_len = 1;
764 cmd->pdata[0] = 0;
765 cmd->stage = 1;
766 cmd->retries = 20;
767 smu_queue_cmd(scmd);
768}
769
770
771int smu_queue_i2c(struct smu_i2c_cmd *cmd)
772{
773 unsigned long flags;
774
775 if (smu == NULL)
776 return -ENODEV;
777
778 /* Fill most fields of scmd */
779 cmd->scmd.cmd = SMU_CMD_I2C_COMMAND;
780 cmd->scmd.done = smu_i2c_low_completion;
781 cmd->scmd.misc = cmd;
782 cmd->scmd.reply_buf = cmd->pdata;
783 cmd->scmd.reply_len = 0x10;
784 cmd->scmd.data_buf = (u8 *)(char *)&cmd->info;
785 cmd->scmd.status = 1;
786 cmd->stage = 0;
787 cmd->pdata[0] = 0xff;
788 cmd->retries = 20;
789 cmd->status = 1;
790
791 /* Check transfer type, sanitize some "info" fields
792 * based on transfer type and do more checking
793 */
794 cmd->info.caddr = cmd->info.devaddr;
795 cmd->read = cmd->info.devaddr & 0x01;
796 switch(cmd->info.type) {
797 case SMU_I2C_TRANSFER_SIMPLE:
798 memset(&cmd->info.sublen, 0, 4);
799 break;
800 case SMU_I2C_TRANSFER_COMBINED:
801 cmd->info.devaddr &= 0xfe;
802 case SMU_I2C_TRANSFER_STDSUB:
803 if (cmd->info.sublen > 3)
804 return -EINVAL;
805 break;
806 default:
807 return -EINVAL;
808 }
809
810 /* Finish setting up command based on transfer direction
811 */
812 if (cmd->read) {
813 if (cmd->info.datalen > SMU_I2C_READ_MAX)
814 return -EINVAL;
815 memset(cmd->info.data, 0xff, cmd->info.datalen);
816 cmd->scmd.data_len = 9;
817 } else {
818 if (cmd->info.datalen > SMU_I2C_WRITE_MAX)
819 return -EINVAL;
820 cmd->scmd.data_len = 9 + cmd->info.datalen;
821 }
822
823 DPRINTK("SMU: i2c enqueuing command\n");
824 DPRINTK("SMU: %s, len=%d bus=%x addr=%x sub0=%x type=%x\n",
825 cmd->read ? "read" : "write", cmd->info.datalen,
826 cmd->info.bus, cmd->info.caddr,
827 cmd->info.subaddr[0], cmd->info.type);
828
829
830 /* Enqueue command in i2c list, and if empty, enqueue also in
831 * main command list
832 */
833 spin_lock_irqsave(&smu->lock, flags);
834 if (smu->cmd_i2c_cur == NULL) {
835 smu->cmd_i2c_cur = cmd;
836 list_add_tail(&cmd->scmd.link, &smu->cmd_list);
837 if (smu->cmd_cur == NULL)
838 smu_start_cmd();
839 } else
840 list_add_tail(&cmd->link, &smu->cmd_i2c_list);
841 spin_unlock_irqrestore(&smu->lock, flags);
842
843 return 0;
844}
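/* Illustrative sketch (not part of the patch): reading one register through
 * the asynchronous SMU i2c interface above.  The bus number, device address
 * and register below are placeholders, and the struct smu_i2c_cmd layout is
 * assumed to match the definitions this patch relies on in asm/smu.h; the
 * odd devaddr encodes the read bit, matching the "devaddr & 0x01" convention
 * used by smu_queue_i2c().
 */
static void example_i2c_done(struct smu_i2c_cmd *cmd, void *misc)
{
	complete((struct completion *)misc);
}

static int example_i2c_read_reg(u8 bus, u8 addr7, u8 reg, u8 *val)
{
	struct smu_i2c_cmd cmd;
	DECLARE_COMPLETION(comp);
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.info.bus = bus;
	cmd.info.devaddr = (addr7 << 1) | 0x01;	/* read */
	cmd.info.type = SMU_I2C_TRANSFER_COMBINED;
	cmd.info.sublen = 1;
	cmd.info.subaddr[0] = reg;
	cmd.info.datalen = 1;
	cmd.done = example_i2c_done;
	cmd.misc = &comp;

	rc = smu_queue_i2c(&cmd);
	if (rc)
		return rc;
	wait_for_completion(&comp);
	if (cmd.status)
		return cmd.status;
	*val = cmd.info.data[0];
	return 0;
}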
845
846
847
848/*
849 * Userland driver interface
850 */
851
852
853static LIST_HEAD(smu_clist);
854static DEFINE_SPINLOCK(smu_clist_lock);
855
856enum smu_file_mode {
857 smu_file_commands,
858 smu_file_events,
859 smu_file_closing
860};
861
862struct smu_private
863{
864 struct list_head list;
865 enum smu_file_mode mode;
866 int busy;
867 struct smu_cmd cmd;
868 spinlock_t lock;
869 wait_queue_head_t wait;
870 u8 buffer[SMU_MAX_DATA];
871};
872
873
874static int smu_open(struct inode *inode, struct file *file)
875{
876 struct smu_private *pp;
877 unsigned long flags;
878
879 pp = kmalloc(sizeof(struct smu_private), GFP_KERNEL);
880 if (pp == 0)
881 return -ENOMEM;
882 memset(pp, 0, sizeof(struct smu_private));
883 spin_lock_init(&pp->lock);
884 pp->mode = smu_file_commands;
885 init_waitqueue_head(&pp->wait);
886
887 spin_lock_irqsave(&smu_clist_lock, flags);
888 list_add(&pp->list, &smu_clist);
889 spin_unlock_irqrestore(&smu_clist_lock, flags);
890 file->private_data = pp;
891
892 return 0;
893}
894
895
896static void smu_user_cmd_done(struct smu_cmd *cmd, void *misc)
897{
898 struct smu_private *pp = misc;
899
900 wake_up_all(&pp->wait);
901}
902
903
904static ssize_t smu_write(struct file *file, const char __user *buf,
905 size_t count, loff_t *ppos)
906{
907 struct smu_private *pp = file->private_data;
908 unsigned long flags;
909 struct smu_user_cmd_hdr hdr;
910 int rc = 0;
911
912 if (pp->busy)
913 return -EBUSY;
914 else if (copy_from_user(&hdr, buf, sizeof(hdr)))
915 return -EFAULT;
916 else if (hdr.cmdtype == SMU_CMDTYPE_WANTS_EVENTS) {
917 pp->mode = smu_file_events;
918 return 0;
919 } else if (hdr.cmdtype != SMU_CMDTYPE_SMU)
920 return -EINVAL;
921 else if (pp->mode != smu_file_commands)
922 return -EBADFD;
923 else if (hdr.data_len > SMU_MAX_DATA)
924 return -EINVAL;
925
926 spin_lock_irqsave(&pp->lock, flags);
927 if (pp->busy) {
928 spin_unlock_irqrestore(&pp->lock, flags);
929 return -EBUSY;
930 }
931 pp->busy = 1;
932 pp->cmd.status = 1;
933 spin_unlock_irqrestore(&pp->lock, flags);
934
935 if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) {
936 pp->busy = 0;
937 return -EFAULT;
938 }
939
940 pp->cmd.cmd = hdr.cmd;
941 pp->cmd.data_len = hdr.data_len;
942 pp->cmd.reply_len = SMU_MAX_DATA;
943 pp->cmd.data_buf = pp->buffer;
944 pp->cmd.reply_buf = pp->buffer;
945 pp->cmd.done = smu_user_cmd_done;
946 pp->cmd.misc = pp;
947 rc = smu_queue_cmd(&pp->cmd);
948 if (rc < 0)
949 return rc;
950 return count;
951}
952
953
954static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
955 char __user *buf, size_t count)
956{
957 DECLARE_WAITQUEUE(wait, current);
958 struct smu_user_reply_hdr hdr;
959 unsigned long flags;
960 int size, rc = 0;
961
962 if (!pp->busy)
963 return 0;
964 if (count < sizeof(struct smu_user_reply_hdr))
965 return -EOVERFLOW;
966 spin_lock_irqsave(&pp->lock, flags);
967 if (pp->cmd.status == 1) {
968 if (file->f_flags & O_NONBLOCK)
969 return -EAGAIN;
970 add_wait_queue(&pp->wait, &wait);
971 for (;;) {
972 set_current_state(TASK_INTERRUPTIBLE);
973 rc = 0;
974 if (pp->cmd.status != 1)
975 break;
976 rc = -ERESTARTSYS;
977 if (signal_pending(current))
978 break;
979 spin_unlock_irqrestore(&pp->lock, flags);
980 schedule();
981 spin_lock_irqsave(&pp->lock, flags);
982 }
983 set_current_state(TASK_RUNNING);
984 remove_wait_queue(&pp->wait, &wait);
985 }
986 spin_unlock_irqrestore(&pp->lock, flags);
987 if (rc)
988 return rc;
989 if (pp->cmd.status != 0)
990 pp->cmd.reply_len = 0;
991 size = sizeof(hdr) + pp->cmd.reply_len;
992 if (count < size)
993 size = count;
994 rc = size;
995 hdr.status = pp->cmd.status;
996 hdr.reply_len = pp->cmd.reply_len;
997 if (copy_to_user(buf, &hdr, sizeof(hdr)))
998 return -EFAULT;
999 size -= sizeof(hdr);
1000 if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size))
1001 return -EFAULT;
1002 pp->busy = 0;
1003
1004 return rc;
1005}
1006
1007
1008static ssize_t smu_read_events(struct file *file, struct smu_private *pp,
1009 char __user *buf, size_t count)
1010{
1011 /* Not implemented */
1012 msleep_interruptible(1000);
1013 return 0;
1014}
1015
1016
1017static ssize_t smu_read(struct file *file, char __user *buf,
1018 size_t count, loff_t *ppos)
1019{
1020 struct smu_private *pp = file->private_data;
1021
1022 if (pp->mode == smu_file_commands)
1023 return smu_read_command(file, pp, buf, count);
1024 if (pp->mode == smu_file_events)
1025 return smu_read_events(file, pp, buf, count);
1026
1027 return -EBADFD;
1028}
1029
1030static unsigned int smu_fpoll(struct file *file, poll_table *wait)
1031{
1032 struct smu_private *pp = file->private_data;
1033 unsigned int mask = 0;
1034 unsigned long flags;
1035
1036 if (pp == 0)
1037 return 0;
1038
1039 if (pp->mode == smu_file_commands) {
1040 poll_wait(file, &pp->wait, wait);
1041
1042 spin_lock_irqsave(&pp->lock, flags);
1043 if (pp->busy && pp->cmd.status != 1)
1044 mask |= POLLIN;
1045 spin_unlock_irqrestore(&pp->lock, flags);
 1046	} else if (pp->mode == smu_file_events) {
1047 /* Not yet implemented */
1048 }
1049 return mask;
1050}
1051
1052static int smu_release(struct inode *inode, struct file *file)
1053{
1054 struct smu_private *pp = file->private_data;
1055 unsigned long flags;
1056 unsigned int busy;
1057
1058 if (pp == 0)
1059 return 0;
1060
1061 file->private_data = NULL;
1062
1063 /* Mark file as closing to avoid races with new request */
1064 spin_lock_irqsave(&pp->lock, flags);
1065 pp->mode = smu_file_closing;
1066 busy = pp->busy;
1067
1068 /* Wait for any pending request to complete */
1069 if (busy && pp->cmd.status == 1) {
1070 DECLARE_WAITQUEUE(wait, current);
1071
1072 add_wait_queue(&pp->wait, &wait);
1073 for (;;) {
1074 set_current_state(TASK_UNINTERRUPTIBLE);
1075 if (pp->cmd.status != 1)
1076 break;
 1077			spin_unlock_irqrestore(&pp->lock, flags);
 1078			schedule();
 1079			spin_lock_irqsave(&pp->lock, flags);
1080 }
1081 set_current_state(TASK_RUNNING);
1082 remove_wait_queue(&pp->wait, &wait);
1083 }
1084 spin_unlock_irqrestore(&pp->lock, flags);
1085
1086 spin_lock_irqsave(&smu_clist_lock, flags);
1087 list_del(&pp->list);
1088 spin_unlock_irqrestore(&smu_clist_lock, flags);
1089 kfree(pp);
1090
1091 return 0;
1092}
1093
1094
1095static struct file_operations smu_device_fops __pmacdata = {
1096 .llseek = no_llseek,
1097 .read = smu_read,
1098 .write = smu_write,
1099 .poll = smu_fpoll,
1100 .open = smu_open,
1101 .release = smu_release,
1102};
1103
1104static struct miscdevice pmu_device __pmacdata = {
1105 MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
1106};
1107
1108static int smu_device_init(void)
1109{
1110 if (!smu)
1111 return -ENODEV;
1112 if (misc_register(&pmu_device) < 0)
 1113		printk(KERN_ERR "smu: cannot register misc device.\n");
1114 return 0;
1115}
1116device_initcall(smu_device_init);
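
The userland interface added above accepts a command header followed by the command data on write(), and returns a reply header followed by the reply data on a blocking read(). A minimal user space sketch follows; it is not part of the patch, assumes the smu_user_cmd_hdr and smu_user_reply_hdr layouts from asm/smu.h are visible to user land and that the misc device node exists as /dev/smu, and it reuses the RTC command byte 0x8e with sub-command 0x81 ("get date & time") seen elsewhere in this driver.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <asm/smu.h>

int main(void)
{
	unsigned char obuf[sizeof(struct smu_user_cmd_hdr) + 1];
	unsigned char ibuf[sizeof(struct smu_user_reply_hdr) + 256];
	struct smu_user_cmd_hdr *hdr = (struct smu_user_cmd_hdr *)obuf;
	struct smu_user_reply_hdr *rep = (struct smu_user_reply_hdr *)ibuf;
	int fd, n, i;

	fd = open("/dev/smu", O_RDWR);
	if (fd < 0)
		return 1;

	/* A write is the command header immediately followed by the data */
	memset(obuf, 0, sizeof(obuf));
	hdr->cmdtype = SMU_CMDTYPE_SMU;
	hdr->cmd = 0x8e;		/* RTC command */
	hdr->data_len = 1;
	obuf[sizeof(*hdr)] = 0x81;	/* get date & time */
	if (write(fd, obuf, sizeof(*hdr) + 1) < 0)
		return 1;

	/* read() blocks until the command completes, then returns the
	 * reply header followed by the reply data */
	n = read(fd, ibuf, sizeof(ibuf));
	if (n < (int)sizeof(*rep) || rep->status != 0)
		return 1;
	for (i = 0; i < (int)rep->reply_len; i++)
		printf("%02x ", ibuf[sizeof(*rep) + i]);
	printf("\n");
	close(fd);
	return 0;
}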