about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/ec.c43
-rw-r--r--drivers/acpi/processor_idle.c111
-rw-r--r--drivers/acpi/sbs.c25
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c24
5 files changed, 128 insertions, 77 deletions
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b871f289dd30..7d6be23eff89 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -153,6 +153,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
153 val->intval = POWER_SUPPLY_STATUS_CHARGING; 153 val->intval = POWER_SUPPLY_STATUS_CHARGING;
154 else if (battery->state == 0) 154 else if (battery->state == 0)
155 val->intval = POWER_SUPPLY_STATUS_FULL; 155 val->intval = POWER_SUPPLY_STATUS_FULL;
156 else
157 val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
156 break; 158 break;
157 case POWER_SUPPLY_PROP_PRESENT: 159 case POWER_SUPPLY_PROP_PRESENT:
158 val->intval = acpi_battery_present(battery); 160 val->intval = acpi_battery_present(battery);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 06b78e5e33a1..d6ddb547f2d9 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -75,7 +75,8 @@ enum {
75 EC_FLAGS_WAIT_GPE = 0, /* Don't check status until GPE arrives */ 75 EC_FLAGS_WAIT_GPE = 0, /* Don't check status until GPE arrives */
76 EC_FLAGS_QUERY_PENDING, /* Query is pending */ 76 EC_FLAGS_QUERY_PENDING, /* Query is pending */
77 EC_FLAGS_GPE_MODE, /* Expect GPE to be sent for status change */ 77 EC_FLAGS_GPE_MODE, /* Expect GPE to be sent for status change */
78 EC_FLAGS_ONLY_IBF_GPE, /* Expect GPE only for IBF = 0 event */ 78 EC_FLAGS_NO_ADDRESS_GPE, /* Expect GPE only for non-address event */
79 EC_FLAGS_ADDRESS, /* Address is being written */
79}; 80};
80 81
81static int acpi_ec_remove(struct acpi_device *device, int type); 82static int acpi_ec_remove(struct acpi_device *device, int type);
@@ -166,38 +167,45 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
166 167
167static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll) 168static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
168{ 169{
170 int ret = 0;
171 if (unlikely(test_bit(EC_FLAGS_ADDRESS, &ec->flags) &&
172 test_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags)))
173 force_poll = 1;
169 if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) && 174 if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) &&
170 likely(!force_poll)) { 175 likely(!force_poll)) {
171 if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event), 176 if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
172 msecs_to_jiffies(ACPI_EC_DELAY))) 177 msecs_to_jiffies(ACPI_EC_DELAY)))
173 return 0; 178 goto end;
174 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); 179 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
175 if (acpi_ec_check_status(ec, event)) { 180 if (acpi_ec_check_status(ec, event)) {
176 if (event == ACPI_EC_EVENT_OBF_1) { 181 if (test_bit(EC_FLAGS_ADDRESS, &ec->flags)) {
177 /* miss OBF = 1 GPE, don't expect it anymore */ 182 /* miss address GPE, don't expect it anymore */
178 printk(KERN_INFO PREFIX "missing OBF_1 confirmation," 183 printk(KERN_INFO PREFIX "missing address confirmation,"
179 "switching to degraded mode.\n"); 184 "don't expect it any longer.\n");
180 set_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags); 185 set_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags);
181 } else { 186 } else {
182 /* missing GPEs, switch back to poll mode */ 187 /* missing GPEs, switch back to poll mode */
183 printk(KERN_INFO PREFIX "missing IBF_1 confirmations," 188 printk(KERN_INFO PREFIX "missing confirmations,"
184 "switch off interrupt mode.\n"); 189 "switch off interrupt mode.\n");
185 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags); 190 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
186 } 191 }
187 return 0; 192 goto end;
188 } 193 }
189 } else { 194 } else {
190 unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY); 195 unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
191 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); 196 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
192 while (time_before(jiffies, delay)) { 197 while (time_before(jiffies, delay)) {
193 if (acpi_ec_check_status(ec, event)) 198 if (acpi_ec_check_status(ec, event))
194 return 0; 199 goto end;
195 } 200 }
196 } 201 }
197 printk(KERN_ERR PREFIX "acpi_ec_wait timeout," 202 printk(KERN_ERR PREFIX "acpi_ec_wait timeout,"
198 " status = %d, expect_event = %d\n", 203 " status = %d, expect_event = %d\n",
199 acpi_ec_read_status(ec), event); 204 acpi_ec_read_status(ec), event);
200 return -ETIME; 205 ret = -ETIME;
206 end:
207 clear_bit(EC_FLAGS_ADDRESS, &ec->flags);
208 return ret;
201} 209}
202 210
203static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, 211static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
@@ -216,6 +224,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
216 "write_cmd timeout, command = %d\n", command); 224 "write_cmd timeout, command = %d\n", command);
217 goto end; 225 goto end;
218 } 226 }
227 /* mark the address byte written to EC */
228 if (rdata_len + wdata_len > 1)
229 set_bit(EC_FLAGS_ADDRESS, &ec->flags);
219 set_bit(EC_FLAGS_WAIT_GPE, &ec->flags); 230 set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
220 acpi_ec_write_data(ec, *(wdata++)); 231 acpi_ec_write_data(ec, *(wdata++));
221 } 232 }
@@ -231,8 +242,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
231 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); 242 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
232 243
233 for (; rdata_len > 0; --rdata_len) { 244 for (; rdata_len > 0; --rdata_len) {
234 if (test_bit(EC_FLAGS_ONLY_IBF_GPE, &ec->flags))
235 force_poll = 1;
236 result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll); 245 result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll);
237 if (result) { 246 if (result) {
238 printk(KERN_ERR PREFIX "read timeout, command = %d\n", 247 printk(KERN_ERR PREFIX "read timeout, command = %d\n",
@@ -881,12 +890,20 @@ int __init acpi_ec_ecdt_probe(void)
881 boot_ec->gpe = ecdt_ptr->gpe; 890 boot_ec->gpe = ecdt_ptr->gpe;
882 boot_ec->handle = ACPI_ROOT_OBJECT; 891 boot_ec->handle = ACPI_ROOT_OBJECT;
883 } else { 892 } else {
893 /* This workaround is needed only on some broken machines,
894 * which require early EC, but fail to provide ECDT */
895 acpi_handle x;
884 printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); 896 printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
885 status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, 897 status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
886 boot_ec, NULL); 898 boot_ec, NULL);
887 /* Check that acpi_get_devices actually find something */ 899 /* Check that acpi_get_devices actually find something */
888 if (ACPI_FAILURE(status) || !boot_ec->handle) 900 if (ACPI_FAILURE(status) || !boot_ec->handle)
889 goto error; 901 goto error;
902 /* We really need to limit this workaround, the only ASUS,
903 * which needs it, has fake EC._INI method, so use it as flag.
904 */
905 if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &x)))
906 goto error;
890 } 907 }
891 908
892 ret = ec_install_handlers(boot_ec); 909 ret = ec_install_handlers(boot_ec);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f996d0e37689..7b6c20eeeaff 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -197,6 +197,19 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
197 return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2); 197 return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
198} 198}
199 199
200static void acpi_safe_halt(void)
201{
202 current_thread_info()->status &= ~TS_POLLING;
203 /*
204 * TS_POLLING-cleared state must be visible before we
205 * test NEED_RESCHED:
206 */
207 smp_mb();
208 if (!need_resched())
209 safe_halt();
210 current_thread_info()->status |= TS_POLLING;
211}
212
200#ifndef CONFIG_CPU_IDLE 213#ifndef CONFIG_CPU_IDLE
201 214
202static void 215static void
@@ -239,19 +252,6 @@ acpi_processor_power_activate(struct acpi_processor *pr,
239 return; 252 return;
240} 253}
241 254
242static void acpi_safe_halt(void)
243{
244 current_thread_info()->status &= ~TS_POLLING;
245 /*
246 * TS_POLLING-cleared state must be visible before we
247 * test NEED_RESCHED:
248 */
249 smp_mb();
250 if (!need_resched())
251 safe_halt();
252 current_thread_info()->status |= TS_POLLING;
253}
254
255static atomic_t c3_cpu_count; 255static atomic_t c3_cpu_count;
256 256
257/* Common C-state entry for C2, C3, .. */ 257/* Common C-state entry for C2, C3, .. */
@@ -1373,15 +1373,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
1373 if (pr->flags.bm_check) 1373 if (pr->flags.bm_check)
1374 acpi_idle_update_bm_rld(pr, cx); 1374 acpi_idle_update_bm_rld(pr, cx);
1375 1375
1376 current_thread_info()->status &= ~TS_POLLING; 1376 acpi_safe_halt();
1377 /*
1378 * TS_POLLING-cleared state must be visible before we test
1379 * NEED_RESCHED:
1380 */
1381 smp_mb();
1382 if (!need_resched())
1383 safe_halt();
1384 current_thread_info()->status |= TS_POLLING;
1385 1377
1386 cx->usage++; 1378 cx->usage++;
1387 1379
@@ -1399,6 +1391,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
1399 struct acpi_processor *pr; 1391 struct acpi_processor *pr;
1400 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 1392 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
1401 u32 t1, t2; 1393 u32 t1, t2;
1394 int sleep_ticks = 0;
1395
1402 pr = processors[smp_processor_id()]; 1396 pr = processors[smp_processor_id()];
1403 1397
1404 if (unlikely(!pr)) 1398 if (unlikely(!pr))
@@ -1428,6 +1422,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
1428 ACPI_FLUSH_CPU_CACHE(); 1422 ACPI_FLUSH_CPU_CACHE();
1429 1423
1430 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 1424 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1425 /* Tell the scheduler that we are going deep-idle: */
1426 sched_clock_idle_sleep_event();
1431 acpi_state_timer_broadcast(pr, cx, 1); 1427 acpi_state_timer_broadcast(pr, cx, 1);
1432 acpi_idle_do_entry(cx); 1428 acpi_idle_do_entry(cx);
1433 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 1429 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -1436,6 +1432,10 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
1436 /* TSC could halt in idle, so notify users */ 1432 /* TSC could halt in idle, so notify users */
1437 mark_tsc_unstable("TSC halts in idle");; 1433 mark_tsc_unstable("TSC halts in idle");;
1438#endif 1434#endif
1435 sleep_ticks = ticks_elapsed(t1, t2);
1436
1437 /* Tell the scheduler how much we idled: */
1438 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
1439 1439
1440 local_irq_enable(); 1440 local_irq_enable();
1441 current_thread_info()->status |= TS_POLLING; 1441 current_thread_info()->status |= TS_POLLING;
@@ -1443,7 +1443,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
1443 cx->usage++; 1443 cx->usage++;
1444 1444
1445 acpi_state_timer_broadcast(pr, cx, 0); 1445 acpi_state_timer_broadcast(pr, cx, 0);
1446 cx->time += ticks_elapsed(t1, t2); 1446 cx->time += sleep_ticks;
1447 return ticks_elapsed_in_us(t1, t2); 1447 return ticks_elapsed_in_us(t1, t2);
1448} 1448}
1449 1449
@@ -1463,6 +1463,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1463 struct acpi_processor *pr; 1463 struct acpi_processor *pr;
1464 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 1464 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
1465 u32 t1, t2; 1465 u32 t1, t2;
1466 int sleep_ticks = 0;
1467
1466 pr = processors[smp_processor_id()]; 1468 pr = processors[smp_processor_id()];
1467 1469
1468 if (unlikely(!pr)) 1470 if (unlikely(!pr))
@@ -1471,6 +1473,15 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1471 if (acpi_idle_suspend) 1473 if (acpi_idle_suspend)
1472 return(acpi_idle_enter_c1(dev, state)); 1474 return(acpi_idle_enter_c1(dev, state));
1473 1475
1476 if (acpi_idle_bm_check()) {
1477 if (dev->safe_state) {
1478 return dev->safe_state->enter(dev, dev->safe_state);
1479 } else {
1480 acpi_safe_halt();
1481 return 0;
1482 }
1483 }
1484
1474 local_irq_disable(); 1485 local_irq_disable();
1475 current_thread_info()->status &= ~TS_POLLING; 1486 current_thread_info()->status &= ~TS_POLLING;
1476 /* 1487 /*
@@ -1485,38 +1496,45 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1485 return 0; 1496 return 0;
1486 } 1497 }
1487 1498
1499 /* Tell the scheduler that we are going deep-idle: */
1500 sched_clock_idle_sleep_event();
1488 /* 1501 /*
1489 * Must be done before busmaster disable as we might need to 1502 * Must be done before busmaster disable as we might need to
1490 * access HPET ! 1503 * access HPET !
1491 */ 1504 */
1492 acpi_state_timer_broadcast(pr, cx, 1); 1505 acpi_state_timer_broadcast(pr, cx, 1);
1493 1506
1494 if (acpi_idle_bm_check()) { 1507 acpi_idle_update_bm_rld(pr, cx);
1495 cx = pr->power.bm_state;
1496
1497 acpi_idle_update_bm_rld(pr, cx);
1498
1499 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1500 acpi_idle_do_entry(cx);
1501 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1502 } else {
1503 acpi_idle_update_bm_rld(pr, cx);
1504 1508
1509 /*
1510 * disable bus master
1511 * bm_check implies we need ARB_DIS
1512 * !bm_check implies we need cache flush
1513 * bm_control implies whether we can do ARB_DIS
1514 *
1515 * That leaves a case where bm_check is set and bm_control is
1516 * not set. In that case we cannot do much, we enter C3
1517 * without doing anything.
1518 */
1519 if (pr->flags.bm_check && pr->flags.bm_control) {
1505 spin_lock(&c3_lock); 1520 spin_lock(&c3_lock);
1506 c3_cpu_count++; 1521 c3_cpu_count++;
1507 /* Disable bus master arbitration when all CPUs are in C3 */ 1522 /* Disable bus master arbitration when all CPUs are in C3 */
1508 if (c3_cpu_count == num_online_cpus()) 1523 if (c3_cpu_count == num_online_cpus())
1509 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); 1524 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
1510 spin_unlock(&c3_lock); 1525 spin_unlock(&c3_lock);
1526 } else if (!pr->flags.bm_check) {
1527 ACPI_FLUSH_CPU_CACHE();
1528 }
1511 1529
1512 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 1530 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1513 acpi_idle_do_entry(cx); 1531 acpi_idle_do_entry(cx);
1514 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 1532 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1515 1533
1534 /* Re-enable bus master arbitration */
1535 if (pr->flags.bm_check && pr->flags.bm_control) {
1516 spin_lock(&c3_lock); 1536 spin_lock(&c3_lock);
1517 /* Re-enable bus master arbitration */ 1537 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
1518 if (c3_cpu_count == num_online_cpus())
1519 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
1520 c3_cpu_count--; 1538 c3_cpu_count--;
1521 spin_unlock(&c3_lock); 1539 spin_unlock(&c3_lock);
1522 } 1540 }
@@ -1525,6 +1543,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1525 /* TSC could halt in idle, so notify users */ 1543 /* TSC could halt in idle, so notify users */
1526 mark_tsc_unstable("TSC halts in idle"); 1544 mark_tsc_unstable("TSC halts in idle");
1527#endif 1545#endif
1546 sleep_ticks = ticks_elapsed(t1, t2);
1547 /* Tell the scheduler how much we idled: */
1548 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
1528 1549
1529 local_irq_enable(); 1550 local_irq_enable();
1530 current_thread_info()->status |= TS_POLLING; 1551 current_thread_info()->status |= TS_POLLING;
@@ -1532,7 +1553,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1532 cx->usage++; 1553 cx->usage++;
1533 1554
1534 acpi_state_timer_broadcast(pr, cx, 0); 1555 acpi_state_timer_broadcast(pr, cx, 0);
1535 cx->time += ticks_elapsed(t1, t2); 1556 cx->time += sleep_ticks;
1536 return ticks_elapsed_in_us(t1, t2); 1557 return ticks_elapsed_in_us(t1, t2);
1537} 1558}
1538 1559
@@ -1584,12 +1605,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1584 case ACPI_STATE_C1: 1605 case ACPI_STATE_C1:
1585 state->flags |= CPUIDLE_FLAG_SHALLOW; 1606 state->flags |= CPUIDLE_FLAG_SHALLOW;
1586 state->enter = acpi_idle_enter_c1; 1607 state->enter = acpi_idle_enter_c1;
1608 dev->safe_state = state;
1587 break; 1609 break;
1588 1610
1589 case ACPI_STATE_C2: 1611 case ACPI_STATE_C2:
1590 state->flags |= CPUIDLE_FLAG_BALANCED; 1612 state->flags |= CPUIDLE_FLAG_BALANCED;
1591 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1613 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1592 state->enter = acpi_idle_enter_simple; 1614 state->enter = acpi_idle_enter_simple;
1615 dev->safe_state = state;
1593 break; 1616 break;
1594 1617
1595 case ACPI_STATE_C3: 1618 case ACPI_STATE_C3:
@@ -1610,14 +1633,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1610 if (!count) 1633 if (!count)
1611 return -EINVAL; 1634 return -EINVAL;
1612 1635
1613 /* find the deepest state that can handle active BM */
1614 if (pr->flags.bm_check) {
1615 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
1616 if (pr->power.states[i].type == ACPI_STATE_C3)
1617 break;
1618 pr->power.bm_state = &pr->power.states[i-1];
1619 }
1620
1621 return 0; 1636 return 0;
1622} 1637}
1623 1638
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 278d20f9366a..6045cdbe176b 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -113,6 +113,7 @@ struct acpi_battery {
113 u16 spec; 113 u16 spec;
114 u8 id; 114 u8 id;
115 u8 present:1; 115 u8 present:1;
116 u8 have_sysfs_alarm:1;
116}; 117};
117 118
118#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat); 119#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
@@ -808,7 +809,13 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
808 } 809 }
809 battery->bat.get_property = acpi_sbs_battery_get_property; 810 battery->bat.get_property = acpi_sbs_battery_get_property;
810 result = power_supply_register(&sbs->device->dev, &battery->bat); 811 result = power_supply_register(&sbs->device->dev, &battery->bat);
811 device_create_file(battery->bat.dev, &alarm_attr); 812 if (result)
813 goto end;
814 result = device_create_file(battery->bat.dev, &alarm_attr);
815 if (result)
816 goto end;
817 battery->have_sysfs_alarm = 1;
818 end:
812 printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n", 819 printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n",
813 ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device), 820 ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
814 battery->name, sbs->battery->present ? "present" : "absent"); 821 battery->name, sbs->battery->present ? "present" : "absent");
@@ -817,14 +824,16 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
817 824
818static void acpi_battery_remove(struct acpi_sbs *sbs, int id) 825static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
819{ 826{
820 if (sbs->battery[id].bat.dev) 827 struct acpi_battery *battery = &sbs->battery[id];
821 device_remove_file(sbs->battery[id].bat.dev, &alarm_attr); 828
822 power_supply_unregister(&sbs->battery[id].bat); 829 if (battery->bat.dev) {
823#ifdef CONFIG_ACPI_PROCFS_POWER 830 if (battery->have_sysfs_alarm)
824 if (sbs->battery[id].proc_entry) { 831 device_remove_file(battery->bat.dev, &alarm_attr);
825 acpi_sbs_remove_fs(&(sbs->battery[id].proc_entry), 832 power_supply_unregister(&battery->bat);
826 acpi_battery_dir);
827 } 833 }
834#ifdef CONFIG_ACPI_PROCFS_POWER
835 if (battery->proc_entry)
836 acpi_sbs_remove_fs(&battery->proc_entry, acpi_battery_dir);
828#endif 837#endif
829} 838}
830 839
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index cd0a204d96d1..11adab13f2b7 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -75,6 +75,7 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
75{ 75{
76 int i = 0; 76 int i = 0;
77 int irq; 77 int irq;
78 int p, t;
78 79
79 if (!valid_IRQ(gsi)) 80 if (!valid_IRQ(gsi))
80 return; 81 return;
@@ -85,15 +86,22 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
85 if (i >= PNP_MAX_IRQ) 86 if (i >= PNP_MAX_IRQ)
86 return; 87 return;
87 88
88#ifdef CONFIG_X86 89 /*
89 if (gsi < 16 && (triggering != ACPI_EDGE_SENSITIVE || 90 * in IO-APIC mode, use overrided attribute. Two reasons:
90 polarity != ACPI_ACTIVE_HIGH)) { 91 * 1. BIOS bug in DSDT
91 pnp_warn("BIOS BUG: legacy PNP IRQ %d should be edge trigger, " 92 * 2. BIOS uses IO-APIC mode Interrupt Source Override
92 "active high", gsi); 93 */
93 triggering = ACPI_EDGE_SENSITIVE; 94 if (!acpi_get_override_irq(gsi, &t, &p)) {
94 polarity = ACPI_ACTIVE_HIGH; 95 t = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
96 p = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
97
98 if (triggering != t || polarity != p) {
99 pnp_warn("IRQ %d override to %s, %s",
100 gsi, t ? "edge":"level", p ? "low":"high");
101 triggering = t;
102 polarity = p;
103 }
95 } 104 }
96#endif
97 105
98 res->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag 106 res->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag
99 res->irq_resource[i].flags |= irq_flags(triggering, polarity); 107 res->irq_resource[i].flags |= irq_flags(triggering, polarity);