Diffstat (limited to 'drivers/pci/hotplug/pciehp_hpc.c')
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 318
1 file changed, 137 insertions, 181 deletions
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 79f104963166..1323a43285d7 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -247,30 +247,32 @@ static inline void pciehp_free_irq(struct controller *ctrl)
 	free_irq(ctrl->pci_dev->irq, ctrl);
 }
 
-static inline int pcie_poll_cmd(struct controller *ctrl)
+static int pcie_poll_cmd(struct controller *ctrl)
 {
 	u16 slot_status;
 	int timeout = 1000;
 
-	if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status))
-		if (slot_status & CMD_COMPLETED)
-			goto completed;
-	for (timeout = 1000; timeout > 0; timeout -= 100) {
-		msleep(100);
-		if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status))
-			if (slot_status & CMD_COMPLETED)
-				goto completed;
+	if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
+		if (slot_status & CMD_COMPLETED) {
+			pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
+			return 1;
+		}
+	}
+	while (timeout > 1000) {
+		msleep(10);
+		timeout -= 10;
+		if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
+			if (slot_status & CMD_COMPLETED) {
+				pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
+				return 1;
+			}
+		}
 	}
 	return 0; /* timeout */
-
-completed:
-	pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
-	return timeout;
 }
 
-static inline int pcie_wait_cmd(struct controller *ctrl, int poll)
+static void pcie_wait_cmd(struct controller *ctrl, int poll)
 {
-	int retval = 0;
 	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
 	unsigned long timeout = msecs_to_jiffies(msecs);
 	int rc;
@@ -278,16 +280,9 @@ static inline int pcie_wait_cmd(struct controller *ctrl, int poll)
 	if (poll)
 		rc = pcie_poll_cmd(ctrl);
 	else
-		rc = wait_event_interruptible_timeout(ctrl->queue,
-					!ctrl->cmd_busy, timeout);
+		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
 	if (!rc)
 		dbg("Command not completed in 1000 msec\n");
-	else if (rc < 0) {
-		retval = -EINTR;
-		info("Command was interrupted by a signal\n");
-	}
-
-	return retval;
 }
 
 /**
@@ -342,10 +337,6 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 
 	slot_ctrl &= ~mask;
 	slot_ctrl |= (cmd & mask);
-	/* Don't enable command completed if caller is changing it. */
-	if (!(mask & CMD_CMPL_INTR_ENABLE))
-		slot_ctrl |= CMD_CMPL_INTR_ENABLE;
-
 	ctrl->cmd_busy = 1;
 	smp_mb();
 	retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
@@ -365,7 +356,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 		if (!(slot_ctrl & HP_INTR_ENABLE) ||
 		    !(slot_ctrl & CMD_CMPL_INTR_ENABLE))
 			poll = 1;
-		retval = pcie_wait_cmd(ctrl, poll);
+		pcie_wait_cmd(ctrl, poll);
 	}
 out:
 	mutex_unlock(&ctrl->ctrl_lock);
@@ -614,23 +605,6 @@ static void hpc_set_green_led_blink(struct slot *slot)
 	    __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
 }
 
-static void hpc_release_ctlr(struct controller *ctrl)
-{
-	/* Mask Hot-plug Interrupt Enable */
-	if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE))
-		err("%s: Cannot mask hotplut interrupt enable\n", __func__);
-
-	/* Free interrupt handler or interrupt polling timer */
-	pciehp_free_irq(ctrl);
-
-	/*
-	 * If this is the last controller to be released, destroy the
-	 * pciehp work queue
-	 */
-	if (atomic_dec_and_test(&pciehp_num_controllers))
-		destroy_workqueue(pciehp_wq);
-}
-
 static int hpc_power_on_slot(struct slot * slot)
 {
 	struct controller *ctrl = slot->ctrl;
@@ -785,7 +759,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
 	intr_loc |= detected;
 	if (!intr_loc)
 		return IRQ_NONE;
-	if (pciehp_writew(ctrl, SLOTSTATUS, detected)) {
+	if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) {
 		err("%s: Cannot write to SLOTSTATUS\n", __func__);
 		return IRQ_NONE;
 	}
@@ -797,25 +771,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
 	if (intr_loc & CMD_COMPLETED) {
 		ctrl->cmd_busy = 0;
 		smp_mb();
-		wake_up_interruptible(&ctrl->queue);
+		wake_up(&ctrl->queue);
 	}
 
 	if (!(intr_loc & ~CMD_COMPLETED))
 		return IRQ_HANDLED;
 
-	/*
-	 * Return without handling events if this handler routine is
-	 * called before controller initialization is done. This may
-	 * happen if hotplug event or another interrupt that shares
-	 * the IRQ with pciehp arrives before slot initialization is
-	 * done after interrupt handler is registered.
-	 *
-	 * FIXME - Need more structural fixes. We need to be ready to
-	 * handle the event before installing interrupt handler.
-	 */
 	p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
-	if (!p_slot || !p_slot->hpc_ops)
-		return IRQ_HANDLED;
 
 	/* Check MRL Sensor Changed */
 	if (intr_loc & MRL_SENS_CHANGED)
@@ -992,6 +954,7 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
 	return retval;
 }
 
+static void pcie_release_ctrl(struct controller *ctrl);
 static struct hpc_ops pciehp_hpc_ops = {
 	.power_on_slot = hpc_power_on_slot,
 	.power_off_slot = hpc_power_off_slot,
@@ -1013,97 +976,11 @@ static struct hpc_ops pciehp_hpc_ops = {
 	.green_led_off = hpc_set_green_led_off,
 	.green_led_blink = hpc_set_green_led_blink,
 
-	.release_ctlr = hpc_release_ctlr,
+	.release_ctlr = pcie_release_ctrl,
 	.check_lnk_status = hpc_check_lnk_status,
 };
 
-#ifdef CONFIG_ACPI
-static int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
-{
-	acpi_status status;
-	acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
-	struct pci_dev *pdev = dev;
-	struct pci_bus *parent;
-	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
-
-	/*
-	 * Per PCI firmware specification, we should run the ACPI _OSC
-	 * method to get control of hotplug hardware before using it.
-	 * If an _OSC is missing, we look for an OSHP to do the same thing.
-	 * To handle different BIOS behavior, we look for _OSC and OSHP
-	 * within the scope of the hotplug controller and its parents, upto
-	 * the host bridge under which this controller exists.
-	 */
-	while (!handle) {
-		/*
-		 * This hotplug controller was not listed in the ACPI name
-		 * space at all. Try to get acpi handle of parent pci bus.
-		 */
-		if (!pdev || !pdev->bus->parent)
-			break;
-		parent = pdev->bus->parent;
-		dbg("Could not find %s in acpi namespace, trying parent\n",
-		    pci_name(pdev));
-		if (!parent->self)
-			/* Parent must be a host bridge */
-			handle = acpi_get_pci_rootbridge_handle(
-					pci_domain_nr(parent),
-					parent->number);
-		else
-			handle = DEVICE_ACPI_HANDLE(
-					&(parent->self->dev));
-		pdev = parent->self;
-	}
-
-	while (handle) {
-		acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
-		dbg("Trying to get hotplug control for %s \n",
-		    (char *)string.pointer);
-		status = pci_osc_control_set(handle,
-				OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL |
-				OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
-		if (status == AE_NOT_FOUND)
-			status = acpi_run_oshp(handle);
-		if (ACPI_SUCCESS(status)) {
-			dbg("Gained control for hotplug HW for pci %s (%s)\n",
-			    pci_name(dev), (char *)string.pointer);
-			kfree(string.pointer);
-			return 0;
-		}
-		if (acpi_root_bridge(handle))
-			break;
-		chandle = handle;
-		status = acpi_get_parent(chandle, &handle);
-		if (ACPI_FAILURE(status))
-			break;
-	}
-
-	dbg("Cannot get control of hotplug hardware for pci %s\n",
-	    pci_name(dev));
-
-	kfree(string.pointer);
-	return -1;
-}
-#endif
-
-static int pcie_init_hardware_part1(struct controller *ctrl,
-				    struct pcie_device *dev)
-{
-	/* Clear all remaining event bits in Slot Status register */
-	if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) {
-		err("%s: Cannot write to SLOTSTATUS register\n", __func__);
-		return -1;
-	}
-
-	/* Mask Hot-plug Interrupt Enable */
-	if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE)) {
-		err("%s: Cannot mask hotplug interrupt enable\n", __func__);
-		return -1;
-	}
-	return 0;
-}
-
-int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev)
+int pcie_enable_notification(struct controller *ctrl)
 {
 	u16 cmd, mask;
 
@@ -1115,30 +992,83 @@ int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev)
 	if (MRL_SENS(ctrl))
 		cmd |= MRL_DETECT_ENABLE;
 	if (!pciehp_poll_mode)
-		cmd |= HP_INTR_ENABLE;
+		cmd |= HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
 
-	mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE |
-		PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | HP_INTR_ENABLE;
+	mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
+		PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
 
 	if (pcie_write_cmd(ctrl, cmd, mask)) {
 		err("%s: Cannot enable software notification\n", __func__);
-		goto abort;
+		return -1;
 	}
+	return 0;
+}
 
-	if (pciehp_force)
-		dbg("Bypassing BIOS check for pciehp use on %s\n",
-		    pci_name(ctrl->pci_dev));
-	else if (pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev))
-		goto abort_disable_intr;
+static void pcie_disable_notification(struct controller *ctrl)
+{
+	u16 mask;
+	mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
+		PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
+	if (pcie_write_cmd(ctrl, 0, mask))
+		warn("%s: Cannot disable software notification\n", __func__);
+}
 
+static int pcie_init_notification(struct controller *ctrl)
+{
+	if (pciehp_request_irq(ctrl))
+		return -1;
+	if (pcie_enable_notification(ctrl)) {
+		pciehp_free_irq(ctrl);
+		return -1;
+	}
 	return 0;
+}
 
-	/* We end up here for the many possible ways to fail this API. */
-abort_disable_intr:
-	if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE))
-		err("%s : disabling interrupts failed\n", __func__);
-abort:
-	return -1;
+static void pcie_shutdown_notification(struct controller *ctrl)
+{
+	pcie_disable_notification(ctrl);
+	pciehp_free_irq(ctrl);
+}
+
+static void make_slot_name(struct slot *slot)
+{
+	if (pciehp_slot_with_bus)
+		snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d",
+			 slot->bus, slot->number);
+	else
+		snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
+}
+
+static int pcie_init_slot(struct controller *ctrl)
+{
+	struct slot *slot;
+
+	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+	if (!slot)
+		return -ENOMEM;
+
+	slot->hp_slot = 0;
+	slot->ctrl = ctrl;
+	slot->bus = ctrl->pci_dev->subordinate->number;
+	slot->device = ctrl->slot_device_offset + slot->hp_slot;
+	slot->hpc_ops = ctrl->hpc_ops;
+	slot->number = ctrl->first_slot;
+	make_slot_name(slot);
+	mutex_init(&slot->lock);
+	INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
+	list_add(&slot->slot_list, &ctrl->slot_list);
+	return 0;
+}
+
+static void pcie_cleanup_slot(struct controller *ctrl)
+{
+	struct slot *slot;
+	slot = list_first_entry(&ctrl->slot_list, struct slot, slot_list);
+	list_del(&slot->slot_list);
+	cancel_delayed_work(&slot->work);
+	flush_scheduled_work();
+	flush_workqueue(pciehp_wq);
+	kfree(slot);
 }
 
 static inline void dbg_ctrl(struct controller *ctrl)
@@ -1176,15 +1106,23 @@ static inline void dbg_ctrl(struct controller *ctrl)
 	dbg(" Comamnd Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes");
 	pciehp_readw(ctrl, SLOTSTATUS, &reg16);
 	dbg("Slot Status : 0x%04x\n", reg16);
-	pciehp_readw(ctrl, SLOTSTATUS, &reg16);
+	pciehp_readw(ctrl, SLOTCTRL, &reg16);
 	dbg("Slot Control : 0x%04x\n", reg16);
 }
 
-int pcie_init(struct controller *ctrl, struct pcie_device *dev)
+struct controller *pcie_init(struct pcie_device *dev)
 {
+	struct controller *ctrl;
 	u32 slot_cap;
 	struct pci_dev *pdev = dev->port;
 
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl) {
+		err("%s : out of memory\n", __func__);
+		goto abort;
+	}
+	INIT_LIST_HEAD(&ctrl->slot_list);
+
 	ctrl->pci_dev = pdev;
 	ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 	if (!ctrl->cap_base) {
@@ -1215,15 +1153,12 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev)
 	    !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
 		ctrl->no_cmd_complete = 1;
 
-	info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
-	     pdev->vendor, pdev->device,
-	     pdev->subsystem_vendor, pdev->subsystem_device);
+	/* Clear all remaining event bits in Slot Status register */
+	if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f))
+		goto abort_ctrl;
 
-	if (pcie_init_hardware_part1(ctrl, dev))
-		goto abort;
-
-	if (pciehp_request_irq(ctrl))
-		goto abort;
+	/* Disable sotfware notification */
+	pcie_disable_notification(ctrl);
 
 	/*
 	 * If this is the first controller to be initialized,
@@ -1231,18 +1166,39 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev)
 	 */
 	if (atomic_add_return(1, &pciehp_num_controllers) == 1) {
 		pciehp_wq = create_singlethread_workqueue("pciehpd");
-		if (!pciehp_wq) {
-			goto abort_free_irq;
-		}
+		if (!pciehp_wq)
+			goto abort_ctrl;
 	}
 
-	if (pcie_init_hardware_part2(ctrl, dev))
-		goto abort_free_irq;
+	info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
+	     pdev->vendor, pdev->device,
+	     pdev->subsystem_vendor, pdev->subsystem_device);
+
+	if (pcie_init_slot(ctrl))
+		goto abort_ctrl;
 
-	return 0;
+	if (pcie_init_notification(ctrl))
+		goto abort_slot;
 
-abort_free_irq:
-	pciehp_free_irq(ctrl);
+	return ctrl;
+
+abort_slot:
+	pcie_cleanup_slot(ctrl);
+abort_ctrl:
+	kfree(ctrl);
 abort:
-	return -1;
+	return NULL;
+}
+
+void pcie_release_ctrl(struct controller *ctrl)
+{
+	pcie_shutdown_notification(ctrl);
+	pcie_cleanup_slot(ctrl);
+	/*
+	 * If this is the last controller to be released, destroy the
+	 * pciehp work queue
+	 */
+	if (atomic_dec_and_test(&pciehp_num_controllers))
+		destroy_workqueue(pciehp_wq);
+	kfree(ctrl);
 }