 arch/powerpc/kernel/paca.c                |  1 -
 arch/powerpc/platforms/iseries/lpevents.c | 55 ++++++++++++++++-------------
 arch/powerpc/platforms/iseries/proc.c     |  1 -
 include/asm-powerpc/iseries/it_lp_queue.h | 40 ++++++++++++-------------
 4 files changed, 48 insertions(+), 49 deletions(-)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index f505a8827e3e..a0bb354c1c08 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -16,7 +16,6 @@
 #include <asm/ptrace.h>
 #include <asm/page.h>
 #include <asm/lppaca.h>
-#include <asm/iseries/it_lp_queue.h>
 #include <asm/iseries/it_lp_reg_save.h>
 #include <asm/paca.h>
 
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
index 8ca7b9396355..2a9f81ea27d6 100644
--- a/arch/powerpc/platforms/iseries/lpevents.c
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -51,20 +51,21 @@ static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
 static struct HvLpEvent * get_next_hvlpevent(void)
 {
 	struct HvLpEvent * event;
-	event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+	event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
 
 	if (hvlpevent_is_valid(event)) {
 		/* rmb() needed only for weakly consistent machines (regatta) */
 		rmb();
 		/* Set pointer to next potential event */
-		hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
-				LpEventAlign) / LpEventAlign) * LpEventAlign;
+		hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
+				IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
+				IT_LP_EVENT_ALIGN;
 
 		/* Wrap to beginning if no room at end */
-		if (hvlpevent_queue.xSlicCurEventPtr >
-				hvlpevent_queue.xSlicLastValidEventPtr) {
-			hvlpevent_queue.xSlicCurEventPtr =
-				hvlpevent_queue.xSlicEventStackPtr;
+		if (hvlpevent_queue.hq_current_event >
+				hvlpevent_queue.hq_last_event) {
+			hvlpevent_queue.hq_current_event =
+				hvlpevent_queue.hq_event_stack;
 		}
 	} else {
 		event = NULL;
@@ -82,10 +83,10 @@ int hvlpevent_is_pending(void)
 	if (smp_processor_id() >= spread_lpevents)
 		return 0;
 
-	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+	next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
 
 	return hvlpevent_is_valid(next_event) ||
-		hvlpevent_queue.xPlicOverflowIntPending;
+		hvlpevent_queue.hq_overflow_pending;
 }
 
 static void hvlpevent_clear_valid(struct HvLpEvent * event)
@@ -95,18 +96,18 @@ static void hvlpevent_clear_valid(struct HvLpEvent * event)
 	 * ie. on 64-byte boundaries.
 	 */
 	struct HvLpEvent *tmp;
-	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
-			LpEventAlign) - 1;
+	unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
+			IT_LP_EVENT_ALIGN) - 1;
 
 	switch (extra) {
 	case 3:
-		tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
+		tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN);
 		hvlpevent_invalidate(tmp);
 	case 2:
-		tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
+		tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN);
 		hvlpevent_invalidate(tmp);
 	case 1:
-		tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
+		tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN);
 		hvlpevent_invalidate(tmp);
 	}
 
@@ -120,7 +121,7 @@ void process_hvlpevents(struct pt_regs *regs)
 	struct HvLpEvent * event;
 
 	/* If we have recursed, just return */
-	if (!spin_trylock(&hvlpevent_queue.lock))
+	if (!spin_trylock(&hvlpevent_queue.hq_lock))
 		return;
 
 	for (;;) {
@@ -148,17 +149,17 @@ void process_hvlpevents(struct pt_regs *regs)
 			printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );
 
 			hvlpevent_clear_valid(event);
-		} else if (hvlpevent_queue.xPlicOverflowIntPending)
+		} else if (hvlpevent_queue.hq_overflow_pending)
 			/*
 			 * No more valid events. If overflow events are
 			 * pending process them
 			 */
-			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
+			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
 		else
 			break;
 	}
 
-	spin_unlock(&hvlpevent_queue.lock);
+	spin_unlock(&hvlpevent_queue.hq_lock);
 }
 
 static int set_spread_lpevents(char *str)
@@ -184,20 +185,20 @@ void setup_hvlpevent_queue(void)
 {
 	void *eventStack;
 
-	spin_lock_init(&hvlpevent_queue.lock);
+	spin_lock_init(&hvlpevent_queue.hq_lock);
 
 	/* Allocate a page for the Event Stack. */
-	eventStack = alloc_bootmem_pages(LpEventStackSize);
-	memset(eventStack, 0, LpEventStackSize);
+	eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
+	memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);
 
 	/* Invoke the hypervisor to initialize the event stack */
-	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
+	HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);
 
-	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
-	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
-	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
-			(LpEventStackSize - LpEventMaxSize);
-	hvlpevent_queue.xIndex = 0;
+	hvlpevent_queue.hq_event_stack = eventStack;
+	hvlpevent_queue.hq_current_event = eventStack;
+	hvlpevent_queue.hq_last_event = (char *)eventStack +
+			(IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
+	hvlpevent_queue.hq_index = 0;
 }
 
 /* Register a handler for an LpEvent type */
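The arithmetic being renamed above is worth spelling out: xSizeMinus1 (as the name suggests) holds the event's size minus one, so ((xSizeMinus1 + IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) * IT_LP_EVENT_ALIGN in get_next_hvlpevent() rounds the true size up to the next 64-byte slot boundary, and the same quotient minus one is the `extra` count driving the fall-through switch in hvlpevent_clear_valid(). A standalone userspace sketch of the rounding (not part of the patch; it borrows only the IT_LP_EVENT_ALIGN constant from the header):

	/*
	 * Sketch: how the queue-advance step rounds an event's size up
	 * to the next IT_LP_EVENT_ALIGN (64-byte) boundary. xSizeMinus1
	 * is the event size minus one, so adding the alignment before
	 * the truncating division lands on the correct slot count.
	 */
	#include <stdio.h>

	#define IT_LP_EVENT_ALIGN	64

	int main(void)
	{
		/* events of 64, 65, 201 and 256 bytes respectively */
		unsigned size_minus_1[] = { 63, 64, 200, 255 };
		unsigned i;

		for (i = 0; i < sizeof(size_minus_1) / sizeof(size_minus_1[0]); i++) {
			unsigned step = ((size_minus_1[i] + IT_LP_EVENT_ALIGN) /
					IT_LP_EVENT_ALIGN) * IT_LP_EVENT_ALIGN;
			printf("xSizeMinus1=%u -> advance %u bytes\n",
			       size_minus_1[i], step);
		}
		return 0;
	}

For a maximum-size 256-byte event (xSizeMinus1 = 255) the quotient is 4: the queue pointer advances 256 bytes and `extra` is 3, matching the deepest case of the invalidation switch.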
diff --git a/arch/powerpc/platforms/iseries/proc.c b/arch/powerpc/platforms/iseries/proc.c
index e68b6b5fa89f..c241413629ac 100644
--- a/arch/powerpc/platforms/iseries/proc.c
+++ b/arch/powerpc/platforms/iseries/proc.c
@@ -24,7 +24,6 @@
 #include <asm/processor.h>
 #include <asm/time.h>
 #include <asm/lppaca.h>
-#include <asm/iseries/it_lp_queue.h>
 #include <asm/iseries/hv_call_xm.h>
 
 #include "processor_vpd.h"
diff --git a/include/asm-powerpc/iseries/it_lp_queue.h b/include/asm-powerpc/iseries/it_lp_queue.h
index b7c6fc12cce2..284c5a7db3ac 100644
--- a/include/asm-powerpc/iseries/it_lp_queue.h
+++ b/include/asm-powerpc/iseries/it_lp_queue.h
@@ -29,20 +29,20 @@
 
 struct HvLpEvent;
 
-#define ITMaxLpQueues	8
+#define IT_LP_MAX_QUEUES	8
 
-#define NotUsed		0	// Queue will not be used by PLIC
-#define DedicatedIo	1	// Queue dedicated to IO processor specified
-#define DedicatedLp	2	// Queue dedicated to LP specified
-#define Shared		3	// Queue shared for both IO and LP
+#define IT_LP_NOT_USED		0	/* Queue will not be used by PLIC */
+#define IT_LP_DEDICATED_IO	1	/* Queue dedicated to IO processor specified */
+#define IT_LP_DEDICATED_LP	2	/* Queue dedicated to LP specified */
+#define IT_LP_SHARED		3	/* Queue shared for both IO and LP */
 
-#define LpEventStackSize	4096
-#define LpEventMaxSize		256
-#define LpEventAlign		64
+#define IT_LP_EVENT_STACK_SIZE	4096
+#define IT_LP_EVENT_MAX_SIZE	256
+#define IT_LP_EVENT_ALIGN	64
 
 struct hvlpevent_queue {
 /*
- * The xSlicCurEventPtr is the pointer to the next event stack entry
+ * The hq_current_event is the pointer to the next event stack entry
  * that will become valid.  The OS must peek at this entry to determine
  * if it is valid.  PLIC will set the valid indicator as the very last
  * store into that entry.
@@ -52,23 +52,23 @@ struct hvlpevent_queue {
  * location again.
  *
  * If the event stack fills and there are overflow events, then PLIC
- * will set the xPlicOverflowIntPending flag in which case the OS will
+ * will set the hq_overflow_pending flag in which case the OS will
  * have to fetch the additional LP events once they have drained the
  * event stack.
  *
  * The first 16-bytes are known by both the OS and PLIC.  The remainder
  * of the cache line is for use by the OS.
  */
-	u8	xPlicOverflowIntPending; // 0x00 Overflow events are pending
-	u8	xPlicStatus;		// 0x01 DedicatedIo or DedicatedLp or NotUsed
-	u16	xSlicLogicalProcIndex;	// 0x02 Logical Proc Index for correlation
-	u8	xPlicRsvd[12];		// 0x04
-	char	*xSlicCurEventPtr;	// 0x10
-	char	*xSlicLastValidEventPtr; // 0x18
-	char	*xSlicEventStackPtr;	// 0x20
-	u8	xIndex;			// 0x28 unique sequential index.
-	u8	xSlicRsvd[3];		// 0x29-2b
-	spinlock_t lock;
+	u8	hq_overflow_pending;	/* 0x00 Overflow events are pending */
+	u8	hq_status;		/* 0x01 DedicatedIo or DedicatedLp or NotUsed */
+	u16	hq_proc_index;		/* 0x02 Logical Proc Index for correlation */
+	u8	hq_reserved1[12];	/* 0x04 */
+	char	*hq_current_event;	/* 0x10 */
+	char	*hq_last_event;		/* 0x18 */
+	char	*hq_event_stack;	/* 0x20 */
+	u8	hq_index;		/* 0x28 unique sequential index. */
+	u8	hq_reserved2[3];	/* 0x29-2b */
+	spinlock_t hq_lock;
 };
 
 extern struct hvlpevent_queue hvlpevent_queue;
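The renames in this header are mechanical, but the offsets in the member comments (0x00 through 0x28) document the layout contract with PLIC and must survive them. A standalone sketch that checks the documented offsets with offsetof (not part of the patch; it assumes a 64-bit build with 8-byte pointers, which is what the 0x10/0x18/0x20 offsets imply, and substitutes <stdint.h> types for the kernel's u8/u16 and a plain int for spinlock_t):

	/*
	 * Sketch: verify that the renamed hvlpevent_queue members keep
	 * the offsets documented in the header comments.
	 */
	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	typedef uint8_t  u8;
	typedef uint16_t u16;
	typedef int spinlock_t;		/* stand-in for the kernel type */

	struct hvlpevent_queue {
		u8	hq_overflow_pending;	/* 0x00 */
		u8	hq_status;		/* 0x01 */
		u16	hq_proc_index;		/* 0x02 */
		u8	hq_reserved1[12];	/* 0x04 */
		char	*hq_current_event;	/* 0x10 */
		char	*hq_last_event;		/* 0x18 */
		char	*hq_event_stack;	/* 0x20 */
		u8	hq_index;		/* 0x28 */
		u8	hq_reserved2[3];	/* 0x29-0x2b */
		spinlock_t hq_lock;
	};

	int main(void)
	{
		/* the first 16 bytes are the part PLIC also reads and writes */
		assert(offsetof(struct hvlpevent_queue, hq_status) == 0x01);
		assert(offsetof(struct hvlpevent_queue, hq_proc_index) == 0x02);
		assert(offsetof(struct hvlpevent_queue, hq_reserved1) == 0x04);
		/* OS-side pointers and index follow on the same cache line */
		assert(offsetof(struct hvlpevent_queue, hq_current_event) == 0x10);
		assert(offsetof(struct hvlpevent_queue, hq_last_event) == 0x18);
		assert(offsetof(struct hvlpevent_queue, hq_event_stack) == 0x20);
		assert(offsetof(struct hvlpevent_queue, hq_index) == 0x28);
		return 0;
	}

Since the patch only renames members and constants without reordering or resizing anything, every assertion here holds before and after it, which is exactly the property a rename-only change needs.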
