author      Michael Ellerman <michael@ellerman.id.au>   2005-06-30 01:16:48 -0400
committer   Paul Mackerras <paulus@samba.org>           2005-06-30 01:16:48 -0400
commit      ffe1b7e14e6b606bd84cab564aa2f481dbd4e418
tree        91135759c4a00c49814fd192cb359252a1c01ae3 /arch/ppc64
parent      38fcdcfe38fc3f8972c906db64cd7d540b7760e8
[PATCH] ppc64: Formatting cleanups in arch/ppc64/kernel/ItLpQueue.c
Just formatting cleanups:
* rename some "nextLpEvent" variables to just "event"
* make code fit in 80 columns
* use brackets around if/else
* use a temporary to make hvlpevent_clear_valid clearer (see the sketch below)
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
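
The last cleanup is easiest to see in isolation. Below is a minimal, self-contained sketch of the pattern now used in hvlpevent_clear_valid(); LP_EVENT_ALIGN and struct lp_event are simplified stand-ins for the kernel's LpEventAlign and struct HvLpEvent, not the real iSeries definitions.

#define LP_EVENT_ALIGN	64	/* stand-in for LpEventAlign (64-byte chunks) */

struct lp_event {			/* stand-in for struct HvLpEvent */
	unsigned char valid;		/* stand-in for xFlags.xValid */
	unsigned char size_minus_1;	/* stand-in for xSizeMinus1 */
};

static void clear_valid(struct lp_event *event)
{
	struct lp_event *tmp;
	/* "extra" is the number of 64-byte chunks beyond the first; each
	 * chunk starts with a byte that could look like a valid bit. */
	unsigned extra = ((event->size_minus_1 + LP_EVENT_ALIGN) /
				LP_EVENT_ALIGN) - 1;

	switch (extra) {
	case 3:
		tmp = (struct lp_event *)((char *)event + 3 * LP_EVENT_ALIGN);
		tmp->valid = 0;
		/* fall through */
	case 2:
		tmp = (struct lp_event *)((char *)event + 2 * LP_EVENT_ALIGN);
		tmp->valid = 0;
		/* fall through */
	case 1:
		tmp = (struct lp_event *)((char *)event + 1 * LP_EVENT_ALIGN);
		tmp->valid = 0;
	}

	/* The real function issues mb() here, so the trailing chunks are
	 * seen as cleared before the event itself is invalidated. */
	event->valid = 0;
}

Assigning each chunk's address to tmp is what replaces the old one-line cast expressions, and the deliberate fall-through means an event spanning four chunks clears the flag bytes of chunks 3, 2 and 1 before the first.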
Diffstat (limited to 'arch/ppc64')
-rw-r--r--   arch/ppc64/kernel/ItLpQueue.c | 72
1 file changed, 41 insertions(+), 31 deletions(-)
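
For reference, the pointer advance that the diff below reflows onto two lines is a round-up-to-multiple calculation. A small standalone check of that arithmetic (plain C, nothing iSeries-specific):

#include <assert.h>

#define LpEventAlign 64

/* ((xSizeMinus1 + LpEventAlign) / LpEventAlign) * LpEventAlign rounds an
 * event's size up to the next multiple of the 64-byte event alignment,
 * which is how get_next_hvlpevent() steps xSlicCurEventPtr past an event. */
static unsigned rounded_event_size(unsigned xSizeMinus1)
{
	return ((xSizeMinus1 + LpEventAlign) / LpEventAlign) * LpEventAlign;
}

int main(void)
{
	assert(rounded_event_size(0) == 64);	/* 1-byte event: one chunk */
	assert(rounded_event_size(63) == 64);	/* 64-byte event: one chunk */
	assert(rounded_event_size(64) == 128);	/* 65-byte event: two chunks */
	assert(rounded_event_size(200) == 256);	/* 201-byte event: four chunks */
	return 0;
}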
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
index 61a9dbdd295a..a4f32cbf5297 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/ppc64/kernel/ItLpQueue.c
@@ -74,24 +74,27 @@ unsigned long ItLpQueueInProcess = 0;
 
 static struct HvLpEvent * get_next_hvlpevent(void)
 {
-	struct HvLpEvent * nextLpEvent =
-		(struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
-	if (nextLpEvent->xFlags.xValid) {
+	struct HvLpEvent * event;
+	event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+
+	if (event->xFlags.xValid) {
 		/* rmb() needed only for weakly consistent machines (regatta) */
 		rmb();
 		/* Set pointer to next potential event */
-		hvlpevent_queue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
-				      LpEventAlign) /
-				      LpEventAlign) *
-				      LpEventAlign;
+		hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
+				LpEventAlign) / LpEventAlign) * LpEventAlign;
+
 		/* Wrap to beginning if no room at end */
-		if (hvlpevent_queue.xSlicCurEventPtr > hvlpevent_queue.xSlicLastValidEventPtr)
-			hvlpevent_queue.xSlicCurEventPtr = hvlpevent_queue.xSlicEventStackPtr;
+		if (hvlpevent_queue.xSlicCurEventPtr >
+				hvlpevent_queue.xSlicLastValidEventPtr) {
+			hvlpevent_queue.xSlicCurEventPtr =
+				hvlpevent_queue.xSlicEventStackPtr;
+		}
+	} else {
+		event = NULL;
 	}
-	else
-		nextLpEvent = NULL;
 
-	return nextLpEvent;
+	return event;
 }
 
 static unsigned long spread_lpevents = NR_CPUS;
@@ -104,34 +107,41 @@ int hvlpevent_is_pending(void)
 		return 0;
 
 	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
-	return next_event->xFlags.xValid | hvlpevent_queue.xPlicOverflowIntPending;
+
+	return next_event->xFlags.xValid |
+		hvlpevent_queue.xPlicOverflowIntPending;
 }
 
 static void hvlpevent_clear_valid(struct HvLpEvent * event)
 {
-	/* Clear the valid bit of the event
-	 * Also clear bits within this event that might
-	 * look like valid bits (on 64-byte boundaries)
+	/* Tell the Hypervisor that we're done with this event.
+	 * Also clear bits within this event that might look like valid bits.
+	 * ie. on 64-byte boundaries.
 	 */
+	struct HvLpEvent *tmp;
 	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
 						 LpEventAlign) - 1;
+
 	switch (extra) {
 	case 3:
-		((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
+		tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
 	case 2:
-		((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
+		tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
 	case 1:
-		((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
-	case 0:
-		;
+		tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
 	}
+
 	mb();
+
 	event->xFlags.xValid = 0;
 }
 
 void process_hvlpevents(struct pt_regs *regs)
 {
-	struct HvLpEvent * nextLpEvent;
+	struct HvLpEvent * event;
 
 	/* If we have recursed, just return */
 	if ( !set_inUse() )
@@ -143,8 +153,8 @@ void process_hvlpevents(struct pt_regs *regs)
 		BUG();
 
 	for (;;) {
-		nextLpEvent = get_next_hvlpevent();
-		if (nextLpEvent) {
+		event = get_next_hvlpevent();
+		if (event) {
 			/* Call appropriate handler here, passing
 			 * a pointer to the LpEvent.  The handler
 			 * must make a copy of the LpEvent if it
@@ -158,15 +168,15 @@ void process_hvlpevents(struct pt_regs *regs)
 			 * registered for, so no type check is necessary
 			 * here!
 			 */
-			if (nextLpEvent->xType < HvLpEvent_Type_NumTypes)
-				__get_cpu_var(hvlpevent_counts)[nextLpEvent->xType]++;
-			if (nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
-					lpEventHandler[nextLpEvent->xType])
-				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
+			if (event->xType < HvLpEvent_Type_NumTypes)
+				__get_cpu_var(hvlpevent_counts)[event->xType]++;
+			if (event->xType < HvLpEvent_Type_NumTypes &&
+					lpEventHandler[event->xType])
+				lpEventHandler[event->xType](event, regs);
 			else
-				printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
+				printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );
 
-			hvlpevent_clear_valid(nextLpEvent);
+			hvlpevent_clear_valid(event);
 		} else if (hvlpevent_queue.xPlicOverflowIntPending)
 			/*
 			 * No more valid events. If overflow events are