author    Michael Ellerman <michael@ellerman.id.au>    2005-06-30 01:16:28 -0400
committer Paul Mackerras <paulus@samba.org>            2005-06-30 01:16:28 -0400
commit    38fcdcfe38fc3f8972c906db64cd7d540b7760e8 (patch)
tree      f1edfa2dffd6c2c244c7c80a68b719e5459e34b6 /arch
parent    9b0470200a2441766599ad84f92ab9daca8ed86d (diff)
[PATCH] ppc64: Cleanup whitespace in arch/ppc64/kernel/ItLpQueue.c
Just cleanup white space.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch')
-rw-r--r--    arch/ppc64/kernel/ItLpQueue.c    58
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
index 83fb36a9bc09..61a9dbdd295a 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/ppc64/kernel/ItLpQueue.c
@@ -1,7 +1,7 @@
 /*
  * ItLpQueue.c
  * Copyright (C) 2001 Mike Corrigan IBM Corporation
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -74,21 +74,21 @@ unsigned long ItLpQueueInProcess = 0;
 
 static struct HvLpEvent * get_next_hvlpevent(void)
 {
 	struct HvLpEvent * nextLpEvent =
 		(struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
-	if ( nextLpEvent->xFlags.xValid ) {
+	if (nextLpEvent->xFlags.xValid) {
 		/* rmb() needed only for weakly consistent machines (regatta) */
 		rmb();
 		/* Set pointer to next potential event */
 		hvlpevent_queue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
-				      LpEventAlign ) /
-				      LpEventAlign ) *
+				      LpEventAlign) /
+				      LpEventAlign) *
 				      LpEventAlign;
 		/* Wrap to beginning if no room at end */
 		if (hvlpevent_queue.xSlicCurEventPtr > hvlpevent_queue.xSlicLastValidEventPtr)
 			hvlpevent_queue.xSlicCurEventPtr = hvlpevent_queue.xSlicEventStackPtr;
 	}
 	else
 		nextLpEvent = NULL;
 
 	return nextLpEvent;
@@ -107,23 +107,23 @@ int hvlpevent_is_pending(void)
 	return next_event->xFlags.xValid | hvlpevent_queue.xPlicOverflowIntPending;
 }
 
-static void hvlpevent_clear_valid( struct HvLpEvent * event )
+static void hvlpevent_clear_valid(struct HvLpEvent * event)
 {
 	/* Clear the valid bit of the event
 	 * Also clear bits within this event that might
 	 * look like valid bits (on 64-byte boundaries)
 	 */
-	unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
-			  LpEventAlign ) - 1;
-	switch ( extra ) {
+	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
+			  LpEventAlign) - 1;
+	switch (extra) {
 	case 3:
 		((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
 	case 2:
 		((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
 	case 1:
 		((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
 	case 0:
 		;
 	}
 	mb();
 	event->xFlags.xValid = 0;
@@ -136,7 +136,7 @@ void process_hvlpevents(struct pt_regs *regs)
 	/* If we have recursed, just return */
 	if ( !set_inUse() )
 		return;
-	
+
 	if (ItLpQueueInProcess == 0)
 		ItLpQueueInProcess = 1;
 	else
@@ -144,35 +144,35 @@ void process_hvlpevents(struct pt_regs *regs)
 
 	for (;;) {
 		nextLpEvent = get_next_hvlpevent();
-		if ( nextLpEvent ) {
+		if (nextLpEvent) {
 			/* Call appropriate handler here, passing
 			 * a pointer to the LpEvent. The handler
 			 * must make a copy of the LpEvent if it
 			 * needs it in a bottom half. (perhaps for
 			 * an ACK)
 			 *
 			 * Handlers are responsible for ACK processing
 			 *
 			 * The Hypervisor guarantees that LpEvents will
 			 * only be delivered with types that we have
 			 * registered for, so no type check is necessary
 			 * here!
 			 */
-			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
+			if (nextLpEvent->xType < HvLpEvent_Type_NumTypes)
 				__get_cpu_var(hvlpevent_counts)[nextLpEvent->xType]++;
-			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
-			     lpEventHandler[nextLpEvent->xType] )
+			if (nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
+			     lpEventHandler[nextLpEvent->xType])
 				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
 			else
 				printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
 
-			hvlpevent_clear_valid( nextLpEvent );
-		} else if ( hvlpevent_queue.xPlicOverflowIntPending )
+			hvlpevent_clear_valid(nextLpEvent);
+		} else if (hvlpevent_queue.xPlicOverflowIntPending)
			/*
			 * No more valid events. If overflow events are
			 * pending process them
			 */
-			HvCallEvent_getOverflowLpEvents( hvlpevent_queue.xIndex);
+			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
 		else
 			break;
 	}
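
As an aside for anyone tracing the arithmetic this patch reindents: below is a
minimal, standalone C sketch of the size rounding in get_next_hvlpevent() and
the `extra` count that drives the switch fall-through in
hvlpevent_clear_valid(). The constant value and helper names are illustrative
assumptions, not the kernel's own definitions; the real LpEventAlign comes
from the iSeries headers, and 64 bytes matches the boundary the code's own
comment mentions.

#include <stdio.h>

/* Assumed alignment for illustration; the in-kernel LpEventAlign is
 * defined in the iSeries headers, and the code's comment says valid
 * bits sit on 64-byte boundaries. */
#define LP_EVENT_ALIGN 64

/* Distance to the next event: round the event size up to the next
 * 64-byte boundary. xSizeMinus1 is the size minus one, so adding the
 * full alignment and truncating rounds up correctly. */
static unsigned lp_event_stride(unsigned xSizeMinus1)
{
	return ((xSizeMinus1 + LP_EVENT_ALIGN) / LP_EVENT_ALIGN)
		* LP_EVENT_ALIGN;
}

/* Number of trailing 64-byte blocks whose first byte could be mistaken
 * for a valid bit -- the `extra` behind the case 3/2/1/0 fall-through. */
static unsigned lp_event_extra(unsigned xSizeMinus1)
{
	return (xSizeMinus1 + LP_EVENT_ALIGN) / LP_EVENT_ALIGN - 1;
}

int main(void)
{
	/* A 24-byte event fills one block: stride 64, extra 0.
	 * A 200-byte event spans four blocks: stride 256, extra 3. */
	printf("stride=%u extra=%u\n", lp_event_stride(23), lp_event_extra(23));
	printf("stride=%u extra=%u\n", lp_event_stride(199), lp_event_extra(199));
	return 0;
}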