Diffstat (limited to 'arch/i386/kernel/smpboot.c')
-rw-r--r--  arch/i386/kernel/smpboot.c | 62
1 file changed, 33 insertions(+), 29 deletions(-)
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 6f5fea05f1d7..f948419c888a 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -212,14 +212,20 @@ valid_k7:
  * then we print a warning if not, and always resync.
  */
 
-static atomic_t tsc_start_flag = ATOMIC_INIT(0);
-static atomic_t tsc_count_start = ATOMIC_INIT(0);
-static atomic_t tsc_count_stop = ATOMIC_INIT(0);
-static unsigned long long tsc_values[NR_CPUS];
+static struct {
+	atomic_t start_flag;
+	atomic_t count_start;
+	atomic_t count_stop;
+	unsigned long long values[NR_CPUS];
+} tsc __initdata = {
+	.start_flag = ATOMIC_INIT(0),
+	.count_start = ATOMIC_INIT(0),
+	.count_stop = ATOMIC_INIT(0),
+};
 
 #define NR_LOOPS 5
 
-static void __init synchronize_tsc_bp (void)
+static void __init synchronize_tsc_bp(void)
 {
 	int i;
 	unsigned long long t0;
@@ -233,7 +239,7 @@ static void __init synchronize_tsc_bp (void)
 	/* convert from kcyc/sec to cyc/usec */
 	one_usec = cpu_khz / 1000;
 
-	atomic_set(&tsc_start_flag, 1);
+	atomic_set(&tsc.start_flag, 1);
 	wmb();
 
 	/*
@@ -250,16 +256,16 @@ static void __init synchronize_tsc_bp (void)
 		/*
 		 * all APs synchronize but they loop on '== num_cpus'
 		 */
-		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
+		while (atomic_read(&tsc.count_start) != num_booting_cpus()-1)
 			cpu_relax();
-		atomic_set(&tsc_count_stop, 0);
+		atomic_set(&tsc.count_stop, 0);
 		wmb();
 		/*
 		 * this lets the APs save their current TSC:
 		 */
-		atomic_inc(&tsc_count_start);
+		atomic_inc(&tsc.count_start);
 
-		rdtscll(tsc_values[smp_processor_id()]);
+		rdtscll(tsc.values[smp_processor_id()]);
 		/*
 		 * We clear the TSC in the last loop:
 		 */
@@ -269,56 +275,54 @@ static void __init synchronize_tsc_bp (void)
 		/*
 		 * Wait for all APs to leave the synchronization point:
 		 */
-		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
+		while (atomic_read(&tsc.count_stop) != num_booting_cpus()-1)
 			cpu_relax();
-		atomic_set(&tsc_count_start, 0);
+		atomic_set(&tsc.count_start, 0);
 		wmb();
-		atomic_inc(&tsc_count_stop);
+		atomic_inc(&tsc.count_stop);
 	}
 
 	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_isset(i, cpu_callout_map)) {
-			t0 = tsc_values[i];
+			t0 = tsc.values[i];
 			sum += t0;
 		}
 	}
 	avg = sum;
 	do_div(avg, num_booting_cpus());
 
-	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
 		if (!cpu_isset(i, cpu_callout_map))
 			continue;
-		delta = tsc_values[i] - avg;
+		delta = tsc.values[i] - avg;
 		if (delta < 0)
 			delta = -delta;
 		/*
 		 * We report bigger than 2 microseconds clock differences.
 		 */
 		if (delta > 2*one_usec) {
-			long realdelta;
+			long long realdelta;
+
 			if (!buggy) {
 				buggy = 1;
 				printk("\n");
 			}
 			realdelta = delta;
 			do_div(realdelta, one_usec);
-			if (tsc_values[i] < avg)
+			if (tsc.values[i] < avg)
 				realdelta = -realdelta;
 
-			if (realdelta > 0)
-				printk(KERN_INFO "CPU#%d had %ld usecs TSC "
+			if (realdelta)
+				printk(KERN_INFO "CPU#%d had %Ld usecs TSC "
 					"skew, fixed it up.\n", i, realdelta);
 		}
-
-		sum += delta;
 	}
 	if (!buggy)
 		printk("passed.\n");
 }
 
-static void __init synchronize_tsc_ap (void)
+static void __init synchronize_tsc_ap(void)
 {
 	int i;
 
@@ -327,20 +331,20 @@ static void __init synchronize_tsc_ap (void)
 	 * this gets called, so we first wait for the BP to
 	 * finish SMP initialization:
 	 */
-	while (!atomic_read(&tsc_start_flag))
+	while (!atomic_read(&tsc.start_flag))
 		cpu_relax();
 
 	for (i = 0; i < NR_LOOPS; i++) {
-		atomic_inc(&tsc_count_start);
-		while (atomic_read(&tsc_count_start) != num_booting_cpus())
+		atomic_inc(&tsc.count_start);
+		while (atomic_read(&tsc.count_start) != num_booting_cpus())
 			cpu_relax();
 
-		rdtscll(tsc_values[smp_processor_id()]);
+		rdtscll(tsc.values[smp_processor_id()]);
 		if (i == NR_LOOPS-1)
 			write_tsc(0, 0);
 
-		atomic_inc(&tsc_count_stop);
-		while (atomic_read(&tsc_count_stop) != num_booting_cpus())
+		atomic_inc(&tsc.count_stop);
+		while (atomic_read(&tsc.count_stop) != num_booting_cpus())
 			cpu_relax();
 	}
 }
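
The hunks above only move the synchronization variables into the tsc struct (marked __initdata so the memory is discarded after boot); the count_start/count_stop rendezvous between the BP and the APs is unchanged. As a rough illustration of that rendezvous, here is a user-space sketch of my own construction, not kernel code: NR_THREADS, boot_thread(), secondary_thread(), and now_ns() are invented names, C11 atomics and pthreads stand in for atomic_t and real CPUs, and clock_gettime() stands in for rdtscll().

/*
 * User-space analogue of the count_start/count_stop rendezvous in
 * synchronize_tsc_bp()/synchronize_tsc_ap().  Illustrative only.
 * Build with: cc -O2 -pthread tsc_sync_demo.c
 */
#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define NR_THREADS	4		/* one "BP" plus three "APs" */
#define NR_LOOPS	5

static struct {
	atomic_int start_flag;
	atomic_int count_start;
	atomic_int count_stop;
	long long values[NR_THREADS];
} state;				/* zero-initialized, like the kernel's tsc struct */

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Mirrors synchronize_tsc_bp(): release the APs, rendezvous NR_LOOPS times. */
static void *boot_thread(void *arg)
{
	long long sum = 0, avg;
	int i;

	(void)arg;
	atomic_store(&state.start_flag, 1);

	for (i = 0; i < NR_LOOPS; i++) {
		/* wait until every secondary thread has checked in */
		while (atomic_load(&state.count_start) != NR_THREADS - 1)
			;			/* cpu_relax() in the kernel */
		atomic_store(&state.count_stop, 0);
		/* this lets the secondaries sample their clocks */
		atomic_fetch_add(&state.count_start, 1);
		state.values[0] = now_ns();	/* "BP" samples its own clock */

		/* wait for them to leave the synchronization point */
		while (atomic_load(&state.count_stop) != NR_THREADS - 1)
			;
		atomic_store(&state.count_start, 0);
		atomic_fetch_add(&state.count_stop, 1);
	}

	for (i = 0; i < NR_THREADS; i++)
		sum += state.values[i];
	avg = sum / NR_THREADS;
	for (i = 0; i < NR_THREADS; i++)
		printf("thread %d: %+lld ns from average\n", i,
		       state.values[i] - avg);
	return NULL;
}

/* Mirrors synchronize_tsc_ap(): wait for the "BP", then join each rendezvous. */
static void *secondary_thread(void *arg)
{
	int id = (int)(long)arg, i;

	while (!atomic_load(&state.start_flag))
		;

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_fetch_add(&state.count_start, 1);
		while (atomic_load(&state.count_start) != NR_THREADS)
			;
		state.values[id] = now_ns();
		atomic_fetch_add(&state.count_stop, 1);
		while (atomic_load(&state.count_stop) != NR_THREADS)
			;
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NR_THREADS];
	int i;

	for (i = 1; i < NR_THREADS; i++)
		pthread_create(&t[i], NULL, secondary_thread, (void *)(long)i);
	boot_thread(NULL);
	for (i = 1; i < NR_THREADS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

The counter protocol is the same as in the patch: the coordinator only resets one counter after the other has proven that every participant passed the previous phase, which is why the two counters can be reused across all NR_LOOPS iterations without an explicit barrier primitive.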
