[PATCH] x86_64: Don't try to synchronize the TSC across CPUs at boot on Intel CPUs.

Intel CPUs already keep the TSC synchronized across CPUs in hardware,
and the Linux software synchronization algorithm actually adds error.
Skip the boot-time sync when unsynchronized_tsc() reports that the
TSCs are already synchronized.  Since unsynchronized_tsc() is now
also called while secondary CPUs are being brought up, base its
multi-socket heuristic on num_present_cpus() instead of
num_online_cpus().
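
For illustration, a minimal userspace sketch of what a synchronized
TSC looks like, assuming a Linux host with at least two CPUs; the
tsc_on_cpu() helper and the back-to-back comparison are only an
example, not part of this patch.  On hardware with a synchronized TSC
the delta stays small and never goes negative:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <x86intrin.h>		/* __rdtsc() */

	/* Pin the calling thread to one CPU, then read that CPU's TSC. */
	static uint64_t tsc_on_cpu(int cpu)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(cpu, &set);
		if (sched_setaffinity(0, sizeof(set), &set) < 0)
			return 0;	/* CPU not present or not allowed */
		return __rdtsc();
	}

	int main(void)
	{
		/* Illustrative only: readings taken back to back on
		 * CPU 0 and CPU 1. */
		uint64_t t0 = tsc_on_cpu(0);
		uint64_t t1 = tsc_on_cpu(1);

		printf("cpu0=%llu cpu1=%llu delta=%lld\n",
		       (unsigned long long)t0, (unsigned long long)t1,
		       (long long)(t1 - t0));
		return 0;
	}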

Cc: mingo@elte.hu
Cc: rohit.seth@intel.com

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index e6af93b..9947285 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -335,7 +335,13 @@
 
 static void __cpuinit tsc_sync_wait(void)
 {
-	if (notscsync || !cpu_has_tsc)
+	/*
+	 * When the CPU has synchronized TSCs assume the BIOS
+	 * or the hardware already synced.  Otherwise we could
+	 * mess up a possible perfect synchronization with a
+	 * not-quite-perfect algorithm.
+	 */
+	if (notscsync || !cpu_has_tsc || !unsynchronized_tsc())
 		return;
 	sync_tsc(0);
 }
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 211bf0e..f8d4b69 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -953,7 +953,7 @@
  * Make an educated guess if the TSC is trustworthy and synchronized
  * over all CPUs.
  */
-static __init int unsynchronized_tsc(void)
+__init int unsynchronized_tsc(void)
 {
 #ifdef CONFIG_SMP
 	if (oem_force_hpet_timer())
@@ -964,7 +964,7 @@
  		return 0;
 #endif
  	/* Assume multi socket systems are not synchronized */
- 	return num_online_cpus() > 1;
+ 	return num_present_cpus() > 1;
 }
 
 /*
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 56dc70b..115e496 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -89,6 +89,8 @@
 
 extern int unhandled_signal(struct task_struct *tsk, int sig);
 
+extern int unsynchronized_tsc(void);
+
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 
 extern void gart_parse_options(char *);