| author | Peter Zijlstra <peterz@infradead.org> | 2018-04-05 04:05:21 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2018-04-05 04:56:16 -0400 |
| commit | 317d359df95dd0cb7653d09b7fc513770590cf85 | |
| tree | 999cc75a56d86410682a1cb42e372006741ea1d5 | |
| parent | adcc8da8859bee9548bb6d323b1e8de8a7252acd | |
sched/core: Force proper alignment of 'struct util_est'
For some as yet not understood reason, Tony gets unaligned access
traps on IA64 because of:
struct util_est ue = READ_ONCE(p->se.avg.util_est);
and:
WRITE_ONCE(p->se.avg.util_est, ue);
introduced by commit:
d519329f72a6 ("sched/fair: Update util_est only on util_avg updates")
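For an 8-byte object, READ_ONCE()/WRITE_ONCE() boil down to a single 64-bit access, and IA64 only permits such an access from an 8-byte-aligned address. A rough user-space sketch of that failure mode (simplified model only; `struct util_est_model` and `read_once_model()` are made-up names, not the kernel's <linux/compiler.h> implementation):

```c
/*
 * Rough user-space model of the failure mode: for an 8-byte object,
 * READ_ONCE() is effectively one volatile 64-bit load, and IA64 traps
 * if that load is not 8-byte aligned. Names here are made up; the real
 * macro lives in <linux/compiler.h>.
 */
#include <stdint.h>
#include <string.h>

struct util_est_model {		/* stand-in for struct util_est: 8 bytes, 4-byte alignment */
	uint32_t enqueued;
	uint32_t ewma;
};

static struct util_est_model read_once_model(const volatile struct util_est_model *p)
{
	/* Single 64-bit access; this is what faults on IA64 when p is only 4-byte aligned. */
	uint64_t raw = *(const volatile uint64_t *)p;
	struct util_est_model ue;

	memcpy(&ue, &raw, sizeof(ue));
	return ue;
}

int main(void)
{
	/* Force 8-byte alignment here so the demo itself cannot trap. */
	_Alignas(uint64_t) struct util_est_model ue = { .enqueued = 3, .ewma = 5 };

	return read_once_model(&ue).ewma == 5 ? 0 : 1;
}
```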
Normally those two fields should end up on an 8-byte-aligned location,
but UP and RANDSTRUCT can mess that up, so enforce the alignment
explicitly.
Also make the alignment on sched_avg unconditional, as it is really
about data locality, not false-sharing.
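A minimal stand-alone sketch of what the attribute buys, assuming a typical ABI where u32 needs 4-byte and u64 needs 8-byte alignment (`util_est_before`/`util_est_after` are hypothetical names used only for the comparison):

```c
/*
 * Stand-alone illustration (hypothetical names, typical ABI assumed:
 * u32 wants 4-byte and u64 wants 8-byte alignment).
 */
#include <assert.h>
#include <stdalign.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

struct util_est_before {		/* the old definition */
	u32 enqueued;
	u32 ewma;
};

struct util_est_after {			/* what the patch changes it to */
	u32 enqueued;
	u32 ewma;
} __attribute__((__aligned__(sizeof(u64))));

/* Two u32s only demand 4-byte alignment, so the struct may land on an
 * address that is a multiple of 4 but not of 8 ... */
static_assert(alignof(struct util_est_before) == 4,
	      "typical ABI: only 4-byte alignment required");

/* ... while the attribute guarantees a boundary that a single 64-bit
 * READ_ONCE()/WRITE_ONCE() access is safe on. */
static_assert(alignof(struct util_est_after) == sizeof(u64),
	      "forced to 8-byte alignment");

int main(void)
{
	return 0;
}
```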
With or without this patch, the layout of sched_avg in an
ia64-defconfig build looks like this:
```
$ pahole -EC sched_avg ia64-defconfig/kernel/sched/core.o
die__process_function: tag not supported (INVALID)!
struct sched_avg {
	/* typedef u64 */ long long unsigned int last_update_time;  /*  0  8 */
	/* typedef u64 */ long long unsigned int load_sum;          /*  8  8 */
	/* typedef u64 */ long long unsigned int runnable_load_sum; /* 16  8 */
	/* typedef u32 */ unsigned int           util_sum;          /* 24  4 */
	/* typedef u32 */ unsigned int           period_contrib;    /* 28  4 */
	long unsigned int                        load_avg;          /* 32  8 */
	long unsigned int                        runnable_load_avg; /* 40  8 */
	long unsigned int                        util_avg;          /* 48  8 */
	struct util_est {
		unsigned int                     enqueued;           /* 56  4 */
		unsigned int                     ewma;               /* 60  4 */
	} util_est;                                                  /* 56  8 */
	/* --- cacheline 1 boundary (64 bytes) --- */

	/* size: 64, cachelines: 1, members: 9 */
};
```
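Those offsets can be cross-checked with a hypothetical user-space mock of the same layout; this is not the real <linux/sched.h> definition, and it assumes an LP64 target (8-byte long), as on ia64:

```c
/*
 * Hypothetical user-space mock mirroring the pahole dump above; not the
 * kernel's definition, and the checks assume LP64 (8-byte long), as on ia64.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

struct util_est {
	u32 enqueued;
	u32 ewma;
} __attribute__((__aligned__(sizeof(u64))));

struct sched_avg {
	u64		last_update_time;	/* offset  0 */
	u64		load_sum;		/* offset  8 */
	u64		runnable_load_sum;	/* offset 16 */
	u32		util_sum;		/* offset 24 */
	u32		period_contrib;		/* offset 28 */
	unsigned long	load_avg;		/* offset 32 */
	unsigned long	runnable_load_avg;	/* offset 40 */
	unsigned long	util_avg;		/* offset 48 */
	struct util_est	util_est;		/* offset 56 */
};

/* Same numbers pahole printed: util_est at byte 56 of a 64-byte struct,
 * i.e. naturally aligned for a single 64-bit access. */
static_assert(offsetof(struct sched_avg, util_est) == 56, "util_est at 56");
static_assert(offsetof(struct sched_avg, util_est) % sizeof(u64) == 0,
	      "8-byte aligned inside sched_avg");
static_assert(sizeof(struct sched_avg) == 64, "exactly one 64-byte cacheline");

int main(void)
{
	return 0;
}
```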
Reported-and-Tested-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Norbert Manthey <nmanthey@amazon.de>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony <tony.luck@intel.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Fixes: d519329f72a6 ("sched/fair: Update util_est only on util_avg updates")
Link: http://lkml.kernel.org/r/20180405080521.GG4129@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 include/linux/sched.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
```diff
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f228c6033832..b3d697f3b573 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -300,7 +300,7 @@ struct util_est {
 	unsigned int			enqueued;
 	unsigned int			ewma;
 #define UTIL_EST_WEIGHT_SHIFT		2
-};
+} __attribute__((__aligned__(sizeof(u64))));
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series
@@ -364,7 +364,7 @@ struct sched_avg {
 	unsigned long			runnable_load_avg;
 	unsigned long			util_avg;
 	struct util_est			util_est;
-};
+} ____cacheline_aligned;
 
 struct sched_statistics {
 #ifdef CONFIG_SCHEDSTATS
@@ -435,7 +435,7 @@ struct sched_entity {
 	 * Put into separate cache line so it does not
 	 * collide with read-mostly values above.
 	 */
-	struct sched_avg		avg ____cacheline_aligned_in_smp;
+	struct sched_avg		avg;
 #endif
 };
 
```
