author		David S. Miller <davem@davemloft.net>	2015-03-03 21:16:48 -0500
committer	David S. Miller <davem@davemloft.net>	2015-03-03 21:16:48 -0500
commit		71a83a6db6138b9d41d8a0b6b91cb59f6dc4742c (patch)
tree		f74b6e4e48257ec6ce40b95645ecb8533b9cc1f8 /arch/s390/kernel/cache.c
parent		b97526f3ff95f92b107f0fb52cbb8627e395429b (diff)
parent		a6c5170d1edea97c538c81e377e56c7b5c5b7e63 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/rocker/rocker.c

The rocker conflict was between two overlapping changes: one renamed the
->vport member to ->pport, and the other made the bitmask expression use
'1ULL' instead of plain '1'.

Signed-off-by: David S. Miller <davem@davemloft.net>
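For background on why the '1ULL' spelling matters: a plain '1' has type int
(32 bits), so shifting it by 32 or more bit positions, for example to build a
mask for a high-numbered port, is undefined behaviour in C and truncates on
common targets. A minimal illustrative sketch, not taken from rocker.c (the
pport value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int pport = 40;	/* hypothetical port index >= 32 */

	/*
	 * '1' has type int (32 bits), so '1 << pport' would be undefined
	 * behaviour for pport >= 32.  Promoting the constant to
	 * unsigned long long keeps the full 64-bit shift well defined.
	 */
	unsigned long long mask = 1ULL << pport;

	printf("mask for pport %u: %#llx\n", pport, mask);
	return 0;
}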
Diffstat (limited to 'arch/s390/kernel/cache.c')
-rw-r--r--	arch/s390/kernel/cache.c | 25 +++++++------------------
1 file changed, 7 insertions(+), 18 deletions(-)
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 632fa06ea162..0969d113b3d6 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -91,12 +91,9 @@ static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
 {
 	if (level >= CACHE_MAX_LEVEL)
 		return CACHE_TYPE_NOCACHE;
-
 	ci += level;
-
 	if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
 		return CACHE_TYPE_NOCACHE;
-
 	return cache_type_map[ci->type];
 }
 
@@ -111,23 +108,19 @@ static inline unsigned long ecag(int ai, int li, int ti)
 }
 
 static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
-			 enum cache_type type, unsigned int level)
+			 enum cache_type type, unsigned int level, int cpu)
 {
 	int ti, num_sets;
-	int cpu = smp_processor_id();
 
 	if (type == CACHE_TYPE_INST)
 		ti = CACHE_TI_INSTRUCTION;
 	else
 		ti = CACHE_TI_UNIFIED;
-
 	this_leaf->level = level + 1;
 	this_leaf->type = type;
 	this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
-	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
-						level, ti);
+	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
 	this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
-
 	num_sets = this_leaf->size / this_leaf->coherency_line_size;
 	num_sets /= this_leaf->ways_of_associativity;
 	this_leaf->number_of_sets = num_sets;
@@ -145,7 +138,6 @@ int init_cache_level(unsigned int cpu)
 
 	if (!this_cpu_ci)
 		return -EINVAL;
-
 	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
 	do {
 		ctype = get_cache_type(&ct.ci[0], level);
@@ -154,34 +146,31 @@ int init_cache_level(unsigned int cpu)
 		/* Separate instruction and data caches */
 		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
 	} while (++level < CACHE_MAX_LEVEL);
-
 	this_cpu_ci->num_levels = level;
 	this_cpu_ci->num_leaves = leaves;
-
 	return 0;
 }
 
 int populate_cache_leaves(unsigned int cpu)
 {
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 	unsigned int level, idx, pvt;
 	union cache_topology ct;
 	enum cache_type ctype;
-	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
 	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
 	for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
 	     idx < this_cpu_ci->num_leaves; idx++, level++) {
 		if (!this_leaf)
 			return -EINVAL;
-
 		pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
 		ctype = get_cache_type(&ct.ci[0], level);
 		if (ctype == CACHE_TYPE_SEPARATE) {
-			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
-			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
+			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
+			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
 		} else {
-			ci_leaf_init(this_leaf++, pvt, ctype, level);
+			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
 		}
 	}
 	return 0;