path: root/drivers/gpu/drm/radeon/r600_cs.c
Diffstat (limited to 'drivers/gpu/drm/radeon/r600_cs.c')
-rw-r--r--	drivers/gpu/drm/radeon/r600_cs.c	186
1 file changed, 156 insertions(+), 30 deletions(-)
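Note: the bulk of this change replaces direct ib_chunk->kdata[] indexing with radeon_get_ib_value(p, idx), drops the up-front memcpy of the IB in r600_cs_legacy(), frees per-chunk kpage[0]/kpage[1] buffers in r600_cs_parser_fini(), and checks radeon_cs_finish_pages() after parsing. Together these point at a paged command-stream reader: dwords are faulted in page-sized pieces instead of copying the whole chunk first. The sketch below is illustrative only, with hypothetical demo_* names, and is not the driver's radeon_get_ib_value() implementation; it just shows the shape of such a two-page accessor.

#include <stdint.h>

#define DEMO_PAGE_DWORDS 1024			/* one 4 KiB page of 32-bit words */

struct demo_ib_chunk {
	const uint32_t *user_data;		/* userspace command stream */
	uint32_t length_dw;
	uint32_t kpage[2][DEMO_PAGE_DWORDS];	/* two cached kernel-side pages */
	int kpage_idx[2];			/* which page each slot holds, -1 = empty */
};

/* Fetch one dword, faulting the containing page into one of the two slots. */
static uint32_t demo_get_ib_value(struct demo_ib_chunk *c, uint32_t idx)
{
	uint32_t page = idx / DEMO_PAGE_DWORDS;
	uint32_t off = idx % DEMO_PAGE_DWORDS;
	int slot;

	for (slot = 0; slot < 2; slot++)
		if (c->kpage_idx[slot] == (int)page)
			return c->kpage[slot][off];

	/* Miss: refill slot 0 (a real implementation tracks which slot to evict). */
	slot = 0;
	for (uint32_t i = 0; i < DEMO_PAGE_DWORDS; i++) {
		uint32_t src = page * DEMO_PAGE_DWORDS + i;
		c->kpage[slot][i] = src < c->length_dw ? c->user_data[src] : 0;
	}
	c->kpage_idx[slot] = page;
	return c->kpage[slot][off];
}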
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 33b89cd8743e..d28970db6a2d 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,7 +28,6 @@
 #include "drmP.h"
 #include "radeon.h"
 #include "r600d.h"
-#include "avivod.h"
 
 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
 					struct radeon_cs_reloc **cs_reloc);
@@ -57,7 +56,7 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
 			  idx, ib_chunk->length_dw);
 		return -EINVAL;
 	}
-	header = ib_chunk->kdata[idx];
+	header = radeon_get_ib_value(p, idx);
 	pkt->idx = idx;
 	pkt->type = CP_PACKET_GET_TYPE(header);
 	pkt->count = CP_PACKET_GET_COUNT(header);
@@ -98,7 +97,6 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
 					struct radeon_cs_reloc **cs_reloc)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_chunk *relocs_chunk;
 	struct radeon_cs_packet p3reloc;
 	unsigned idx;
@@ -109,7 +107,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}
 	*cs_reloc = NULL;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
 	if (r) {
@@ -121,7 +118,7 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
 			  p3reloc.idx);
 		return -EINVAL;
 	}
-	idx = ib_chunk->kdata[p3reloc.idx + 1];
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
 	if (idx >= relocs_chunk->length_dw) {
 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
 			  idx, relocs_chunk->length_dw);
@@ -146,7 +143,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
 					struct radeon_cs_reloc **cs_reloc)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_chunk *relocs_chunk;
 	struct radeon_cs_packet p3reloc;
 	unsigned idx;
@@ -157,7 +153,6 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}
 	*cs_reloc = NULL;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
 	if (r) {
@@ -169,7 +164,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
 			  p3reloc.idx);
 		return -EINVAL;
 	}
-	idx = ib_chunk->kdata[p3reloc.idx + 1];
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
 	if (idx >= relocs_chunk->length_dw) {
 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
 			  idx, relocs_chunk->length_dw);
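Note: r600_cs_packet_parse() above decodes the header dword it now fetches through radeon_get_ib_value() with CP_PACKET_GET_TYPE()/CP_PACKET_GET_COUNT(). The helpers below are a hedged sketch of the usual Radeon CP packet header layout (type in bits 31:30, dword count in bits 29:16, PACKET0 register dword-offset in bits 15:0, PACKET3 opcode in bits 15:8); the real macros live in r600d.h and the demo_* names here are hypothetical.

#include <stdint.h>

/* Packet type lives in bits 31:30, the count field in bits 29:16. */
static inline unsigned demo_cp_packet_type(uint32_t header)
{
	return (header >> 30) & 0x3;
}

static inline unsigned demo_cp_packet_count(uint32_t header)
{
	return (header >> 16) & 0x3fff;
}

/* PACKET0: bits 15:0 hold the starting register as a dword offset. */
static inline unsigned demo_cp_packet0_reg(uint32_t header)
{
	return (header & 0xffff) << 2;
}

/* PACKET3: bits 15:8 hold the opcode. */
static inline unsigned demo_cp_packet3_opcode(uint32_t header)
{
	return (header >> 8) & 0xff;
}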
@@ -181,13 +176,136 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
 	return 0;
 }
 
+/**
+ * r600_cs_packet_next_vline() - parse userspace VLINE packet
+ * @parser:	parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
+ */
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_cs_packet p3reloc, wait_reg_mem;
+	int crtc_id;
+	int r;
+	uint32_t header, h_idx, reg, wait_reg_mem_info;
+	volatile uint32_t *ib;
+
+	ib = p->ib->ptr;
+
+	/* parse the WAIT_REG_MEM */
+	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
+	if (r)
+		return r;
+
+	/* check its a WAIT_REG_MEM */
+	if (wait_reg_mem.type != PACKET_TYPE3 ||
+	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+		r = -EINVAL;
+		return r;
+	}
+
+	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+	/* bit 4 is reg (0) or mem (1) */
+	if (wait_reg_mem_info & 0x10) {
+		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+		r = -EINVAL;
+		return r;
+	}
+	/* waiting for value to be equal */
+	if ((wait_reg_mem_info & 0x7) != 0x3) {
+		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+		r = -EINVAL;
+		return r;
+	}
+	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
+		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+		r = -EINVAL;
+		return r;
+	}
+
+	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
+		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+		r = -EINVAL;
+		return r;
+	}
+
+	/* jump over the NOP */
+	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+	if (r)
+		return r;
+
+	h_idx = p->idx - 2;
+	p->idx += wait_reg_mem.count + 2;
+	p->idx += p3reloc.count + 2;
+
+	header = radeon_get_ib_value(p, h_idx);
+	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+	reg = header >> 2;
+	mutex_lock(&p->rdev->ddev->mode_config.mutex);
+	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_ERROR("cannot find crtc %d\n", crtc_id);
+		r = -EINVAL;
+		goto out;
+	}
+	crtc = obj_to_crtc(obj);
+	radeon_crtc = to_radeon_crtc(crtc);
+	crtc_id = radeon_crtc->crtc_id;
+
+	if (!crtc->enabled) {
+		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+		ib[h_idx + 2] = PACKET2(0);
+		ib[h_idx + 3] = PACKET2(0);
+		ib[h_idx + 4] = PACKET2(0);
+		ib[h_idx + 5] = PACKET2(0);
+		ib[h_idx + 6] = PACKET2(0);
+		ib[h_idx + 7] = PACKET2(0);
+		ib[h_idx + 8] = PACKET2(0);
+	} else if (crtc_id == 1) {
+		switch (reg) {
+		case AVIVO_D1MODE_VLINE_START_END:
+			header &= ~R600_CP_PACKET0_REG_MASK;
+			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
+			break;
+		default:
+			DRM_ERROR("unknown crtc reloc\n");
+			r = -EINVAL;
+			goto out;
+		}
+		ib[h_idx] = header;
+		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
+	}
+out:
+	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
+	return r;
+}
+
 static int r600_packet0_check(struct radeon_cs_parser *p,
 				struct radeon_cs_packet *pkt,
 				unsigned idx, unsigned reg)
 {
+	int r;
+
 	switch (reg) {
 	case AVIVO_D1MODE_VLINE_START_END:
-	case AVIVO_D2MODE_VLINE_START_END:
+		r = r600_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			return r;
+		}
 		break;
 	default:
 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
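Note: the index arithmetic in the new r600_cs_packet_parse_vline() (h_idx = p->idx - 2, the +1/+2/+5 reads from wait_reg_mem.idx, and crtc_id at h_idx + 2 + 7 + 1) implies a fixed dword layout for the userspace sequence described in the comment. The enum below spells that layout out as offsets from the PACKET0 header; it is a reading of the parser, not driver code, and the payload field names the parser does not itself check (address high, reference, poll interval) are assumptions based on the usual WAIT_REG_MEM packet format.

/* Offsets from h_idx (= p->idx - 2, the PACKET0 header). Illustration only. */
enum demo_vline_ib_layout {
	DEMO_VLINE_P0_HEADER   = 0,  /* PACKET0, VLINE_START_END register          */
	DEMO_VLINE_P0_VALUE    = 1,  /* start/end scanline value                   */
	DEMO_WRM_HEADER        = 2,  /* PACKET3 WAIT_REG_MEM, 6 payload dwords     */
	DEMO_WRM_INFO          = 3,  /* bits 2:0 function (3 = equal), bit 4 reg/mem */
	DEMO_WRM_POLL_REG      = 4,  /* dword offset; << 2 must be D1MODE_VLINE_STATUS */
	DEMO_WRM_POLL_ADDR_HI  = 5,  /* assumed field name                         */
	DEMO_WRM_REFERENCE     = 6,  /* assumed field name                         */
	DEMO_WRM_MASK          = 7,  /* must equal AVIVO_D1MODE_VLINE_STAT         */
	DEMO_WRM_POLL_INTERVAL = 8,  /* assumed field name                         */
	DEMO_NOP_HEADER        = 9,  /* PACKET3 NOP carrying the relocation        */
	DEMO_NOP_CRTC_ID       = 10, /* h_idx + 2 + 7 + 1: where crtc_id is read   */
};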
@@ -218,17 +336,18 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
 static int r600_packet3_check(struct radeon_cs_parser *p,
 				struct radeon_cs_packet *pkt)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_reloc *reloc;
 	volatile u32 *ib;
 	unsigned idx;
 	unsigned i;
 	unsigned start_reg, end_reg, reg;
 	int r;
+	u32 idx_value;
 
 	ib = p->ib->ptr;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	idx = pkt->idx + 1;
+	idx_value = radeon_get_ib_value(p, idx);
+
 	switch (pkt->opcode) {
 	case PACKET3_START_3D_CMDBUF:
 		if (p->family >= CHIP_RV770 || pkt->count) {
@@ -259,8 +378,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad DRAW_INDEX\n");
 			return -EINVAL;
 		}
-		ib[idx+0] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-		ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
 		break;
 	case PACKET3_DRAW_INDEX_AUTO:
 		if (pkt->count != 1) {
@@ -281,14 +400,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		/* bit 4 is reg (0) or mem (1) */
-		if (ib_chunk->kdata[idx+0] & 0x10) {
+		if (idx_value & 0x10) {
 			r = r600_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				DRM_ERROR("bad WAIT_REG_MEM\n");
 				return -EINVAL;
 			}
 			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-			ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
 		}
 		break;
 	case PACKET3_SURFACE_SYNC:
@@ -297,8 +416,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		/* 0xffffffff/0x0 is flush all cache flag */
-		if (ib_chunk->kdata[idx+1] != 0xffffffff ||
-		    ib_chunk->kdata[idx+2] != 0) {
+		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+		    radeon_get_ib_value(p, idx + 2) != 0) {
 			r = r600_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				DRM_ERROR("bad SURFACE_SYNC\n");
@@ -319,7 +438,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 				return -EINVAL;
 			}
 			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-			ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
 		}
 		break;
 	case PACKET3_EVENT_WRITE_EOP:
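Note: several of the hunks above switch the high-address dword from |= to +=, while keeping the pattern of folding a buffer object's GPU offset into a 32-bit low part and an 8-bit high part. A standalone sketch of that patching pattern (illustrative only, not driver code):

#include <stdint.h>

/* Fold a 64-bit GPU offset into the two address dwords of a packet:
 * the low 32 bits accumulate into ib[idx+1], bits 39:32 into the low
 * byte of ib[idx+2]. */
static void demo_patch_reloc(uint32_t *ib, unsigned idx, uint64_t gpu_offset)
{
	ib[idx + 1] += (uint32_t)(gpu_offset & 0xffffffff);
	ib[idx + 2] += (uint32_t)(gpu_offset >> 32) & 0xff;
}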
@@ -333,10 +452,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-		ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
 		break;
 	case PACKET3_SET_CONFIG_REG:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
@@ -356,7 +475,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_SET_CONTEXT_REG:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
 		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
@@ -421,7 +540,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad SET_RESOURCE\n");
 			return -EINVAL;
 		}
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_RESOURCE_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
 		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
@@ -430,7 +549,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		for (i = 0; i < (pkt->count / 7); i++) {
-			switch (G__SQ_VTX_CONSTANT_TYPE(ib[idx+(i*7)+6+1])) {
+			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
 			case SQ_TEX_VTX_VALID_TEXTURE:
 				/* tex base */
 				r = r600_cs_packet_next_reloc(p, &reloc);
@@ -455,7 +574,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 					return -EINVAL;
 				}
 				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
-				ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
 				break;
 			case SQ_TEX_VTX_INVALID_TEXTURE:
 			case SQ_TEX_VTX_INVALID_BUFFER:
@@ -466,7 +585,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_SET_ALU_CONST:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_ALU_CONST_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
 		    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
@@ -476,7 +595,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_SET_BOOL_CONST:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
 		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
@@ -486,7 +605,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_SET_LOOP_CONST:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
 		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
@@ -496,7 +615,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_SET_CTL_CONST:
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_CTL_CONST_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
 		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
@@ -510,7 +629,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad SET_SAMPLER\n");
 			return -EINVAL;
 		}
-		start_reg = (ib[idx+0] << 2) + PACKET3_SET_SAMPLER_OFFSET;
+		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
 		end_reg = 4 * pkt->count + start_reg - 4;
 		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
 		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
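Note: each SET_*_REG/SET_RESOURCE/SET_SAMPLER case above now derives start_reg from idx_value instead of re-reading ib[idx+0], then applies the same bounds check. A standalone sketch of that check (illustrative only; the real *_OFFSET/*_END bounds come from r600d.h):

#include <stdbool.h>

/* The packet's first payload dword is a dword offset from a register block
 * base; the window it covers must stay inside that block. */
static bool demo_reg_range_ok(unsigned idx_value, unsigned pkt_count,
			      unsigned block_offset, unsigned block_end)
{
	unsigned start_reg = (idx_value << 2) + block_offset;
	unsigned end_reg = 4 * pkt_count + start_reg - 4;

	return start_reg >= block_offset &&
	       start_reg < block_end &&
	       end_reg < block_end;
}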
@@ -602,6 +721,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	kfree(parser->relocs);
 	for (i = 0; i < parser->nchunks; i++) {
 		kfree(parser->chunks[i].kdata);
+		kfree(parser->chunks[i].kpage[0]);
+		kfree(parser->chunks[i].kpage[1]);
 	}
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
@@ -639,7 +760,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 	 * uncached). */
 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
 	parser.ib->length_dw = ib_chunk->length_dw;
-	memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
 	*l = parser.ib->length_dw;
 	r = r600_cs_parse(&parser);
 	if (r) {
@@ -647,6 +767,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 		r600_cs_parser_fini(&parser, r);
 		return r;
 	}
+	r = radeon_cs_finish_pages(&parser);
+	if (r) {
+		DRM_ERROR("Invalid command stream !\n");
+		r600_cs_parser_fini(&parser, r);
+		return r;
+	}
 	r600_cs_parser_fini(&parser, r);
 	return r;
 }
s="hl opt">], *envp[3]; int i; if (!pathbuf) return; i = 0; argv[i++] = "/sbin/cpuset_release_agent"; argv[i++] = (char *)pathbuf; argv[i] = NULL; i = 0; /* minimal command environment */ envp[i++] = "HOME=/"; envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; envp[i] = NULL; call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); kfree(pathbuf); } /* * Either cs->count of using tasks transitioned to zero, or the * cs->children list of child cpusets just became empty. If this * cs is notify_on_release() and now both the user count is zero and * the list of children is empty, prepare cpuset path in a kmalloc'd * buffer, to be returned via ppathbuf, so that the caller can invoke * cpuset_release_agent() with it later on, once manage_mutex is dropped. * Call here with manage_mutex held. * * This check_for_release() routine is responsible for kmalloc'ing * pathbuf. The above cpuset_release_agent() is responsible for * kfree'ing pathbuf. The caller of these routines is responsible * for providing a pathbuf pointer, initialized to NULL, then * calling check_for_release() with manage_mutex held and the address * of the pathbuf pointer, then dropping manage_mutex, then calling * cpuset_release_agent() with pathbuf, as set by check_for_release(). */ static void check_for_release(struct cpuset *cs, char **ppathbuf) { if (notify_on_release(cs) && atomic_read(&cs->count) == 0 && list_empty(&cs->children)) { char *buf; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return; if (cpuset_path(cs, buf, PAGE_SIZE) < 0) kfree(buf); else *ppathbuf = buf; } } /* * Return in *pmask the portion of a cpusets's cpus_allowed that * are online. If none are online, walk up the cpuset hierarchy * until we find one that does have some online cpus. If we get * all the way to the top and still haven't found any online cpus, * return cpu_online_map. Or if passed a NULL cs from an exit'ing * task, return cpu_online_map. * * One way or another, we guarantee to return some non-empty subset * of cpu_online_map. * * Call with callback_mutex held. */ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) { while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map)) cs = cs->parent; if (cs) cpus_and(*pmask, cs->cpus_allowed, cpu_online_map); else *pmask = cpu_online_map; BUG_ON(!cpus_intersects(*pmask, cpu_online_map)); } /* * Return in *pmask the portion of a cpusets's mems_allowed that * are online. If none are online, walk up the cpuset hierarchy * until we find one that does have some online mems. If we get * all the way to the top and still haven't found any online mems, * return node_online_map. * * One way or another, we guarantee to return some non-empty subset * of node_online_map. * * Call with callback_mutex held. */ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) { while (cs && !nodes_intersects(cs->mems_allowed, node_online_map)) cs = cs->parent; if (cs) nodes_and(*pmask, cs->mems_allowed, node_online_map); else *pmask = node_online_map; BUG_ON(!nodes_intersects(*pmask, node_online_map)); } /** * cpuset_update_task_memory_state - update task memory placement * * If the current tasks cpusets mems_allowed changed behind our * backs, update current->mems_allowed, mems_generation and task NUMA * mempolicy to the new value. * * Task mempolicy is updated by rebinding it relative to the * current->cpuset if a task has its memory placement changed. * Do not call this routine if in_interrupt(). * * Call without callback_mutex or task_lock() held. 
May be * called with or without manage_mutex held. Thanks in part to * 'the_top_cpuset_hack', the tasks cpuset pointer will never * be NULL. This routine also might acquire callback_mutex and * current->mm->mmap_sem during call. * * Reading current->cpuset->mems_generation doesn't need task_lock * to guard the current->cpuset derefence, because it is guarded * from concurrent freeing of current->cpuset by attach_task(), * using RCU. * * The rcu_dereference() is technically probably not needed, * as I don't actually mind if I see a new cpuset pointer but * an old value of mems_generation. However this really only * matters on alpha systems using cpusets heavily. If I dropped * that rcu_dereference(), it would save them a memory barrier. * For all other arch's, rcu_dereference is a no-op anyway, and for * alpha systems not using cpusets, another planned optimization, * avoiding the rcu critical section for tasks in the root cpuset * which is statically allocated, so can't vanish, will make this * irrelevant. Better to use RCU as intended, than to engage in * some cute trick to save a memory barrier that is impossible to * test, for alpha systems using cpusets heavily, which might not * even exist. * * This routine is needed to update the per-task mems_allowed data, * within the tasks context, when it is trying to allocate memory * (in various mm/mempolicy.c routines) and notices that some other * task has been modifying its cpuset. */ void cpuset_update_task_memory_state(void) { int my_cpusets_mem_gen; struct task_struct *tsk = current; struct cpuset *cs; if (tsk->cpuset == &top_cpuset) { /* Don't need rcu for top_cpuset. It's never freed. */ my_cpusets_mem_gen = top_cpuset.mems_generation; } else { rcu_read_lock(); cs = rcu_dereference(tsk->cpuset); my_cpusets_mem_gen = cs->mems_generation; rcu_read_unlock(); } if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) { mutex_lock(&callback_mutex); task_lock(tsk); cs = tsk->cpuset; /* Maybe changed when task not locked */ guarantee_online_mems(cs, &tsk->mems_allowed); tsk->cpuset_mems_generation = cs->mems_generation; if (is_spread_page(cs)) tsk->flags |= PF_SPREAD_PAGE; else tsk->flags &= ~PF_SPREAD_PAGE; if (is_spread_slab(cs)) tsk->flags |= PF_SPREAD_SLAB; else tsk->flags &= ~PF_SPREAD_SLAB; task_unlock(tsk); mutex_unlock(&callback_mutex); mpol_rebind_task(tsk, &tsk->mems_allowed); } } /* * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? * * One cpuset is a subset of another if all its allowed CPUs and * Memory Nodes are a subset of the other, and its exclusive flags * are only set if the other's are set. Call holding manage_mutex. */ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) { return cpus_subset(p->cpus_allowed, q->cpus_allowed) && nodes_subset(p->mems_allowed, q->mems_allowed) && is_cpu_exclusive(p) <= is_cpu_exclusive(q) && is_mem_exclusive(p) <= is_mem_exclusive(q); } /* * validate_change() - Used to validate that any proposed cpuset change * follows the structural rules for cpusets. * * If we replaced the flag and mask values of the current cpuset * (cur) with those values in the trial cpuset (trial), would * our various subset and exclusive rules still be valid? Presumes * manage_mutex held. * * 'cur' is the address of an actual, in-use cpuset. Operations * such as list traversal that depend on the actual address of the * cpuset in the list must use cur below, not trial. 
* * 'trial' is the address of bulk structure copy of cur, with * perhaps one or more of the fields cpus_allowed, mems_allowed, * or flags changed to new, trial values. * * Return 0 if valid, -errno if not. */ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) { struct cpuset *c, *par; /* Each of our child cpusets must be a subset of us */ list_for_each_entry(c, &cur->children, sibling) { if (!is_cpuset_subset(c, trial)) return -EBUSY; } /* Remaining checks don't apply to root cpuset */ if (cur == &top_cpuset) return 0; par = cur->parent; /* We must be a subset of our parent cpuset */ if (!is_cpuset_subset(trial, par)) return -EACCES; /* If either I or some sibling (!= me) is exclusive, we can't overlap */ list_for_each_entry(c, &par->children, sibling) { if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && c != cur && cpus_intersects(trial->cpus_allowed, c->cpus_allowed)) return -EINVAL; if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && c != cur && nodes_intersects(trial->mems_allowed, c->mems_allowed)) return -EINVAL; } return 0; } /* * For a given cpuset cur, partition the system as follows * a. All cpus in the parent cpuset's cpus_allowed that are not part of any * exclusive child cpusets * b. All cpus in the current cpuset's cpus_allowed that are not part of any * exclusive child cpusets * Build these two partitions by calling partition_sched_domains * * Call with manage_mutex held. May nest a call to the * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. * Must not be called holding callback_mutex, because we must * not call lock_cpu_hotplug() while holding callback_mutex. */ static void update_cpu_domains(struct cpuset *cur) { struct cpuset *c, *par = cur->parent; cpumask_t pspan, cspan; if (par == NULL || cpus_empty(cur->cpus_allowed)) return; /* * Get all cpus from parent's cpus_allowed not part of exclusive * children */ pspan = par->cpus_allowed; list_for_each_entry(c, &par->children, sibling) { if (is_cpu_exclusive(c)) cpus_andnot(pspan, pspan, c->cpus_allowed); } if (!is_cpu_exclusive(cur)) { cpus_or(pspan, pspan, cur->cpus_allowed); if (cpus_equal(pspan, cur->cpus_allowed)) return; cspan = CPU_MASK_NONE; } else { if (cpus_empty(pspan)) return; cspan = cur->cpus_allowed; /* * Get all cpus from current cpuset's cpus_allowed not part * of exclusive children */ list_for_each_entry(c, &cur->children, sibling) { if (is_cpu_exclusive(c)) cpus_andnot(cspan, cspan, c->cpus_allowed); } } lock_cpu_hotplug(); partition_sched_domains(&pspan, &cspan); unlock_cpu_hotplug(); } /* * Call with manage_mutex held. May take callback_mutex during call. */ static int update_cpumask(struct cpuset *cs, char *buf) { struct cpuset trialcs; int retval, cpus_unchanged; /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ if (cs == &top_cpuset) return -EACCES; trialcs = *cs; /* * We allow a cpuset's cpus_allowed to be empty; if it has attached * tasks, we'll catch it later when we validate the change and return * -ENOSPC. */ if (!buf[0] || (buf[0] == '\n' && !buf[1])) { cpus_clear(trialcs.cpus_allowed); } else { retval = cpulist_parse(buf, trialcs.cpus_allowed); if (retval < 0) return retval; } cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map); /* cpus_allowed cannot be empty for a cpuset with attached tasks. 
*/ if (atomic_read(&cs->count) && cpus_empty(trialcs.cpus_allowed)) return -ENOSPC; retval = validate_change(cs, &trialcs); if (retval < 0) return retval; cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed); mutex_lock(&callback_mutex); cs->cpus_allowed = trialcs.cpus_allowed; mutex_unlock(&callback_mutex); if (is_cpu_exclusive(cs) && !cpus_unchanged) update_cpu_domains(cs); return 0; } /* * cpuset_migrate_mm * * Migrate memory region from one set of nodes to another. * * Temporarilly set tasks mems_allowed to target nodes of migration, * so that the migration code can allocate pages on these nodes. * * Call holding manage_mutex, so our current->cpuset won't change * during this call, as manage_mutex holds off any attach_task() * calls. Therefore we don't need to take task_lock around the * call to guarantee_online_mems(), as we know no one is changing * our tasks cpuset. * * Hold callback_mutex around the two modifications of our tasks * mems_allowed to synchronize with cpuset_mems_allowed(). * * While the mm_struct we are migrating is typically from some * other task, the task_struct mems_allowed that we are hacking * is for our current task, which must allocate new pages for that * migrating memory region. * * We call cpuset_update_task_memory_state() before hacking * our tasks mems_allowed, so that we are assured of being in * sync with our tasks cpuset, and in particular, callbacks to * cpuset_update_task_memory_state() from nested page allocations * won't see any mismatch of our cpuset and task mems_generation * values, so won't overwrite our hacked tasks mems_allowed * nodemask. */ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to) { struct task_struct *tsk = current; cpuset_update_task_memory_state(); mutex_lock(&callback_mutex); tsk->mems_allowed = *to; mutex_unlock(&callback_mutex); do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); mutex_lock(&callback_mutex); guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed); mutex_unlock(&callback_mutex); } /* * Handle user request to change the 'mems' memory placement * of a cpuset. Needs to validate the request, update the * cpusets mems_allowed and mems_generation, and for each * task in the cpuset, rebind any vma mempolicies and if * the cpuset is marked 'memory_migrate', migrate the tasks * pages to the new memory. * * Call with manage_mutex held. May take callback_mutex during call. * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, * lock each such tasks mm->mmap_sem, scan its vma's and rebind * their mempolicies to the cpusets new mems_allowed. */ static int update_nodemask(struct cpuset *cs, char *buf) { struct cpuset trialcs; nodemask_t oldmem; struct task_struct *g, *p; struct mm_struct **mmarray; int i, n, ntasks; int migrate; int fudge; int retval; /* top_cpuset.mems_allowed tracks node_online_map; it's read-only */ if (cs == &top_cpuset) return -EACCES; trialcs = *cs; /* * We allow a cpuset's mems_allowed to be empty; if it has attached * tasks, we'll catch it later when we validate the change and return * -ENOSPC. */ if (!buf[0] || (buf[0] == '\n' && !buf[1])) { nodes_clear(trialcs.mems_allowed); } else { retval = nodelist_parse(buf, trialcs.mems_allowed); if (retval < 0) goto done; } nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map); oldmem = cs->mems_allowed; if (nodes_equal(oldmem, trialcs.mems_allowed)) { retval = 0; /* Too easy - nothing to do */ goto done; } /* mems_allowed cannot be empty for a cpuset with attached tasks. 
*/ if (atomic_read(&cs->count) && nodes_empty(trialcs.mems_allowed)) { retval = -ENOSPC; goto done; } retval = validate_change(cs, &trialcs); if (retval < 0) goto done; mutex_lock(&callback_mutex); cs->mems_allowed = trialcs.mems_allowed; cs->mems_generation = cpuset_mems_generation++; mutex_unlock(&callback_mutex); set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */ fudge = 10; /* spare mmarray[] slots */ fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */ retval = -ENOMEM; /* * Allocate mmarray[] to hold mm reference for each task * in cpuset cs. Can't kmalloc GFP_KERNEL while holding * tasklist_lock. We could use GFP_ATOMIC, but with a * few more lines of code, we can retry until we get a big * enough mmarray[] w/o using GFP_ATOMIC. */ while (1) { ntasks = atomic_read(&cs->count); /* guess */ ntasks += fudge; mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL); if (!mmarray) goto done; read_lock(&tasklist_lock); /* block fork */ if (atomic_read(&cs->count) <= ntasks) break; /* got enough */ read_unlock(&tasklist_lock); /* try again */ kfree(mmarray); } n = 0; /* Load up mmarray[] with mm reference for each task in cpuset. */ do_each_thread(g, p) { struct mm_struct *mm; if (n >= ntasks) { printk(KERN_WARNING "Cpuset mempolicy rebind incomplete.\n"); continue; } if (p->cpuset != cs) continue; mm = get_task_mm(p); if (!mm) continue; mmarray[n++] = mm; } while_each_thread(g, p); read_unlock(&tasklist_lock); /* * Now that we've dropped the tasklist spinlock, we can * rebind the vma mempolicies of each mm in mmarray[] to their * new cpuset, and release that mm. The mpol_rebind_mm() * call takes mmap_sem, which we couldn't take while holding * tasklist_lock. Forks can happen again now - the mpol_copy() * cpuset_being_rebound check will catch such forks, and rebind * their vma mempolicies too. Because we still hold the global * cpuset manage_mutex, we know that no other rebind effort will * be contending for the global variable cpuset_being_rebound. * It's ok if we rebind the same mm twice; mpol_rebind_mm() * is idempotent. Also migrate pages in each mm to new nodes. */ migrate = is_memory_migrate(cs); for (i = 0; i < n; i++) { struct mm_struct *mm = mmarray[i]; mpol_rebind_mm(mm, &cs->mems_allowed); if (migrate) cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed); mmput(mm); } /* We're done rebinding vma's to this cpusets new mems_allowed. */ kfree(mmarray); set_cpuset_being_rebound(NULL); retval = 0; done: return retval; } /* * Call with manage_mutex held. */ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) { if (simple_strtoul(buf, NULL, 10) != 0) cpuset_memory_pressure_enabled = 1; else cpuset_memory_pressure_enabled = 0; return 0; } /* * update_flag - read a 0 or a 1 in a file and update associated flag * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE, * CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE, * CS_SPREAD_PAGE, CS_SPREAD_SLAB) * cs: the cpuset to update * buf: the buffer where we read the 0 or 1 * * Call with manage_mutex held. 
*/ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) { int turning_on; struct cpuset trialcs; int err, cpu_exclusive_changed; turning_on = (simple_strtoul(buf, NULL, 10) != 0); trialcs = *cs; if (turning_on) set_bit(bit, &trialcs.flags); else clear_bit(bit, &trialcs.flags); err = validate_change(cs, &trialcs); if (err < 0) return err; cpu_exclusive_changed = (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs)); mutex_lock(&callback_mutex); cs->flags = trialcs.flags; mutex_unlock(&callback_mutex); if (cpu_exclusive_changed) update_cpu_domains(cs); return 0; } /* * Frequency meter - How fast is some event occurring? * * These routines manage a digitally filtered, constant time based, * event frequency meter. There are four routines: * fmeter_init() - initialize a frequency meter. * fmeter_markevent() - called each time the event happens. * fmeter_getrate() - returns the recent rate of such events. * fmeter_update() - internal routine used to update fmeter. * * A common data structure is passed to each of these routines, * which is used to keep track of the state required to manage the * frequency meter and its digital filter. * * The filter works on the number of events marked per unit time. * The filter is single-pole low-pass recursive (IIR). The time unit * is 1 second. Arithmetic is done using 32-bit integers scaled to * simulate 3 decimal digits of precision (multiplied by 1000). * * With an FM_COEF of 933, and a time base of 1 second, the filter * has a half-life of 10 seconds, meaning that if the events quit * happening, then the rate returned from the fmeter_getrate() * will be cut in half each 10 seconds, until it converges to zero. * * It is not worth doing a real infinitely recursive filter. If more * than FM_MAXTICKS ticks have elapsed since the last filter event, * just compute FM_MAXTICKS ticks worth, by which point the level * will be stable. * * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid * arithmetic overflow in the fmeter_update() routine. * * Given the simple 32 bit integer arithmetic used, this meter works * best for reporting rates between one per millisecond (msec) and * one per 32 (approx) seconds. At constant rates faster than one * per msec it maxes out at values just under 1,000,000. At constant * rates between one per msec, and one per second it will stabilize * to a value N*1000, where N is the rate of events per second. * At constant rates between one per second and one per 32 seconds, * it will be choppy, moving up on the seconds that have an event, * and then decaying until the next event. At rates slower than * about one in 32 seconds, it decays all the way back to zero between * each event. 
*/ #define FM_COEF 933 /* coefficient for half-life of 10 secs */ #define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */ #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ #define FM_SCALE 1000 /* faux fixed point scale */ /* Initialize a frequency meter */ static void fmeter_init(struct fmeter *fmp) { fmp->cnt = 0; fmp->val = 0; fmp->time = 0; spin_lock_init(&fmp->lock); } /* Internal meter update - process cnt events and update value */ static void fmeter_update(struct fmeter *fmp) { time_t now = get_seconds(); time_t ticks = now - fmp->time; if (ticks == 0) return; ticks = min(FM_MAXTICKS, ticks); while (ticks-- > 0) fmp->val = (FM_COEF * fmp->val) / FM_SCALE; fmp->time = now; fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; fmp->cnt = 0; } /* Process any previous ticks, then bump cnt by one (times scale). */ static void fmeter_markevent(struct fmeter *fmp) { spin_lock(&fmp->lock); fmeter_update(fmp); fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); spin_unlock(&fmp->lock); } /* Process any previous ticks, then return current value. */ static int fmeter_getrate(struct fmeter *fmp) { int val; spin_lock(&fmp->lock); fmeter_update(fmp); val = fmp->val; spin_unlock(&fmp->lock); return val; } /* * Attack task specified by pid in 'pidbuf' to cpuset 'cs', possibly * writing the path of the old cpuset in 'ppathbuf' if it needs to be * notified on release. * * Call holding manage_mutex. May take callback_mutex and task_lock of * the task 'pid' during call. */ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) { pid_t pid; struct task_struct *tsk; struct cpuset *oldcs; cpumask_t cpus; nodemask_t from, to; struct mm_struct *mm; int retval; if (sscanf(pidbuf, "%d", &pid) != 1) return -EIO; if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) return -ENOSPC; if (pid) { read_lock(&tasklist_lock); tsk = find_task_by_pid(pid); if (!tsk || tsk->flags & PF_EXITING) { read_unlock(&tasklist_lock); return -ESRCH; } get_task_struct(tsk); read_unlock(&tasklist_lock); if ((current->euid) && (current->euid != tsk->uid) && (current->euid != tsk->suid)) { put_task_struct(tsk); return -EACCES; } } else { tsk = current; get_task_struct(tsk); } retval = security_task_setscheduler(tsk, 0, NULL); if (retval) { put_task_struct(tsk); return retval; } mutex_lock(&callback_mutex); task_lock(tsk); oldcs = tsk->cpuset; /* * After getting 'oldcs' cpuset ptr, be sure still not exiting. * If 'oldcs' might be the top_cpuset due to the_top_cpuset_hack * then fail this attach_task(), to avoid breaking top_cpuset.count. 
*/ if (tsk->flags & PF_EXITING) { task_unlock(tsk); mutex_unlock(&callback_mutex); put_task_struct(tsk); return -ESRCH; } atomic_inc(&cs->count); rcu_assign_pointer(tsk->cpuset, cs); task_unlock(tsk); guarantee_online_cpus(cs, &cpus); set_cpus_allowed(tsk, cpus); from = oldcs->mems_allowed; to = cs->mems_allowed; mutex_unlock(&callback_mutex); mm = get_task_mm(tsk); if (mm) { mpol_rebind_mm(mm, &to); if (is_memory_migrate(cs)) cpuset_migrate_mm(mm, &from, &to); mmput(mm); } put_task_struct(tsk); synchronize_rcu(); if (atomic_dec_and_test(&oldcs->count)) check_for_release(oldcs, ppathbuf); return 0; } /* The various types of files and directories in a cpuset file system */ typedef enum { FILE_ROOT, FILE_DIR, FILE_MEMORY_MIGRATE, FILE_CPULIST, FILE_MEMLIST, FILE_CPU_EXCLUSIVE, FILE_MEM_EXCLUSIVE, FILE_NOTIFY_ON_RELEASE, FILE_MEMORY_PRESSURE_ENABLED, FILE_MEMORY_PRESSURE, FILE_SPREAD_PAGE, FILE_SPREAD_SLAB, FILE_TASKLIST, } cpuset_filetype_t; static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf, size_t nbytes, loff_t *unused_ppos) { struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent); struct cftype *cft = __d_cft(file->f_path.dentry); cpuset_filetype_t type = cft->private; char *buffer; char *pathbuf = NULL; int retval = 0; /* Crude upper limit on largest legitimate cpulist user might write. */ if (nbytes > 100 + 6 * max(NR_CPUS, MAX_NUMNODES)) return -E2BIG; /* +1 for nul-terminator */ if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0) return -ENOMEM; if (copy_from_user(buffer, userbuf, nbytes)) { retval = -EFAULT; goto out1; } buffer[nbytes] = 0; /* nul-terminate */ mutex_lock(&manage_mutex); if (is_removed(cs)) { retval = -ENODEV; goto out2; } switch (type) { case FILE_CPULIST: retval = update_cpumask(cs, buffer); break; case FILE_MEMLIST: retval = update_nodemask(cs, buffer); break; case FILE_CPU_EXCLUSIVE: retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer); break; case FILE_MEM_EXCLUSIVE: retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer); break; case FILE_NOTIFY_ON_RELEASE: retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer); break; case FILE_MEMORY_MIGRATE: retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer); break; case FILE_MEMORY_PRESSURE_ENABLED: retval = update_memory_pressure_enabled(cs, buffer); break; case FILE_MEMORY_PRESSURE: retval = -EACCES; break; case FILE_SPREAD_PAGE: retval = update_flag(CS_SPREAD_PAGE, cs, buffer); cs->mems_generation = cpuset_mems_generation++; break; case FILE_SPREAD_SLAB: retval = update_flag(CS_SPREAD_SLAB, cs, buffer); cs->mems_generation = cpuset_mems_generation++; break; case FILE_TASKLIST: retval = attach_task(cs, buffer, &pathbuf); break; default: retval = -EINVAL; goto out2; } if (retval == 0) retval = nbytes; out2: mutex_unlock(&manage_mutex); cpuset_release_agent(pathbuf); out1: kfree(buffer); return retval; } static ssize_t cpuset_file_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { ssize_t retval = 0; struct cftype *cft = __d_cft(file->f_path.dentry); if (!cft) return -ENODEV; /* special function ? */ if (cft->write) retval = cft->write(file, buf, nbytes, ppos); else retval = cpuset_common_file_write(file, buf, nbytes, ppos); return retval; } /* * These ascii lists should be read in a single call, by using a user * buffer large enough to hold the entire map. If read in smaller * chunks, there is no guarantee of atomicity. 
Since the display format * used, list of ranges of sequential numbers, is variable length, * and since these maps can change value dynamically, one could read * gibberish by doing partial reads while a list was changing. * A single large read to a buffer that crosses a page boundary is * ok, because the result being copied to user land is not recomputed * across a page fault. */ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) { cpumask_t mask; mutex_lock(&callback_mutex); mask = cs->cpus_allowed; mutex_unlock(&callback_mutex); return cpulist_scnprintf(page, PAGE_SIZE, mask); } static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) { nodemask_t mask; mutex_lock(&callback_mutex); mask = cs->mems_allowed; mutex_unlock(&callback_mutex); return nodelist_scnprintf(page, PAGE_SIZE, mask); } static ssize_t cpuset_common_file_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct cftype *cft = __d_cft(file->f_path.dentry); struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent); cpuset_filetype_t type = cft->private; char *page; ssize_t retval = 0; char *s; if (!(page = (char *)__get_free_page(GFP_KERNEL))) return -ENOMEM; s = page; switch (type) { case FILE_CPULIST: s += cpuset_sprintf_cpulist(s, cs); break; case FILE_MEMLIST: s += cpuset_sprintf_memlist(s, cs); break; case FILE_CPU_EXCLUSIVE: *s++ = is_cpu_exclusive(cs) ? '1' : '0'; break; case FILE_MEM_EXCLUSIVE: *s++ = is_mem_exclusive(cs) ? '1' : '0'; break; case FILE_NOTIFY_ON_RELEASE: *s++ = notify_on_release(cs) ? '1' : '0'; break; case FILE_MEMORY_MIGRATE: *s++ = is_memory_migrate(cs) ? '1' : '0'; break; case FILE_MEMORY_PRESSURE_ENABLED: *s++ = cpuset_memory_pressure_enabled ? '1' : '0'; break; case FILE_MEMORY_PRESSURE: s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter)); break; case FILE_SPREAD_PAGE: *s++ = is_spread_page(cs) ? '1' : '0'; break; case FILE_SPREAD_SLAB: *s++ = is_spread_slab(cs) ? '1' : '0'; break; default: retval = -EINVAL; goto out; } *s++ = '\n'; retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page); out: free_page((unsigned long)page); return retval; } static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { ssize_t retval = 0; struct cftype *cft = __d_cft(file->f_path.dentry); if (!cft) return -ENODEV; /* special function ? */ if (cft->read) retval = cft->read(file, buf, nbytes, ppos); else retval = cpuset_common_file_read(file, buf, nbytes, ppos); return retval; } static int cpuset_file_open(struct inode *inode, struct file *file) { int err; struct cftype *cft; err = generic_file_open(inode, file); if (err) return err; cft = __d_cft(file->f_path.dentry); if (!cft) return -ENODEV; if (cft->open) err = cft->open(inode, file); else err = 0; return err; } static int cpuset_file_release(struct inode *inode, struct file *file) { struct cftype *cft = __d_cft(file->f_path.dentry); if (cft->release) return cft->release(inode, file); return 0; } /* * cpuset_rename - Only allow simple rename of directories in place. 
*/ static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { if (!S_ISDIR(old_dentry->d_inode->i_mode)) return -ENOTDIR; if (new_dentry->d_inode) return -EEXIST; if (old_dir != new_dir) return -EIO; return simple_rename(old_dir, old_dentry, new_dir, new_dentry); } static const struct file_operations cpuset_file_operations = { .read = cpuset_file_read, .write = cpuset_file_write, .llseek = generic_file_llseek, .open = cpuset_file_open, .release = cpuset_file_release, }; static const struct inode_operations cpuset_dir_inode_operations = { .lookup = simple_lookup, .mkdir = cpuset_mkdir, .rmdir = cpuset_rmdir, .rename = cpuset_rename, }; static int cpuset_create_file(struct dentry *dentry, int mode) { struct inode *inode; if (!dentry) return -ENOENT; if (dentry->d_inode) return -EEXIST; inode = cpuset_new_inode(mode); if (!inode) return -ENOMEM; if (S_ISDIR(mode)) { inode->i_op = &cpuset_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); } else if (S_ISREG(mode)) { inode->i_size = 0; inode->i_fop = &cpuset_file_operations; } d_instantiate(dentry, inode); dget(dentry); /* Extra count - pin the dentry in core */ return 0; } /* * cpuset_create_dir - create a directory for an object. * cs: the cpuset we create the directory for. * It must have a valid ->parent field * And we are going to fill its ->dentry field. * name: The name to give to the cpuset directory. Will be copied. * mode: mode to set on new directory. */ static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode) { struct dentry *dentry = NULL; struct dentry *parent; int error = 0; parent = cs->parent->dentry; dentry = cpuset_get_dentry(parent, name); if (IS_ERR(dentry)) return PTR_ERR(dentry); error = cpuset_create_file(dentry, S_IFDIR | mode); if (!error) { dentry->d_fsdata = cs; inc_nlink(parent->d_inode); cs->dentry = dentry; } dput(dentry); return error; } static int cpuset_add_file(struct dentry *dir, const struct cftype *cft) { struct dentry *dentry; int error; mutex_lock(&dir->d_inode->i_mutex); dentry = cpuset_get_dentry(dir, cft->name); if (!IS_ERR(dentry)) { error = cpuset_create_file(dentry, 0644 | S_IFREG); if (!error) dentry->d_fsdata = (void *)cft; dput(dentry); } else error = PTR_ERR(dentry); mutex_unlock(&dir->d_inode->i_mutex); return error; } /* * Stuff for reading the 'tasks' file. * * Reading this file can return large amounts of data if a cpuset has * *lots* of attached tasks. So it may need several calls to read(), * but we cannot guarantee that the information we produce is correct * unless we produce it entirely atomically. * * Upon tasks file open(), a struct ctr_struct is allocated, that * will have a pointer to an array (also allocated here). The struct * ctr_struct * is stored in file->private_data. Its resources will * be freed by release() when the file is closed. The array is used * to sprintf the PIDs and then used by read(). */ /* cpusets_tasks_read array */ struct ctr_struct { char *buf; int bufsz; }; /* * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'. * Return actual number of pids loaded. No need to task_lock(p) * when reading out p->cpuset, as we don't really care if it changes * on the next cycle, and we are not going to try to dereference it. 
*/ static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs) { int n = 0; struct task_struct *g, *p; read_lock(&tasklist_lock); do_each_thread(g, p) { if (p->cpuset == cs) { if (unlikely(n == npids)) goto array_full; pidarray[n++] = p->pid; } } while_each_thread(g, p); array_full: read_unlock(&tasklist_lock); return n; } static int cmppid(const void *a, const void *b) { return *(pid_t *)a - *(pid_t *)b; } /* * Convert array 'a' of 'npids' pid_t's to a string of newline separated * decimal pids in 'buf'. Don't write more than 'sz' chars, but return * count 'cnt' of how many chars would be written if buf were large enough. */ static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) { int cnt = 0; int i; for (i = 0; i < npids; i++) cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]); return cnt; } /* * Handle an open on 'tasks' file. Prepare a buffer listing the * process id's of tasks currently attached to the cpuset being opened. * * Does not require any specific cpuset mutexes, and does not take any. */ static int cpuset_tasks_open(struct inode *unused, struct file *file) { struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent); struct ctr_struct *ctr; pid_t *pidarray; int npids; char c; if (!(file->f_mode & FMODE_READ)) return 0; ctr = kmalloc(sizeof(*ctr), GFP_KERNEL); if (!ctr) goto err0; /* * If cpuset gets more users after we read count, we won't have * enough space - tough. This race is indistinguishable to the * caller from the case that the additional cpuset users didn't * show up until sometime later on. */ npids = atomic_read(&cs->count); pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); if (!pidarray) goto err1; npids = pid_array_load(pidarray, npids, cs); sort(pidarray, npids, sizeof(pid_t), cmppid, NULL); /* Call pid_array_to_buf() twice, first just to get bufsz */ ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1; ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL); if (!ctr->buf) goto err2; ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids); kfree(pidarray); file->private_data = ctr; return 0; err2: kfree(pidarray); err1: kfree(ctr); err0: return -ENOMEM; } static ssize_t cpuset_tasks_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct ctr_struct *ctr = file->private_data; return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz); } static int cpuset_tasks_release(struct inode *unused_inode, struct file *file) { struct ctr_struct *ctr; if (file->f_mode & FMODE_READ) { ctr = file->private_data; kfree(ctr->buf); kfree(ctr); } return 0; } /* * for the common functions, 'private' gives the type of file */ static struct cftype cft_tasks = { .name = "tasks", .open = cpuset_tasks_open, .read = cpuset_tasks_read, .release = cpuset_tasks_release, .private = FILE_TASKLIST, }; static struct cftype cft_cpus = { .name = "cpus", .private = FILE_CPULIST, }; static struct cftype cft_mems = { .name = "mems", .private = FILE_MEMLIST, }; static struct cftype cft_cpu_exclusive = { .name = "cpu_exclusive", .private = FILE_CPU_EXCLUSIVE, }; static struct cftype cft_mem_exclusive = { .name = "mem_exclusive", .private = FILE_MEM_EXCLUSIVE, }; static struct cftype cft_notify_on_release = { .name = "notify_on_release", .private = FILE_NOTIFY_ON_RELEASE, }; static struct cftype cft_memory_migrate = { .name = "memory_migrate", .private = FILE_MEMORY_MIGRATE, }; static struct cftype cft_memory_pressure_enabled = { .name = "memory_pressure_enabled", .private = FILE_MEMORY_PRESSURE_ENABLED, }; 
static struct cftype cft_memory_pressure = { .name = "memory_pressure", .private = FILE_MEMORY_PRESSURE, }; static struct cftype cft_spread_page = { .name = "memory_spread_page", .private = FILE_SPREAD_PAGE, }; static struct cftype cft_spread_slab = { .name = "memory_spread_slab", .private = FILE_SPREAD_SLAB, }; static int cpuset_populate_dir(struct dentry *cs_dentry) { int err; if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0) return err; if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0) return err; if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0) return err; if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0) return err; if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0) return err; if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0) return err; if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0) return err; if ((err = cpuset_add_file(cs_dentry, &cft_spread_page)) < 0) return err; if ((err = cpuset_add_file(cs_dentry, &cft_spread_slab)) < 0) return err; if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0) return err; return 0; } /* * cpuset_create - create a cpuset * parent: cpuset that will be parent of the new cpuset. * name: name of the new cpuset. Will be strcpy'ed. * mode: mode to set on new inode * * Must be called with the mutex on the parent inode held */ static long cpuset_create(struct cpuset *parent, const char *name, int mode) { struct cpuset *cs; int err; cs = kmalloc(sizeof(*cs), GFP_KERNEL); if (!cs) return -ENOMEM; mutex_lock(&manage_mutex); cpuset_update_task_memory_state(); cs->flags = 0; if (notify_on_release(parent)) set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); if (is_spread_page(parent)) set_bit(CS_SPREAD_PAGE, &cs->flags); if (is_spread_slab(parent)) set_bit(CS_SPREAD_SLAB, &cs->flags); cs->cpus_allowed = CPU_MASK_NONE; cs->mems_allowed = NODE_MASK_NONE; atomic_set(&cs->count, 0); INIT_LIST_HEAD(&cs->sibling); INIT_LIST_HEAD(&cs->children); cs->mems_generation = cpuset_mems_generation++; fmeter_init(&cs->fmeter); cs->parent = parent; mutex_lock(&callback_mutex); list_add(&cs->sibling, &cs->parent->children); number_of_cpusets++; mutex_unlock(&callback_mutex); err = cpuset_create_dir(cs, name, mode); if (err < 0) goto err; /* * Release manage_mutex before cpuset_populate_dir() because it * will down() this new directory's i_mutex and if we race with * another mkdir, we might deadlock. */ mutex_unlock(&manage_mutex); err = cpuset_populate_dir(cs->dentry); /* If err < 0, we have a half-filled directory - oh well ;) */ return 0; err: list_del(&cs->sibling); mutex_unlock(&manage_mutex); kfree(cs); return err; } static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode) { struct cpuset *c_parent = dentry->d_parent->d_fsdata; /* the vfs holds inode->i_mutex already */ return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR); } /* * Locking note on the strange update_flag() call below: * * If the cpuset being removed is marked cpu_exclusive, then simulate * turning cpu_exclusive off, which will call update_cpu_domains(). * The lock_cpu_hotplug() call in update_cpu_domains() must not be * made while holding callback_mutex. Elsewhere the kernel nests * callback_mutex inside lock_cpu_hotplug() calls. So the reverse * nesting would risk an ABBA deadlock. 
 */
static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
        struct cpuset *cs = dentry->d_fsdata;
        struct dentry *d;
        struct cpuset *parent;
        char *pathbuf = NULL;

        /* the vfs holds both inode->i_mutex already */

        mutex_lock(&manage_mutex);
        cpuset_update_task_memory_state();
        if (atomic_read(&cs->count) > 0) {
                mutex_unlock(&manage_mutex);
                return -EBUSY;
        }
        if (!list_empty(&cs->children)) {
                mutex_unlock(&manage_mutex);
                return -EBUSY;
        }
        if (is_cpu_exclusive(cs)) {
                int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
                if (retval < 0) {
                        mutex_unlock(&manage_mutex);
                        return retval;
                }
        }
        parent = cs->parent;
        mutex_lock(&callback_mutex);
        set_bit(CS_REMOVED, &cs->flags);
        list_del(&cs->sibling); /* delete my sibling from parent->children */
        spin_lock(&cs->dentry->d_lock);
        d = dget(cs->dentry);
        cs->dentry = NULL;
        spin_unlock(&d->d_lock);
        cpuset_d_remove_dir(d);
        dput(d);
        number_of_cpusets--;
        mutex_unlock(&callback_mutex);
        if (list_empty(&parent->children))
                check_for_release(parent, &pathbuf);
        mutex_unlock(&manage_mutex);
        cpuset_release_agent(pathbuf);
        return 0;
}

/*
 * cpuset_init_early - just enough so that the calls to
 * cpuset_update_task_memory_state() in early init code
 * are harmless.
 */

int __init cpuset_init_early(void)
{
        struct task_struct *tsk = current;

        tsk->cpuset = &top_cpuset;
        tsk->cpuset->mems_generation = cpuset_mems_generation++;
        return 0;
}

/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system.
 **/

int __init cpuset_init(void)
{
        struct dentry *root;
        int err;

        top_cpuset.cpus_allowed = CPU_MASK_ALL;
        top_cpuset.mems_allowed = NODE_MASK_ALL;

        fmeter_init(&top_cpuset.fmeter);
        top_cpuset.mems_generation = cpuset_mems_generation++;

        init_task.cpuset = &top_cpuset;

        err = register_filesystem(&cpuset_fs_type);
        if (err < 0)
                goto out;
        cpuset_mount = kern_mount(&cpuset_fs_type);
        if (IS_ERR(cpuset_mount)) {
                printk(KERN_ERR "cpuset: could not mount!\n");
                err = PTR_ERR(cpuset_mount);
                cpuset_mount = NULL;
                goto out;
        }
        root = cpuset_mount->mnt_sb->s_root;
        root->d_fsdata = &top_cpuset;
        inc_nlink(root->d_inode);
        top_cpuset.dentry = root;
        root->d_inode->i_op = &cpuset_dir_inode_operations;
        number_of_cpusets = 1;
        err = cpuset_populate_dir(root);
        /* memory_pressure_enabled is in root cpuset only */
        if (err == 0)
                err = cpuset_add_file(root, &cft_memory_pressure_enabled);
out:
        return err;
}

/*
 * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
 * or memory nodes, we need to walk over the cpuset hierarchy,
 * removing that CPU or node from all cpusets.  If this removes the
 * last CPU or node from a cpuset, then the guarantee_online_cpus()
 * or guarantee_online_mems() code will use that emptied cpuset's
 * parent's online CPUs or nodes.  Cpusets that were already empty of
 * CPUs or nodes are left empty.
 *
 * This routine is intentionally inefficient in a couple of regards.
 * It will check all cpusets in a subtree even if the top cpuset of
 * the subtree has no offline CPUs or nodes.  It checks both CPUs and
 * nodes, even though the caller could have been coded to know that
 * only one of CPUs or nodes needed to be checked on a given call.
 * This was done to minimize text size rather than cpu cycles.
 *
 * Call with both manage_mutex and callback_mutex held.
 *
 * Recursive, on depth of cpuset subtree.
 */
static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
{
        struct cpuset *c;

        /* Each of our child cpusets mems must be online */
        list_for_each_entry(c, &cur->children, sibling) {
                guarantee_online_cpus_mems_in_subtree(c);
                if (!cpus_empty(c->cpus_allowed))
                        guarantee_online_cpus(c, &c->cpus_allowed);
                if (!nodes_empty(c->mems_allowed))
                        guarantee_online_mems(c, &c->mems_allowed);
        }
}

/*
 * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
 * cpu_online_map and node_online_map.  Force the top cpuset to track
 * what's online after any CPU or memory node hotplug or unplug event.
 *
 * To ensure that we don't remove a CPU or node from the top cpuset
 * that is currently in use by a child cpuset (which would violate
 * the rule that cpusets must be subsets of their parent), we first
 * call the recursive routine guarantee_online_cpus_mems_in_subtree().
 *
 * Since there are two callers of this routine, one for CPU hotplug
 * events and one for memory node hotplug events, we could have coded
 * two separate routines here.  We code it as a single common routine
 * in order to minimize text size.
 */

static void common_cpu_mem_hotplug_unplug(void)
{
        mutex_lock(&manage_mutex);
        mutex_lock(&callback_mutex);

        guarantee_online_cpus_mems_in_subtree(&top_cpuset);
        top_cpuset.cpus_allowed = cpu_online_map;
        top_cpuset.mems_allowed = node_online_map;

        mutex_unlock(&callback_mutex);
        mutex_unlock(&manage_mutex);
}

/*
 * The top_cpuset tracks what CPUs and Memory Nodes are online,
 * period.  This is necessary in order to make cpusets transparent
 * (of no effect) on systems that are actively using CPU hotplug
 * but making no active use of cpusets.
 *
 * This routine ensures that top_cpuset.cpus_allowed tracks
 * cpu_online_map on each CPU hotplug (cpuhp) event.
 */

static int cpuset_handle_cpuhp(struct notifier_block *nb,
                                unsigned long phase, void *cpu)
{
        if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
                return NOTIFY_DONE;

        common_cpu_mem_hotplug_unplug();
        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Keep top_cpuset.mems_allowed tracking node_online_map.
 * Call this routine anytime after you change node_online_map.
 * See also the previous routine cpuset_handle_cpuhp().
 */

void cpuset_track_online_nodes(void)
{
        common_cpu_mem_hotplug_unplug();
}
#endif

/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized
 **/

void __init cpuset_init_smp(void)
{
        top_cpuset.cpus_allowed = cpu_online_map;
        top_cpuset.mems_allowed = node_online_map;

        hotcpu_notifier(cpuset_handle_cpuhp, 0);
}

/**
 * cpuset_fork - attach newly forked task to its parent's cpuset.
 * @child: pointer to task_struct of the newly forked child process.
 *
 * Description: A task inherits its parent's cpuset at fork().
 *
 * A pointer to the shared cpuset was automatically copied in fork.c
 * by dup_task_struct().  However, we ignore that copy, since it was
 * not made under the protection of task_lock(), so might no longer be
 * a valid cpuset pointer.  attach_task() might have already changed
 * current->cpuset, allowing the previously referenced cpuset to
 * be removed and freed.  Instead, we task_lock(current) and copy
 * its present value of current->cpuset for our freshly forked child.
 *
 * At the point that cpuset_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 **/
void cpuset_fork(struct task_struct *child)
{
        task_lock(current);
        child->cpuset = current->cpuset;
        atomic_inc(&child->cpuset->count);
        task_unlock(current);
}

/**
 * cpuset_exit - detach cpuset from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cpuset from @tsk and release it.
 *
 * Note that cpusets marked notify_on_release force every task in
 * them to take the global manage_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cpusets where very high task exit scaling
 * is required on large systems.
 *
 * Don't even think about dereferencing 'cs' after the cpuset use count
 * goes to zero, except inside a critical section guarded by manage_mutex
 * or callback_mutex.  Otherwise a zero cpuset use count is a license to
 * any other task to nuke the cpuset immediately, via cpuset_rmdir().
 *
 * This routine has to take manage_mutex, not callback_mutex, because
 * it is holding that mutex while calling check_for_release(),
 * which calls kmalloc(), so can't be called holding callback_mutex.
 *
 * the_top_cpuset_hack:
 *
 * Set the exiting task's cpuset to the root cpuset (top_cpuset).
 *
 * Don't leave a task unable to allocate memory, as that is an
 * accident waiting to happen should someone add a callout in
 * do_exit() after the cpuset_exit() call that might allocate.
 * If a task tries to allocate memory with an invalid cpuset,
 * it will oops in cpuset_update_task_memory_state().
 *
 * We call cpuset_exit() while the task is still competent to
 * handle notify_on_release(), then leave the task attached to
 * the root cpuset (top_cpuset) for the remainder of its exit.
 *
 * To do this properly, we would increment the reference count on
 * top_cpuset, and near the very end of the kernel/exit.c do_exit()
 * code we would add a second cpuset function call, to drop that
 * reference.  This would just create an unnecessary hot spot on
 * the top_cpuset reference count, to no avail.
 *
 * Normally, holding a reference to a cpuset without bumping its
 * count is unsafe.  The cpuset could go away, or someone could
 * attach us to a different cpuset, decrementing the count on
 * the first cpuset that we never incremented.  But in this case,
 * top_cpuset isn't going away, and either task has PF_EXITING set,
 * which wards off any attach_task() attempts, or task is a failed
 * fork, never visible to attach_task.
 *
 * Another way to do this would be to set the cpuset pointer
 * to NULL here, and check in cpuset_update_task_memory_state()
 * for a NULL pointer.  This hack avoids that NULL check, for no
 * cost (other than this way too long comment ;).
 **/

void cpuset_exit(struct task_struct *tsk)
{
        struct cpuset *cs;

        task_lock(current);
        cs = tsk->cpuset;
        tsk->cpuset = &top_cpuset;      /* the_top_cpuset_hack - see above */
        task_unlock(current);

        if (notify_on_release(cs)) {
                char *pathbuf = NULL;

                mutex_lock(&manage_mutex);
                if (atomic_dec_and_test(&cs->count))
                        check_for_release(cs, &pathbuf);
                mutex_unlock(&manage_mutex);
                cpuset_release_agent(pathbuf);
        } else {
                atomic_dec(&cs->count);
        }
}

/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 *
 * Description: Returns the cpumask_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_map, even if this means going outside the
 * task's cpuset.
 **/

cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
{
        cpumask_t mask;

        mutex_lock(&callback_mutex);
        task_lock(tsk);
        guarantee_online_cpus(tsk->cpuset, &mask);
        task_unlock(tsk);
        mutex_unlock(&callback_mutex);

        return mask;
}

void cpuset_init_current_mems_allowed(void)
{
        current->mems_allowed = NODE_MASK_ALL;
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_online_map, even if this means going outside the
 * task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
        nodemask_t mask;

        mutex_lock(&callback_mutex);
        task_lock(tsk);
        guarantee_online_mems(tsk->cpuset, &mask);
        task_unlock(tsk);
        mutex_unlock(&callback_mutex);

        return mask;
}

/**
 * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed
 * @zl: the zonelist to be checked
 *
 * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
 */
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
{
        int i;

        for (i = 0; zl->zones[i]; i++) {
                int nid = zone_to_nid(zl->zones[i]);

                if (node_isset(nid, current->mems_allowed))
                        return 1;
        }
        return 0;
}

/*
 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
 * ancestor to the specified cpuset.  Call holding callback_mutex.
 * If no ancestor is mem_exclusive (an unusual configuration), then
 * returns the root cpuset.
 */
static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
{
        while (!is_mem_exclusive(cs) && cs->parent)
                cs = cs->parent;
        return cs;
}

/**
 * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
 * @z: is this zone on an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If
 * __GFP_THISNODE is set, yes, we can always allocate.  If zone
 * z's node is in our task's mems_allowed, yes.  If it's not a
 * __GFP_HARDWALL request and this zone's node is in the nearest
 * mem_exclusive cpuset ancestor to this task's cpuset, yes.
 * If the task has been OOM killed and has access to memory reserves
 * as specified by the TIF_MEMDIE flag, yes.
 * Otherwise, no.
 *
 * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
 * reduces to cpuset_zone_allowed_hardwall().  Otherwise,
 * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
 * from an enclosing cpuset.
 *
 * cpuset_zone_allowed_hardwall() only handles the simpler case of
 * hardwall cpusets, and never sleeps.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
 * unless the task has been OOM killed and is marked TIF_MEMDIE.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing mem_exclusive ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_mutex.  The
 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
 * the zonelist.
 * So only GFP_KERNEL allocations, if all nodes in the cpuset are
 * short of memory, might require taking the callback_mutex mutex.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags.  That logic and the checks below have the combined
 * effect that:
 *      in_interrupt - any node ok (current task context irrelevant)
 *      GFP_ATOMIC   - any node ok
 *      TIF_MEMDIE   - any node ok
 *      GFP_KERNEL   - any node in enclosing mem_exclusive cpuset ok
 *      GFP_USER     - only nodes in current task's mems_allowed ok.
 *
 * Rule:
 *      Don't call cpuset_zone_allowed_softwall if you can't sleep,
 *      unless you pass in the __GFP_HARDWALL flag set in gfp_mask,
 *      which disables the code that might scan up ancestor cpusets
 *      and sleep.
 */

int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
        int node;                       /* node that zone z is on */
        const struct cpuset *cs;        /* current cpuset ancestors */
        int allowed;                    /* is allocation in zone z allowed? */

        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
                return 1;
        node = zone_to_nid(z);
        might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
        if (node_isset(node, current->mems_allowed))
                return 1;
        /*
         * Allow tasks that have access to memory reserves because they have
         * been OOM killed to get memory anywhere.
         */
        if (unlikely(test_thread_flag(TIF_MEMDIE)))
                return 1;
        if (gfp_mask & __GFP_HARDWALL)  /* If hardwall request, stop here */
                return 0;

        if (current->flags & PF_EXITING) /* Let dying task have memory */
                return 1;

        /* Not hardwall and node outside mems_allowed: scan up cpusets */
        mutex_lock(&callback_mutex);

        task_lock(current);
        cs = nearest_exclusive_ancestor(current->cpuset);
        task_unlock(current);

        allowed = node_isset(node, cs->mems_allowed);
        mutex_unlock(&callback_mutex);
        return allowed;
}

/*
 * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
 * @z: is this zone on an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.
 * If __GFP_THISNODE is set, yes, we can always allocate.  If zone
 * z's node is in our task's mems_allowed, yes.  If the task has been
 * OOM killed and has access to memory reserves as specified by the
 * TIF_MEMDIE flag, yes.  Otherwise, no.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * Unlike the cpuset_zone_allowed_softwall() variant, above,
 * this variant requires that the zone be in the current task's
 * mems_allowed or that we're in interrupt.  It does not scan up the
 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
 * It never sleeps.
 */

int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
        int node;                       /* node that zone z is on */

        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
                return 1;
        node = zone_to_nid(z);
        if (node_isset(node, current->mems_allowed))
                return 1;
        /*
         * Allow tasks that have access to memory reserves because they have
         * been OOM killed to get memory anywhere.
         */
        if (unlikely(test_thread_flag(TIF_MEMDIE)))
                return 1;
        return 0;
}

/**
 * cpuset_lock - lock out any changes to cpuset structures
 *
 * The out of memory (oom) code needs to lock out any changes to
 * cpusets while it scans the tasklist looking for a task in an
 * overlapping cpuset.  Expose callback_mutex via this cpuset_lock()
 * routine, so the oom code can lock it, before locking the task
 * list.  The tasklist_lock is a spinlock, so must be taken inside
 * callback_mutex.
 */

void cpuset_lock(void)
{
        mutex_lock(&callback_mutex);
}

/**
 * cpuset_unlock - release lock on cpuset changes
 *
 * Undo the lock taken in a previous cpuset_lock() call.
 */

void cpuset_unlock(void)
{
        mutex_unlock(&callback_mutex);
}

/**
 * cpuset_mem_spread_node() - On which node to begin search for a page
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as used for file
 * system buffers and inode caches, then instead of starting on the
 * local node to look for a free page, rather spread the starting
 * node around the task's mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online.  So it
 * should not be possible for the following code to return an
 * offline node.  But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start.  The zonelist passed to
 * __alloc_pages() will include all nodes.  If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */

int cpuset_mem_spread_node(void)
{
        int node;

        node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
        if (node == MAX_NUMNODES)
                node = first_node(current->mems_allowed);
        current->cpuset_mem_spread_rotor = node;
        return node;
}
EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);

/**
 * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
 * @p: pointer to task_struct of some other task.
 *
 * Description: Return true if the nearest mem_exclusive ancestor
 * cpusets of tasks @p and current overlap.  Used by oom killer to
 * determine if task @p's memory usage might impact the memory
 * available to the current task.
 *
 * Call while holding callback_mutex.
 **/

int cpuset_excl_nodes_overlap(const struct task_struct *p)
{
        const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
        int overlap = 1;                /* do cpusets overlap? */

        task_lock(current);
        if (current->flags & PF_EXITING) {
                task_unlock(current);
                goto done;
        }
        cs1 = nearest_exclusive_ancestor(current->cpuset);
        task_unlock(current);

        task_lock((struct task_struct *)p);
        if (p->flags & PF_EXITING) {
                task_unlock((struct task_struct *)p);
                goto done;
        }
        cs2 = nearest_exclusive_ancestor(p->cpuset);
        task_unlock((struct task_struct *)p);

        overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
done:
        return overlap;
}

/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;

/**
 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Display to user space in the per-cpuset read-only file
 * "memory_pressure".  Value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 **/

void __cpuset_memory_pressure_bump(void)
{
        struct cpuset *cs;

        task_lock(current);
        cs = current->cpuset;
        fmeter_markevent(&cs->fmeter);
        task_unlock(current);
}

/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take manage_mutex, keeping attach_task() from changing it
 *    anyway.  No need to check that tsk->cpuset != NULL, thanks to
 *    the_top_cpuset_hack in cpuset_exit(), which sets an exiting task's
 *    cpuset to top_cpuset.
 */
static int proc_cpuset_show(struct seq_file *m, void *v)
{
        struct pid *pid;
        struct task_struct *tsk;
        char *buf;
        int retval;

        retval = -ENOMEM;
        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                goto out;

        retval = -ESRCH;
        pid = m->private;
        tsk = get_pid_task(pid, PIDTYPE_PID);
        if (!tsk)
                goto out_free;

        retval = -EINVAL;
        mutex_lock(&manage_mutex);

        retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE);
        if (retval < 0)
                goto out_unlock;
        seq_puts(m, buf);
        seq_putc(m, '\n');
out_unlock:
        mutex_unlock(&manage_mutex);
        put_task_struct(tsk);
out_free:
        kfree(buf);
out:
        return retval;
}

static int cpuset_open(struct inode *inode, struct file *file)
{
        struct pid *pid = PROC_I(inode)->pid;
        return single_open(file, proc_cpuset_show, pid);
}

const struct file_operations proc_cpuset_operations = {
        .open = cpuset_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)
{
        buffer += sprintf(buffer, "Cpus_allowed:\t");
        buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed);
        buffer += sprintf(buffer, "\n");
        buffer += sprintf(buffer, "Mems_allowed:\t");
        buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed);
        buffer += sprintf(buffer, "\n");
        return buffer;
}
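/*
 * Sketch of the two lines cpuset_task_status_allowed() contributes to
 * /proc/<pid>/status.  The mask values below are illustrative only and
 * assume a small NR_CPUS/MAX_NUMNODES configuration; larger masks are
 * printed by the *_scnprintf() helpers as comma-separated hex words:
 *
 *      Cpus_allowed:   0000000f
 *      Mems_allowed:   00000003
 */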