author     Glenn Elliott <gelliott@cs.unc.edu>  2013-05-15 02:20:14 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>  2013-05-15 02:20:14 -0400
commit     992ce8df6eae19c6826018d62cb337fbc632de75
tree       36ba9db2be606eeb8b47b4f6836517f7e5f55acd
parent     62bed6ce5f20e44c80369d224812c012f5dd5ef1

signal handling in gpuspin

-rw-r--r--  gpu/gpuspin.cu  192
-rw-r--r--  src/signal.c      2
2 files changed, 156 insertions, 38 deletions
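The change has two parts: LITMUS lock and unlock calls in gpuspin are bracketed by helpers that block budget-exhaustion signals while the syscall runs, and the job bodies are wrapped in LITMUS_TRY/LITMUS_CATCH(SIG_BUDGET) so a budget signal unwinds to a cleanup path instead of interrupting a lock operation. A minimal sketch of the blocking pattern, using the liblitmus calls that appear in the diff below; guarded_lock() and guarded_unlock() are hypothetical wrapper names, not part of the patch, and the sketch assumes the liblitmus headers that declare these calls are on the include path:

	/* Illustrative sketch only.  block_litmus_signals(), unblock_litmus_signals(),
	 * ALL_LITMUS_SIG_MASKS, litmus_lock(), litmus_unlock(), and WANT_SIGNALS are
	 * the liblitmus/gpuspin names used in the patch below. */
	#include <litmus.h>

	static bool WANT_SIGNALS = false;	/* set from args->want_signals in apply_args() */

	static inline void guarded_lock(int od)
	{
		if (WANT_SIGNALS)
			block_litmus_signals(ALL_LITMUS_SIG_MASKS);	/* defer SIG_BUDGET */
		litmus_lock(od);
		if (WANT_SIGNALS)
			unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);	/* pending signal delivered here */
	}

	static inline void guarded_unlock(int od)
	{
		if (WANT_SIGNALS)
			block_litmus_signals(ALL_LITMUS_SIG_MASKS);
		litmus_unlock(od);
		if (WANT_SIGNALS)
			unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
	}

In the patch itself the blocking and unblocking stay in separate gpuspin_block_litmus_signals()/gpuspin_unblock_litmus_signals() helpers, so one block/unblock pair can also span several lock operations, as in the dynamic-group-lock and loop cases of ce_lock_state::lock() below.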
diff --git a/gpu/gpuspin.cu b/gpu/gpuspin.cu
index 304d937..8a9b717 100644
--- a/gpu/gpuspin.cu
+++ b/gpu/gpuspin.cu
@@ -8,6 +8,8 @@
 #include <assert.h>
 #include <execinfo.h>
 
+#include <exception>
+
 #include <boost/interprocess/managed_shared_memory.hpp>
 #include <boost/interprocess/sync/interprocess_mutex.hpp>
 #include <boost/filesystem.hpp>
@@ -39,6 +41,7 @@ const unsigned int CE_RECV_START = 400;
 const unsigned int CE_RECV_END = 401;
 
 bool SILENT = true;
+//bool SILENT = false;
 inline int xprintf(const char *format, ...)
 {
 	int ret = 0;
@@ -56,6 +59,19 @@ const size_t PAGE_SIZE = sysconf(_SC_PAGESIZE);
 
 const int NR_GPUS = 8;
 
+bool WANT_SIGNALS = false;
+inline void gpuspin_block_litmus_signals(unsigned long mask)
+{
+	if (WANT_SIGNALS)
+		block_litmus_signals(mask);
+}
+
+inline void gpuspin_unblock_litmus_signals(unsigned long mask)
+{
+	if (WANT_SIGNALS)
+		unblock_litmus_signals(mask);
+}
+
 bool GPU_USING = false;
 bool ENABLE_AFFINITY = false;
 bool RELAX_FIFO_MAX_LEN = false;
@@ -305,16 +321,24 @@ struct ce_lock_state
 	void lock() {
 		if(locks[0] == locks[1]) crash();
 
-		if (num_locks == 1)
+		if (num_locks == 1) {
+			gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 			litmus_lock(locks[0]);
-		else if(USE_DYNAMIC_GROUP_LOCKS)
+			gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
+		}
+		else if(USE_DYNAMIC_GROUP_LOCKS) {
+			gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 			litmus_dgl_lock(locks, num_locks);
+			gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
+		}
 		else
 		{
+			gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 			for(int l = 0; l < num_locks; ++l)
 			{
 				litmus_lock(locks[l]);
 			}
+			gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
 		}
 		locked = true;
 	}
@@ -322,17 +346,25 @@ struct ce_lock_state
 	void unlock() {
 		if(locks[0] == locks[1]) crash();
 
-		if (num_locks == 1)
+		if (num_locks == 1) {
+			gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 			litmus_unlock(locks[0]);
-		else if(USE_DYNAMIC_GROUP_LOCKS)
+			gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
+		}
+		else if(USE_DYNAMIC_GROUP_LOCKS) {
+			gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 			litmus_dgl_unlock(locks, num_locks);
+			gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
+		}
 		else
 		{
+			gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 			// reverse order
 			for(int l = num_locks - 1; l >= 0; --l)
 			{
 				litmus_unlock(locks[l]);
 			}
+			gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
 		}
 		locked = false;
 	}
@@ -391,14 +423,14 @@ static cudaError_t __chunkMemcpy(void* a_dst, const void* a_src, size_t count,
 		if (!state->budgetIsAvailable(bytesToCopy)) {
 			// optimization - don't unlock if no one else needs the engine
 			if (state->should_yield()) {
-				//cudaStreamSynchronize(STREAMS[CUR_DEVICE]);
+				gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 				cudaEventSynchronize(EVENTS[CUR_DEVICE]);
 				ret = cudaGetLastError();
-
 				if (kind == cudaMemcpyDeviceToHost || kind == cudaMemcpyDeviceToDevice)
 					inject_action(CE_RECV_END);
 				if (kind == cudaMemcpyHostToDevice)
 					inject_action(CE_SEND_END);
+				gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
 
 				state->unlock();
 				if(ret != cudaSuccess)
@@ -422,8 +454,10 @@ static cudaError_t __chunkMemcpy(void* a_dst, const void* a_src, size_t count,
 		}
 
 		//ret = cudaMemcpy(dst+i*chunk_size, src+i*chunk_size, bytesToCopy, kind);
+		gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 		cudaMemcpyAsync(dst+i*chunk_size, src+i*chunk_size, bytesToCopy, kind, STREAMS[CUR_DEVICE]);
 		cudaEventRecord(EVENTS[CUR_DEVICE], STREAMS[CUR_DEVICE]);
+		gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
 
 		if(state)
 			state->decreaseBudget(bytesToCopy);
@@ -444,10 +478,11 @@ static cudaError_t chunkMemcpy(void* a_dst, const void* a_src, size_t count,
 	cudaError_t ret;
 	if(!do_locking || device_a == -1) {
 		ret = __chunkMemcpy(a_dst, a_src, count, kind, NULL);
+		gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 		cudaEventSynchronize(cur_event());
-//		cudaStreamSynchronize(cur_stream());
 		if(ret == cudaSuccess)
 			ret = cudaGetLastError();
+		gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
 	}
 	else {
 		ce_lock_state state(device_a, kind, count, device_b, migration);
@@ -459,6 +494,7 @@ static cudaError_t chunkMemcpy(void* a_dst, const void* a_src, size_t count,
 			inject_action(CE_SEND_START);
 
 		ret = __chunkMemcpy(a_dst, a_src, count, kind, &state);
+		gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 		cudaEventSynchronize(cur_event());
 //		cudaStreamSynchronize(cur_stream());
 		if(ret == cudaSuccess)
@@ -468,6 +504,7 @@ static cudaError_t chunkMemcpy(void* a_dst, const void* a_src, size_t count,
 			inject_action(CE_RECV_END);
 		if (kind == cudaMemcpyHostToDevice)
 			inject_action(CE_SEND_END);
+		gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
 
 		state.unlock();
 	}
@@ -963,11 +1000,11 @@ static void init_cuda(const int num_gpu_users)
 		}
 		catch(std::exception &e)
 		{
-			printf("caught an exception during initializiation!: %s\n", e.what());
+			fprintf(stderr, "caught an exception during initializiation!: %s\n", e.what());
 		}
 		catch(...)
 		{
-			printf("caught unknown exception.\n");
+			fprintf(stderr, "caught unknown exception.\n");
 		}
 
 		gpu_mgmt_mutexes[which].unlock();
@@ -1185,63 +1222,109 @@ __global__ void docudaspin(float* data, /*unsigned int* iterations,*/ unsigned i
 static void gpu_loop_for(double gpu_sec_time, unsigned int num_kernels, double emergency_exit)
 {
 	int next_gpu;
+	bool ee_locked = false;
+	bool early_exit = false;
 
 	if (gpu_sec_time <= 0.0)
 		goto out;
 	if (emergency_exit && wctime() > emergency_exit)
 		goto out;
 
+	gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 	next_gpu = litmus_lock(TOKEN_LOCK);
 	inject_action(TOKEN_START);
+	gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
+
+	LITMUS_TRY
 	{
+		gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 		MigrateIfNeeded(next_gpu);
+		gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
+
 		unsigned int numcycles = ((unsigned int)(cur_hz() * gpu_sec_time))/num_kernels;
 
 		if(SEND_SIZE > 0)
 			chunkMemcpy(this_gpu(d_state_data), h_send_data, SEND_SIZE,
 					cudaMemcpyHostToDevice, CUR_DEVICE, useEngineLocks());
 
-		bool locked = false;
 		for(unsigned int i = 0; i < num_kernels; ++i)
 		{
-			if(useEngineLocks() && !locked) {
+			gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
+
+			if(useEngineLocks() && !ee_locked) {
 				litmus_lock(cur_ee());
 				inject_action(EE_START);
-				locked = true;
+				ee_locked = true;
 			}
-
 			/* one block per sm, one warp per block */
 			docudaspin <<<cur_sms(), cur_warp_size(), 0, cur_stream()>>> (d_spin_data[cur_gpu()], cur_elem_per_thread(), numcycles);
-
 			if(useEngineLocks() && (!YIELD_LOCKS || (YIELD_LOCKS && litmus_should_yield_lock(cur_ee())))) {
 //				cudaStreamSynchronize(cur_stream());
 				cudaEventRecord(cur_event(), cur_stream());
 				cudaEventSynchronize(cur_event());
 				inject_action(EE_END);
 				litmus_unlock(cur_ee());
-				locked = false;
+				ee_locked = false;
 			}
+			gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
 		}
-		if (locked) {
+
+		if (ee_locked) {
+			gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
+
 			cudaEventRecord(cur_event(), cur_stream());
 			cudaEventSynchronize(cur_event());
 			inject_action(EE_END);
 			litmus_unlock(cur_ee());
-			locked = false;
+
+			gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
+			ee_locked = false;
 		}
 
 		if(RECV_SIZE > 0)
 			chunkMemcpy(h_recv_data, this_gpu(d_state_data), RECV_SIZE,
 					cudaMemcpyDeviceToHost, CUR_DEVICE, useEngineLocks());
 
-		if (MIGRATE_VIA_SYSMEM)
+		if (MIGRATE_VIA_SYSMEM) {
+			gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 			PullState();
+			gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
+		}
+	}
+	LITMUS_CATCH(SIG_BUDGET)
+	{
+		cudaEventRecord(cur_event(), cur_stream());
+		cudaEventSynchronize(cur_event());
+
+		if (useEngineLocks()) {
+			/* unlock all engine locks. will fail safely if not held */
+			litmus_unlock(cur_ee());
+			if (NUM_COPY_ENGINES == 1) {
+				litmus_unlock(cur_send());
+			}
+			else if (RESERVED_MIGR_COPY_ENGINE) {
+				litmus_unlock(cur_send());
+				litmus_unlock(cur_migr_send());
+			}
+			else {
+				litmus_unlock(cur_send());
+				litmus_unlock(cur_recv());
+			}
+		}
+		early_exit = true;
 	}
+	END_LITMUS_TRY
+
+	gpuspin_block_litmus_signals(ALL_LITMUS_SIG_MASKS);
 	inject_action(TOKEN_END);
 	litmus_unlock(TOKEN_LOCK);
+	gpuspin_unblock_litmus_signals(ALL_LITMUS_SIG_MASKS);
 
 	last_gpu() = cur_gpu();
 
+	if (early_exit)
+		throw std::exception();
+
 out:
 	return;
 }
@@ -1463,9 +1546,24 @@ static bool gpu_job(double exec_time, double gpu_exec_time, unsigned int num_ker
 		chunk1 = exec_time * drand48();
 		chunk2 = exec_time - chunk1;
 
-		loop_for(chunk1, program_end + 1);
-		gpu_loop_for(gpu_exec_time, num_kernels, program_end + 1);
-		loop_for(chunk2, program_end + 1);
+		LITMUS_TRY
+		{
+			try
+			{
+				loop_for(chunk1, program_end + 1);
+				gpu_loop_for(gpu_exec_time, num_kernels, program_end + 1);
+				loop_for(chunk2, program_end + 1);
+			}
+			catch(std::exception& e)
+			{
+				xprintf("%d: ran out of time while using GPU\n", gettid());
+			}
+		}
+		LITMUS_CATCH(SIG_BUDGET)
+		{
+			xprintf("%d: ran out of time\n", gettid());
+		}
+		END_LITMUS_TRY
 
 		sleep_next_period();
 	}
@@ -1478,7 +1576,15 @@ static bool job(double exec_time, double program_end)
 		return false;
 	}
 	else {
-		loop_for(exec_time, program_end + 1);
+		LITMUS_TRY
+		{
+			loop_for(exec_time, program_end + 1);
+		}
+		LITMUS_CATCH(SIG_BUDGET)
+		{
+			xprintf("%d: ran out of time\n", gettid());
+		}
+		END_LITMUS_TRY
 		sleep_next_period();
 	}
 	return true;
@@ -1741,6 +1847,8 @@ void apply_args(struct Args* args)
 	else if (args->scheduler == LITMUS)
 		TRACE_MIGRATIONS = false;
 
+	WANT_SIGNALS = args->want_signals;
+
 	// roll back other globals to an initial state
 	CUR_DEVICE = -1;
 	LAST_DEVICE = -1;
@@ -1773,7 +1881,6 @@ int __do_normal(struct Args* args)
 	USE_DYNAMIC_GROUP_LOCKS = false;
 	RELAX_FIFO_MAX_LEN = false;
 	ENABLE_RT_AUX_THREADS = false;
-	args->budget_ms = -1.0;
 	args->want_enforcement = false;
 	args->want_signals = false;
 
@@ -1788,23 +1895,24 @@ int __do_normal(struct Args* args)
 
 	wcet = ms2ns(args->wcet_ms);
 	period = ms2ns(args->period_ms);
+
 	if (wcet <= 0) {
-		printf("The worst-case execution time must be a positive number.\n");
+		fprintf(stderr, "The worst-case execution time must be a positive number.\n");
 		ret = -1;
 		goto out;
 	}
 	if (period <= 0) {
-		printf("The period must be a positive number.\n");
+		fprintf(stderr, "The period must be a positive number.\n");
 		ret = -1;
 		goto out;
 	}
 	if (wcet > period) {
-		printf("The worst-case execution time must not exceed the period.\n");
+		fprintf(stderr, "The worst-case execution time must not exceed the period.\n");
 		ret = -1;
 		goto out;
 	}
 	if (args->gpu_using && args->gpu_wcet_ms <= 0) {
-		printf("The worst-case gpu execution time must be a positive number.\n");
+		fprintf(stderr, "The worst-case gpu execution time must be a positive number.\n");
 		ret = -1;
 		goto out;
 	}
@@ -1812,7 +1920,7 @@ int __do_normal(struct Args* args)
 	if (args->budget_ms > 0.0)
 		budget = ms2ns(args->budget_ms);
 	else
-		budget = args->wcet_ms;
+		budget = wcet;
 
 	// randomize execution time according to a normal distribution
 	// centered around the desired execution time.
@@ -1822,7 +1930,7 @@ int __do_normal(struct Args* args)
 
 	ret = be_migrate_all_to_cluster(args->cluster, args->cluster_size);
 	if (ret < 0) {
-		printf("could not migrate to target partition or cluster.\n");
+		fprintf(stderr, "could not migrate to target partition or cluster.\n");
 		goto out;
 	}
 
@@ -1844,9 +1952,10 @@ int __do_normal(struct Args* args)
 	param.cls = args->cls;
 	param.budget_policy = (args->want_enforcement) ?
 			PRECISE_ENFORCEMENT : NO_ENFORCEMENT;
-	param.budget_signal_policy = (args->want_enforcement && args->want_signals) ?
+	param.budget_signal_policy = (args->want_signals) ?
 			PRECISE_SIGNALS : NO_SIGNALS;
 	param.drain_policy = args->drain_policy;
+	param.drain_policy = args->drain_policy;
 	param.release_policy = PERIODIC;
 	param.cpu = cluster_to_first_cpu(args->cluster, args->cluster_size);
 
@@ -1869,7 +1978,7 @@ int __do_normal(struct Args* args)
 	{
 		ret = task_mode(LITMUS_RT_TASK);
 		if (ret < 0) {
-			printf("could not become RT task\n");
+			fprintf(stderr, "could not become RT task\n");
 			goto out;
 		}
 	}
@@ -1882,7 +1991,7 @@ int __do_normal(struct Args* args)
 		fifoparams.sched_priority = args->priority;
 		ret = sched_setscheduler(getpid(), SCHED_FIFO, &fifoparams);
 		if (ret < 0) {
-			printf("could not become sched_fifo task\n");
+			fprintf(stderr, "could not become sched_fifo task\n");
 			goto out;
 		}
 	}
@@ -1911,14 +2020,14 @@ int __do_normal(struct Args* args)
 		if (args->scheduler == LITMUS) {
 			ret = enable_aux_rt_tasks(AUX_CURRENT | AUX_FUTURE);
 			if (ret != 0) {
-				printf("enable_aux_rt_tasks() failed\n");
+				fprintf(stderr, "enable_aux_rt_tasks() failed\n");
 				goto out;
 			}
 		}
 		else if (args->scheduler == RT_LINUX) {
 			ret = enable_aux_rt_tasks_linux(gettid());
 			if (ret != 0) {
-				printf("enable_aux_rt_tasks_linux() failed\n");
+				fprintf(stderr, "enable_aux_rt_tasks_linux() failed\n");
 				goto out;
 			}
 		}
@@ -1954,18 +2063,22 @@ int __do_normal(struct Args* args)
 		}while(keepgoing);
 	}
 
+	if (args->want_signals)
+		ignore_litmus_signals(SIG_BUDGET_MASK);
+
+
 	if (args->gpu_using && ENABLE_RT_AUX_THREADS) {
 		if (args->scheduler == LITMUS) {
 			ret = disable_aux_rt_tasks(AUX_CURRENT | AUX_FUTURE);
 			if (ret != 0) {
-				printf("disable_aux_rt_tasks() failed\n");
+				fprintf(stderr, "disable_aux_rt_tasks() failed\n");
 				goto out;
 			}
 		}
 		else if(args->scheduler == RT_LINUX) {
 			ret = disable_aux_rt_tasks_linux(gettid());
 			if (ret != 0) {
-				printf("disable_aux_rt_tasks_linux() failed\n");
+				fprintf(stderr, "disable_aux_rt_tasks_linux() failed\n");
 				goto out;
 			}
 		}
@@ -1978,7 +2091,7 @@ int __do_normal(struct Args* args)
 	{
 		ret = task_mode(BACKGROUND_TASK);
 		if (ret != 0) {
-			printf("could not become regular task (huh?)\n");
+			fprintf(stderr, "could not become regular task (huh?)\n");
 			goto out;
 		}
 	}
@@ -1989,7 +2102,7 @@ int __do_normal(struct Args* args)
 		memset(&normalparams, 0, sizeof(normalparams));
 		ret = sched_setscheduler(getpid(), SCHED_OTHER, &normalparams);
 		if (ret < 0) {
-			printf("could not become sched_normal task\n");
+			fprintf(stderr, "could not become sched_normal task\n");
 			goto out;
 		}
 	}
@@ -2532,6 +2645,9 @@ int main(int argc, char** argv)
 		sleep(2);
 	}
 
+	/* make sure children don't take sigmasks */
+	ignore_litmus_signals(ALL_LITMUS_SIG_MASKS);
+
 	if (run_mode == NORMAL) {
 		return do_normal(&myArgs);
 	}
diff --git a/src/signal.c b/src/signal.c
index 397a797..1bd0f62 100644
--- a/src/signal.c
+++ b/src/signal.c
@@ -99,9 +99,11 @@ void longjmp_on_litmus_signal(int signum)
 	lit_env = pop_sigjmp();
 	if (lit_env) {
 		/* What you say?! */
+		//printf("%d: we get signal = %d!\n", gettid(), signum);
 		siglongjmp(lit_env->env, signum); /* restores signal mask */
 	}
 	else {
 		/* silently ignore the signal */
+		//printf("%d: silently ignoring signal.\n", gettid());
 	}
 }
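The handler above only long-jumps when a jump buffer has been registered; callers set one up with the LITMUS_TRY/LITMUS_CATCH/END_LITMUS_TRY macros, which is exactly what the gpuspin changes do. A minimal usage sketch of that pairing, for reference; exec_time, program_end, loop_for(), xprintf(), gettid(), and sleep_next_period() are the gpuspin names used earlier in this diff, and the snippet is a fragment meant to sit inside a job function:

	/* Sketch: a budget-limited job body cooperating with longjmp_on_litmus_signal().
	 * LITMUS_TRY registers the sigjmp buffer that the handler pops; if SIG_BUDGET
	 * fires inside the block, control resumes in LITMUS_CATCH(SIG_BUDGET). */
	LITMUS_TRY
	{
		loop_for(exec_time, program_end + 1);	/* budget may expire in here */
	}
	LITMUS_CATCH(SIG_BUDGET)
	{
		xprintf("%d: ran out of time\n", gettid());	/* cleanup / logging path */
	}
	END_LITMUS_TRY
	sleep_next_period();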