Newsgroups: php.internals Path: news.php.net Xref: news.php.net php.internals:47014 Return-Path: Mailing-List: contact internals-help@lists.php.net; run by ezmlm Delivered-To: mailing list internals@lists.php.net Received: (qmail 31190 invoked from network); 22 Feb 2010 01:11:39 -0000 Received: from unknown (HELO lists.php.net) (127.0.0.1) by localhost with SMTP; 22 Feb 2010 01:11:39 -0000 X-Host-Fingerprint: 200.66.16.110 unknown Received: from [200.66.16.110] ([200.66.16.110:27461] helo=localhost.localdomain) by pb1.pair.com (ecelerity 2.1.1.9-wez r(12769M)) with ESMTP id F2/C8-00376-AC9D18B4 for ; Sun, 21 Feb 2010 20:11:39 -0500 Message-ID: To: internals@lists.php.net Followup-To: php.internals Lines: 164 Date: Sun, 21 Feb 2010 19:11:46 -0600 MIME-Version: 1.0 Content-Type: multipart/mixed; boundary="nextPart4156758.TtaCxCBeP4" X-Posted-By: 200.66.16.110 Subject: multi-jobs run-tests.php From: geissert@php.net (Raphael Geissert) --nextPart4156758.TtaCxCBeP4 Content-Type: text/plain; charset="ISO-8859-1" Content-Transfer-Encoding: 7Bit Hi, I've spent the last hours working on making run-tests.php be able to run tests in parallel. The main reason being the time it takes to run the whole testsuite even on multicore CPUs. Attached is the first set of changes needed. It uses pcntl's fork and sysvmsg to send some of the results back to the parent process. Main functionalities still work, but the summary doesn't include failed tests. I plan to move away from sysvmsg because with the current implementation killing the parent process can leave message queues open (nothing that can't be fixed by adding a proper exit handler) and sysvmsg is not enabled by default. Probably to something php://memory-based. To support all the features under multi-jobs mode I think the proper fix is to cleanup the code so that run_test() stores the state information via an interface. This would make the code cleaner and would let multi-jobs mode just change the state-storing interface. 
How should I handle this? just by committing the changes? first sending them here (to internals)? by contacting somebody else? Cheers, -- Raphael Geissert --nextPart4156758.TtaCxCBeP4 Content-Type: text/x-patch; name="php_parallel_run-tests.patch" Content-Transfer-Encoding: 8Bit Content-Disposition: attachment; filename="php_parallel_run-tests.patch" diff --git a/run-tests.php b/run-tests.php index 81f218b..9516490 100755 --- a/run-tests.php +++ b/run-tests.php @@ -451,6 +451,12 @@ if ($compression) { $output_file = 'compress.zlib://' . $output_file . '.gz'; } +$max_jobs = false; + +if (extension_loaded('pcntl') && extension_loaded('sysvmsg')) { + $max_jobs = 1; +} + $just_save_results = false; $leak_check = false; $html_output = false; @@ -551,6 +557,14 @@ if (isset($argc) && $argc > 1) { $ini_overwrites[] = $argv[++$i]; break; //case 'h' + case 'j': + if ($max_jobs !== false) { + $max_jobs = (int)$argv[++$i]; + if ($max_jobs < 1) { + $max_jobs = 1; + } + } + break; case '--keep-all': foreach($cfgfiles as $file) { $cfg['keep'][$file] = true; @@ -665,6 +679,9 @@ Options: -d foo=bar Pass -d option to the php binary (Define INI entry foo with value 'bar'). + -j Maximum number of jobs to run in parallel (defaults to 1, + requires pcntl and sysvmsg). + -m Test for memory leaks with Valgrind. -p Specify PHP executable to run. 
@@ -1082,9 +1099,30 @@ function system_with_timeout($commandline, $env = null, $stdin = null) return $data; } +function reap_jobs($queue, &$results) +{ + // process messages aggressively to avoid + // saturating the queue, stalling children + // waiting for space in the queue to exit + while (msg_receive($queue, 0, $null, 4096, $msg, true, MSG_IPC_NOWAIT)) + $results[$msg['index']] = $msg['result']; + return pcntl_wait($null); +} + function run_all_tests($test_files, $env, $redir_tested = null) { global $test_results, $failed_tests_file, $php, $test_cnt, $test_idx; + global $max_jobs; + + $run_once = 0; + + if ($max_jobs !== false) { + $jobs_avail = $max_jobs; + do { + $queue_key = mt_rand(); + } while (function_exists('msg_queue_exists') && msg_queue_exists($queue_key)); + $queue = msg_get_queue($queue_key); + } foreach($test_files as $name) { @@ -1100,15 +1138,56 @@ function run_all_tests($test_files, $env, $redir_tested = null) $index = $name; } $test_idx++; + + if ($max_jobs && ($max_jobs > 1 || $jobs_avail == 0)) { + if (!$jobs_avail) { + if (reap_jobs($queue, $test_results) > 0) + $jobs_avail++; + } + $pid = pcntl_fork(); + if ($pid == -1) { + // Do nothing special, test will be run by + // the parent process. + // Decrement $jobs_avail and $max_jobs so that if + // future calls to fork fail we will end up + // disabling multi-jobs support without + // leaving children behind. 
+ $jobs_avail--; + $max_jobs--; + } else if ($pid) { + // parent process + $jobs_avail--; + continue; + } else { + // child process: + $max_jobs = false; + $run_once = 1; + } + } + $result = run_test($php, $name, $env); if (!is_array($name) && $result != 'REDIR') { - $test_results[$index] = $result; + // send the results back to the parent process + // if there's an active queue + if (isset($queue)) + msg_send($queue, 1, array('index' => $index, 'result' => $result)); + else + $test_results[$index] = $result; if ($failed_tests_file && ($result == 'XFAILED' || $result == 'FAILED' || $result == 'WARNED' || $result == 'LEAKED')) { fwrite($failed_tests_file, "$index\n"); } } + if ($run_once) exit; } + + if ($max_jobs !== false) { + // check for messages one last time before removing the queue + // reap jobs at the same time + while (reap_jobs($queue, $test_results) > 0); + msg_remove_queue($queue); + } + } // --nextPart4156758.TtaCxCBeP4--