This article collects and summarizes typical usage examples of the C++ REMARK function. If you have been wondering how exactly to use REMARK in C++, how it works, or what real-world calls look like, the curated code examples here may help.
Twenty code examples of the REMARK function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
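Before the examples, a note on what REMARK does may help. In the TBB test suites these snippets are taken from, REMARK takes printf-style arguments and emits diagnostic output that is normally visible only when the tests run in verbose mode, while REPORT (which also appears in several examples) prints unconditionally and is used for warnings and errors. The actual macro is defined in the tests' shared harness headers, which are not reproduced on this page; the following is only a minimal, self-contained sketch of that contract, and the Verbose flag and Remark helper are illustrative names rather than the harness's real API.

// Minimal sketch of a REMARK-style, verbose-only diagnostic macro.
// This is NOT the TBB test-harness implementation; it only illustrates the
// usage contract seen in the examples below.
#include <cstdarg>
#include <cstdio>

static bool Verbose = false;               // stand-in for the harness's verbose switch

static void Remark( const char* fmt, ... ) {
    if( !Verbose ) return;                 // silent unless verbose mode is enabled
    va_list args;
    va_start( args, fmt );
    std::vfprintf( stderr, fmt, args );    // printf-style formatting, like REMARK
    va_end( args );
}

#define REMARK Remark

int main( int argc, char** ) {
    Verbose = ( argc > 1 );                // e.g. pass any argument to enable output
    REMARK( "worktime=%g delta=%g tolerance=%g\n", 0.5, 0.001, 0.01 );
    return 0;
}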
Example 1: TestSimpleDelay
/** The 'tolerance' value inside the test specifies the limit. */
void TestSimpleDelay( int ntrial, double duration, double tolerance ) {
    double total_worktime = 0;
    // Iteration -1 warms up the code cache.
    for( int trial=-1; trial<ntrial; ++trial ) {
        tbb::tick_count t0 = tbb::tick_count::now();
        if( duration ) WaitForDuration(duration);
        tbb::tick_count t1 = tbb::tick_count::now();
        if( trial>=0 ) {
            total_worktime += (t1-t0).seconds();
        }
    }
    // Compute average worktime and average delta
    double worktime = total_worktime/ntrial;
    double delta = worktime-duration;
    REMARK("worktime=%g delta=%g tolerance=%g\n", worktime, delta, tolerance);
    // Check that delta is acceptable
    if( delta<0 )
        REPORT("ERROR: delta=%g < 0\n",delta);
    if( delta>tolerance )
        REPORT("%s: delta=%g > %g=tolerance where duration=%g\n",delta>3*tolerance?"ERROR":"Warning",delta,tolerance,duration);
}
Developer: RandomDeveloperM | Project: UE4_Hairworks | Lines: 23 | Source: test_tick_count.cpp
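Example 1 only defines the timing helper; the code that drives it lives elsewhere in test_tick_count.cpp and is not shown here. A hypothetical invocation, with purely illustrative trial counts, durations, and tolerances (not the values used in the real test), might look like this:

// Hypothetical driver for the TestSimpleDelay helper shown above.
// All parameter values below are illustrative, not taken from test_tick_count.cpp.
TestSimpleDelay( /*ntrial=*/100, /*duration=*/0.0,   /*tolerance=*/1e-5 );  // zero duration measures timer overhead only
TestSimpleDelay( /*ntrial=*/100, /*duration=*/0.001, /*tolerance=*/1e-4 );  // 1 ms delay, checked within 0.1 ms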
Example 2: TestMain
int TestMain () {
    if( MinThread<1 ) {
        REPORT("ERROR: MinThread=%d, but must be at least 1\n",MinThread); MinThread = 1;
    }
#if !TBB_DEPRECATED
    TestIteratorTraits<tbb::concurrent_vector<Foo>::iterator,Foo>();
    TestIteratorTraits<tbb::concurrent_vector<Foo>::const_iterator,const Foo>();
    TestSequentialFor<FooWithAssign> ();
    TestResizeAndCopy();
    TestAssign();
#if HAVE_m128
    TestSSE();
#endif /* HAVE_m128 */
#endif
    TestCapacity();
    ASSERT( !FooCount, NULL );
    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
        tbb::task_scheduler_init init( nthread );
        TestParallelFor( nthread );
        TestConcurrentGrowToAtLeast();
        TestConcurrentGrowBy( nthread );
    }
    ASSERT( !FooCount, NULL );
#if !TBB_DEPRECATED
    TestComparison();
#if !__TBB_FLOATING_POINT_BROKEN
    TestFindPrimes();
#endif
    TestSort();
#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
    REPORT("Known issue: exception safety test is skipped.\n");
#elif TBB_USE_EXCEPTIONS
    TestExceptions();
#endif /* TBB_USE_EXCEPTIONS */
#endif /* !TBB_DEPRECATED */
    ASSERT( !FooCount, NULL );
    REMARK("sizeof(concurrent_vector<int>) == %d\n", (int)sizeof(tbb::concurrent_vector<int>));
    return Harness::Done;
}
Developer: Multi2Sim | Project: m2s-bench-parsec-3.0-src | Lines: 39 | Source: test_concurrent_vector.cpp
Example 3: TestRehash
void TestRehash() {
    REMARK("testing rehashing\n");
    MyTable w;
    w.insert( std::make_pair(MyKey::make(-5), MyData()) );
    w.rehash(); // without this, assertion will fail
    MyTable::iterator it = w.begin();
    int i = 0; // check for non-rehashed buckets
    for( ; it != w.end(); i++ )
        w.count( (it++)->first );
    ASSERT( i == 1, NULL );
    for( i=0; i<1000; i=(i<29 ? i+1 : i*2) ) {
        for( int j=max(256+i, i*2); j<10000; j*=3 ) {
            MyTable v;
            FillTable( v, i );
            ASSERT(int(v.size()) == i, NULL);
            ASSERT(int(v.bucket_count()) <= j, NULL);
            v.rehash( j );
            ASSERT(int(v.bucket_count()) >= j, NULL);
            CheckTable( v, i );
        }
    }
}
Developer: RandomDeveloperM | Project: UE4_Hairworks | Lines: 22 | Source: test_concurrent_hash_map.cpp
Example 4: test
static void test() {
    TType v;
    source_type* all_source_nodes[MaxNSources];
    sink_node_helper<N,SType>::print_parallel_remark();
    REMARK(" >\n");
    for(int i=0; i < MaxPorts; ++i) {
        all_sink_nodes[i] = NULL;
    }
    // try test for # sources 1 .. MaxNSources
    for(int nInputs = 1; nInputs <= MaxNSources; ++nInputs) {
        tbb::flow::graph g;
        SType* my_split = makeSplit<N,SType>::create(g);
        // add sinks first so when sources start spitting out values they are there to catch them
        sink_node_helper<N, SType>::add_sink_nodes((*my_split), g);
        // now create nInputs source_nodes, each spitting out i, i+nInputs, i+2*nInputs ...
        // each element of the tuple is i*(n+1), where n is the tuple element index (1-N)
        for(int i = 0; i < nInputs; ++i) {
            // create source node
            source_type *s = new source_type(g, source_body<TType>(i, nInputs) );
            tbb::flow::make_edge(*s, *my_split);
            all_source_nodes[i] = s;
        }
        g.wait_for_all();
        // check that we got Count values in each output queue, and all the index values
        // are there.
        sink_node_helper<N, SType>::check_sink_values();
        sink_node_helper<N, SType>::remove_sink_nodes(*my_split);
        for(int i = 0; i < nInputs; ++i) {
            delete all_source_nodes[i];
        }
        makeSplit<N,SType>::destroy(my_split);
    }
}
Developer: AlessioVallero | Project: RaspberryPI | Lines: 38 | Source: test_split_node.cpp
Example 5: TestMain
int TestMain() {
    // Test with varying number of threads.
    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
        // Initialize TBB task scheduler
        REMARK("\nTesting with nthread=%d\n", nthread);
        tbb::task_scheduler_init init(nthread);
        // Run test several times with different types
        run_function_spec();
        run_function<size_t,int>("size_t", "int");
        run_function<int,double>("int", "double");
        check_type_counter = 0;
        run_function<check_type,size_t>("check_type", "size_t");
        ASSERT(!check_type_counter, "Error in check_type creation/destruction");
        // check_type as the second type in the pipeline only works if check_type
        // is also the first type. The middle_filter specialization for <check_type, check_type>
        // changes the state of the check_type items, and this is checked in the output_filter
        // specialization.
        run_function<check_type, check_type>("check_type", "check_type");
        ASSERT(!check_type_counter, "Error in check_type creation/destruction");
    }
    return Harness::Done;
}
Developer: aclysma | Project: Helium | Lines: 23 | Source: test_parallel_pipeline.cpp
Example 6: TestCleanAllBuffers
// The idea is to allocate a set of objects and then deallocate them in random
// order in parallel, to force conflicts in the backend during coalescing.
// Thus if the backend does not check the queue of postponed coalescing
// requests it will not be able to unmap all memory and a memory leak will be
// observed.
void TestCleanAllBuffers() {
    const int num_threads = 8;
    // Clean up if something was allocated before the test
    scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS,0);
    size_t memory_in_use_before = getMemSize();
    for ( int i=0; i<num_allocs; ++i ) {
        ptrs[i] = scalable_malloc( alloc_size );
        ASSERT( ptrs[i] != NULL, "scalable_malloc has returned zero." );
    }
    deallocs_counter = 0;
    TestCleanAllBuffersDeallocate::initBarrier(num_threads);
    NativeParallelFor(num_threads, TestCleanAllBuffersDeallocate());
    if ( defaultMemPool->extMemPool.backend.coalescQ.blocksToFree == NULL )
        REPORT( "Warning: The queue of postponed coalescing requests is empty. Unable to create the condition for bug reproduction.\n" );
    ASSERT( scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS,0) == TBBMALLOC_OK, "The cleanup request has not cleaned anything." );
    size_t memory_in_use_after = getMemSize();
    REMARK( "memory_in_use_before = %ld\nmemory_in_use_after = %ld\n", memory_in_use_before, memory_in_use_after );
    size_t memory_leak = memory_in_use_after - memory_in_use_before;
    ASSERT( memory_leak == 0, "The backend has not processed the queue of postponed coalescing requests during cleanup." );
}
Developer: Moqi | Project: Orca | Lines: 28 | Source: test_malloc_whitebox.cpp
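The comment at the top of Example 6 describes a general stress pattern: allocate a batch of blocks, then free them from several threads in an interleaved order. The sketch below reproduces only that pattern in plain standard C++ (malloc/free and std::thread) so it can be read on its own; it does not use tbbmalloc's whitebox hooks (scalable_malloc, scalable_allocation_command, NativeParallelFor) and therefore cannot reproduce the backend-coalescing condition the real test targets. All sizes and counts are illustrative.

// Standalone illustration of "allocate, then deallocate in random order in parallel".
// Uses plain malloc/free and std::thread; sizes and counts are illustrative only.
#include <algorithm>
#include <cstdlib>
#include <random>
#include <thread>
#include <vector>

int main() {
    const int num_threads = 8;
    const int num_allocs  = 1024;
    const std::size_t alloc_size = 16 * 1024;

    std::vector<void*> ptrs( num_allocs );
    for( int i = 0; i < num_allocs; ++i )
        ptrs[i] = std::malloc( alloc_size );

    // Shuffle so each thread ends up freeing an arbitrary, interleaved subset.
    std::shuffle( ptrs.begin(), ptrs.end(), std::mt19937( 42 ) );

    std::vector<std::thread> workers;
    for( int t = 0; t < num_threads; ++t ) {
        workers.emplace_back( [&ptrs, t, num_threads, num_allocs] {
            for( int i = t; i < num_allocs; i += num_threads )
                std::free( ptrs[i] );      // each thread frees its own stride of pointers
        } );
    }
    for( std::thread& w : workers )
        w.join();
    return 0;
}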
Example 7: TestReaderWriterLockOnNThreads
void TestReaderWriterLockOnNThreads(int nThreads) {
    // Stress-test all interfaces
    for (int pc=0; pc<=100; pc+=20) {
        REMARK("Testing with %d threads, percent of MAX_WORK=%d...", nThreads, pc);
        StressRWLBody myStressBody(nThreads, pc);
        NativeParallelFor(nThreads, myStressBody);
        REMARK(" OK.\n");
    }
    int i;
    n_tested__sim_readers = 0;
    REMARK("Testing with %d threads, direct/unscoped locking mode...", nThreads); // TODO: choose direct or unscoped?
    // TODO: refactor the following two for loops into a shared function
    for( i=0; i<100; ++i ) {
        Harness::SpinBarrier bar0(nThreads);
        CorrectRWLBody myCorrectBody(nThreads,bar0);
        active_writers = active_readers = 0;
        sim_readers = false;
        NativeParallelFor(nThreads, myCorrectBody);
        if( sim_readers || nThreads==1 ) {
            if( ++n_tested__sim_readers>5 )
                break;
        }
    }
    ASSERT(i<100, "There were no simultaneous readers.");
    REMARK(" OK.\n");
    n_tested__sim_readers = 0;
    REMARK("Testing with %d threads, scoped locking mode...", nThreads);
    for( i=0; i<100; ++i ) {
        Harness::SpinBarrier bar0(nThreads);
        CorrectRWLScopedBody myCorrectScopedBody(nThreads, bar0);
        active_writers = active_readers = 0;
        sim_readers = false;
        NativeParallelFor(nThreads, myCorrectScopedBody);
        if( sim_readers || nThreads==1 ) {
            if( ++n_tested__sim_readers>5 )
                break;
        }
    }
    ASSERT(i<100, "There were no simultaneous readers.");
    REMARK(" OK.\n");
}
Developer: dakaufma | Project: tbb | Lines: 45 | Source: test_reader_writer_lock.cpp
Example 8: test_parallel_invoke
void test_parallel_invoke()
{
    REMARK (__FUNCTION__);
    // Testing with pointers to functions
    for (int n = 2; n <=10; n++)
    {
        INIT_TEST;
        call_parallel_invoke(n, test_pointer0, test_pointer1, test_pointer2, test_pointer3, test_pointer4,
            test_pointer5, test_pointer6, test_pointer7, test_pointer8, test_pointer9, NULL);
        VALIDATE_INVOKE_RUN(n, "pointers to function");
    }
    // Testing parallel_invoke with functors
    for (int n = 2; n <=10; n++)
    {
        INIT_TEST;
        call_parallel_invoke(n, functor0, functor1, functor2, functor3, functor4,
            functor5, functor6, functor7, functor8, functor9, NULL);
        VALIDATE_INVOKE_RUN(n, "functors");
    }
#if __TBB_FUNCTION_BY_CONSTREF_IN_TEMPLATE_BROKEN
    // some old compilers can't cope with passing function name into parallel_invoke
#else
    // and some compile but generate broken code that does not call the function
    if (function_by_constref_in_template_codegen_broken())
        return;
    // Testing parallel_invoke with functions
    for (int n = 2; n <=10; n++)
    {
        INIT_TEST;
        call_parallel_invoke(n, test0, test1, test2, test3, test4, test5, test6, test7, test8, test9, NULL);
        VALIDATE_INVOKE_RUN(n, "functions");
    }
#endif
}
Developer: Havoc | Project: mangos-boost | Lines: 37 | Source: test_parallel_invoke.cpp
Example 9: TraverseTable
//! Test traversing the table with an iterator.
void TraverseTable( MyTable& table, size_t n, size_t expected_size ) {
    REMARK("testing traversal\n");
    size_t actual_size = table.size();
    ASSERT( actual_size==expected_size, NULL );
    size_t count = 0;
    bool* array = new bool[n];
    memset( array, 0, n*sizeof(bool) );
    const MyTable& const_table = table;
    MyTable::const_iterator ci = const_table.begin();
    for( MyTable::iterator i = table.begin(); i!=table.end(); ++i ) {
        // Check iterator
        int k = i->first.value_of();
        ASSERT( UseKey(k), NULL );
        ASSERT( (*i).first.value_of()==k, NULL );
        ASSERT( 0<=k && size_t(k)<n, "out of bounds key" );
        ASSERT( !array[k], "duplicate key" );
        array[k] = true;
        ++count;
        // Check lower/upper bounds
        std::pair<MyTable::iterator, MyTable::iterator> er = table.equal_range(i->first);
        std::pair<MyTable::const_iterator, MyTable::const_iterator> cer = const_table.equal_range(i->first);
        ASSERT(cer.first == er.first && cer.second == er.second, NULL);
        ASSERT(cer.first == i, NULL);
        ASSERT(std::distance(cer.first, cer.second) == 1, NULL);
        // Check const_iterator
        MyTable::const_iterator cic = ci++;
        ASSERT( cic->first.value_of()==k, NULL );
        ASSERT( (*cic).first.value_of()==k, NULL );
    }
    ASSERT( ci==const_table.end(), NULL );
    delete[] array;
    if( count!=expected_size ) {
        REPORT("Line %d: count=%ld but should be %ld\n",__LINE__,long(count),long(expected_size));
    }
}
Developer: RandomDeveloperM | Project: UE4_Hairworks | Lines: 38 | Source: test_concurrent_hash_map.cpp
Example 10: TestMultifunctionNode
void
TestMultifunctionNode() {
    typedef tbb::flow::multifunction_node<int, tbb::flow::tuple<int, int>, P> multinode_type;
    REMARK("Testing multifunction_node");
    test_reversal<P,multinode_type> my_test;
    REMARK(":");
    tbb::flow::graph g;
    multinode_type mf(g, tbb::flow::serial, mf_body<multinode_type>(serial_fn_state0));
    tbb::flow::queue_node<int> qin(g);
    tbb::flow::queue_node<int> qodd_out(g);
    tbb::flow::queue_node<int> qeven_out(g);
    tbb::flow::make_edge(qin,mf);
    tbb::flow::make_edge(tbb::flow::output_port<0>(mf), qeven_out);
    tbb::flow::make_edge(tbb::flow::output_port<1>(mf), qodd_out);
    g.wait_for_all();
    for( int ii = 0; ii < 2 ; ++ii) {
        serial_fn_state0 = 0;
        if(ii == 0) REMARK(" reset preds"); else REMARK(" 2nd");
        qin.try_put(0);
        // wait for node to be active
        BACKOFF_WAIT(serial_fn_state0 == 0, "timed out waiting for first put");
        qin.try_put(1);
        BACKOFF_WAIT((!my_test(mf)), "Timed out waiting");
        ASSERT(my_test(mf), "fail second put test");
        g.my_root_task->cancel_group_execution();
        // release node
        serial_fn_state0 = 2;
        g.wait_for_all();
        ASSERT(my_test(mf), "fail cancel group test");
        if( ii == 1) {
            REMARK(" rf_clear_edges");
            g.reset(tbb::flow::rf_clear_edges);
            ASSERT(tbb::flow::output_port<0>(mf).my_successors.empty(), "output_port<0> not reset (rf_clear_edges)");
            ASSERT(tbb::flow::output_port<1>(mf).my_successors.empty(), "output_port<1> not reset (rf_clear_edges)");
        }
        else
        {
            g.reset();
        }
        ASSERT(mf.my_predecessors.empty(), "edge didn't reset");
        ASSERT((ii == 0 && !qin.my_successors.empty()) || (ii == 1 && qin.my_successors.empty()), "edge didn't reset");
    }
    REMARK(" done\n");
}
Developer: adiog | Project: tbb | Lines: 44 | Source: test_flow_graph_whitebox.cpp
Example 11: RunPrioritySwitchBetweenTwoMasters
void RunPrioritySwitchBetweenTwoMasters ( int idx, uintptr_t opts ) {
    ASSERT( idx < NumTests, NULL );
    REMARK( "Config %d: idx=%i, opts=%u\r", ++g_CurConfig, idx, (unsigned)opts );
    NativeParallelFor ( 2, MasterBody<NodeType>(idx, opts) );
    Harness::Sleep(50);
}
Developer: MarkusSR1984 | Project: Malloctest | Lines: 6 | Source: test_task_priority.cpp
Example 12: TestBufferingNode
void TestBufferingNode(const char * name) {
    tbb::flow::graph g;
    B bnode(g);
    tbb::flow::function_node<int,int,tbb::flow::rejecting> fnode(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state0));
    REMARK("Testing %s:", name);
    for(int icnt = 0; icnt < 2; icnt++) {
        bool reverse_edge = (icnt & 0x2) != 0;
        serial_fn_state0 = 0; // reset to waiting state.
        REMARK(" make_edge");
        tbb::flow::make_edge(bnode, fnode);
        ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after make_edge");
        REMARK(" try_put");
        bnode.try_put(1); // will forward to the fnode
        BACKOFF_WAIT(serial_fn_state0 == 0, "Timed out waiting for first put");
        if(reverse_edge) {
            REMARK(" try_put2");
            bnode.try_put(2); // will reverse the edge
            // cannot do a wait_for_all here; the function_node is still executing
            BACKOFF_WAIT(!bnode.my_successors.empty(), "Timed out waiting after 2nd put");
            // at this point the only task running is the one for the function_node.
            ASSERT(bnode.my_successors.empty(), "successor not removed");
        }
        else {
            ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after forwarding message");
        }
        serial_fn_state0 = 0; // release the function_node.
        if(reverse_edge) {
            // have to do a second release because the function_node will get the 2nd item
            BACKOFF_WAIT( serial_fn_state0 == 0, "Timed out waiting after 2nd put");
            serial_fn_state0 = 0; // release the function_node.
        }
        g.wait_for_all();
        REMARK(" remove_edge");
        tbb::flow::remove_edge(bnode, fnode);
        ASSERT(bnode.my_successors.empty(), "buffering node has a successor after remove_edge");
    }
    tbb::flow::join_node<tbb::flow::tuple<int,int>,tbb::flow::reserving> jnode(g);
    tbb::flow::make_edge(bnode, tbb::flow::input_port<0>(jnode)); // will spawn a task
    g.wait_for_all();
    ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after attaching to join");
    REMARK(" reverse");
    bnode.try_put(1); // the edge should reverse
    g.wait_for_all();
    ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reserving");
    REMARK(" reset()");
    g.wait_for_all();
    g.reset(); // should be in forward direction again
    ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after reset()");
    REMARK(" remove_edge");
    g.reset(tbb::flow::rf_clear_edges);
    ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reset(rf_clear_edges)");
    tbb::flow::make_edge(bnode, tbb::flow::input_port<0>(jnode)); // add edge again
    // reverse edge by adding to buffer.
    bnode.try_put(1); // the edge should reverse
    g.wait_for_all();
    ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reserving");
    REMARK(" remove_edge(reversed)");
    g.reset(tbb::flow::rf_clear_edges);
    ASSERT(bnode.my_successors.empty(), "buffering node has no successor after reset()");
    ASSERT(tbb::flow::input_port<0>(jnode).my_predecessors.empty(), "predecessor not reset");
    REMARK(" done\n");
    g.wait_for_all();
}
Developer: adiog | Project: tbb | Lines: 63 | Source: test_flow_graph_whitebox.cpp
Example 13: test_reversal
test_reversal() { REMARK("<rejecting>"); }
Developer: adiog | Project: tbb | Lines: 1 | Source: test_flow_graph_whitebox.cpp
Example 14: TestLimiterNode
void
TestLimiterNode() {
    int out_int;
    tbb::flow::graph g;
    tbb::flow::limiter_node<int> ln(g,1);
    REMARK("Testing limiter_node: preds and succs");
    ASSERT(ln.decrement.my_predecessor_count == 0, "error in pred count");
    ASSERT(ln.decrement.my_initial_predecessor_count == 0, "error in initial pred count");
    ASSERT(ln.decrement.my_current_count == 0, "error in current count");
    ASSERT(ln.init_decrement_predecessors == 0, "error in decrement predecessors");
    ASSERT(ln.my_threshold == 1, "error in my_threshold");
    tbb::flow::queue_node<int> inq(g);
    tbb::flow::queue_node<int> outq(g);
    tbb::flow::broadcast_node<tbb::flow::continue_msg> bn(g);
    tbb::flow::make_edge(inq,ln);
    tbb::flow::make_edge(ln,outq);
    tbb::flow::make_edge(bn,ln.decrement);
    g.wait_for_all();
    ASSERT(!(ln.my_successors.empty()),"successors empty after make_edge");
    ASSERT(ln.my_predecessors.empty(), "input edge reversed");
    inq.try_put(1);
    g.wait_for_all();
    ASSERT(outq.try_get(out_int) && out_int == 1, "limiter_node didn't pass first value");
    ASSERT(ln.my_predecessors.empty(), "input edge reversed");
    inq.try_put(2);
    g.wait_for_all();
    ASSERT(!outq.try_get(out_int), "limiter_node incorrectly passed second input");
    ASSERT(!ln.my_predecessors.empty(), "input edge to limiter_node not reversed");
    bn.try_put(tbb::flow::continue_msg());
    g.wait_for_all();
    ASSERT(outq.try_get(out_int) && out_int == 2, "limiter_node didn't pass second value");
    g.wait_for_all();
    ASSERT(!ln.my_predecessors.empty(), "input edge was reversed(after try_get())");
    g.reset();
    ASSERT(ln.my_predecessors.empty(), "input edge not reset");
    inq.try_put(3);
    g.wait_for_all();
    ASSERT(outq.try_get(out_int) && out_int == 3, "limiter_node didn't pass third value");
    REMARK(" rf_clear_edges");
    // currently the limiter_node will not pass another message
    g.reset(tbb::flow::rf_clear_edges);
    ASSERT(ln.decrement.my_predecessor_count == 0, "error in pred count");
    ASSERT(ln.decrement.my_initial_predecessor_count == 0, "error in initial pred count");
    ASSERT(ln.decrement.my_current_count == 0, "error in current count");
    ASSERT(ln.init_decrement_predecessors == 0, "error in decrement predecessors");
    ASSERT(ln.my_threshold == 1, "error in my_threshold");
    ASSERT(ln.my_predecessors.empty(), "preds not reset(rf_clear_edges)");
    ASSERT(ln.my_successors.empty(), "preds not reset(rf_clear_edges)");
    ASSERT(inq.my_successors.empty(), "Arc not removed on reset(rf_clear_edges)");
    ASSERT(inq.my_successors.empty(), "Arc not removed on reset(rf_clear_edges)");
    ASSERT(bn.my_successors.empty(), "control edge not removed on reset(rf_clear_edges)");
    tbb::flow::make_edge(inq,ln);
    tbb::flow::make_edge(ln,outq);
    inq.try_put(4);
    inq.try_put(5);
    g.wait_for_all();
    ASSERT(outq.try_get(out_int),"missing output after reset(rf_clear_edges)");
    ASSERT(out_int == 4, "input incorrect (4)");
    bn.try_put(tbb::flow::continue_msg());
    g.wait_for_all();
    ASSERT(!outq.try_get(out_int),"second output incorrectly passed (rf_clear_edges)");
    REMARK(" done\n");
}
Developer: adiog | Project: tbb | Lines: 66 | Source: test_flow_graph_whitebox.cpp
Example 15: TestContinueNode
// continue_node has only predecessor count
// they do not have predecessors, only the counts
// successor edges cannot be reversed
void TestContinueNode() {
    tbb::flow::graph g;
    tbb::flow::function_node<int> fnode0(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state0));
    tbb::flow::continue_node<int> cnode(g, 1, serial_continue_body<int>(serial_continue_state0));
    tbb::flow::function_node<int> fnode1(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state1));
    tbb::flow::make_edge(fnode0, cnode);
    tbb::flow::make_edge(cnode, fnode1);
    REMARK("Testing continue_node:");
    for( int icnt = 0; icnt < 2; ++icnt ) {
        REMARK( " initial%d", icnt);
        ASSERT(cnode.my_predecessor_count == 2, "predecessor addition didn't increment count");
        ASSERT(!cnode.successors().empty(), "successors empty though we added one");
        ASSERT(cnode.my_current_count == 0, "state of continue_receiver incorrect");
        serial_continue_state0 = 0;
        serial_fn_state0 = 0;
        serial_fn_state1 = 0;
        fnode0.try_put(1); // start the first function node.
        BACKOFF_WAIT(!serial_fn_state0, "Timed out waiting for function_node to start");
        // Now the body of function_node 0 is executing.
        serial_fn_state0 = 0; // release the node
        // wait for node to count the message (or for the node body to execute, which would be wrong)
        BACKOFF_WAIT(serial_continue_state0 == 0 && cnode.my_current_count == 0, "Timed out waiting for continue_state0 to change");
        ASSERT(serial_continue_state0 == 0, "Improperly released continue_node");
        ASSERT(cnode.my_current_count == 1, "state of continue_receiver incorrect");
        if(icnt == 0) { // first time through, let the continue_node fire
            REMARK(" firing");
            fnode0.try_put(1); // second message
            BACKOFF_WAIT(serial_fn_state0 == 0, "timeout waiting for continue_body to execute");
            // Now the body of function_node 0 is executing.
            serial_fn_state0 = 0; // release the node
            BACKOFF_WAIT(!serial_continue_state0,"continue_node didn't start"); // now we wait for the continue_node.
            ASSERT(cnode.my_current_count == 0, " my_current_count not reset before body of continue_node started");
            serial_continue_state0 = 0; // release the continue_node
            BACKOFF_WAIT(!serial_fn_state1,"successor function_node didn't start"); // wait for the successor function_node to enter body
            serial_fn_state1 = 0; // release successor function_node.
            g.wait_for_all();
            // try a try_get()
            {
                int i;
                ASSERT(!cnode.try_get(i), "try_get not rejected");
            }
            REMARK(" reset");
            ASSERT(!cnode.my_successors.empty(), "Empty successors in built graph (before reset)");
            ASSERT(cnode.my_predecessor_count == 2, "predecessor_count reset (before reset)");
            g.reset(); // should still be the same
            ASSERT(!cnode.my_successors.empty(), "Empty successors in built graph (after reset)" );
            ASSERT(cnode.my_predecessor_count == 2, "predecessor_count reset (after reset)");
        }
        else { // we're going to see if the rf_clear_edges resets things.
            g.wait_for_all();
            REMARK(" reset(rf_clear_edges)");
            ASSERT(!cnode.my_successors.empty(), "Empty successors in built graph (before reset)");
            ASSERT(cnode.my_predecessor_count == 2, "predecessor_count reset (before reset)");
            g.reset(tbb::flow::rf_clear_edges); // should be in forward direction again
            ASSERT(cnode.my_current_count == 0, "state of continue_receiver incorrect after reset(rf_clear_edges)");
            ASSERT(cnode.my_successors.empty(), "buffering node has a successor after reset(rf_clear_edges)");
            ASSERT(cnode.my_predecessor_count == cnode.my_initial_predecessor_count, "predecessor count not reset");
        }
    }
    REMARK(" done\n");
}
Developer: adiog | Project: tbb | Lines: 70 | Source: test_flow_graph_whitebox.cpp
Example 16: FireUpJobs
void FireUpJobs( MyServer& server, MyClient& client, int max_thread, int n_extra, Checker* checker ) {
    ASSERT( max_thread>=0, NULL );
#if _WIN32||_WIN64
    ::rml::server::execution_resource_t me;
    server.register_master( me );
#endif /* _WIN32||_WIN64 */
    client.server = &server;
    MyTeam team(server,size_t(max_thread));
    MyServer::size_type n_thread = 0;
    for( int iteration=0; iteration<4; ++iteration ) {
        for( size_t i=0; i<team.max_thread; ++i )
            team.info[i].ran = false;
        switch( iteration ) {
            default:
                n_thread = int(max_thread);
                break;
            case 1:
                // No change in number of threads
                break;
            case 2:
                // Decrease number of threads.
                n_thread = int(max_thread)/2;
                break;
            // Case 3 is same code as the default, but has effect of increasing the number of threads.
        }
        team.barrier = 0;
        REMARK("client %d: server.run with n_thread=%d\n", client.client_id(), int(n_thread) );
        server.independent_thread_number_changed( n_extra );
        if( checker ) {
            // Give RML time to respond to change in number of threads.
            Harness::Sleep(1);
        }
        int n_delivered = server.try_increase_load( n_thread, StrictTeam );
        ASSERT( !StrictTeam || n_delivered==int(n_thread), "server failed to satisfy strict request" );
        if( n_delivered<0 ) {
            REMARK( "client %d: oversubscription occurred (by %d)\n", client.client_id(), -n_delivered );
            server.independent_thread_number_changed( -n_extra );
            n_delivered = 0;
        } else {
            team.n_thread = n_delivered;
            ::rml::job* job_array[JobArraySize];
            job_array[n_delivered] = (::rml::job*)intptr_t(-1);
            server.get_threads( n_delivered, &team, job_array );
            __TBB_ASSERT( job_array[n_delivered]== (::rml::job*)intptr_t(-1), NULL );
            for( int i=0; i<n_delivered; ++i ) {
                MyJob* j = static_cast<MyJob*>(job_array[i]);
                int s = j->state;
                ASSERT( s==MyJob::idle||s==MyJob::busy, NULL );
            }
            server.independent_thread_number_changed( -n_extra );
            REMARK("client %d: team size is %d\n", client.client_id(), n_delivered);
            if( checker ) {
                checker->check_number_of_threads_delivered( n_delivered, n_thread, n_extra );
            }
            // Protocol requires that master wait until workers have called "done_processing"
            while( team.barrier!=n_delivered ) {
                ASSERT( team.barrier>=0, NULL );
                ASSERT( team.barrier<=n_delivered, NULL );
                __TBB_Yield();
            }
            REMARK("client %d: team completed\n", client.client_id() );
            for( int i=0; i<n_delivered; ++i ) {
                ASSERT( team.info[i].ran, "thread on team allegedly delivered, but did not run?" );
            }
        }
        for( MyServer::size_type i=n_delivered; i<MyServer::size_type(max_thread); ++i ) {
            ASSERT( !team.info[i].ran, "thread on team ran with illegal index" );
        }
    }
#if _WIN32||_WIN64
    server.unregister_master( me );
#endif
}
Developer: LucaMarradi | Project: dealii | Lines: 73 | Source: test_rml_omp.cpp
Example 17: TestNullRWMutex
void TestNullRWMutex( const char * name ) {
    REMARK("%s ",name);
    const int n = 100;
    M m;
    tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10),NullUpgradeDowngrade<M>(m, name));
}
Developer: AlessioVallero | Project: RaspberryPI | Lines: 6 | Source: test_mutex.cpp
Example 18: print_parallel_remark
static void print_parallel_remark() {
    REMARK("Parallel test of split_node< %s", name_of<IT>::name());
}
Developer: AlessioVallero | Project: RaspberryPI | Lines: 3 | Source: test_split_node.cpp
Example 19: print_serial_remark
static void print_serial_remark() {
    REMARK("Serial test of split_node< %s", name_of<IT>::name());
}
Developer: AlessioVallero | Project: RaspberryPI | Lines: 3 | Source: test_split_node.cpp
Example 20: MinimalAllocator
MinimalAllocator() {
    REMARK("%p::ctor\n", this);
}
Developer: HeliumProject | Project: ThreadBuildingBlocks | Lines: 3 | Source: test_ScalableAllocator.cpp
Note: The REMARK examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.