
C++ pthread_cond_broadcast Function Code Examples


This article collects and summarizes typical usage examples of the C++ pthread_cond_broadcast function. If you have been wondering how exactly pthread_cond_broadcast is used in C++, or what real-world calls to it look like, the curated code examples below should help.



A total of 20 pthread_cond_broadcast code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
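Before the project-specific examples, here is a minimal, self-contained sketch of the canonical pattern that every example below uses in some form (the mutex, condition variable, and ready flag names are illustrative only, not taken from any of the projects): the signaling thread changes the shared predicate while holding the mutex and then calls pthread_cond_broadcast, and each waiter re-checks the predicate in a loop around pthread_cond_wait so that spurious wakeups are handled correctly.

#include <pthread.h>
#include <stdio.h>

/* Illustrative shared state for this sketch only. */
static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
static int             ready = 0;

static void *waiter(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    /* Always wait in a loop: the predicate must be re-checked after
       every wakeup, spurious or not. */
    while (!ready)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
    printf("waiter woke up\n");
    return NULL;
}

int main(void)
{
    pthread_t t[4];
    for (int i = 0; i < 4; i++)
        pthread_create(&t[i], NULL, waiter, NULL);

    /* Change the predicate under the mutex, then wake every waiter.
       Broadcast (rather than signal) because all waiters care about
       this state change. */
    pthread_mutex_lock(&lock);
    ready = 1;
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);

    for (int i = 0; i < 4; i++)
        pthread_join(t[i], NULL);
    return 0;
}

Build with -pthread (for example: cc -pthread sketch.c). Broadcasting while the mutex is held, as in Example 1 below, guarantees that a thread about to block on the condition cannot miss the wakeup.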

Example 1: malloc

static void *read_thread(void *param)
{
	hid_device *dev = param;
	unsigned char *buf;
	const size_t length = dev->input_ep_max_packet_size;

	/* Set up the transfer object. */
	buf = malloc(length);
	dev->transfer = libusb_alloc_transfer(0);
	libusb_fill_interrupt_transfer(dev->transfer,
		dev->device_handle,
		dev->input_endpoint,
		buf,
		length,
		read_callback,
		dev,
		5000/*timeout*/);

	/* Make the first submission. Further submissions are made
	   from inside read_callback() */
	libusb_submit_transfer(dev->transfer);

	/* Notify the main thread that the read thread is up and running. */
	pthread_barrier_wait(&dev->barrier);

	/* Handle all the events. */
	while (!dev->shutdown_thread) {
		int res;
		res = libusb_handle_events(usb_context);
		if (res < 0) {
			/* There was an error. */
			LOG("read_thread(): libusb reports error # %d\n", res);

			/* Break out of this loop only on fatal error.*/
			if (res != LIBUSB_ERROR_BUSY &&
			    res != LIBUSB_ERROR_TIMEOUT &&
			    res != LIBUSB_ERROR_OVERFLOW &&
			    res != LIBUSB_ERROR_INTERRUPTED) {
				break;
			}
		}
	}

	/* Cancel any transfer that may be pending. This call will fail
	   if no transfers are pending, but that's OK. */
	libusb_cancel_transfer(dev->transfer);

	while (!dev->cancelled)
		libusb_handle_events_completed(usb_context, &dev->cancelled);

	/* Now that the read thread is stopping, wake any threads which are
	   waiting on data (in hid_read_timeout()). Do this under a mutex to
	   make sure that a thread which is about to go to sleep waiting on
	   the condition actually will go to sleep before the condition is
	   signaled. */
	pthread_mutex_lock(&dev->mutex);
	pthread_cond_broadcast(&dev->condition);
	pthread_mutex_unlock(&dev->mutex);

	/* The dev->transfer->buffer and dev->transfer objects are cleaned up
	   in hid_close(). They are not cleaned up here because this thread
	   could end either due to a disconnect or due to a user
	   call to hid_close(). In both cases the objects can be safely
	   cleaned up after the call to pthread_join() (in hid_close()), but
	   since hid_close() calls libusb_cancel_transfer() on these objects,
	   they cannot be cleaned up here. */

	return NULL;
}
Developer: daurnimator, Project: arcan, Lines: 69, Source: hid.c


Example 2: ProcessRunnable

static
VOID
ProcessRunnable(
    PKQUEUE_THREAD pThread,
    PKQUEUE_COMMANDS pCommands,
    PRING pRunnable,
    PRING pTimed,
    PRING pWaiting,
    LONG64 llNow
    )
{
    ULONG ulTicks = MAX_TICKS;
    PLW_TASK pTask = NULL;
    PLW_TASK_GROUP pGroup = NULL;
    PRING pRing = NULL;
    PRING pNext = NULL;
    
    /* We are guaranteed to run each task at least once.  If tasks remain
       on the runnable list by yielding, we will continue to run them
       all in a round robin until our ticks are depleted. */
    while (ulTicks && !RingIsEmpty(pRunnable))
    {
        for (pRing = pRunnable->pNext; pRing != pRunnable; pRing = pNext)
        {
            pNext = pRing->pNext;
            
            pTask = LW_STRUCT_FROM_FIELD(pRing, KQUEUE_TASK, QueueRing);
            
            RunTask(pTask, llNow);

            if (ulTicks)
            {
                ulTicks--;
            }
            
            if (pTask->EventWait != LW_TASK_EVENT_COMPLETE)
            {
                if (pTask->EventWait & LW_TASK_EVENT_YIELD)
                {
                    /* Task is yielding.  Set the YIELD flag and
                       leave it on the runnable list for the next iteration. */
                    pTask->EventArgs |= LW_TASK_EVENT_YIELD;
                }   
                else 
                {
                    /* Task is still waiting on events, update kqueue */
                    UpdateEventWait(pCommands, pTask);

                    if (pTask->EventWait & LW_TASK_EVENT_TIME)
                    {
                        /* If the task is waiting for a timeout, 
                           insert it into the timed queue */
                        RingRemove(&pTask->QueueRing);
                        InsertTimedQueue(pTimed, pTask);
                    }
                    else
                    {
                        /* Otherwise, put it in the generic waiting queue */
                        RingRemove(&pTask->QueueRing);
                        RingEnqueue(pWaiting, &pTask->QueueRing);
                    }
                }
            }
            else
            {
                /* Task is complete */
                RingRemove(&pTask->QueueRing);

                /* Remove any associated events from the kqueue */
                if (pTask->Fd >= 0)
                {
                    (void) LwRtlSetTaskFd(pTask, pTask->Fd, 0);
                }

                /* Unsubscribe task from any UNIX signals */
                if (pTask->pUnixSignal)
                {
                    RegisterTaskUnixSignal(pTask, 0, FALSE);
                }

                LOCK_POOL(pThread->pPool);
                pThread->ulLoad--;
                UNLOCK_POOL(pThread->pPool);
                
                pGroup = pTask->pGroup;

                /* If task was in a task group, remove it and notify anyone waiting
                   on the group */
                if (pGroup)
                {
                    LOCK_GROUP(pGroup);
                    pTask->pGroup = NULL;
                    RingRemove(&pTask->GroupRing);
                    pthread_cond_broadcast(&pGroup->Event);
                    UNLOCK_GROUP(pGroup);
                }
                
                LOCK_THREAD(pThread);
                if (--pTask->ulRefCount)
                {
//......... part of the code omitted here .........
Developer: borland667, Project: pbis, Lines: 101, Source: threadpool-kqueue.c


Example 3: void

//int ThreadPool::AddWork( ThreadPoolWorker *worker, void *arg )
//int ThreadPool::AddWork( ThreadPoolWorker *worker, Data *data )
int ThreadPool::AddWork( void (*workerThreadFunction)(void *), void *arg )
{
    int             rtn;
    ThreadPoolWork *workp;

    if( (rtn = pthread_mutex_lock(&queueLock)) != 0 )
    {
        fprintf( stderr, "pthread_mutex_lock %s", strerror(rtn) );
        return( 1 );
    }

    // Queue is full: either return immediately or wait for space, depending on doNotBlockWhenFull.
    if( currentQueueSize == maxQueueSize )
    {
        if( doNotBlockWhenFull )
        {
            if( (rtn = pthread_mutex_unlock(&queueLock)) != 0 )
            {
                fprintf( stderr, "pthread_mutex_unlock %s", strerror(rtn) );
                return( 2 );
            }

            // User did not want to wait to add the work.
            return( -1 );
        }
        else
        {
            while( (currentQueueSize == maxQueueSize) && (! (shutdown || queueClosed)) )
            {
                if( (rtn = pthread_cond_wait(&queueNotFull, &queueLock)) != 0 )
                {
                    fprintf( stderr, "pthread_cond_wait %s", strerror(rtn) );
                    pthread_mutex_unlock( &queueLock );   // don't return with the lock held
                    return( 3 );
                }
            }
        }
    }

    // the pool is in the process of being destroyed.
    if( shutdown || queueClosed )
    {
        if( (rtn = pthread_mutex_unlock(&queueLock)) != 0 )
        {
            fprintf( stderr, "pthread_mutex_unlock %s", strerror(rtn) );
            return( 4 );
        }

        return( -1 );
    }

    // Allocate work structure.
    if( (workp = (ThreadPoolWork *)malloc(sizeof(ThreadPoolWork))) == NULL )
    {
        fprintf( stderr, "ThreadPoolWork malloc failed" );
        pthread_mutex_unlock( &queueLock );   // don't return with the lock held
        return( 5 );
    }
    workp->routine = workerThreadFunction;
    workp->arg     = arg;
    workp->next    = NULL;

    if( currentQueueSize == 0 )
    {
        queueTail = queueHead = workp;
    }
    else
    {
        queueTail->next = workp;
        queueTail       = workp;
    }

    currentQueueSize++;

    if( (rtn = pthread_cond_broadcast(&queueNotEmpty)) != 0 )
    {
        fprintf( stderr, "pthread_cond_broadcast %s", strerror(rtn) );
        pthread_mutex_unlock( &queueLock );   // don't return with the lock held
        return( 6 );
    }
    if( (rtn = pthread_mutex_unlock(&queueLock)) != 0 )
    {
        fprintf( stderr, "pthread_mutex_unlock %s", strerror(rtn) );
        return( 7 );
    }

    return( 0 );
}
Developer: EricAlex, Project: ThirdParty-dev, Lines: 87, Source: ThreadPool.C


Example 4: startServer

void startServer(char * addr, recordid hash) {
  struct addrinfo hints;
  struct addrinfo *result, *rp;
  int sfd, s;
  struct sockaddr peer_addr;
  socklen_t peer_addr_len = sizeof(peer_addr); /* accept() requires this to be initialized */
  //  ssize_t nread;
  //char buf[BUF_SIZE];

  memset(&hints, 0, sizeof(struct addrinfo));
  hints.ai_family = AF_INET; //AF_UNSPEC;    /* IPv4 only; use AF_UNSPEC to allow IPv4 or IPv6 */
  hints.ai_socktype = SOCK_STREAM; /* Stream (TCP) socket */
  hints.ai_flags = AI_PASSIVE;    /* For wildcard IP address */
  hints.ai_protocol = 0;          /* Any protocol */

  printf("Listening on socket\n");

  s = getaddrinfo("127.0.0.1", addr, &hints, &result);
  if (s != 0) {
    fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(s));
    exit(EXIT_FAILURE);
  }

  /* getaddrinfo() returns a list of address structures.
     Try each address until we successfully bind().
     If socket(2) (or bind(2)) fails, we (close the socket
     and) try the next address. */

  for (rp = result; rp != NULL; rp = rp->ai_next) {
    sfd = socket(rp->ai_family, rp->ai_socktype,
		 rp->ai_protocol);
    if (sfd == -1)
      continue;

    if (bind(sfd, rp->ai_addr, rp->ai_addrlen) == 0)
      break;                  /* Success */

    close(sfd);
  }

  if (rp == NULL) {               /* No address succeeded */
    perror("Could not bind\n");
    exit(EXIT_FAILURE);
  }

  freeaddrinfo(result);           /* No longer needed */

  int err = listen(sfd, MAX_CONN_QUEUE);
  if(err == -1) {
    perror("Couldn't listen()");
    return;
  }

  printf("Spawning servers.\n"); fflush(NULL);

  for(int i = 0; i < THREAD_POOL; i++) {
    thread_arg * arg = malloc(sizeof(thread_arg));
    arg->id = i;
    arg->hash = hash;
    interpreterStates[i] = INTERPRETER_IDLE;
    interpreterConnections[i] = 0;
    pthread_create(&interpreters[i], 0, interpreterThread, arg);
  }

  printf("Ready for connections.\n"); fflush(NULL);

  /* Accept connections and hand each one off to an interpreter thread */

  for (;;) {
    int fd = accept(sfd, &peer_addr, &peer_addr_len);
    if(fd == -1) {
      perror("Error accepting connection");
    } else {
      FILE * sock = fdopen(fd, "w+");

      pthread_mutex_lock(&interpreter_mut);

      //      int ret = openInterpreter(sock, sock, hash);
      //      fclose(sock);
      //      if(ret) {
      while(nextSocket) {
	pthread_cond_wait(&nextSocket_cond, &interpreter_mut);
      }

      nextSocket = sock;
      pthread_cond_signal(&interpreter_cond);

      pthread_mutex_unlock(&interpreter_mut);
      if(shuttingdown) {
	break;
      }

    }
  }

  pthread_cond_broadcast(&interpreter_cond);
  for(int i = 0; i < THREAD_POOL; i++) {
    pthread_join(interpreters[i],0);
  }
  close(sfd);
//......... part of the code omitted here .........
Developer: Zhoutall, Project: stasis, Lines: 101, Source: toplevel.c


Example 5: MU_TEST

MU_TEST(stress, parallel)
{
    Data data;
    pthread_t threads[NUM_THREADS];
    int i;
    LWMsgContext* context = NULL;
    LWMsgProtocol* protocol = NULL;
    LWMsgPeer* client = NULL;
    LWMsgPeer* server = NULL;
    CounterRequest request;
    CounterReply* reply;
    LWMsgCall* call;
    LWMsgParams in = LWMSG_PARAMS_INITIALIZER;
    LWMsgParams out = LWMSG_PARAMS_INITIALIZER;
    LWMsgTime timeout = {1, 0};

    MU_TRY(lwmsg_context_new(NULL, &context));
    lwmsg_context_set_log_function(context, lwmsg_test_log_function, NULL);

    MU_TRY(lwmsg_protocol_new(context, &protocol));
    MU_TRY(lwmsg_protocol_add_protocol_spec(protocol, counterprotocol_spec));

    MU_TRY(lwmsg_peer_new(context, protocol, &server));
    MU_TRY(lwmsg_peer_add_dispatch_spec(server, counter_dispatch));
    MU_TRY(lwmsg_peer_add_listen_endpoint(server, LWMSG_CONNECTION_MODE_LOCAL, TEST_ENDPOINT, 0600));
    MU_TRY(lwmsg_peer_set_max_listen_clients(server, MAX_CLIENTS));
    MU_TRY(lwmsg_peer_set_timeout(server, LWMSG_TIMEOUT_IDLE, &timeout));
    MU_TRY(lwmsg_peer_start_listen(server));

    MU_TRY(lwmsg_peer_new(context, protocol, &client));
    MU_TRY(lwmsg_peer_add_connect_endpoint(client, LWMSG_CONNECTION_MODE_LOCAL, TEST_ENDPOINT));

    request.counter = 0;

    MU_TRY(lwmsg_peer_acquire_call(client, &call));
    in.tag = COUNTER_OPEN;
    in.data = &request;

    MU_TRY(lwmsg_call_dispatch(call, &in, &out, NULL, NULL));
    
    MU_ASSERT_EQUAL(MU_TYPE_INTEGER, out.tag, COUNTER_OPEN_SUCCESS);
    lwmsg_call_release(call);

    data.client = client;
    data.handle = out.data;
    data.iters = NUM_ITERS;
    data.go = 0;
    
    pthread_mutex_init(&data.lock, NULL);
    pthread_cond_init(&data.event, NULL);

    pthread_mutex_lock(&data.lock);
    for (i = 0; i < NUM_THREADS; i++)
    {
        pthread_create(&threads[i], NULL, add_thread, &data);
    }
    data.go = 1;
    pthread_cond_broadcast(&data.event);
    pthread_mutex_unlock(&data.lock);

    for (i = 0; i < NUM_THREADS; i++)
    {
        pthread_join(threads[i], NULL);
    }

    MU_TRY(lwmsg_peer_acquire_call(client, &call));
    in.tag = COUNTER_READ;
    in.data = data.handle;

    MU_TRY(lwmsg_call_dispatch(call, &in, &out, NULL, NULL));
    
    MU_ASSERT_EQUAL(MU_TYPE_INTEGER, out.tag, COUNTER_READ_SUCCESS);
    reply = out.data;

    MU_ASSERT_EQUAL(MU_TYPE_INTEGER, reply->counter, NUM_THREADS * NUM_ITERS);

    lwmsg_call_destroy_params(call, &out);
    lwmsg_call_release(call);
    
    MU_TRY(lwmsg_peer_acquire_call(client, &call));
    in.tag = COUNTER_CLOSE;
    in.data = data.handle;

    MU_TRY(lwmsg_call_dispatch(call, &in, &out, NULL, NULL));
    
    MU_ASSERT_EQUAL(MU_TYPE_INTEGER, out.tag, COUNTER_CLOSE_SUCCESS);

    lwmsg_call_destroy_params(call, &out);
    lwmsg_call_release(call);

    MU_TRY(lwmsg_peer_disconnect(client));
    lwmsg_peer_delete(client);

    MU_TRY(lwmsg_peer_stop_listen(server));
    lwmsg_peer_delete(server);

    pthread_mutex_destroy(&data.lock);
    pthread_cond_destroy(&data.event);
}
Developer: FarazShaikh, Project: LikewiseSMB2, Lines: 99, Source: test-client-server.c


Example 6: get_keys


//......... part of the code omitted here .........
      display_content (main_win, logger, dash, &scrolling);
      break;
    case 107:  /* k - UP expanded module */
      scroll_ptr = &scrolling.module[scrolling.current].scroll;
      offset_ptr = &scrolling.module[scrolling.current].offset;

      if (!scrolling.expanded)
        break;
      if (*scroll_ptr <= 0)
        break;
      --(*scroll_ptr);
      if (*scroll_ptr < *offset_ptr)
        --(*offset_ptr);
      display_content (main_win, logger, dash, &scrolling);
      break;
    case 'n':
      pthread_mutex_lock (&gdns_thread.mutex);
      search = perform_next_find (holder, &scrolling);
      pthread_mutex_unlock (&gdns_thread.mutex);
      if (search == 0) {
        free_dashboard (dash);
        allocate_data ();
        render_screens ();
      }
      break;
    case '/':
      if (render_find_dialog (main_win, &scrolling))
        break;
      pthread_mutex_lock (&gdns_thread.mutex);
      search = perform_next_find (holder, &scrolling);
      pthread_mutex_unlock (&gdns_thread.mutex);
      if (search == 0) {
        free_dashboard (dash);
        allocate_data ();
        render_screens ();
      }
      break;
    case 99:   /* c */
      if (conf.no_color)
        break;
      load_schemes_win (main_win);
      free_dashboard (dash);
      allocate_data ();
      render_screens ();
      break;
    case 115:  /* s */
      load_sort_win (main_win, scrolling.current,
                     &module_sort[scrolling.current]);
      pthread_mutex_lock (&gdns_thread.mutex);
      free_holder (&holder);
      pthread_cond_broadcast (&gdns_thread.not_empty);
      pthread_mutex_unlock (&gdns_thread.mutex);
      free_dashboard (dash);
      allocate_holder ();
      allocate_data ();
      render_screens ();
      break;
    case 269:
    case KEY_RESIZE:
      endwin ();
      refresh ();
      werase (header_win);
      werase (main_win);
      werase (stdscr);
      term_size (main_win);
      refresh ();
      render_screens ();
      break;
    default:
      if (logger->piping)
        break;
      size2 = file_size (conf.ifile);

      /* file has changed */
      if (size2 != size1) {
        if (!(fp = fopen (conf.ifile, "r")))
          FATAL ("Unable to read log file %s.", strerror (errno));
        if (!fseeko (fp, size1, SEEK_SET))
          while (fgets (buf, LINE_BUFFER, fp) != NULL)
            parse_log (&logger, buf, -1);
        fclose (fp);

        size1 = size2;
        pthread_mutex_lock (&gdns_thread.mutex);
        free_holder (&holder);
        pthread_cond_broadcast (&gdns_thread.not_empty);
        pthread_mutex_unlock (&gdns_thread.mutex);

        free_dashboard (dash);
        allocate_holder ();
        allocate_data ();

        term_size (main_win);
        render_screens ();
        usleep (200000);        /* 0.2 seconds */
      }
      break;
    }
  }
}
Developer: liuzhengyi, Project: goaccess, Lines: 101, Source: goaccess.c


Example 7: jpeg_highlighter_algorithm


//......... part of the code omitted here .........

		gimp_run_procedure("file-jpeg-save",&num_return_vals, GIMP_PDB_INT32, mode, GIMP_PDB_IMAGE, job->image_id , GIMP_PDB_DRAWABLE, job->drawable->drawable_id, GIMP_PDB_STRING, temp_file_name, GIMP_PDB_STRING, "temp", GIMP_PDB_FLOAT, jpeg_compress, GIMP_PDB_FLOAT, 0.0, GIMP_PDB_INT32, 0, GIMP_PDB_INT32, 0, GIMP_PDB_STRING,"created with Koi", GIMP_PDB_INT32, 0, GIMP_PDB_INT32, 1, GIMP_PDB_INT32, 0, GIMP_PDB_INT32, 1, GIMP_PDB_END);
//		for(ii = 0; ii < SLEEP_TIME; ii++)
//		{
//			job->progress = ((float)ii/SLEEP_TIME) * 4;
			sleep(1);
//		}



		printf("saved jpeg\n");
//		sleep(1);
		// reload our saved image and suck a layer off of it to subtract against or original image
		temp_layer = gimp_file_load_layer(mode, job->image_id, temp_file_name);

		printf("loaded new layer %d in image %d\n", temp_layer, job->image_id);

		//gimp_layer_add_alpha(temp_layer);

		gimp_layer_set_mode(temp_layer, 8);

		printf("set layer mode %d\n", temp_layer);

		/* Add the new layer to this image as the top layer */
		if (gimp_image_add_layer(job->image_id, temp_layer, -1) != TRUE)
		{
			printf("failed to create layer\n");
			return;
		}


		printf("set layer as top\n");

		layer = gimp_image_get_active_layer(job->image_id);
		if (layer == -1)
		{
			printf("failed to get active layer\n");
			return;
		}

		gimp_image_merge_down(job->image_id, layer, 2);

		printf("merged layers\n");
		job->drawable->drawable_id = gimp_image_get_active_drawable(job->image_id);
//		printf("get active drawable\n");
//		gimp_brightness_contrast(job->drawable->drawable_id, 126, 125);
//		printf("adjust contrast\n");
//
//		printf("Jpeg threshold: %d\n",jpeg_threshold);
//
//		//I should have this subtract against an edge detection layer and then threshold it
//
//		gimp_threshold(job->drawable->drawable_id, jpeg_threshold,255 );
//		printf("threshold\n");

//			if(! gimp_drawable_has_alpha (job->drawable->drawable_id))
//			{
//				 /* some filtermacros do not work with layer that do not have an alpha channel
//				 * and cause gimp to fail on attempt to call gimp_pixel_rgn_init
//				  * with both dirty and shadow flag set to TRUE
//				  * in this situation GIMP displays the error message
//				  *    "expected tile ack and received: 5"
//				  *    and causes the called plug-in to exit immediate without success
//				  *    and causes the called plug-in to exit immediately without success
//				  * Therefore always add an alpha channel before calling a filtermacro.
//				  gimp_layer_add_alpha(layer);
//				  printf("adding alpha channel\n");
//		   }

		remove(temp_file_name);

		sleep(1);

//		job->progress = 4;

		pthread_cond_broadcast(&jpeg_cond);
		pthread_mutex_lock(&jpeg_mutex);
		printf("got lock\n");
		jpeg_wait = 0;
		pthread_mutex_unlock(&jpeg_mutex);

		printf("drawable ID after jpeg %d\n",gimp_image_get_active_drawable(job->image_id));

	}
	else
	{
		printf("thread %d waiting\n", job->thread);
		pthread_mutex_lock(&jpeg_mutex);
		while (jpeg_wait)
		{
			pthread_cond_wait(&jpeg_cond, &jpeg_mutex);
		}
		pthread_mutex_unlock(&jpeg_mutex);
	}


	job->progress = 1;

	return NULL;
}
Developer: possiblyphilip, Project: Koi, Lines: 101, Source: jpeg_compress.c


Example 8: pthread_cond_broadcast

 void ConditionSys::broadcast() { pthread_cond_broadcast((pthread_cond_t*)cond); }
Developer: afabri, Project: embree, Lines: 1, Source: condition.cpp


Example 9: event_dispatch_epoll_worker

static void *
event_dispatch_epoll_worker (void *data)
{
        struct epoll_event  event;
        int                 ret = -1;
        struct event_thread_data *ev_data = data;
	struct event_pool  *event_pool;
        int                 myindex = -1;
        int                 timetodie = 0;

        GF_VALIDATE_OR_GOTO ("event", ev_data, out);

        event_pool = ev_data->event_pool;
        myindex = ev_data->event_index;

        GF_VALIDATE_OR_GOTO ("event", event_pool, out);

        gf_msg ("epoll", GF_LOG_INFO, 0, LG_MSG_STARTED_EPOLL_THREAD, "Started"
                " thread with index %d", myindex);

        pthread_mutex_lock (&event_pool->mutex);
        {
                event_pool->activethreadcount++;
        }
        pthread_mutex_unlock (&event_pool->mutex);

	for (;;) {
                if (event_pool->eventthreadcount < myindex) {
                        /* ...time to die, thread count was decreased below
                         * this thread's index */
                        /* Start with extra safety at this point, reducing
                         * lock contention in the normal case when threads are
                         * not constantly reconfigured */
                        pthread_mutex_lock (&event_pool->mutex);
                        {
                                if (event_pool->eventthreadcount <
                                    myindex) {
                                        /* if found true in critical section,
                                         * die */
                                        event_pool->pollers[myindex - 1] = 0;
                                        event_pool->activethreadcount--;
                                        timetodie = 1;
                                        pthread_cond_broadcast (&event_pool->cond);
                                }
                        }
                        pthread_mutex_unlock (&event_pool->mutex);
                        if (timetodie) {
                                gf_msg ("epoll", GF_LOG_INFO, 0,
                                        LG_MSG_EXITED_EPOLL_THREAD, "Exited "
                                        "thread with index %d", myindex);
                                goto out;
                        }
                }

                ret = epoll_wait (event_pool->fd, &event, 1, -1);

                if (ret == 0)
                        /* timeout */
                        continue;

                if (ret == -1 && errno == EINTR)
                        /* sys call */
                        continue;

		ret = event_dispatch_epoll_handler (event_pool, &event);
        }
out:
        if (ev_data)
                GF_FREE (ev_data);
        return NULL;
}
Developer: lkzhd, Project: glusterfs-annotation, Lines: 71, Source: event-epoll.c


Example 10: broadcast

 void broadcast() {
   assert(pthread_cond_broadcast(&native_) == 0);
 }
Developer: vasco, Project: rubinius, Lines: 3, Source: thread.hpp


Example 11: bsem_post_all

/* Post to all threads */
static void bsem_post_all(bsem *bsem_p) {
	pthread_mutex_lock(&bsem_p->mutex);
	bsem_p->v = 1;
	pthread_cond_broadcast(&bsem_p->cond);
	pthread_mutex_unlock(&bsem_p->mutex);
}
Developer: dweymouth, Project: C-Thread-Pool, Lines: 7, Source: thpool.c


Example 12: _delete

      /* Recursive helper function.
      * Returns a pointer to the node if found.
      * Stores an optional pointer to the
      * parent, or what should be the parent if not found.
      *
      * If it returns a node, there will be a lock on that node
      */
      struct trie_node *
      _delete (struct trie_node *node, const char *string,
        size_t strlen) {
          int keylen, cmp;
          int rc;
          // First things first, check if we are NULL

          if (node == NULL) return NULL;


          // lock the node here,
          rc = pthread_mutex_lock(&(node->lock));
          assert(rc == 0);


          assert(node->strlen < 64);

          // See if this key is a substring of the string passed in
          cmp = compare_keys (node->key, node->strlen, string, strlen, &keylen);
          if (cmp == 0) {
            // Yes, either quit, or recur on the children

            // If this key is longer than our search string, the key isn't here
            if (node->strlen > keylen)
            {
              // unlock node
              rc = pthread_mutex_unlock(&(node->lock));
              assert(rc == 0);

              return NULL;
            }
            else if (strlen > keylen)
            {
              // found won't be unlocked by this method
              struct trie_node *found =  _delete(node->children, string, strlen - keylen);

              if (found)
              {
                if(found->waiting > 0 && allow_squatting)
                {
                  pthread_cond_broadcast(&(found->c));
                  rc = pthread_mutex_unlock(&(found->lock));
                  assert(rc == 0);

                }
                else
                {
                  /* If the node doesn't have children, delete it.
                  * Otherwise, keep it around to find the kids */
                  if (found->children == NULL && found->ip4_address == 0)
                  {
                    // shouldn't need to lock found->next because as long as parent is locked,
                    // nothing bad should be able to happen to it (check what insert does)
                    assert(node->children == found);
                    node->children = found->next;
                    // unlock found
                    rc = pthread_mutex_unlock(&(found->lock));
                    assert(rc == 0);


                    free(found);

                  }
                  else
                  {
                    rc = pthread_mutex_unlock(&(found->lock));
                    assert(rc == 0);

                  }
                }

                // unlock found

                // TODO get root lock
                /* Delete the root node if we empty the tree */
                if (node == root && node->children == NULL && node->ip4_address == 0)
                {
                  if(node->waiting > 0 && allow_squatting)
                  {
                    pthread_cond_broadcast(&(node->c));
                    rc = pthread_mutex_unlock(&(node->lock));
                    assert(rc == 0);

                  }
                  else
                  {
                    root = node->next;

                    rc = pthread_mutex_unlock(&(node->lock));
                    assert(rc == 0);


                    free(node);
//......... part of the code omitted here .........
Developer: stevenmherring, Project: DNS, Lines: 101, Source: fine-trie.c


Example 13: silc_cond_broadcast

void silc_cond_broadcast(SilcCond cond)
{
#ifdef SILC_THREADS
  pthread_cond_broadcast(&cond->cond);
#endif /* SILC_THREADS */
}
Developer: TabTwo, Project: silc-toolkit, Lines: 6, Source: silcunixthread.c


Example 14: main


//......... part of the code omitted here .........
        for (i = 0; i < num_clients; i++) {
            clients[i]->jpeg_ready = 0;
        }

        if (rem_len) {
            /* copy the remainder of data from the previous iteration back to the jpeg buffer */
            memmove(jpeg_buf, sep + (auto_separator ? 2 /* strlen(JPEG_END) */ : input_separator_len), rem_len);
            jpeg_size = rem_len;
        }

        memcpy(jpeg_buf + jpeg_size, input_buf, size);
        jpeg_size += size;

        /* look behind at most 2 * INPUT_BUF_LEN for a separator */
        sep = (char *) memmem(jpeg_buf + jpeg_size - MIN(2 * INPUT_BUF_LEN, jpeg_size), MIN(2 * INPUT_BUF_LEN, jpeg_size),
                input_separator, input_separator_len);

        if (sep) { /* found a separator, jpeg frame is ready */
            if (auto_separator) {
                rem_len = jpeg_size - (sep - jpeg_buf) - 2 /* strlen(JPEG_START) */;
                jpeg_size = sep - jpeg_buf + 2 /* strlen(JPEG_END) */;
            }
            else {
                rem_len = jpeg_size - (sep - jpeg_buf) - input_separator_len;
                jpeg_size = sep - jpeg_buf;
            }

            DEBUG("input: jpeg buffer ready with %d bytes", jpeg_size);

            /* set the ready flag and notify all client threads about it */
            for (i = 0; i < num_clients; i++) {
                clients[i]->jpeg_ready = 1;
            }
            if (pthread_cond_broadcast(&jpeg_cond)) {
                ERROR("pthread_cond_broadcast() failed");
                return -1;
            }

            now = get_now();
            frame_int = frame_int * 0.7 + (now - last_frame_time) * 0.3;
            last_frame_time = now;
        }
        else {
            rem_len = 0;
        }

        if (pthread_mutex_unlock(&jpeg_mutex)) {
            ERROR("pthread_mutex_unlock() failed");
            return -1;
        }

        if (sep) {
            DEBUG("current fps: %.01lf", 1 / frame_int);

            if (num_clients) {
                min_client_frame_int = clients[0]->frame_int;
                for (i = 0; i < num_clients; i++) {
                    if (clients[i]->frame_int < min_client_frame_int) {
                        min_client_frame_int = clients[i]->frame_int;
                    }
                }

                frame_int_adj = (min_client_frame_int - frame_int) * 1000000;
                if (frame_int_adj > 0) {
                    DEBUG("input frame int.: %.0lf us, client frame int.: %.0lf us, frame int. adjustment: %.0lf us",
                            frame_int * 1000000, min_client_frame_int * 1000000, frame_int_adj);
Developer: ccrisan, Project: streameye, Lines: 67, Source: streameye.c


Example 15: printData

void printData(void *args)
{
	printThreadArgs *threadArgs = (printThreadArgs *)args;
	
	pthread_mutex_t *mtx;
	pthread_cond_t *cnd;

	//pthread_cond_t cnd;
	//pthread_mutex_t mtx;

        //memcpy(&mtx,&threadArgs->mutex,sizeof(pthread_mutex_t));
        //memcpy(&cnd,&threadArgs->cond,sizeof(pthread_cond_t));

	mtx = &threadArgs->mutex;
	cnd = &threadArgs->cond;
	
	int empty,eof=0;

	do
	{
		printf("R_PRINT : WAITING FOR THE LOCK\n");
       		pthread_mutex_lock(mtx);
		printf("R_PRINT : WAITING FOR THE COND\n");
		while (client_wndw.CWP < client_wndw.wndw_size)
			pthread_cond_wait(cnd, mtx);


		if(client_wndw.CRP == -1 && client_wndw.CWP>0)
		{
			printf("R_PRINT : GOT THE LOCK\n");
			printf("client_wndw.CRP : %d\n",client_wndw.CRP);
			printf("client_wndw.CWP : %d\n",client_wndw.CWP);

			while(client_wndw.CRP != client_wndw.CWP-1)
			{
				client_wndw.CRP = (client_wndw.CRP + 1);
				fprintf(file,"%s",client_wndw.windElems[client_wndw.CRP].data);
				fputs(client_wndw.windElems[client_wndw.CRP].data,stdout);
				if(client_wndw.windElems[client_wndw.CRP].header.type == 104)
				{
					eof = 1;
					fclose(file);
					break;
	
				}
	
			}	
		}

		if( (client_wndw.CRP == client_wndw.CWP-1) || (eof==1))
		{
			client_wndw.CRP=client_wndw.CWP-1;
			printf("SIGNALLED BACK TO R_READ\n");
			pthread_cond_broadcast(cnd);
		}

       		pthread_mutex_unlock(mtx);

		sleep(1);

	}while(eof==0);
       	//pthread_mutex_unlock(mtx);
	
}
Developer: sigsegved, Project: Network-Programming, Lines: 64, Source: recv.c


Example 16: pthread_setcancelstate

void SipperProxyQueue::stopQueue(void)
{
   int oldstate;
   pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
   pthread_mutex_lock(&tEventQueueMutex);

   if(bQueueStopped)
   {
      pthread_mutex_unlock(&tEventQueueMutex);
      pthread_setcancelstate(oldstate, NULL);
      return;
   }

   bQueueStopped = true;

   pthread_cond_broadcast(&waitingFeederCond);
   pthread_cond_broadcast(&waitingConsumerCond);

   for(unsigned int idx = 0; idx < MAX_QUEUE_THR; idx++)
   {
      if(feederData[idx].count)
      {
         pthread_cond_signal(&feederData[idx].condition);
      }

      if(consumerData[idx].count)
      {
         pthread_cond_signal(&consumerData[idx].condition);
      }
   }

   while(ptHeadPtr != NULL)
   {
      t_EventQueueNodePtr currNode = ptHeadPtr;
      ptHeadPtr  = ptHeadPtr->ptNextNode;
      iQueueCount--;

      if(_cleanupFunc != NULL)
      {
         _cleanupFunc(currNode->_queueData);
      }

      if(iFreeNodes < highWaterMark)
      {
         currNode->ptNextNode = ptFreeList;
         ptFreeList = currNode;

         iFreeNodes++;
      }
      else
      {
         delete currNode;
      }

      if(ptHeadPtr == NULL)
      {
         ptTailPtr = NULL;
      }
   }

   pthread_mutex_unlock(&tEventQueueMutex);
   pthread_setcancelstate(oldstate, NULL);
}
Developer: bklang, Project: sipper, Lines: 63, Source: SipperProxyQueue.cpp


Example 17: main

int
main()
{
  int failed = 0;
  int i;
  pthread_t t[NUMTHREADS + 1];

  struct _timeb currSysTime;
  const DWORD NANOSEC_PER_MILLISEC = 1000000;

  cvthing.shared = 0;

  assert((t[0] = pthread_self()).p != NULL);

  assert(cvthing.notbusy == PTHREAD_COND_INITIALIZER);

  assert(cvthing.lock == PTHREAD_MUTEX_INITIALIZER);

  assert(pthread_mutex_lock(&start_flag) == 0);

  _ftime(&currSysTime);

  abstime.tv_sec = currSysTime.time;
  abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm;

  abstime.tv_sec += 10;

  assert((t[0] = pthread_self()).p != NULL);

  awoken = 0;

  for (i = 1; i <= NUMTHREADS; i++)
    {
      threadbag[i].started = 0;
      threadbag[i].threadnum = i;
      assert(pthread_create(&t[i], NULL, mythread, (void *) &threadbag[i]) == 0);
    }

  /*
   * Code to control or manipulate child threads should probably go here.
   */

  assert(pthread_mutex_unlock(&start_flag) == 0);

  /*
   * Give threads time to start.
   */
  Sleep(1000);

  /*
   * Cancel one of the threads.
   */
  assert(pthread_cancel(t[1]) == 0);
  assert(pthread_join(t[1], NULL) == 0);

  assert(pthread_mutex_lock(&cvthing.lock) == 0);

  cvthing.shared++;

  /*
   * Signal all remaining waiting threads.
   */
  assert(pthread_cond_broadcast(&cvthing.notbusy) == 0);

  assert(pthread_mutex_unlock(&cvthing.lock) == 0);

  /*
   * Wait for all threads to complete.
   */
  for (i = 2; i <= NUMTHREADS; i++)
    assert(pthread_join(t[i], NULL) == 0);

  /* 
   * Cleanup the CV.
   */
  
  assert(pthread_mutex_destroy(&cvthing.lock) == 0);

  assert(cvthing.lock == NULL);

  assert(pthread_cond_destroy(&cvthing.notbusy) == 0);

  assert(cvthing.notbusy == NULL);

  /*
   * Standard check that all threads started.
   */
  for (i = 1; i <= NUMTHREADS; i++)
    { 
      failed = !threadbag[i].started;

      if (failed)
	{
	  fprintf(stderr, "Thread %d: started %d\n", i, threadbag[i].started);
	}
    }

  assert(!failed);

  /*
//......... part of the code omitted here .........
Developer: kasravi, Project: MIDIUIUgenVS, Lines: 101, Source: condvar7.c


Example 18: cond_lock_broadcast

static int cond_lock_broadcast(cond_lock_t *cond)
{
    return pthread_cond_broadcast(&cond->cond);
}
Developer: github188, Project: netlib, Lines: 4, Source: lock.c


Example 19: vcpu_set_state_locked

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	const struct timespec ts = {.tv_sec = 1, .tv_nsec = 0}; /* 1 second */

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE) {
			pthread_mutex_lock(&vcpu->state_sleep_mtx);
			vcpu_unlock(vcpu);
			pthread_cond_timedwait_relative_np(&vcpu->state_sleep_cnd,
				&vcpu->state_sleep_mtx, &ts);
			vcpu_lock(vcpu);
			pthread_mutex_unlock(&vcpu->state_sleep_mtx);
			//msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
		}
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;

	if (newstate == VCPU_IDLE)
		pthread_cond_broadcast(&vcpu->state_sleep_cnd);
		//wakeup(&vcpu->state);

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		xhyve_abort("Error %d setting state to %d\n", error, newstate);
}
Developer: classic2u, Project: hyperkit, Lines: 64, Source: vmm.c


Example 20: main