This article collects typical usage examples of the pthread_mutex_unlock function in C/C++. If you are wondering what pthread_mutex_unlock does and how to use it, the curated code examples below may help.
The following shows 20 code examples of pthread_mutex_unlock, sorted by popularity by default.
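Before the project-specific examples, a minimal, self-contained sketch of the pattern they all build on may be useful: every pthread_mutex_lock of a shared resource is paired with a pthread_mutex_unlock on every exit path. The counter, thread count, and iteration count below are illustrative choices, not taken from any of the projects cited on this page.
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;
static long counter = 0;

static void *worker(void *arg)
{
    (void)arg;
    for (int i = 0; i < 100000; ++i) {
        pthread_mutex_lock(&counter_mutex);   /* enter the critical section */
        ++counter;                            /* touch shared state only while the mutex is held */
        pthread_mutex_unlock(&counter_mutex); /* release it on the way out */
    }
    return NULL;
}

int main(void)
{
    pthread_t tid[4];
    for (int i = 0; i < 4; ++i)
        pthread_create(&tid[i], NULL, worker, NULL);
    for (int i = 0; i < 4; ++i)
        pthread_join(tid[i], NULL);
    printf("counter = %ld\n", counter); /* 4 threads x 100000 increments = 400000 */
    return 0;
}
Compile with a POSIX toolchain, e.g. cc -pthread demo.c.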
Example 1: pthread_mutex_lock
const struct sensors_input_cache_entry_t *sensors_input_cache_get(
const char *name)
{
int rc;
int fd;
DIR *dir;
struct dirent * item;
struct list_node *member;
struct input_dev_list *temp;
pthread_t id[MAX_EVENT_DRIVERS];
unsigned int i = 0;
unsigned int threads = 0;
const struct sensors_input_cache_entry_t *found = NULL;
pthread_mutex_lock(&util_mutex);
if (!list_initialized) {
node_init(&head);
list_initialized = 1;
}
temp = lookup(name, NULL);
if (temp) {
found = &temp->entry;
goto exit;
}
dir = opendir(INPUT_EVENT_DIR);
if (!dir) {
ALOGE("%s: error opening '%s'\n", __func__,
INPUT_EVENT_DIR);
goto exit;
}
while ((item = readdir(dir)) != NULL) {
if (strncmp(item->d_name, INPUT_EVENT_BASENAME,
sizeof(INPUT_EVENT_BASENAME) - 1) != 0) {
continue;
}
temp = (temp ? temp : malloc(sizeof(*temp)));
if (temp == NULL) {
ALOGE("%s: malloc error!\n", __func__);
break;
}
/* skip already cached entries */
snprintf(temp->entry.event_path, sizeof(temp->entry.event_path),
"%s%s", INPUT_EVENT_DIR, item->d_name);
if (lookup(NULL, temp->entry.event_path))
continue;
/* make sure we have access */
fd = open(temp->entry.event_path, O_RDONLY);
if (fd < 0) {
ALOGE("%s: cant open %s", __func__,
item->d_name);
continue;
}
rc = ioctl(fd, EVIOCGNAME(sizeof(temp->entry.dev_name)),
temp->entry.dev_name);
/* close in parallel to optimize boot time */
pthread_create(&id[threads++], NULL,
close_input_dev_fd, (void*) fd);
if (rc < 0) {
ALOGE("%s: cant get name from %s", __func__,
item->d_name);
continue;
}
temp->entry.nr = atoi(item->d_name +
sizeof(INPUT_EVENT_BASENAME) - 1);
node_add(&head, &temp->node);
if (!found && !strncmp(temp->entry.dev_name, name,
sizeof(temp->entry.dev_name) - 1))
found = &temp->entry;
temp = NULL;
}
closedir(dir);
for(i = 0; i < threads; ++i)
pthread_join(id[i], NULL);
exit:
pthread_mutex_unlock(&util_mutex);
return found;
}
Developer: Barmaleyushka, Project: DASH, Lines: 91, Source: sensors_input_cache.c
Example 2: unlock
static void unlock()
{
    pthread_mutex_unlock(&lock_loggable);
}
Developer: Hazy-legacy-zf2, Project: platform_system_core, Lines: 4, Source: log_is_loggable.c
Example 3: pthread_mutex_unlock
inline void interprocess_recursive_mutex::unlock()
{
    int res = 0;
    res = pthread_mutex_unlock(&m_mut);
    assert(res == 0);
}
Developer: Kaoschuks, Project: cppcms, Lines: 6, Source: interprocess_recursive_mutex.hpp
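Example 3 only checks the return value with assert, which compiles away under NDEBUG. As a hedged alternative sketch (not part of the cppcms sources), an error-checking mutex reports misuse at runtime: POSIX specifies that pthread_mutex_unlock returns 0 on success and an error number such as EPERM when the calling thread does not own a PTHREAD_MUTEX_ERRORCHECK mutex.
#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    pthread_mutex_t mut;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&mut, &attr);
    pthread_mutexattr_destroy(&attr);

    /* Unlocking a mutex this thread never locked is reported instead of being undefined. */
    int rc = pthread_mutex_unlock(&mut);
    if (rc != 0)
        fprintf(stderr, "pthread_mutex_unlock: %s\n", strerror(rc)); /* typically EPERM */

    pthread_mutex_destroy(&mut);
    return 0;
}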
Example 4: slabs_rebalancer_resume
void slabs_rebalancer_resume(void) {
    pthread_mutex_unlock(&slabs_rebalance_lock);
}
Developer: ijibu, Project: memcached, Lines: 3, Source: slabs.c
Example 5: listen_loop
/* The main listening loop for the object. */
static alib_error listen_loop(ClientListener* listener)
{
if(!listener)return(ALIB_BAD_ARG);
int rval;
int event_count;
struct epoll_event* event_it;
long data_in_count;
void* data_in_buff = malloc(DEFAULT_INPUT_BUFF_SIZE);
/* Ensure we were able to allocate the data in buffer. */
if(!data_in_buff)
{
rval = ALIB_MEM_ERR;
goto f_return;
}
/* While our socket is open, then we will keep running. */
while(!(listener->flag_pole & THREAD_STOP))
{
/* If the array list is empty, then we simply wait until something is added
* to it or our thread is called to stop. */
if(!ArrayList_get_count(listener->client_list))
{
/* Call the empty list callback. */
if(listener->client_list_empty)
{
int rval = listener->client_list_empty(listener);
if(rval & SCB_RVAL_STOP_SERVER)
break;
}
pthread_mutex_lock(&listener->mutex);
while(!ArrayList_get_count(listener->client_list) && !(listener->flag_pole & THREAD_STOP))
pthread_cond_wait(&listener->t_cond, &listener->mutex);
pthread_mutex_unlock(&listener->mutex);
continue;
}
/* Wait for an event to come. */
event_count = epoll_wait(listener->ep.efd, listener->ep.triggered_events, DEFAULT_BACKLOG_SIZE,
1000);
if(!event_count)continue;
/* If event_count is less than zero, an error occurred. */
if(event_count < 0)
{
if(listener->flag_pole & THREAD_STOP)
rval = ALIB_OK;
else
{
if(listener->ep.efd > -1)
continue;
rval = ALIB_CHECK_ERRNO;
}
goto f_return;
}
/* Iterate through the events. */
if(pthread_mutex_lock(&listener->mutex))
{
rval = ALIB_MUTEX_ERR;
goto f_return;
}
for(event_it = listener->ep.triggered_events; event_count > 0; ++event_it, --event_count)
{
/* Use compare_int_ptr as the first member in the socket package
* is an integer. */
socket_package* client = (socket_package*)ArrayList_find_item_by_value_tsafe(
listener->client_list, &event_it->data.fd, compare_int_ptr);
if(!client)
{
close(listener->ep.triggered_events->data.fd);
continue;
}
/* Error occurred on the socket. */
if((event_it->events & (EPOLLERR | EPOLLHUP)) ||
!(event_it->events & EPOLLIN))
{
ArrayList_remove_tsafe(listener->client_list, client);
continue;
}
/* Call the client_data_ready callback. */
if(listener->data_ready)
{
rval = listener->data_ready(listener, client, &data_in_buff,
&data_in_count);
if(rval & SCB_RVAL_CLOSE_CLIENT)
ArrayList_remove_tsafe(listener->client_list, client);
if(rval & SCB_RVAL_STOP_SERVER)
{
rval = ALIB_OK;
flag_raise(&listener->flag_pole, THREAD_STOP);
if(pthread_mutex_unlock(&listener->mutex))
rval = ALIB_MUTEX_ERR;
goto f_return;
//......... part of the code omitted here .........
Developer: acs9307, Project: alib-c, Lines: 101, Source: ClientListener.c
Example 6: slabs_free
void slabs_free(void *ptr, size_t size, unsigned int id) {
    pthread_mutex_lock(&slabs_lock);
    do_slabs_free(ptr, size, id);
    pthread_mutex_unlock(&slabs_lock);
}
Developer: ijibu, Project: memcached, Lines: 5, Source: slabs.c
Example 7: slab_rebalance_move
/* refcount == 0 is safe since nobody can incr while cache_lock is held.
* refcount != 0 is impossible since flags/etc can be modified in other
* threads. instead, note we found a busy one and bail. logic in do_item_get
* will prevent busy items from continuing to be busy
*/
static int slab_rebalance_move(void) {
slabclass_t *s_cls;
int x;
int was_busy = 0;
int refcount = 0;
enum move_status status = MOVE_PASS;
pthread_mutex_lock(&cache_lock);
pthread_mutex_lock(&slabs_lock);
s_cls = &slabclass[slab_rebal.s_clsid];
for (x = 0; x < slab_bulk_check; x++) {
item *it = slab_rebal.slab_pos;
status = MOVE_PASS;
if (it->slabs_clsid != 255) {
void *hold_lock = NULL;
uint32_t hv = hash(ITEM_key(it), it->nkey, 0);
if ((hold_lock = item_trylock(hv)) == NULL) {
status = MOVE_LOCKED;
} else {
refcount = refcount_incr(&it->refcount);
if (refcount == 1) { /* item is unlinked, unused */
if (it->it_flags & ITEM_SLABBED) {
/* remove from slab freelist */
if (s_cls->slots == it) {
s_cls->slots = it->next;
}
if (it->next) it->next->prev = it->prev;
if (it->prev) it->prev->next = it->next;
s_cls->sl_curr--;
status = MOVE_DONE;
} else {
status = MOVE_BUSY;
}
} else if (refcount == 2) { /* item is linked but not busy */
if ((it->it_flags & ITEM_LINKED) != 0) {
do_item_unlink_nolock(it, hv);
status = MOVE_DONE;
} else {
/* refcount == 1 + !ITEM_LINKED means the item is being
* uploaded to, or was just unlinked but hasn't been freed
* yet. Let it bleed off on its own and try again later */
status = MOVE_BUSY;
}
} else {
if (settings.verbose > 2) {
fprintf(stderr, "Slab reassign hit a busy item: refcount: %d (%d -> %d)\n",
it->refcount, slab_rebal.s_clsid, slab_rebal.d_clsid);
}
status = MOVE_BUSY;
}
item_trylock_unlock(hold_lock);
}
}
switch (status) {
case MOVE_DONE:
it->refcount = 0;
it->it_flags = 0;
it->slabs_clsid = 255;
break;
case MOVE_BUSY:
refcount_decr(&it->refcount);
case MOVE_LOCKED:
slab_rebal.busy_items++;
was_busy++;
break;
case MOVE_PASS:
break;
}
slab_rebal.slab_pos = (char *)slab_rebal.slab_pos + s_cls->size;
if (slab_rebal.slab_pos >= slab_rebal.slab_end)
break;
}
if (slab_rebal.slab_pos >= slab_rebal.slab_end) {
/* Some items were busy, start again from the top */
if (slab_rebal.busy_items) {
slab_rebal.slab_pos = slab_rebal.slab_start;
slab_rebal.busy_items = 0;
} else {
slab_rebal.done++;
}
}
pthread_mutex_unlock(&slabs_lock);
pthread_mutex_unlock(&cache_lock);
return was_busy;
}
Developer: ijibu, Project: memcached, Lines: 97, Source: slabs.c
Example 8: mlt_properties_get_int
static void *video_thread( void *arg )
{
// Identify the arg
consumer_sdl self = arg;
// Obtain time of thread start
struct timeval now;
int64_t start = 0;
int64_t elapsed = 0;
struct timespec tm;
mlt_frame next = NULL;
mlt_properties properties = NULL;
double speed = 0;
// Get real time flag
int real_time = mlt_properties_get_int( self->properties, "real_time" );
// Get the current time
gettimeofday( &now, NULL );
// Determine start time
start = ( int64_t )now.tv_sec * 1000000 + now.tv_usec;
while ( self->running )
{
// Pop the next frame
pthread_mutex_lock( &self->video_mutex );
next = mlt_deque_pop_front( self->queue );
while ( next == NULL && self->running )
{
pthread_cond_wait( &self->video_cond, &self->video_mutex );
next = mlt_deque_pop_front( self->queue );
}
pthread_mutex_unlock( &self->video_mutex );
if ( !self->running || next == NULL ) break;
// Get the properties
properties = MLT_FRAME_PROPERTIES( next );
// Get the speed of the frame
speed = mlt_properties_get_double( properties, "_speed" );
// Get the current time
gettimeofday( &now, NULL );
// Get the elapsed time
elapsed = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - start;
// See if we have to delay the display of the current frame
if ( mlt_properties_get_int( properties, "rendered" ) == 1 && self->running )
{
// Obtain the scheduled playout time
int64_t scheduled = mlt_properties_get_int( properties, "playtime" );
// Determine the difference between the elapsed time and the scheduled playout time
int64_t difference = scheduled - elapsed;
// Smooth playback a bit
if ( real_time && ( difference > 20000 && speed == 1.0 ) )
{
tm.tv_sec = difference / 1000000;
tm.tv_nsec = ( difference % 1000000 ) * 500;
nanosleep( &tm, NULL );
}
// Show current frame if not too old
if ( !real_time || ( difference > -10000 || speed != 1.0 || mlt_deque_count( self->queue ) < 2 ) )
consumer_play_video( self, next );
// If the queue is empty, recalculate start to allow build up again
if ( real_time && ( mlt_deque_count( self->queue ) == 0 && speed == 1.0 ) )
{
gettimeofday( &now, NULL );
start = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - scheduled + 20000;
}
}
// This frame can now be closed
mlt_frame_close( next );
next = NULL;
}
if ( next != NULL )
mlt_frame_close( next );
mlt_consumer_stopped( &self->parent );
return NULL;
}
Developer: elfring, Project: mlt, Lines: 90, Source: consumer_sdl_audio.c
Example 9: MLT_CONSUMER_PROPERTIES
static void *consumer_thread( void *arg )
{
// Identify the arg
consumer_sdl self = arg;
// Get the consumer
mlt_consumer consumer = &self->parent;
// Get the properties
mlt_properties consumer_props = MLT_CONSUMER_PROPERTIES( consumer );
// Video thread
pthread_t thread;
// internal initialization
int init_audio = 1;
int init_video = 1;
mlt_frame frame = NULL;
mlt_properties properties = NULL;
int duration = 0;
int64_t playtime = 0;
struct timespec tm = { 0, 100000 };
// int last_position = -1;
pthread_mutex_lock( &self->refresh_mutex );
self->refresh_count = 0;
pthread_mutex_unlock( &self->refresh_mutex );
// Loop until told not to
while( self->running )
{
// Get a frame from the attached producer
frame = mlt_consumer_rt_frame( consumer );
// Ensure that we have a frame
if ( frame )
{
// Get the frame properties
properties = MLT_FRAME_PROPERTIES( frame );
// Get the speed of the frame
double speed = mlt_properties_get_double( properties, "_speed" );
// Get refresh request for the current frame
int refresh = mlt_properties_get_int( consumer_props, "refresh" );
// Clear refresh
mlt_events_block( consumer_props, consumer_props );
mlt_properties_set_int( consumer_props, "refresh", 0 );
mlt_events_unblock( consumer_props, consumer_props );
// Play audio
init_audio = consumer_play_audio( self, frame, init_audio, &duration );
// Determine the start time now
if ( self->playing && init_video )
{
// Create the video thread
pthread_create( &thread, NULL, video_thread, self );
// Video doesn't need to be initialised any more
init_video = 0;
}
// Set playtime for this frame
mlt_properties_set_int( properties, "playtime", playtime );
while ( self->running && speed != 0 && mlt_deque_count( self->queue ) > 15 )
nanosleep( &tm, NULL );
// Push this frame to the back of the queue
if ( self->running && speed )
{
pthread_mutex_lock( &self->video_mutex );
if ( self->is_purge && speed == 1.0 )
{
mlt_frame_close( frame );
self->is_purge = 0;
}
else
{
mlt_deque_push_back( self->queue, frame );
pthread_cond_broadcast( &self->video_cond );
}
pthread_mutex_unlock( &self->video_mutex );
// Calculate the next playtime
playtime += ( duration * 1000 );
}
else if ( self->running )
{
pthread_mutex_lock( &self->refresh_mutex );
if ( ( refresh == 0 && self->refresh_count <= 0 ) || self->refresh_count > 1 )
{
consumer_play_video( self, frame );
pthread_cond_wait( &self->refresh_cond, &self->refresh_mutex );
}
mlt_frame_close( frame );
self->refresh_count --;
pthread_mutex_unlock( &self->refresh_mutex );
//......... part of the code omitted here .........
Developer: elfring, Project: mlt, Lines: 101, Source: consumer_sdl_audio.c
Example 10: CRITICAL_END
void CRITICAL_END() {
    pthread_mutex_unlock(&MULTITHREAD_MUT);
}
Developer: jfellus, Project: agpca, Lines: 3, Source: multithread.cpp
Example 11: consumer_play_audio
static int consumer_play_audio( consumer_sdl self, mlt_frame frame, int init_audio, int *duration )
{
// Get the properties of this consumer
mlt_properties properties = self->properties;
mlt_audio_format afmt = mlt_audio_s16;
// Set the preferred params of the test card signal
int channels = mlt_properties_get_int( properties, "channels" );
int frequency = mlt_properties_get_int( properties, "frequency" );
int scrub = mlt_properties_get_int( properties, "scrub_audio" );
static int counter = 0;
int samples = mlt_sample_calculator( mlt_properties_get_double( self->properties, "fps" ), frequency, counter++ );
int16_t *pcm;
int bytes;
mlt_frame_get_audio( frame, (void**) &pcm, &afmt, &frequency, &channels, &samples );
*duration = ( ( samples * 1000 ) / frequency );
if ( mlt_properties_get_int( properties, "audio_off" ) )
{
self->playing = 1;
init_audio = 1;
return init_audio;
}
if ( init_audio == 1 )
{
SDL_AudioSpec request;
SDL_AudioSpec got;
int audio_buffer = mlt_properties_get_int( properties, "audio_buffer" );
// specify audio format
memset( &request, 0, sizeof( SDL_AudioSpec ) );
self->playing = 0;
request.freq = frequency;
request.format = AUDIO_S16SYS;
request.channels = channels;
request.samples = audio_buffer;
request.callback = sdl_fill_audio;
request.userdata = (void *)self;
if ( SDL_OpenAudio( &request, &got ) != 0 )
{
mlt_log_error( MLT_CONSUMER_SERVICE( self ), "SDL failed to open audio: %s\n", SDL_GetError() );
init_audio = 2;
}
else if ( got.size != 0 )
{
SDL_PauseAudio( 0 );
init_audio = 0;
}
}
if ( init_audio == 0 )
{
mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
bytes = ( samples * channels * 2 );
pthread_mutex_lock( &self->audio_mutex );
while ( self->running && bytes > ( sizeof( self->audio_buffer) - self->audio_avail ) )
pthread_cond_wait( &self->audio_cond, &self->audio_mutex );
if ( self->running )
{
if ( scrub || mlt_properties_get_double( properties, "_speed" ) == 1 )
memcpy( &self->audio_buffer[ self->audio_avail ], pcm, bytes );
else
memset( &self->audio_buffer[ self->audio_avail ], 0, bytes );
self->audio_avail += bytes;
}
pthread_cond_broadcast( &self->audio_cond );
pthread_mutex_unlock( &self->audio_mutex );
}
else
{
self->playing = 1;
}
return init_audio;
}
Developer: elfring, Project: mlt, Lines: 80, Source: consumer_sdl_audio.c
Example 12: ldap_pvt_thread_mutex_unlock
int
ldap_pvt_thread_mutex_unlock( ldap_pvt_thread_mutex_t *mutex )
{
    return ERRVAL( pthread_mutex_unlock( mutex ) );
}
Developer: osstech-jp, Project: openldap, Lines: 5, Source: thr_posix.c
Example 13: tty2_thread
void* tty2_thread()
{
    while(1)
    {
        FILE* f;
        char fileName[30];
        int numlines = 0;
        int i; // Standard iterator.
        fprintf(tty2, "in: ");
        fscanf(tty2, "%s", fileName);
        f = fopen(fileName, "r");
        if(!f)
        {
            printf("Arquivo inválido!\n");
            exit(-1);
        }
        p2->tty = tty2;
        p2->ready = 0;
        pthread_mutex_lock(&allocatorMutex);
        if(allocate_block() == OUT_OF_MEMORY)
        {
            printf("SEM MEMÓRIA\n");
            // Release the allocator lock before retrying; skipping the unlock
            // here would deadlock the next pthread_mutex_lock() call.
            pthread_mutex_unlock(&allocatorMutex);
            continue;
        }
        pthread_mutex_unlock(&allocatorMutex);
        // Load the program into memory.
        while(!feof(f))
        {
            fscanf(f, "%d %d", &mem[numlines + p2->cs].inst, &mem[numlines + p2->cs].op);
            printf("inst= %d\t\top= %d\n", mem[numlines + p2->cs].inst, mem[numlines + p2->cs].op);
            numlines++;
        }
        fclose(f); // Close the program file once it has been loaded.
        p2->ready = 1;
        printf("Rodando!\n\n");
        /* while(1)
        {
            //MUTEX
            pthread_mutex_lock(&runningMutex);
            if(running == p2)
            {
                printf("PC: %d\t CS: %d\t DS: %d\t ACC: %d\n", running->pc, running->cs, running->ds, running->acc);
                if(run_line() == PROG_END) break;
                running->pc++;
            }
            pthread_mutex_unlock(&runningMutex);
            //MUTEX END
        }
        // Execution finished
        */
        while(p2->ready == 1) {} // Spin while the process is still running.
        pthread_mutex_lock(&allocatorMutex);
        if(p2->ready == 0)
        {
            if(free_memory_block(running->block) == UNALLOCATED_MEM)
            {
                printf("Memória já foi desalocada. Algo muito errado aconteceu.\n");
                // Same as above: unlock before continuing so the mutex is not leaked.
                pthread_mutex_unlock(&allocatorMutex);
                continue;
            }
        }
        pthread_mutex_unlock(&allocatorMutex);
    }
}
Developer: pedrovanzella, Project: MicroOSVM, Lines: 72, Source: main.c
Example 14: videoenc_port_FlushProcessingBuffers
/** @brief Releases buffers under processing.
* This function must be implemented in the derived classes, for the
* specific processing
*/
OMX_ERRORTYPE videoenc_port_FlushProcessingBuffers(omx_base_PortType *openmaxStandPort)
{
omx_videoenc_PortType *omx_videoenc_Port = (omx_videoenc_PortType *)openmaxStandPort;
OMX_VCE_Buffers_List *pBuffersMng_List= &(omx_videoenc_Port->BuffersMng_List);
omx_base_component_PrivateType *omx_base_component_Private;
OMX_BUFFERHEADERTYPE *pBuffer;
int errQue;
DEBUG(DEB_LEV_FUNCTION_NAME, "In %s for port %p\n", __func__, openmaxStandPort);
omx_base_component_Private = (omx_base_component_PrivateType *)openmaxStandPort->standCompContainer->pComponentPrivate;
if(omx_videoenc_Port->ringbuffer == OMX_TRUE && omx_videoenc_Port->bufferpool)
{
clearBufferPool(omx_videoenc_Port->bufferpool);
}
if(openmaxStandPort->sPortParam.eDomain != OMX_PortDomainOther)
{
/* clock buffers are not used in the client's buffer management function */
pthread_mutex_lock(&omx_base_component_Private->flush_mutex);
openmaxStandPort->bIsPortFlushed = OMX_TRUE;
/*Signal the buffer management thread of port flush,if it is waiting for buffers*/
if(omx_base_component_Private->bMgmtSem->semval==0)
{
tsem_up(omx_base_component_Private->bMgmtSem);
}
if(omx_base_component_Private->state != OMX_StateExecuting )
{
/*Waiting at paused state*/
tsem_signal(omx_base_component_Private->bStateSem);
}
DEBUG(DEB_LEV_FULL_SEQ, "In %s waiting for flush all condition port index =%d\n", __func__,
(int)openmaxStandPort->sPortParam.nPortIndex);
/* Wait until flush is completed */
pthread_mutex_unlock(&omx_base_component_Private->flush_mutex);
tsem_down(omx_base_component_Private->flush_all_condition);
}
DEBUG(DEB_LEV_FUNCTION_NAME, "In %s flushed all the buffers under processing\n", __func__);
tsem_reset(omx_base_component_Private->bMgmtSem);
/* Flush all the buffers not under processing */
while (openmaxStandPort->pBufferSem->semval > 0)
{
DEBUG(DEB_LEV_FULL_SEQ, "In %s TFlag=%x Flusing Port=%d,Semval=%d Qelem=%d\n",
__func__, (int)openmaxStandPort->nTunnelFlags, (int)openmaxStandPort->sPortParam.nPortIndex,
(int)openmaxStandPort->pBufferSem->semval, (int)openmaxStandPort->pBufferQueue->nelem);
tsem_down(openmaxStandPort->pBufferSem);
pBuffer = dequeue(openmaxStandPort->pBufferQueue);
//FlushProcessingBuffers_BuffersMng(pBuffersMng_List,pBuffer,omx_videoenc_Port->bIsStoreMediaData);
if (PORT_IS_TUNNELED(openmaxStandPort) && !PORT_IS_BUFFER_SUPPLIER(openmaxStandPort))
{
DEBUG(DEB_LEV_FULL_SEQ, "In %s: Comp %s is returning io:%d buffer\n",
__func__, omx_base_component_Private->name,(int)openmaxStandPort->sPortParam.nPortIndex);
if (openmaxStandPort->sPortParam.eDir == OMX_DirInput)
{
((OMX_COMPONENTTYPE*)(openmaxStandPort->hTunneledComponent))->FillThisBuffer(openmaxStandPort->hTunneledComponent, pBuffer);
}
else
{
((OMX_COMPONENTTYPE*)(openmaxStandPort->hTunneledComponent))->EmptyThisBuffer(openmaxStandPort->hTunneledComponent, pBuffer);
}
}
else if (PORT_IS_TUNNELED_N_BUFFER_SUPPLIER(openmaxStandPort))
{
errQue = queue(openmaxStandPort->pBufferQueue, pBuffer);
if (errQue)
{
/* /TODO the queue is full. This can be handled in a fine way with
* some retrials, or other checking. For the moment this is a critical error
* and simply causes the failure of this call
*/
return OMX_ErrorInsufficientResources;
}
}
else
{
(*(openmaxStandPort->BufferProcessedCallback))(
openmaxStandPort->standCompContainer,
omx_base_component_Private->callbackData,
pBuffer);
}
}
/*Port is tunneled and supplier and didn't received all it's buffer then wait for the buffers*/
if (PORT_IS_TUNNELED_N_BUFFER_SUPPLIER(openmaxStandPort))
{
while(openmaxStandPort->pBufferQueue->nelem != (int)openmaxStandPort->nNumAssignedBuffers)
{
//......... part of the code omitted here .........
Developer: LeMaker, Project: android-actions, Lines: 101, Source: omx_videoenc_port.c
Example 15: pthread_mutex_lock
void AsyncLogger::addMessageTokens(const std::string& line, const std::string& line2) {
    pthread_mutex_lock(&_mutex);
    msg_q->push_back(line);
    msg_q->push_back(line2);
    pthread_mutex_unlock(&_mutex);
}
Developer: llaswell, Project: FIX_mini, Lines: 6, Source: AsyncLogger.cpp
Example 16: __net_slice_worker
static void* __net_slice_worker(void* arg)
{
net_slice_ctx_t* slice_ctx = (net_slice_ctx_t*)arg;
int slice_num = slice_ctx->slice_num;
int slice_id = slice_ctx->slice_id;
int cpu_base = slice_ctx->cpu_base;
assert(slice_ctx->fd > 0);
if (cpu_base > 0) {
cpu_base += (slice_id%4);
spk_worker_set_affinity(cpu_base);
}
zlog_info(net_zc, "slice> spawned: id=%d, cpu=%d", slice_id, cpu_base);
while (!slice_ctx->quit_req) {
pthread_mutex_lock(&slice_ctx->lock);
if(slice_ctx->wptr == slice_ctx->rptr) {
struct timeval now;
struct timespec outtime;
gettimeofday(&now, NULL);
outtime.tv_sec = now.tv_sec + 1;
outtime.tv_nsec = now.tv_usec * 1000;
pthread_cond_timedwait(&slice_ctx->not_empty, &slice_ctx->lock, &outtime);
}
if (slice_ctx->wptr == slice_ctx->rptr) {
pthread_mutex_unlock(&slice_ctx->lock);
continue;
}
assert(slice_ctx->data_chunk.flag == CHUNK_DATA_FLAG__REQ);
size_t chunk_size = slice_ctx->data_chunk.size;
size_t slice_sz = chunk_size / slice_num;
ssize_t access = 0;
// check chunk_size and slice_sz
if ((chunk_size % slice_num) ||
(slice_sz & (0x4000-1))) { // 16k alignment
zlog_error(net_zc, "illegal chunk_sz: chunk_sz=%zu, slice_num=%d",
chunk_size, slice_num);
access = SPKERR_PARAM;
goto done;
}
if (slice_sz != slice_ctx->slice_sz) {
zlog_warn(net_zc, "unexpected slice size : slice_sz=%zu, expect=%zu",slice_sz, slice_ctx->slice_sz);
// this chunk may be the last one in the file
}
if (slice_ctx->dir == SPK_DIR_WRITE) {
// write
if (slice_ctx->type == net_intf_tcp) {
access = net_tcp_write(slice_ctx->fd,
slice_ctx->data_chunk.buf + slice_id * slice_sz,slice_sz);
} else if (slice_ctx->type == net_intf_udp) {
access = net_udp_write(slice_ctx->fd,
slice_ctx->data_chunk.buf + slice_id * slice_sz,slice_sz,((struct sockaddr*)&slice_ctx->svr_addr));
}
} else {
// read
if (slice_ctx->type == net_intf_tcp) {
access = net_tcp_read(slice_ctx->fd,
slice_ctx->data_chunk.buf + slice_id * slice_sz,slice_sz);
} else if (slice_ctx->type == net_intf_udp) {
access = net_udp_read(slice_ctx->fd,
slice_ctx->data_chunk.buf + slice_id * slice_sz,slice_sz,((struct sockaddr*)&slice_ctx->svr_addr));
}
}
if (access != slice_sz) {
zlog_error(net_zc, "failed to access file: dir=%d, "
"sock_fd:%d slice_sz=%zu, offset=%ld, ret=%ld, errmsg=\'%s\'",
slice_ctx->dir, slice_ctx->fd,slice_sz,
slice_id * slice_sz, access, strerror(errno));
access = SPKERR_EACCESS;
goto done;
}
done:
if (access == slice_sz) {
slice_ctx->data_chunk.flag = CHUNK_DATA_FLAG__DONE;
} else {
slice_ctx->data_chunk.flag = access;
}
slice_ctx->rptr++;
pthread_cond_signal(&slice_ctx->not_full);
pthread_mutex_unlock(&slice_ctx->lock);
}
zlog_info(net_zc, "slice> terminated: id=%d", slice_id);
return(NULL);
}
Developer: Cai900205, Project: test, Lines: 94, Source: net_core.c
Example 17: pthread_mutex_unlock
/**
 * Starts or resumes processing Records
 */
void IpfixAggregator::start() {
    pthread_mutex_unlock(&mutex);
}
Developer: BackupTheBerlios, Project: vermont-svn, Lines: 6, Source: IpfixAggregator.cpp
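Examples 4 and 17 use pthread_mutex_unlock less as the end of a short critical section and more as a resume signal: the controlling code keeps a dedicated mutex locked to hold a worker thread at a gate, and unlocking lets the worker proceed. The sketch below illustrates that gate pattern under one important assumption: pause and resume are issued from the same controlling thread, because a normal POSIX mutex must be unlocked by the thread that locked it. The names pause_gate and worker are invented for this sketch and do not come from memcached or vermont.
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t pause_gate = PTHREAD_MUTEX_INITIALIZER;

/* Worker: passes through the gate once per iteration.  While the controller
 * holds pause_gate, the worker blocks in pthread_mutex_lock() and is paused. */
static void *worker(void *arg)
{
    (void)arg;
    for (int i = 0; i < 50; ++i) {
        pthread_mutex_lock(&pause_gate);
        pthread_mutex_unlock(&pause_gate);
        printf("working: %d\n", i); /* one unit of work */
        usleep(100 * 1000);         /* 100 ms */
    }
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, worker, NULL);

    sleep(1);
    pthread_mutex_lock(&pause_gate);   /* pause: the worker stalls at the gate */
    puts("-- worker paused --");
    sleep(1);
    pthread_mutex_unlock(&pause_gate); /* resume: this unlock releases it */
    puts("-- worker resumed --");

    pthread_join(tid, NULL);
    return 0;
}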
Example 18: pxgstrf_scheduler
//......... part of the code omitted here .........
} /* while */
}
} else {
/*
* jcol was EMPTY; Try to get a panel from the task Q.
*/
while ( 1 ) {
/*>>if ( (j = Dequeue(taskq, &item)) == EMPTY ) {*/
if ( taskq->count <= 0 ) {
jcol = EMPTY;
break;
} else {
jcol = taskq->queue[taskq->head++];
--taskq->count;
if ( STATE( jcol ) >= CANGO ) { /* CANGO or CANPIPE */
#ifdef DEBUG
printf("(%d) Dequeue[2] Got %d, STATE %d, Qcount %d\n",
pnum, jcol, STATE(jcol), j);
#endif
#ifdef PROFILE
if ( STATE( jcol ) == CANGO ) ++panhows[NOPIPE];
else ++panhows[PIPE];
#endif
break;
}
}
} /* while */
}
/*
* Update the status of the new panel "jcol" and its parent "dad".
*/
if ( jcol != EMPTY ) {
--pxgstrf_shared->tasks_remain;
#ifdef DOMAINS
if ( in_domain[jcol] == TREE_DOMAIN ) {
/* Dequeue the first descendant of this domain */
*bcol = taskq->queue[taskq->head++];
--taskq->count;
} else
#endif
{
STATE( jcol ) = BUSY;
w = pxgstrf_shared->pan_status[jcol].size;
for (j = jcol; j < jcol+w; ++j) pxgstrf_shared->spin_locks[j] = 1;
dad = DADPANEL (jcol);
if ( dad < n && pxgstrf_shared->pan_status[dad].ukids == 1 ) {
STATE( dad ) = CANPIPE;
/*>> j = Enqueue(taskq, dad);*/
taskq->queue[taskq->tail++] = dad;
++taskq->count;
#ifdef DEBUG
printf("(%d) Enqueue() %d's dad %d ->CANPIPE, Qcount %d\n",
pnum, jcol, dad, j);
#endif
}
#ifdef PROFILE
Gstat->procstat[pnum].panels++;
#endif
/* Find the farthest busy descendant of the new panel
and its parent.*/
*bcol = fb_cols[jcol];
#ifdef DEBUG
printf("(%d) Scheduler[2] fb_cols[%d]=%d, STATE %d\n",
pnum, jcol, *bcol, STATE( *bcol ));
#endif
while ( STATE( *bcol ) == DONE ) *bcol = DADPANEL (*bcol);
fb_cols[dad] = *bcol;
} /* else regular_panel */
} /* if jcol != empty */
*cur_pan = jcol;
#ifdef DEBUG
printf("(%d) Exit C.S. tasks_remain %d, cur_pan %d\n",
pnum, pxgstrf_shared->tasks_remain, jcol);
#endif
} /* ---- END CRITICAL SECTION ---- */
#if ( MACH==SUN )
/* Exit C.S. */
mutex_unlock( &pxgstrf_shared->lu_locks[SCHED_LOCK] );
#elif ( MACH==DEC || MACH==PTHREAD )
pthread_mutex_unlock( &pxgstrf_shared->lu_locks[SCHED_LOCK] );
#elif ( MACH==CRAY_PVP )
#pragma _CRI endguard SCHED_LOCK
#endif
#ifdef PROFILE
Gstat->procstat[pnum].cs_time += SuperLU_timer_() - t;
#endif
return;
}
Developer: DBorello, Project: OpenSees, Lines: 101, Source: pxgstrf_scheduler.c
Example 19: slabs_stats
void slabs_stats(ADD_STAT add_stats, void *c) {
    pthread_mutex_lock(&slabs_lock);
    do_slabs_stats(add_stats, c);
    pthread_mutex_unlock(&slabs_lock);
}
Developer: ijibu, Project: memcached, Lines: 5, Source: slabs.c
Example 20: rdd_read_write
static void *
rdd_read_write (void *arg)
{
int i = 0, ret = 0;
size_t bs = 0;
off_t offset = 0;
long rand = 0;
long max_ops = 0;
char *buf = NULL;
buf = calloc (1, rdd_config.max_bs);
if (!buf) {
fprintf (stderr, "calloc failed (%s)\n", strerror (errno));
ret = -1;
goto out;
}
for (i = 0; i < rdd_config.iters; i++)
{
pthread_mutex_lock (&rdd_config.lock);
{
int bytes = 0;
rand = random ();
if (rdd_config.min_bs == rdd_config.max_bs) {
bs = rdd_config.max_bs;
} else {
bs = rdd_config.min_bs +
(rand %
(rdd_config.max_bs -
rdd_config.min_bs));
}
offset = rand % rdd_config.in_file.st.st_size;
max_ops = rand % rdd_config.max_ops_per_seq;
if (!max_ops) {
max_ops ++;
}
ret = lseek (rdd_config.in_file.fd, offset, SEEK_SET);
if (ret != offset) {
fprintf (stderr, "lseek failed (%s)\n",
strerror (errno));
ret = -1;
goto unlock;
}
ret = lseek (rdd_config.out_file.fd, offset, SEEK_SET);
if (ret != offset) {
fprintf (stderr, "lseek failed (%s)\n",
strerror (errno));
ret = -1;
goto unlock;
}
while (max_ops--)
{
bytes = read (rdd_config.in_file.fd, buf, bs);
if (!bytes) {
break;
}
if (bytes == -1) {
fprintf (stderr, "read failed (%s)\n",
strerror (errno));
ret = -1;
goto unlock;
}
if (write (rdd_config.out_file.fd, buf, bytes)
!= bytes) {
fprintf (stderr, "write failed (%s)\n",
strerror (errno));
ret = -1;
goto unlock;
}
}
}
unlock:
pthread_mutex_unlock (&rdd_config.lock);
if (ret == -1) {
goto out;
}
ret = 0;
}
out:
free (buf);
pthread_barrier_wait (&rdd_config.barrier);
return NULL;
}
Developer: wangevan, Project: glusterfs, Lines: 91, Source: rdd.c
Note: the pthread_mutex_unlock examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.