This article compiles typical usage examples of the qemu_mutex_unlock function in C/C++ code. If you have been wondering what exactly qemu_mutex_unlock does, how to use it, and what real-world calls look like, the curated code samples below should help.
Below are 20 code examples of qemu_mutex_unlock, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C/C++ code examples.
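Before the examples, a minimal warm-up sketch may help. It is not taken from any of the projects below; the identifiers my_lock and shared_counter are made up for illustration, and only QEMU's public QemuMutex API (qemu_mutex_init, qemu_mutex_lock, qemu_mutex_unlock from "qemu/thread.h") is assumed. It shows the invariant every example relies on: each qemu_mutex_lock is paired with a qemu_mutex_unlock on every path, and the protected data is touched only in between.

#include "qemu/osdep.h"
#include "qemu/thread.h"

static QemuMutex my_lock;     /* hypothetical lock */
static int shared_counter;    /* hypothetical data protected by my_lock */

void counter_init(void)
{
    qemu_mutex_init(&my_lock);     /* initialize exactly once */
}

void counter_increment(void)
{
    qemu_mutex_lock(&my_lock);
    shared_counter++;              /* critical section */
    qemu_mutex_unlock(&my_lock);   /* always pair lock with unlock */
}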
Example 1: qemu_tcg_wait_io_event
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
Developer: KonishchevDmitry, Project: qemu-kvm, Lines: 27, Source: cpus.c
Example 2: virtio_balloon_set_status
static void virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);

    if (!s->stats_vq_elem && vdev->vm_running &&
        (status & VIRTIO_CONFIG_S_DRIVER_OK) && virtqueue_rewind(s->svq, 1)) {
        /* poll stats queue for the element we have discarded when the VM
         * was stopped */
        virtio_balloon_receive_stats(vdev, s->svq);
    }

    if (virtio_balloon_free_page_support(s)) {
        /*
         * The VM is woken up and the iothread was blocked, so signal it to
         * continue.
         */
        if (vdev->vm_running && s->block_iothread) {
            qemu_mutex_lock(&s->free_page_lock);
            s->block_iothread = false;
            qemu_cond_signal(&s->free_page_cond);
            qemu_mutex_unlock(&s->free_page_lock);
        }

        /* The VM is stopped, block the iothread. */
        if (!vdev->vm_running) {
            qemu_mutex_lock(&s->free_page_lock);
            s->block_iothread = true;
            qemu_mutex_unlock(&s->free_page_lock);
        }
    }
}
Developer: MaddTheSane, Project: qemu, Lines: 31, Source: virtio-balloon.c
Example 3: qxl_render_cursor
/* called from spice server thread context only */
int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
{
    QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
    QXLCursor *cursor;
    QEMUCursor *c;

    if (!cmd) {
        return 1;
    }

    if (!dpy_cursor_define_supported(qxl->vga.con)) {
        return 0;
    }

    if (qxl->debug > 1 && cmd->type != QXL_CURSOR_MOVE) {
        fprintf(stderr, "%s", __FUNCTION__);
        qxl_log_cmd_cursor(qxl, cmd, ext->group_id);
        fprintf(stderr, "\n");
    }

    switch (cmd->type) {
    case QXL_CURSOR_SET:
        cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id);
        if (!cursor) {
            return 1;
        }
        if (cursor->chunk.data_size != cursor->data_size) {
            fprintf(stderr, "%s: multiple chunks\n", __FUNCTION__);
            return 1;
        }
        c = qxl_cursor(qxl, cursor);
        if (c == NULL) {
            c = cursor_builtin_left_ptr();
        }
        qemu_mutex_lock(&qxl->ssd.lock);
        if (qxl->ssd.cursor) {
            cursor_put(qxl->ssd.cursor);
        }
        qxl->ssd.cursor = c;
        qxl->ssd.mouse_x = cmd->u.set.position.x;
        qxl->ssd.mouse_y = cmd->u.set.position.y;
        qemu_mutex_unlock(&qxl->ssd.lock);
        qemu_bh_schedule(qxl->ssd.cursor_bh);
        break;
    case QXL_CURSOR_MOVE:
        qemu_mutex_lock(&qxl->ssd.lock);
        qxl->ssd.mouse_x = cmd->u.position.x;
        qxl->ssd.mouse_y = cmd->u.position.y;
        qemu_mutex_unlock(&qxl->ssd.lock);
        qemu_bh_schedule(qxl->ssd.cursor_bh);
        break;
    }
    return 0;
}
Developer: 32bitmicro, Project: riscv-qemu, Lines: 54, Source: qxl-render.c
Example 4: cpu_list_remove
void cpu_list_remove(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        qemu_mutex_unlock(&qemu_cpu_list_lock);
        return;
    }

    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
Developer: 8tab, Project: qemu, Lines: 15, Source: cpus-common.c
Example 5: qemu_mutex_lock_iothread
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}
Developer: yujinyu, Project: QEMU_PACER, Lines: 15, Source: cpus.c
Example 6: pfifo_write
void pfifo_write(void *opaque, hwaddr addr, uint64_t val, unsigned int size)
{
    NV2AState *d = (NV2AState *)opaque;

    reg_log_write(NV_PFIFO, addr, val);

    qemu_mutex_lock(&d->pfifo.lock);
    switch (addr) {
    case NV_PFIFO_INTR_0:
        d->pfifo.pending_interrupts &= ~val;
        update_irq(d);
        break;
    case NV_PFIFO_INTR_EN_0:
        d->pfifo.enabled_interrupts = val;
        update_irq(d);
        break;
    default:
        d->pfifo.regs[addr] = val;
        break;
    }
    qemu_cond_broadcast(&d->pfifo.pusher_cond);
    qemu_cond_broadcast(&d->pfifo.puller_cond);
    qemu_mutex_unlock(&d->pfifo.lock);
}
Developer: JayFoxRox, Project: xqemu, Lines: 27, Source: nv2a_pfifo.c
Example 7: hostmem_client_set_memory
static void hostmem_client_set_memory(CPUPhysMemoryClient *client,
                                      target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset)
{
    HostMem *hostmem = container_of(client, HostMem, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    size_t s = offsetof(struct vhost_memory, regions) +
               (hostmem->mem->nregions + 1) * sizeof hostmem->mem->regions[0];

    /* TODO: this is a hack.
     * At least one vga card (cirrus) changes the gpa to hva
     * memory maps on data path, which slows us down.
     * Since we should never need to DMA into VGA memory
     * anyway, let's just skip these regions. */
    if (ranges_overlap(start_addr, size, 0xa0000, 0x10000)) {
        return;
    }

    qemu_mutex_lock(&hostmem->mem_lock);

    hostmem->mem = qemu_realloc(hostmem->mem, s);

    assert(size);

    vhost_mem_unassign_memory(hostmem->mem, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_mem_assign_memory(hostmem->mem, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    qemu_mutex_unlock(&hostmem->mem_lock);
}
Developer: mithleshvrts, Project: qemu-kvm-rhel6, Lines: 34, Source: hostmem.c
Example 8: cpu_exec_end
/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    atomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(atomic_read(&pending_cpus))) {
        qemu_mutex_lock(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            atomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
        qemu_mutex_unlock(&qemu_cpu_list_lock);
    }
}
Developer: 8tab, Project: qemu, Lines: 35, Source: cpus-common.c
Example 9: end_exclusive
/* Finish an exclusive operation.  */
void end_exclusive(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    atomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
Developer: 8tab, Project: qemu, Lines: 8, Source: cpus-common.c
Example 10: start_exclusive
/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    atomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (atomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    atomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
Developer: 8tab, Project: qemu, Lines: 34, Source: cpus-common.c
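Examples 8 through 10 are three pieces of one protocol: start_exclusive stops every running vCPU, end_exclusive releases them, and cpu_exec_end lets a vCPU retire from the protocol. As a caller-side sketch only (run_while_cpus_stopped is a hypothetical helper invented here, not a QEMU function), the intended pairing looks like this:

/* Hypothetical helper: run fn() while no vCPU executes guest code.
 * start_exclusive() returns once every running vCPU has paused;
 * end_exclusive() resets pending_cpus and wakes the paused vCPUs. */
static void run_while_cpus_stopped(void (*fn)(void *), void *data)
{
    start_exclusive();
    fn(data);          /* exclusive section: no concurrent guest execution */
    end_exclusive();
}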
Example 11: rcu_read_perf_test
static void *rcu_read_perf_test(void *arg)
{
    int i;
    long long n_reads_local = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    atomic_inc(&nthreadsrunning);
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        for (i = 0; i < RCU_READ_RUN; i++) {
            rcu_read_lock();
            rcu_read_unlock();
        }
        n_reads_local += RCU_READ_RUN;
    }
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}
Developer: 32bitmicro, Project: riscv-qemu, Lines: 26, Source: rcutorture.c
Example 12: hostmem_lookup
/**
 * Map guest physical address to host pointer
 */
void *hostmem_lookup(HostMem *hostmem, hwaddr phys, hwaddr len, bool is_write)
{
    HostMemRegion *region;
    void *host_addr = NULL;
    hwaddr offset_within_region;

    qemu_mutex_lock(&hostmem->current_regions_lock);
    region = bsearch(&phys, hostmem->current_regions,
                     hostmem->num_current_regions,
                     sizeof(hostmem->current_regions[0]),
                     hostmem_lookup_cmp);
    if (!region) {
        goto out;
    }
    if (is_write && region->readonly) {
        goto out;
    }
    offset_within_region = phys - region->guest_addr;
    if (len <= region->size - offset_within_region) {
        host_addr = region->host_addr + offset_within_region;
    }
out:
    qemu_mutex_unlock(&hostmem->current_regions_lock);
    return host_addr;
}
Developer: CarterTsai, Project: qemu-semihost, Lines: 29, Source: hostmem.c
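A plausible caller of hostmem_lookup is sketched below. It is written under assumptions: read_guest_u32 and guest_paddr are invented for illustration and are not part of the hostmem API. The point is that the returned pointer must be checked, since NULL means the address is unmapped, write access was requested on a read-only region, or the requested length crosses a region boundary:

/* Hypothetical caller: read a 32-bit value from guest memory, if mapped. */
static bool read_guest_u32(HostMem *hostmem, hwaddr guest_paddr, uint32_t *val)
{
    void *p = hostmem_lookup(hostmem, guest_paddr, sizeof(*val), false);
    if (!p) {
        return false;   /* unmapped, or the range crosses a region boundary */
    }
    memcpy(val, p, sizeof(*val));   /* lookup already validated the range */
    return true;
}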
Example 13: qemu_cond_wait
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    /*
     * This access is protected under the mutex.
     */
    cond->waiters++;

    /*
     * Unlock external mutex and wait for signal.
     * NOTE: we've held mutex locked long enough to increment
     * waiters count above, so there's no problem with
     * leaving mutex unlocked before we wait on semaphore.
     */
    qemu_mutex_unlock(mutex);
    WaitForSingleObject(cond->sema, INFINITE);

    /* Now waiters must rendez-vous with the signaling thread and
     * let it continue.  For cond_broadcast this has heavy contention
     * and triggers thundering herd.  So goes life.
     *
     * Decrease waiters count.  The mutex is not taken, so we have
     * to do this atomically.
     *
     * All waiters contend for the mutex at the end of this function
     * until the signaling thread relinquishes it.  To ensure
     * each waiter consumes exactly one slice of the semaphore,
     * the signaling thread stops until it is told by the last
     * waiter that it can go on.
     */
    if (InterlockedDecrement(&cond->waiters) == cond->target) {
        SetEvent(cond->continue_event);
    }

    qemu_mutex_lock(mutex);
}
Developer: chmeeedalf, Project: qemu-bsd-user, Lines: 35, Source: qemu-thread-win32.c
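Whatever the platform-specific implementation above does internally, callers use qemu_cond_wait in the standard predicate-loop shape, as Examples 15, 16 and 18 also show. A minimal sketch, assuming hypothetical globals QemuMutex lock, QemuCond cond and bool ready that some producer thread sets under the same lock:

/* Hypothetical consumer: sleep until a producer sets 'ready' under 'lock'. */
static void wait_until_ready(void)
{
    qemu_mutex_lock(&lock);
    while (!ready) {                   /* loop: wakeups may be spurious */
        qemu_cond_wait(&cond, &lock);  /* atomically unlocks, sleeps, relocks */
    }
    /* ... use the state protected by 'lock' ... */
    qemu_mutex_unlock(&lock);
}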
Example 14: iothread_run
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();
    my_iothread = iothread;

    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (iothread->running) {
        aio_poll(iothread->ctx, true);

        if (atomic_read(&iothread->worker_context)) {
            GMainLoop *loop;

            g_main_context_push_thread_default(iothread->worker_context);
            iothread->main_loop =
                g_main_loop_new(iothread->worker_context, TRUE);
            loop = iothread->main_loop;

            g_main_loop_run(iothread->main_loop);
            iothread->main_loop = NULL;
            g_main_loop_unref(loop);

            g_main_context_pop_thread_default(iothread->worker_context);
        }
    }

    rcu_unregister_thread();
    return NULL;
}
Developer: pfliu, Project: qemu, Lines: 34, Source: iothread.c
Example 15: iothread_complete
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    qemu_thread_create(&iothread->thread, "iothread", iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}
Developer: ismailhkose, Project: qemu, Lines: 30, Source: iothread.c
Example 16: iothread_complete
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}
Developer: 01org, Project: qemu-lite, Lines: 35, Source: iothread.c
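Examples 14 through 17 split one idiom across two threads: the spawning thread waits under init_done_lock until the new thread publishes its thread_id and signals init_done_cond. Condensed into a single self-contained sketch, with all names hypothetical; only the QemuMutex/QemuCond API and qemu_get_thread_id are assumed to behave as in QEMU:

static QemuMutex init_lock;   /* initialized once with qemu_mutex_init() */
static QemuCond  init_cond;   /* initialized once with qemu_cond_init() */
static int       child_tid = -1;

static void *child(void *opaque)
{
    qemu_mutex_lock(&init_lock);
    child_tid = qemu_get_thread_id();  /* publish under the lock */
    qemu_cond_signal(&init_cond);
    qemu_mutex_unlock(&init_lock);
    /* ... actual thread body ... */
    return NULL;
}

static void parent_wait_for_child(void)
{
    qemu_mutex_lock(&init_lock);
    while (child_tid == -1) {          /* guard against spurious wakeups */
        qemu_cond_wait(&init_cond, &init_lock);
    }
    qemu_mutex_unlock(&init_lock);
}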
Example 17: iothread_run
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;
    bool blocking;

    rcu_register_thread();

    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (!iothread->stopping) {
        aio_context_acquire(iothread->ctx);
        blocking = true;
        while (!iothread->stopping && aio_poll(iothread->ctx, blocking)) {
            /* Progress was made, keep going */
            blocking = false;
        }
        aio_context_release(iothread->ctx);
    }

    rcu_unregister_thread();
    return NULL;
}
Developer: 01org, Project: qemu-lite, Lines: 25, Source: iothread.c
Example 18: qemu_archipelago_close
static void qemu_archipelago_close(BlockDriverState *bs)
{
    int r, targetlen;
    char *target;
    struct xseg_request *req;
    BDRVArchipelagoState *s = bs->opaque;

    s->stopping = true;

    qemu_mutex_lock(&s->request_mutex);
    while (!s->th_is_signaled) {
        qemu_cond_wait(&s->request_cond,
                       &s->request_mutex);
    }
    qemu_mutex_unlock(&s->request_mutex);
    qemu_thread_join(&s->request_th);
    qemu_cond_destroy(&s->request_cond);
    qemu_mutex_destroy(&s->request_mutex);

    qemu_cond_destroy(&s->archip_cond);
    qemu_mutex_destroy(&s->archip_mutex);

    targetlen = strlen(s->volname);
    req = xseg_get_request(s->xseg, s->srcport, s->vportno, X_ALLOC);
    if (!req) {
        archipelagolog("Cannot get XSEG request\n");
        goto err_exit;
    }
    r = xseg_prep_request(s->xseg, req, targetlen, 0);
    if (r < 0) {
        xseg_put_request(s->xseg, req, s->srcport);
        archipelagolog("Cannot prepare XSEG close request\n");
        goto err_exit;
    }

    target = xseg_get_target(s->xseg, req);
    memcpy(target, s->volname, targetlen);
    req->size = req->datalen;
    req->offset = 0;
    req->op = X_CLOSE;

    xport p = xseg_submit(s->xseg, req, s->srcport, X_ALLOC);
    if (p == NoPort) {
        xseg_put_request(s->xseg, req, s->srcport);
        archipelagolog("Cannot submit XSEG close request\n");
        goto err_exit;
    }

    xseg_signal(s->xseg, p);
    wait_reply(s->xseg, s->srcport, s->port, req);
    xseg_put_request(s->xseg, req, s->srcport);

err_exit:
    g_free(s->volname);
    g_free(s->segment_name);
    xseg_quit_local_signal(s->xseg, s->srcport);
    xseg_leave_dynport(s->xseg, s->port);
    xseg_leave(s->xseg);
}
Developer: Acidburn0zzz, Project: qemu, Lines: 60, Source: archipelago.c
Example 19: pfifo_read
/* PFIFO - MMIO and DMA FIFO submission to PGRAPH and VPE */
uint64_t pfifo_read(void *opaque, hwaddr addr, unsigned int size)
{
    NV2AState *d = (NV2AState *)opaque;

    qemu_mutex_lock(&d->pfifo.lock);

    uint64_t r = 0;
    switch (addr) {
    case NV_PFIFO_INTR_0:
        r = d->pfifo.pending_interrupts;
        break;
    case NV_PFIFO_INTR_EN_0:
        r = d->pfifo.enabled_interrupts;
        break;
    case NV_PFIFO_RUNOUT_STATUS:
        r = NV_PFIFO_RUNOUT_STATUS_LOW_MARK; /* low mark empty */
        break;
    default:
        r = d->pfifo.regs[addr];
        break;
    }

    qemu_mutex_unlock(&d->pfifo.lock);

    reg_log_read(NV_PFIFO, addr, r);
    return r;
}
Developer: JayFoxRox, Project: xqemu, Lines: 28, Source: nv2a_pfifo.c
Example 20: hostmem_lookup
/**
 * Map guest physical address to host pointer
 */
void *hostmem_lookup(HostMem *hostmem, uint64_t phys, uint64_t len,
                     bool is_write)
{
    struct vhost_memory_region *found = NULL;
    void *host_addr = NULL;
    uint64_t offset_within_region;
    unsigned int i;

    is_write = is_write; /* r/w information is currently not tracked */

    qemu_mutex_lock(&hostmem->mem_lock);
    for (i = 0; i < hostmem->mem->nregions; i++) {
        struct vhost_memory_region *region = &hostmem->mem->regions[i];
        if (range_covers_byte(region->guest_phys_addr,
                              region->memory_size,
                              phys)) {
            found = region;
            break;
        }
    }
    if (!found) {
        goto out;
    }

    offset_within_region = phys - found->guest_phys_addr;
    if (len <= found->memory_size - offset_within_region) {
        host_addr = (void *)(uintptr_t)(found->userspace_addr +
                                        offset_within_region);
    }

out:
    qemu_mutex_unlock(&hostmem->mem_lock);
    return host_addr;
}
Developer: mithleshvrts, Project: qemu-kvm-rhel6, Lines: 37, Source: hostmem.c
Note: The qemu_mutex_unlock function examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers. Copyright of the source code remains with the original authors; consult the corresponding project's License before redistributing or reusing it. Do not reproduce this page without permission.