
C++ KKASSERT Function Code Examples


This article collects typical usage examples of the KKASSERT function in C/C++. If you have been wondering what exactly KKASSERT does, how to use it, or where to find real examples of it in use, the hand-picked code samples below should help.



A total of 20 KKASSERT code examples are shown below, sorted by popularity by default.
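Before the examples themselves, here is a minimal, self-contained sketch of the calling pattern. In the DragonFly BSD kernel, KKASSERT(exp) is an always-compiled assertion that panics the system when exp evaluates to false; the userland stand-in macro below, together with the hypothetical struct buf and check_request helper, only mimics that behavior with abort() so the pattern can be tried outside the kernel.

#include <stdio.h>
#include <stdlib.h>

/*
 * Userland stand-in for the kernel macro, for illustration only.
 * The real KKASSERT panics the kernel when the expression is false.
 */
#define KKASSERT(exp)						\
	do {							\
		if (!(exp)) {					\
			fprintf(stderr,				\
			    "panic: assertion \"%s\" failed at %s:%d\n", \
			    #exp, __FILE__, __LINE__);		\
			abort();				\
		}						\
	} while (0)

#define DEV_BSIZE	512

struct buf {
	size_t b_resid;		/* hypothetical: bytes remaining in the request */
};

static void
check_request(struct buf *bp)
{
	/* Same pattern as Example 2: the request must be sector aligned. */
	KKASSERT(bp != NULL);
	KKASSERT(bp->b_resid % DEV_BSIZE == 0);
}

int
main(void)
{
	struct buf b = { .b_resid = 4096 };

	check_request(&b);
	printf("assertions passed\n");
	return 0;
}

Compiled with a plain C compiler, the program prints "assertions passed"; changing b_resid to a value that is not a multiple of DEV_BSIZE trips the assertion instead.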

Example 1: mtx_chain_link_sh

/*
 * Flush waiting shared locks.  The lock's prior state is passed in and must
 * be adjusted atomically only if it matches and LINKSPIN is not set.
 *
 * IMPORTANT! The caller has left one active count on the lock for us to
 *	      consume.  We will apply this to the first link, but must add
 *	      additional counts for any other links.
 */
static int
mtx_chain_link_sh(mtx_t *mtx, u_int olock)
{
	thread_t td = curthread;
	mtx_link_t *link;
	u_int	addcount;
	u_int	nlock;

	olock &= ~MTX_LINKSPIN;
	nlock = olock | MTX_LINKSPIN;
	nlock &= ~MTX_EXCLUSIVE;
	crit_enter_raw(td);
	if (atomic_cmpset_int(&mtx->mtx_lock, olock, nlock)) {
		/*
		 * It should not be possible for SHWANTED to be set without
		 * any links pending.
		 */
		KKASSERT(mtx->mtx_shlink != NULL);

		/*
		 * We have to process the count for all shared locks before
		 * we process any of the links.  Count the additional shared
		 * locks beyond the first link (which is already accounted
		 * for) and associate the full count with the lock
		 * immediately.
		 */
		addcount = 0;
		for (link = mtx->mtx_shlink->next; link != mtx->mtx_shlink;
		     link = link->next) {
			++addcount;
		}
		if (addcount > 0)
			atomic_add_int(&mtx->mtx_lock, addcount);

		/*
		 * We can wakeup all waiting shared locks.
		 */
		while ((link = mtx->mtx_shlink) != NULL) {
			KKASSERT(link->state == MTX_LINK_LINKED_SH);
			if (link->next == link) {
				mtx->mtx_shlink = NULL;
			} else {
				mtx->mtx_shlink = link->next;
				link->next->prev = link->prev;
				link->prev->next = link->next;
			}
			link->next = NULL;
			link->prev = NULL;
			cpu_sfence();
			if (link->callback) {
				link->state = MTX_LINK_CALLEDBACK;
				link->callback(link, link->arg, 0);
			} else {
				cpu_sfence();
				link->state = MTX_LINK_ACQUIRED;
				wakeup(link);
			}
		}
		atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN |
						 MTX_SHWANTED);
		crit_exit_raw(td);
		return 1;
	}
	/* retry */
	crit_exit_raw(td);

	return 0;
}
Developer: kusumi, Project: DragonFlyBSD, Lines of code: 76, Source file: kern_mutex.c


Example 2: dm_target_stripe_strategy

/*
 * Strategy routine called from dm_strategy.
 */
static int
dm_target_stripe_strategy(dm_table_entry_t *table_en, struct buf *bp)
{
	dm_target_stripe_config_t *tsc;
	struct bio *bio = &bp->b_bio1;
	struct buf *nestbuf;
	uint64_t blkno, blkoff;
	uint64_t stripe, blknr;
	uint32_t stripe_off, stripe_rest, num_blks, issue_blks;
	int devnr;

	tsc = table_en->target_config;
	if (tsc == NULL)
		return 0;

	/* calculate extent of request */
	KKASSERT(bp->b_resid % DEV_BSIZE == 0);

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
	case BUF_CMD_WRITE:
	case BUF_CMD_FREEBLKS:
		/*
		 * Loop through the individual operations
		 */
		blkno = bp->b_bio1.bio_offset / DEV_BSIZE;
		blkoff = 0;
		num_blks = bp->b_resid / DEV_BSIZE;
		nestiobuf_init(bio);

		while (num_blks > 0) {
			/* blockno to strip piece nr */
			stripe = blkno / tsc->stripe_chunksize;
			stripe_off = blkno % tsc->stripe_chunksize;

			/* where we are inside the strip */
			devnr = stripe % tsc->stripe_num;
			blknr = stripe / tsc->stripe_num;

			/* how much is left before we hit a boundary */
			stripe_rest = tsc->stripe_chunksize - stripe_off;

			/* issue this piece on stripe `stripe' */
			issue_blks = MIN(stripe_rest, num_blks);
			nestbuf = getpbuf(NULL);
			nestbuf->b_flags |= bio->bio_buf->b_flags & B_HASBOGUS;

			nestiobuf_add(bio, nestbuf, blkoff,
					issue_blks * DEV_BSIZE, NULL);

			/* We need the offset in bytes. */
			nestbuf->b_bio1.bio_offset =
				blknr * tsc->stripe_chunksize + stripe_off;
			nestbuf->b_bio1.bio_offset +=
				tsc->stripe_devs[devnr].offset;
			nestbuf->b_bio1.bio_offset *= DEV_BSIZE;

			vn_strategy(tsc->stripe_devs[devnr].pdev->pdev_vnode,
				    &nestbuf->b_bio1);

			blkno += issue_blks;
			blkoff += issue_blks * DEV_BSIZE;
			num_blks -= issue_blks;
		}
		nestiobuf_start(bio);
		break;
	case BUF_CMD_FLUSH:
		nestiobuf_init(bio);
		for (devnr = 0; devnr < tsc->stripe_num; ++devnr) {
			nestbuf = getpbuf(NULL);
			nestbuf->b_flags |= bio->bio_buf->b_flags & B_HASBOGUS;

			nestiobuf_add(bio, nestbuf, 0, 0, NULL);
			nestbuf->b_bio1.bio_offset = 0;
			vn_strategy(tsc->stripe_devs[devnr].pdev->pdev_vnode,
				    &nestbuf->b_bio1);
		}
		nestiobuf_start(bio);
		break;
	default:
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		break;
	}
	return 0;
}
Developer: AhmadTux, Project: DragonFlyBSD, Lines of code: 90, Source file: dm_target_striped.c
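The block-to-stripe mapping that Example 2 asserts and computes is plain integer division and modulo. The standalone program below walks through the same arithmetic with hypothetical values (an 8-block chunk size across 4 devices, neither taken from the driver) so the resulting device, chunk, and in-chunk offset are easy to verify by hand.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Hypothetical stripe geometry: 8-block chunks over 4 devices. */
	uint64_t stripe_chunksize = 8;
	uint64_t stripe_num = 4;
	uint64_t blkno = 37;	/* logical block being mapped */

	/* Same arithmetic as dm_target_stripe_strategy(). */
	uint64_t stripe     = blkno / stripe_chunksize;	/* which chunk overall */
	uint64_t stripe_off = blkno % stripe_chunksize;	/* offset inside the chunk */
	uint64_t devnr      = stripe % stripe_num;	/* which device */
	uint64_t blknr      = stripe / stripe_num;	/* chunk index on that device */

	printf("blkno %llu -> device %llu, chunk %llu, offset %llu\n",
	    (unsigned long long)blkno,
	    (unsigned long long)devnr,
	    (unsigned long long)blknr,
	    (unsigned long long)stripe_off);
	/* Prints: blkno 37 -> device 0, chunk 1, offset 5 */
	return 0;
}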


Example 3: nwfs_putpages

/*
 * Vnode op for VM putpages.
 * possible bug: all IO done in sync mode
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 *
 * nwfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		 int a_sync, int *a_rtvals, vm_ooffset_t a_offset)
 */
int
nwfs_putpages(struct vop_putpages_args *ap)
{
	int error;
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp = ap->a_vp;
	struct ucred *cred;

#ifndef NWFS_RWCACHE
	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, NULL);
	error = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
		ap->a_sync, ap->a_rtvals);
	VOP_CLOSE(vp, FWRITE, cred);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;		/* XXX */

/*	VOP_OPEN(vp, FWRITE, cred, NULL);*/
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	bp = getpbuf_kva(&nwfs_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	NCPVNDEBUG("ofs=%d,resid=%d\n",(int)uio.uio_offset, uio.uio_resid);

	error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
/*	VOP_CLOSE(vp, FWRITE, cred);*/
	NCPVNDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nwfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
	}
	return rtvals[0];
#endif /* NWFS_RWCACHE */
}
Developer: mihaicarabas, Project: dragonfly, Lines of code: 83, Source file: nwfs_io.c


Example 4: removede

/*
 * Remove a directory entry. At this point the file represented by the
 * directory entry to be removed is still full length until no one has it
 * open.  When the file is no longer being used, msdosfs_inactive() is called
 * and will truncate the file to 0 length.  When the vnode containing the
 * denode is needed for some other purpose by VFS it will call
 * msdosfs_reclaim() which will remove the denode from the denode cache.
 */
int
removede(struct denode *pdep,	/* directory where the entry is removed */
	 struct denode *dep)	/* file to be removed */
{
	int error;
	struct direntry *ep;
	struct buf *bp;
	daddr_t bn;
	int blsize;
	struct msdosfsmount *pmp = pdep->de_pmp;
	u_long offset = pdep->de_fndoffset;

#ifdef MSDOSFS_DEBUG
	kprintf("removede(): filename %s, dep %p, offset %08lx\n",
	    dep->de_Name, dep, offset);
#endif

	KKASSERT(dep->de_refcnt > 0);
	dep->de_refcnt--;
	offset += sizeof(struct direntry);
	do {
		offset -= sizeof(struct direntry);
		error = pcbmap(pdep, de_cluster(pmp, offset),
			       &bn, NULL, &blsize);
		if (error)
			return error;
		error = bread(pmp->pm_devvp, de_bntodoff(pmp, bn), blsize, &bp);
		if (error) {
			brelse(bp);
			return error;
		}
		ep = bptoep(pmp, bp, offset);
		/*
		 * Check whether, if we came here the second time, i.e.
		 * when underflowing into the previous block, the last
		 * entry in this block is a longfilename entry, too.
		 */
		if (ep->deAttributes != ATTR_WIN95
		    && offset != pdep->de_fndoffset) {
			brelse(bp);
			break;
		}
		offset += sizeof(struct direntry);
		while (1) {
			/*
			 * We are a bit aggressive here in that we delete any Win95
			 * entries preceding this entry, not just the ones we "own".
			 * Since these presumably aren't valid anyway,
			 * there should be no harm.
			 */
			offset -= sizeof(struct direntry);
			ep--->deName[0] = SLOT_DELETED;
			if ((pmp->pm_flags & MSDOSFSMNT_NOWIN95)
			    || !(offset & pmp->pm_crbomask)
			    || ep->deAttributes != ATTR_WIN95)
				break;
		}
		if ((error = bwrite(bp)) != 0)
			return error;
	} while (!(pmp->pm_flags & MSDOSFSMNT_NOWIN95)
	    && !(offset & pmp->pm_crbomask)
	    && offset);
	return 0;
}
Developer: kusumi, Project: DragonFlyBSD, Lines of code: 72, Source file: msdosfs_lookup.c


Example 5: callout_reset_ipi

/*
 * Remote IPI for callout_reset_bycpu().  The operation is performed only
 * on the 1->0 transition of the counter, otherwise there are callout_stop()s
 * pending after us.
 *
 * The IPI counter and PENDING flags must be set atomically with the
 * 1->0 transition.  The ACTIVE flag was set prior to the ipi being
 * sent and we do not want to race a caller on the original cpu trying
 * to deactivate() the flag concurrent with our installation of the
 * callout.
 */
static void
callout_reset_ipi(void *arg)
{
	struct callout *c = arg;
	globaldata_t gd = mycpu;
	globaldata_t tgd;
	int flags;
	int nflags;

	for (;;) {
		flags = c->c_flags;
		cpu_ccfence();
		KKASSERT((flags & CALLOUT_IPI_MASK) > 0);

		/*
		 * We should already be armed for our cpu; if armed for another
		 * cpu, chain the IPI.  If for some reason we are not armed,
		 * we can arm ourselves.
		 */
		if (flags & CALLOUT_ARMED) {
			if (CALLOUT_FLAGS_TO_CPU(flags) != gd->gd_cpuid) {
				tgd = globaldata_find(
						CALLOUT_FLAGS_TO_CPU(flags));
				lwkt_send_ipiq(tgd, callout_reset_ipi, c);
				return;
			}
			nflags = (flags & ~CALLOUT_EXECUTED);
		} else {
			nflags = (flags & ~(CALLOUT_CPU_MASK |
					    CALLOUT_EXECUTED)) |
				 CALLOUT_ARMED |
				 CALLOUT_CPU_TO_FLAGS(gd->gd_cpuid);
		}

		/*
		 * Decrement the IPI count, retain and clear the WAITING
		 * status, clear EXECUTED.
		 *
		 * NOTE: It is possible for the callout to already have been
		 *	 marked pending due to SMP races.
		 */
		nflags = nflags - 1;
		if ((flags & CALLOUT_IPI_MASK) == 1) {
			nflags &= ~(CALLOUT_WAITING | CALLOUT_EXECUTED);
			nflags |= CALLOUT_PENDING;
		}

		if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
			/*
			 * Only install the callout on the 1->0 transition
			 * of the IPI count, and only if PENDING was not
			 * already set.  The latter situation should never
			 * occur but we check anyway.
			 */
			if ((flags & (CALLOUT_PENDING|CALLOUT_IPI_MASK)) == 1) {
				softclock_pcpu_t sc;

				sc = &softclock_pcpu_ary[gd->gd_cpuid];
				c->c_time = sc->curticks + c->c_load;
				TAILQ_INSERT_TAIL(
					&sc->callwheel[c->c_time & cwheelmask],
					c, c_links.tqe);
			}
			break;
		}
		/* retry */
		cpu_pause();
	}

	/*
	 * Issue wakeup if requested.
	 */
	if (flags & CALLOUT_WAITING)
		wakeup(c);
}
Developer: wan721, Project: DragonFlyBSD, Lines of code: 86, Source file: kern_timeout.c


Example 6: _callout_stop

/*
 * Stop a running timer and ensure that any running callout completes before
 * returning.  If the timer is running on another cpu this function may block
 * to interlock against the callout.  If the callout is currently executing
 * or blocked in another thread this function may also block to interlock
 * against the callout.
 *
 * The caller must be careful to avoid deadlocks, either by using
 * callout_init_lk() (which uses the lockmgr lock cancelation feature),
 * by using tokens and dealing with breaks in the serialization, or using
 * the lockmgr lock cancelation feature yourself in the callout callback
 * function.
 *
 * callout_stop() returns non-zero if the callout was pending.
 */
static int
_callout_stop(struct callout *c, int issync)
{
	globaldata_t gd = mycpu;
	globaldata_t tgd;
	softclock_pcpu_t sc;
	int flags;
	int nflags;
	int rc;
	int cpuid;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_stop(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	crit_enter_gd(gd);

	/*
	 * Fast path operations:
	 *
	 * If ARMED and owned by our cpu, or not ARMED, and other simple
	 * conditions are met, we can just clear ACTIVE and EXECUTED
	 * and we are done.
	 */
	for (;;) {
		flags = c->c_flags;
		cpu_ccfence();

		cpuid = CALLOUT_FLAGS_TO_CPU(flags);

		/*
		 * Can't handle an armed callout in the fast path if it is
		 * not on the current cpu.  We must atomically increment the
		 * IPI count for the IPI we intend to send and break out of
		 * the fast path to enter the slow path.
		 */
		if (flags & CALLOUT_ARMED) {
			if (gd->gd_cpuid != cpuid) {
				nflags = flags + 1;
				if (atomic_cmpset_int(&c->c_flags,
						      flags, nflags)) {
					/* break to slow path */
					break;
				}
				continue;	/* retry */
			}
		} else {
			cpuid = gd->gd_cpuid;
			KKASSERT((flags & CALLOUT_IPI_MASK) == 0);
			KKASSERT((flags & CALLOUT_PENDING) == 0);
		}

		/*
		 * Process pending IPIs and retry (only if not called from
		 * an IPI).
		 */
		if (flags & CALLOUT_IPI_MASK) {
			lwkt_process_ipiq();
			continue;	/* retry */
		}

		/*
		 * Transition to the stopped state, recover the EXECUTED
		 * status.  If pending we cannot clear ARMED until after
		 * we have removed (c) from the callwheel.
		 *
		 * NOTE: The callout might already not be armed but in this
		 *	 case it should also not be pending.
		 */
		nflags = flags & ~(CALLOUT_ACTIVE |
				   CALLOUT_EXECUTED |
				   CALLOUT_WAITING |
				   CALLOUT_PENDING);

		/* NOTE: IPI_MASK already tested */
		if ((flags & CALLOUT_PENDING) == 0)
			nflags &= ~CALLOUT_ARMED;
		if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
			/*
			 * Can only remove from callwheel if currently
//......... portions of the code omitted here .........
Developer: wan721, Project: DragonFlyBSD, Lines of code: 101, Source file: kern_timeout.c


Example 7: softclock_handler


//......... portions of the code omitted here .........
				CALLOUT_FLAGS_TO_CPU(c->c_flags) ==
				mycpu->gd_cpuid,
				("callout %p: bad flags %08x", c, c->c_flags));

			/*
			 * Once CALLOUT_PENDING is cleared, sc->running
			 * protects the callout structure's existence but
			 * only until we call c_func().  A callout_stop()
			 * or callout_reset() issued from within c_func()
			 * will not block.  The callout can also be kfree()d
			 * by c_func().
			 *
			 * We set EXECUTED before calling c_func() so a
			 * callout_stop() issued from within c_func() returns
			 * the correct status.
			 */
			if ((flags & (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) ==
			    (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) {
				void (*c_func)(void *);
				void *c_arg;
				struct lock *c_lk;
				int error;

				/*
				 * NOTE: sc->running must be set prior to
				 *	 CALLOUT_PENDING being cleared to
				 *	 avoid missed CANCELs and *_stop()
				 *	 races.
				 */
				sc->running = (intptr_t)c;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_lk = c->c_lk;
				c->c_func = NULL;
				KKASSERT(c->c_flags & CALLOUT_DID_INIT);
				flags = callout_unpend_disarm(c);
				error = lockmgr(c_lk, LK_EXCLUSIVE |
						      LK_CANCELABLE);
				if (error == 0) {
					atomic_set_int(&c->c_flags,
						       CALLOUT_EXECUTED);
					crit_exit();
					c_func(c_arg);
					crit_enter();
					lockmgr(c_lk, LK_RELEASE);
				}
			} else if (flags & CALLOUT_ACTIVE) {
				void (*c_func)(void *);
				void *c_arg;

				sc->running = (intptr_t)c;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c->c_func = NULL;
				KKASSERT(c->c_flags & CALLOUT_DID_INIT);
				flags = callout_unpend_disarm(c);
				atomic_set_int(&c->c_flags, CALLOUT_EXECUTED);
				crit_exit();
				c_func(c_arg);
				crit_enter();
			} else {
				flags = callout_unpend_disarm(c);
			}

			/*
			 * Read and clear sc->running.  If bit 0 was set,
			 * a callout_stop() is likely blocked waiting for
			 * the callback to complete.
			 *
			 * The sigclear above also cleared CALLOUT_WAITING
			 * and returns the contents of flags prior to clearing
			 * any bits.
			 *
			 * Interlock wakeup any _stop's waiting on us.  Note
			 * that once c_func() was called, the callout
			 * structure (c) pointer may no longer be valid.  It
			 * can only be used for the wakeup.
			 */
			if ((atomic_readandclear_ptr(&sc->running) & 1) ||
			    (flags & CALLOUT_WAITING)) {
				wakeup(c);
			}
			/* NOTE: list may have changed */
		}
		++sc->softticks;
	}

	/*
	 * Don't leave us holding the MP lock when we deschedule ourselves.
	 */
	if (mpsafe == 0) {
		mpsafe = 1;
		rel_mplock();
	}
	sc->isrunning = 0;
	lwkt_deschedule_self(&sc->thread);	/* == curthread */
	lwkt_switch();
	goto loop;
	/* NOT REACHED */
}
Developer: wan721, Project: DragonFlyBSD, Lines of code: 101, Source file: kern_timeout.c


Example 8: _lwkt_trytokref

/*
 * Attempt to acquire a shared or exclusive token.  Returns TRUE on success,
 * FALSE on failure.
 *
 * If TOK_EXCLUSIVE is set in mode we are attempting to get an exclusive
 * token, otherwise are attempting to get a shared token.
 *
 * If TOK_EXCLREQ is set in mode this is a blocking operation, otherwise
 * it is a non-blocking operation (for both exclusive and shared acquisitions).
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref, thread_t td, long mode)
{
	lwkt_token_t tok;
	lwkt_tokref_t oref;
	long count;

	tok = ref->tr_tok;
	KASSERT(((mode & TOK_EXCLREQ) == 0 ||	/* non blocking */
		td->td_gd->gd_intr_nesting_level == 0 ||
		panic_cpu_gd == mycpu),
		("Attempt to acquire token %p not already "
		"held in hard code section", tok));

	if (mode & TOK_EXCLUSIVE) {
		/*
		 * Attempt to get an exclusive token
		 */
		for (;;) {
			count = tok->t_count;
			oref = tok->t_ref;	/* can be NULL */
			cpu_ccfence();
			if ((count & ~TOK_EXCLREQ) == 0) {
				/*
				 * It is possible to get the exclusive bit.
				 * We must clear TOK_EXCLREQ on successful
				 * acquisition.
				 */
				if (atomic_cmpset_long(&tok->t_count, count,
						       (count & ~TOK_EXCLREQ) |
						       TOK_EXCLUSIVE)) {
					KKASSERT(tok->t_ref == NULL);
					tok->t_ref = ref;
					return TRUE;
				}
				/* retry */
			} else if ((count & TOK_EXCLUSIVE) &&
				   oref >= &td->td_toks_base &&
				   oref < td->td_toks_stop) {
				/*
				 * Our thread already holds the exclusive
				 * bit, we treat this tokref as a shared
				 * token (sorta) to make the token release
				 * code easier.
				 *
				 * NOTE: oref cannot race above if it
				 *	 happens to be ours, so we're good.
				 *	 But we must still have a stable
				 *	 variable for both parts of the
				 *	 comparison.
				 *
				 * NOTE: Since we already have an exclusive
				 *	 lock and don't need to check EXCLREQ
				 *	 we can just use an atomic_add here
				 */
				atomic_add_long(&tok->t_count, TOK_INCR);
				ref->tr_count &= ~TOK_EXCLUSIVE;
				return TRUE;
			} else if ((mode & TOK_EXCLREQ) &&
				   (count & TOK_EXCLREQ) == 0) {
				/*
				 * Unable to get the exclusive bit but being
				 * asked to set the exclusive-request bit.
				 * Since we are going to retry anyway just
				 * set the bit unconditionally.
				 */
				atomic_set_long(&tok->t_count, TOK_EXCLREQ);
				return FALSE;
			} else {
				/*
				 * Unable to get the exclusive bit and not
				 * being asked to set the exclusive-request
				 * (aka lwkt_trytoken()), or EXCLREQ was
				 * already set.
				 */
				cpu_pause();
				return FALSE;
			}
			/* retry */
		}
	} else {
		/*
		 * Attempt to get a shared token.  Note that TOK_EXCLREQ
		 * for shared tokens simply means the caller intends to
		 * block.  We never actually set the bit in tok->t_count.
		 */
		for (;;) {
			count = tok->t_count;
			oref = tok->t_ref;	/* can be NULL */
//......... portions of the code omitted here .........
Developer: juanfra684, Project: DragonFlyBSD, Lines of code: 101, Source file: lwkt_token.c
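Examples 1, 6, 8, 13 and 15 all follow the same lock-free idiom: read the word, compute the desired next value, attempt an atomic compare-and-set, and retry on failure, with KKASSERT checking invariants once the transition is won. The fragment below is a simplified userland sketch of that idiom using C11 <stdatomic.h>; the flag layout, the names, and the assert-based KKASSERT stand-in are hypothetical and much simpler than the kernel's.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define KKASSERT(exp)	assert(exp)	/* userland stand-in, hypothetical */

#define LOCK_EXCLUSIVE	0x80000000u	/* hypothetical exclusive bit */
#define LOCK_COUNT_MASK	0x0fffffffu	/* hypothetical hold count */

/*
 * Try to take an exclusive lock on *word: succeed only when no shared or
 * exclusive holder exists, mirroring the cmpset retry loops above.
 */
static bool
try_lock_excl(_Atomic uint32_t *word)
{
	uint32_t lock, nlock;

	for (;;) {
		lock = atomic_load(word);
		if ((lock & (LOCK_EXCLUSIVE | LOCK_COUNT_MASK)) != 0)
			return false;		/* already held, give up */
		nlock = LOCK_EXCLUSIVE | 1;
		if (atomic_compare_exchange_weak(word, &lock, nlock)) {
			/* We won the 0 -> exclusive transition. */
			KKASSERT((nlock & LOCK_COUNT_MASK) == 1);
			return true;
		}
		/* Lost the race (or spurious failure): reload and retry. */
	}
}

int
main(void)
{
	static _Atomic uint32_t word;	/* zero-initialized: unlocked */

	KKASSERT(try_lock_excl(&word) == true);
	KKASSERT(try_lock_excl(&word) == false);	/* second attempt fails */
	return 0;
}

The weak compare-exchange may fail spuriously, which is harmless here because the loop reloads the word and retries, just as the kernel loops retry after a lost atomic_cmpset_int().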


Example 9: dmstrategy

/*
 * Do all IO operations on dm logical devices.
 */
static int
dmstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	int bypass;

	dm_dev_t *dmv;
	dm_table_t  *tbl;
	dm_table_entry_t *table_en;
	struct buf *nestbuf;

	uint32_t dev_type;

	uint64_t buf_start, buf_len, issued_len;
	uint64_t table_start, table_end;
	uint64_t start, end;

	buf_start = bio->bio_offset;
	buf_len = bp->b_bcount;

	tbl = NULL;

	table_end = 0;
	dev_type = 0;
	issued_len = 0;

	dmv = dev->si_drv1;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
	case BUF_CMD_WRITE:
	case BUF_CMD_FREEBLKS:
		bypass = 0;
		break;
	case BUF_CMD_FLUSH:
		bypass = 1;
		KKASSERT(buf_len == 0);
		break;
	default:
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
		biodone(bio);
		return 0;
	}

	if (bypass == 0 &&
	    bounds_check_with_mediasize(bio, DEV_BSIZE,
					dm_table_size(&dmv->table_head)) <= 0) {
		bp->b_resid = bp->b_bcount;
		biodone(bio);
		return 0;
	}

	/* Select active table */
	tbl = dm_table_get_entry(&dmv->table_head, DM_TABLE_ACTIVE);

	nestiobuf_init(bio);
	devstat_start_transaction(&dmv->stats);

	/*
	 * Find out what tables I want to select.
	 */
	SLIST_FOREACH(table_en, tbl, next) {
		/*
		 * We need the number of bytes, not blocks.
		 */
		table_start = table_en->start * DEV_BSIZE;
		table_end = table_start + (table_en->length) * DEV_BSIZE;

		/*
		 * Calculate the start and end
		 */
		start = MAX(table_start, buf_start);
		end = MIN(table_end, buf_start + buf_len);

		aprint_debug("----------------------------------------\n");
		aprint_debug("table_start %010" PRIu64", table_end %010"
		    PRIu64 "\n", table_start, table_end);
		aprint_debug("buf_start %010" PRIu64", buf_len %010"
		    PRIu64"\n", buf_start, buf_len);
		aprint_debug("start-buf_start %010"PRIu64", end %010"
		    PRIu64"\n", start - buf_start, end);
		aprint_debug("start %010" PRIu64" , end %010"
                    PRIu64"\n", start, end);
		aprint_debug("\n----------------------------------------\n");

		if (bypass) {
			nestbuf = getpbuf(NULL);
			nestbuf->b_flags |= bio->bio_buf->b_flags & B_HASBOGUS;

			nestiobuf_add(bio, nestbuf, 0, 0, &dmv->stats);
			nestbuf->b_bio1.bio_offset = 0;
			table_en->target->strategy(table_en, nestbuf);
		} else if (start < end) {
			nestbuf = getpbuf(NULL);
//......... portions of the code omitted here .........
Developer: AhmadTux, Project: DragonFlyBSD, Lines of code: 101, Source file: device-mapper.c


Example 10: nwfs_doio

/*
 * Do an I/O operation to/from a cache block.
 */
int
nwfs_doio(struct vnode *vp, struct bio *bio, struct ucred *cr, struct thread *td)
{
	struct buf *bp = bio->bio_buf;
	struct uio *uiop;
	struct nwnode *np;
	struct nwmount *nmp;
	int error = 0;
	struct uio uio;
	struct iovec io;

	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	if (bp->b_cmd == BUF_CMD_READ) {
	    io.iov_len = uiop->uio_resid = (size_t)bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = bio->bio_offset;
		error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);
		if (error)
			break;
		if (uiop->uio_resid) {
			size_t left = uiop->uio_resid;
			size_t nread = bp->b_bcount - left;
			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
		}
		break;
/*	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = bio->bio_offset;
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop, cr);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop, cr);
		if (error == 0 && uiop->uio_resid == (size_t)bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
*/
	    default:
		kprintf("nwfs_doio:  type %x unexpected\n",vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else { /* write */
	    KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
	    if (bio->bio_offset + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - bio->bio_offset;

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid =
			(size_t)(bp->b_dirtyend - bp->b_dirtyoff);
		uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set B_ERROR and report the interruption
		 * by setting B_EINTR. For the async case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
    		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {

			crit_enter();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0)
			    bdirty(bp);
			bp->b_flags |= B_EINTR;
			crit_exit();
	    	} else {
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error /*= np->n_error */= error;
//......... portions of the code omitted here .........
Developer: mihaicarabas, Project: dragonfly, Lines of code: 101, Source file: nwfs_io.c


Example 11: nwfs_getpages

/*
 * Vnode op for VM getpages.
 * Wish wish .... get rid from multiple IO routines
 *
 * nwfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		 int a_reqpage, vm_ooffset_t a_offset)
 */
int
nwfs_getpages(struct vop_getpages_args *ap)
{
#ifndef NWFS_RWCACHE
	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
					    ap->a_reqpage, ap->a_seqaccess);
#else
	int i, error, npages;
	size_t nextoff, toff;
	size_t count;
	size_t size;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;

	vp = ap->a_vp;
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = (size_t)ap->a_count;

	if (vp->v_object == NULL) {
		kprintf("nwfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	bp = getpbuf_kva(&nwfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, &uio,cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nwfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		kprintf("nwfs_getpages: error %d\n",error);
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: pmap dirty bit should have already been cleared.
		 *	 We do not clear it here.
		 */
		if (nextoff <= size) {
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		} else {
			int nvalid = ((size + DEV_BSIZE - 1) - toff) &
				      ~(DEV_BSIZE - 1);
			vm_page_set_validclean(m, 0, nvalid);
		}
		
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
//......... portions of the code omitted here .........
Developer: mihaicarabas, Project: dragonfly, Lines of code: 101, Source file: nwfs_io.c


Example 12: tcp_usr_send

/*
 * Do a send by putting data in output queue and updating urgent
 * marker if URG set.  Possibly send more data.  Unlike the other
 * pru_*() routines, the mbuf chains are our responsibility.  We
 * must either enqueue them or free them.  The other pru_* routines
 * generally are caller-frees.
 */
static void
tcp_usr_send(netmsg_t msg)
{
	struct socket *so = msg->send.base.nm_so;
	int flags = msg->send.nm_flags;
	struct mbuf *m = msg->send.nm_m;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
	TCPDEBUG0;

	KKASSERT(msg->send.nm_control == NULL);
	KKASSERT(msg->send.nm_addr == NULL);
	KKASSERT((flags & PRUS_FREEADDR) == 0);

	inp = so->so_pcb;

	if (inp == NULL) {
		/*
		 * OOPS! we lost a race, the TCP session got reset after
		 * we checked SS_CANTSENDMORE, eg: while doing uiomove or a
		 * network interrupt in the non-critical section of sosend().
		 */
		m_freem(m);
		error = ECONNRESET;	/* XXX EPIPE? */
		tp = NULL;
		TCPDEBUG1();
		goto out;
	}
	tp = intotcpcb(inp);
	TCPDEBUG1();

#ifdef foo
	/*
	 * This is no longer necessary, since:
	 * - sosendtcp() has already checked it for us
	 * - It does not work with asynchronous send
	 */

	/*
	 * Don't let too much OOB data build up
	 */
	if (flags & PRUS_OOB) {
		if (ssb_space(&so->so_snd) < -512) {
			m_freem(m);
			error = ENOBUFS;
			goto out;
		}
	}
#endif

	/*
	 * Pump the data into the socket.
	 */
	if (m) {
		ssb_appendstream(&so->so_snd, m);
		sowwakeup(so);
	}
	if (flags & PRUS_OOB) {
		/*
		 * According to RFC961 (Assigned Protocols),
		 * the urgent pointer points to the last octet
		 * of urgent data.  We continue, however,
		 * to consider it to indicate the first octet
		 * of data past the urgent section.
		 * Otherwise, snd_up should be one lower.
		 */
		tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
		tp->t_flags |= TF_FORCE;
		error = tcp_output(tp);
		tp->t_flags &= ~TF_FORCE;
	} else {
		if (flags & PRUS_EOF) {
			/*
			 * Close the send side of the connection after
			 * the data is sent.
			 */
			socantsendmore(so);
			tp = tcp_usrclosed(tp);
		}
		if (tp != NULL && !tcp_output_pending(tp)) {
			if (flags & PRUS_MORETOCOME)
				tp->t_flags |= TF_MORETOCOME;
			error = tcp_output_fair(tp);
			if (flags & PRUS_MORETOCOME)
				tp->t_flags &= ~TF_MORETOCOME;
		}
	}
	COMMON_END1((flags & PRUS_OOB) ? PRU_SENDOOB :
		   ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND),
		   (flags & PRUS_NOREPLY));
}
Developer: iHaD, Project: DragonFlyBSD, Lines of code: 99, Source file: tcp_usrreq.c


Example 13: _mtx_unlock

/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * On the last release we handle any pending chains.
 */
void
_mtx_unlock(mtx_t *mtx)
{
	thread_t td __debugvar = curthread;
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		cpu_ccfence();

		switch(lock) {
		case MTX_EXCLUSIVE | 1:
			/*
			 * Last release, exclusive lock.
			 * No exclusive or shared requests pending.
			 */
			KKASSERT(mtx->mtx_owner == td ||
				 mtx->mtx_owner == NULL);
			mtx->mtx_owner = NULL;
			nlock = 0;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				goto done;
			break;
		case MTX_EXCLUSIVE | MTX_EXWANTED | 1:
		case MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1:
			/*
			 * Last release, exclusive lock.
			 * Exclusive requests pending.
			 * Exclusive requests have priority over shared reqs.
			 */
			KKASSERT(mtx->mtx_owner == td ||
				 mtx->mtx_owner == NULL);
			mtx->mtx_owner = NULL;
			if (mtx_chain_link_ex(mtx, lock))
				goto done;
			break;
		case MTX_EXCLUSIVE | MTX_SHWANTED | 1:
			/*
			 * Last release, exclusive lock.
			 *
			 * Shared requests are pending.  Transfer our count (1)
			 * to the first shared request, wakeup all shared reqs.
			 */
			KKASSERT(mtx->mtx_owner == td ||
				 mtx->mtx_owner == NULL);
			mtx->mtx_owner = NULL;
			if (mtx_chain_link_sh(mtx, lock))
				goto done;
			break;
		case 1:
			/*
			 * Last release, shared lock.
			 * No exclusive or shared requests pending.
			 */
			nlock = 0;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				goto done;
			break;
		case MTX_EXWANTED | 1:
		case MTX_EXWANTED | MTX_SHWANTED | 1:
			/*
			 * Last release, shared lock.
			 *
			 * Exclusive requests are pending.  Upgrade this
			 * final shared lock to exclusive and transfer our
			 * count (1) to the next exclusive request.
			 *
			 * Exclusive requests have priority over shared reqs.
			 */
			if (mtx_chain_link_ex(mtx, lock))
				goto done;
			break;
		case MTX_SHWANTED | 1:
			/*
			 * Last release, shared lock.
			 * Shared requests pending.
			 */
			if (mtx_chain_link_sh(mtx, lock))
				goto done;
			break;
		default:
			/*
			 * We have to loop if this is the last release but
			 * someone is fiddling with LINKSPIN.
			 */
			if ((lock & MTX_MASK) == 1) {
				KKASSERT(lock & MTX_LINKSPIN);
				break;
			}

			/*
			 * Not the last release (shared or exclusive)
			 */
			nlock = lock - 1;
//......... portions of the code omitted here .........
Developer: kusumi, Project: DragonFlyBSD, Lines of code: 101, Source file: kern_mutex.c


Example 14: hammer_ioc_volume_add

int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	struct bigblock_stat stat;
	hammer_volume_t volume;
	int free_vol_no = 0;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find an unused volume number.
	 */
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
		HAMMER_VOLUME_NUMBER_IS_SET(hmp, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		error = EINVAL;
		goto end;
	}

	error = hammer_format_volume_header(
		hmp,
		&ondisk,
		hmp->rootvol->ondisk->vol_name,
		free_vol_no,
		hmp->nvolumes+1,
		ioc->vol_size,
		ioc->boot_area_size,
		ioc->mem_area_size);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL, &ondisk);
	if (error)
		goto end;

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	error =	hammer_format_freemap(trans, volume, &stat);
	KKASSERT(error == 0);
	hammer_rel_volume(volume, 0);

	++hmp->nvolumes;
	error = hammer_update_volumes_header(trans, &stat);
	KKASSERT(error == 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}
Developer: iHaD, Project: DragonFlyBSD, Lines of code: 80, Source file: hammer_volume.c


Example 15: __mtx_lock_ex

/*
 * Exclusive-lock a mutex, block until acquired unless link is async.
 * Recursion is allowed.
 *
 * Returns 0 on success, the tsleep() return code on failure, EINPROGRESS
 * if async.  If immediately successful an async exclusive lock will return 0
 * and not issue the async callback or link the link structure.  The caller
 * must handle this case (typically this is an optimal code path).
 *
 * A tsleep() error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
__mtx_lock_ex(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	thread_t td;
	u_int	lock;
	u_int	nlock;
	int	error;
	int	isasync;

	for (;;) {
		lock = mtx->mtx_lock;
		cpu_ccfence();

		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				cpu_sfence();
				link->state = MTX_LINK_ACQUIRED;
				error = 0;
				break;
			}
			continue;
		}
		if ((lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				cpu_sfence();
				link->state = MTX_LINK_ACQUIRED;
				error = 0;
				break;
			}
			continue;
		}

		/*
		 * We need MTX_LINKSPIN to manipulate exlink or
		 * shlink.
		 *
		 * We must set MTX_EXWANTED with MTX_LINKSPIN to indicate
		 * pending exclusive requests.  It cannot be set as a separate
		 * operation prior to acquiring MTX_LINKSPIN.
		 *
		 * To avoid unnecessary cpu cache traffic we poll
		 * for collisions.  It is also possible that EXWANTED
		 * state failing the above test was spurious, so all the
		 * tests must be repeated if we cannot obtain LINKSPIN
		 * with the prior state tests intact (i.e. don't reload
		 * the (lock) variable here, for heaven's sake!).
		 */
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			continue;
		}
		td = curthread;
		nlock = lock | MTX_EXWANTED | MTX_LINKSPIN;
		crit_enter_raw(td);
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock) == 0) {
			crit_exit_raw(td);
			continue;
		}

		/*
		 * Check for early abort.
		 */
		if (link->state == MTX_LINK_ABORTED) {
			if (mtx->mtx_exlink == NULL) {
				atomic_clear_int(&mtx->mtx_lock,
						 MTX_LINKSPIN |
						 MTX_EXWANTED);
			} else {
				atomic_clear_int(&mtx->mtx_lock,
						 MTX_LINKSPIN);
			}
			crit_exit_raw(td);
			link->state = MTX_LINK_IDLE;
			error = ENOLCK;
			break;
		}

		/*
		 * Add our link to the exlink list and release LINKSPIN.
		 */
		link->owner = td;
		link->state = MTX_LINK_LINKED_EX;
		if (mtx->mtx_exlink) {
			link->next = mtx->mtx_exlink;
			link->prev = link->next->prev;
//......... portions of the code omitted here .........
Developer: kusumi, Project: DragonFlyBSD, Lines of code: 101, Source file: kern_mutex.c


Example 16: dm_target_stripe_dump

static int
dm_target_stripe_dump(dm_table_entry_t *table_en, void *data, size_t length, off_t offset)
{
	dm_target_stripe_config_t *tsc;
	uint64_t blkno, blkoff;
	uint64_t stripe, blknr;
	uint32_t stripe_off, stripe_rest, num_blks, issue_blks;
	uint64_t off2, len2;
	int devnr;

	tsc = table_en->target_config;
	if (tsc == NULL)
		return 0;

	/* calculate extent of request */
	KKASSERT(length % DEV_BSIZE == 0);

	blkno = offset / DEV_BSIZE;
	blkoff = 0;
	num_blks = length / DEV_BSIZE;

	/*
	 * 0 length means flush buffers and return
	 */
	if (length == 0) {
		for (devnr = 0; devnr < tsc->stripe_num; ++devnr) {
			if (tsc->stripe_devs[devnr].pdev->pdev_vnode->v_rdev == NULL)
				return ENXIO;

			dev_ddump(tsc->stripe_devs[devnr].pdev->pdev_vnode->v_rdev,
			    data, 0, offset, 0);
		}
		return 0;
	}

	while (num_blks > 0) {
		/* blockno to strip piece nr */
		stripe = blkno / tsc->stripe_chunksize;
		stripe_off = blkno % tsc->stripe_chunksize;

		/* where we are inside the strip */
		devnr = stripe % tsc->stripe_num;
		blknr = stripe / tsc->stripe_num;

		/* how much is left before we hit a boundary */
		stripe_rest = tsc->stripe_chunksize - stripe_off;

		/* issue this piece on stripe `stripe' */
		issue_blks = MIN(stripe_rest, num_blks);

#if 0
		nestiobuf_add(bio, nestbuf, blkoff,
				issue_blks * DEV_BSIZE);
#endif
		len2 = issue_blks * DEV_BSIZE;

		/* We need the offset in bytes. */
		off2 = blknr * tsc->stripe_chunksize + stripe_off;
		off2 += tsc->stripe_devs[devnr].offset;
		off2 *= DEV_BSIZE;
		off2 = dm_pdev_correct_dump_offset(tsc->stripe_devs[devnr].pdev,
		    off2);

		if (tsc->stripe_devs[devnr].pdev->pdev_vnode->v_rdev == NULL)
			return ENXIO;

		dev_ddump(tsc->stripe_devs[devnr].pdev->pdev_vnode->v_rdev,
		    (char *)data + blkoff, 0, off2, len2);

		blkno += issue_blks;
		blkoff += issue_blks * DEV_BSIZE;
		num_blks -= issue_blks;
	}

	return 0;
}
Developer: AhmadTux, Project: DragonFlyBSD, Lines of code: 76, Source file: dm_target_striped.c


Example 17: cbq_add_queue_locked

static int
cbq_add_queue_locked(struct pf_altq *a, cbq_state_t *cbqp)
{
	struct rm_class	*borrow, *parent;
	struct rm_class	*cl;
	struct cbq_opts	*opts;
	int		i;

	KKASSERT(a->qid != 0);

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is
//......... portions of the code omitted here .........