
C++ P2ALIGN Function Code Examples


This article compiles typical usage examples of the C++ P2ALIGN function. If you have been wondering what P2ALIGN does, how it is called, or what real-world P2ALIGN code looks like, the curated examples below should help.



The following presents 20 code examples of the P2ALIGN function, sorted by popularity by default.
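
Before diving into the examples, a short refresher: P2ALIGN(x, align) rounds x down to the nearest multiple of align, where align must be a power of two. The sketch below shows the conventional illumos/ZFS-style definitions of P2ALIGN and its companion macros P2ROUNDUP and P2PHASE, reproduced here only for illustration (consult sys/sysmacros.h in the project you are working with for the authoritative versions), together with a small user-space test.

#include <stdio.h>
#include <stdint.h>

/* Power-of-two alignment macros (illumos-style sketch; align must be a power of two). */
#define	P2ALIGN(x, align)	((x) & -(align))	/* round x down to a multiple of align */
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))	/* round x up to a multiple of align */
#define	P2PHASE(x, align)	((x) & ((align) - 1))	/* offset of x within its align-sized block */

int
main(void)
{
	uint64_t offset = 0x12345;

	/* With a 4 KiB (0x1000) alignment this prints 0x12000, 0x13000 and 0x345. */
	printf("P2ALIGN   = 0x%llx\n",
	    (unsigned long long)P2ALIGN(offset, 0x1000ULL));
	printf("P2ROUNDUP = 0x%llx\n",
	    (unsigned long long)P2ROUNDUP(offset, 0x1000ULL));
	printf("P2PHASE   = 0x%llx\n",
	    (unsigned long long)P2PHASE(offset, 0x1000ULL));
	return (0);
}

This is the same pattern the kernel examples below rely on: aligning physical addresses, I/O offsets, and lengths to page or volume-block boundaries before mapping, caching, or freeing them.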

Example 1: pci_cfgacc_map

static caddr_t
pci_cfgacc_map(paddr_t phys_addr)
{
#ifdef __xpv
	phys_addr = pfn_to_pa(xen_assign_pfn(mmu_btop(phys_addr))) |
	    (phys_addr & MMU_PAGEOFFSET);
#endif
	if (khat_running) {
		pfn_t pfn = mmu_btop(phys_addr);
		/*
		 * pci_cfgacc_virt_base may hold address left from early
		 * boot, which points to low mem. Realloc virtual address
		 * in kernel space since it's already late in boot now.
		 * Note: no need to unmap first, clear_boot_mappings() will
		 * do that for us.
		 */
		if (pci_cfgacc_virt_base < (caddr_t)kernelbase)
			pci_cfgacc_virt_base = vmem_alloc(heap_arena,
			    MMU_PAGESIZE, VM_SLEEP);

		hat_devload(kas.a_hat, pci_cfgacc_virt_base,
		    MMU_PAGESIZE, pfn, PROT_READ | PROT_WRITE |
		    HAT_STRICTORDER, HAT_LOAD_LOCK);
	} else {
		paddr_t	pa_base = P2ALIGN(phys_addr, MMU_PAGESIZE);

		if (pci_cfgacc_virt_base == NULL)
			pci_cfgacc_virt_base =
			    (caddr_t)alloc_vaddr(MMU_PAGESIZE, MMU_PAGESIZE);

		kbm_map((uintptr_t)pci_cfgacc_virt_base, pa_base, 0, 0);
	}

	return (pci_cfgacc_virt_base + (phys_addr & MMU_PAGEOFFSET));
}
Developer ID: apprisi, Project: illumos-gate, Lines of code: 35, Source file: pci_cfgacc_x86.c


Example 2: vdev_cache_read

/*
 * Read data from the cache.  Returns 0 on cache hit, errno on a miss.
 */
int
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, *ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	ASSERTV(uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);)
Developer ID: AB17, Project: zfs, Lines of code: 10, Source file: vdev_cache.c


Example 3: plat_get_mem_unum

/*ARGSUSED*/
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
    int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
{
	if (flt_in_memory && (p2get_mem_unum != NULL))
		return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
		    buf, buflen, lenp));
	else
		return (ENOTSUP);
}
Developer ID: MatiasNAmendola, Project: AuroraUX-SunOS, Lines of code: 11, Source file: enchilada.c


Example 4: address_in_range

/*
 * Check whether any portion of [start, end] segment is within the
 * [start_addr, end_addr] range.
 *
 * Return values:
 *   0 - address is outside the range
 *   1 - address is within the range
 */
static int
address_in_range(uintptr_t start, uintptr_t end, size_t psz)
{
    int rc = 1;

    /*
     *  Nothing to do if there is no address range specified with -A
     */
    if (start_addr != INVALID_ADDRESS || end_addr != INVALID_ADDRESS) {
        /* The segment end is below the range start */
        if ((start_addr != INVALID_ADDRESS) &&
                (end < P2ALIGN(start_addr, psz)))
            rc = 0;

        /* The segment start is above the range end */
        if ((end_addr != INVALID_ADDRESS) &&
                (start > P2ALIGN(end_addr + psz, psz)))
            rc = 0;
    }
    return (rc);
}
Developer ID: mikess, Project: illumos-gate, Lines of code: 29, Source file: pmap.c


Example 5: zvol_discard

static int
zvol_discard(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	int error;
	rl_t *rl;
	dmu_tx_t *tx;

	ASSERT(zv && zv->zv_open_count > 0);

	if (end > zv->zv_volsize)
		return (SET_ERROR(EIO));

	/*
	 * Align the request to volume block boundaries when REQ_SECURE is
	 * available, but not requested. If we don't, then this will force
	 * dnode_free_range() to zero out the unaligned parts, which is slow
	 * (read-modify-write) and useless since we are not freeing any space
	 * by doing so. Kernels that do not support REQ_SECURE (2.6.32 through
	 * 2.6.35) will not receive this optimization.
	 */
#ifdef REQ_SECURE
	if (!(bio->bi_rw & REQ_SECURE)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}
#endif

	if (start >= end)
		return (0);

	rl = zfs_range_lock(&zv->zv_znode, start, size, RL_WRITER);
	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}

	zfs_range_unlock(rl);

	return (error);
}
Developer ID: Oliverlyn, Project: zfs, Lines of code: 52, Source file: zvol.c


Example 6: zvol_discard

static int
zvol_discard(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	int error;
	rl_t *rl;
	dmu_tx_t *tx;

	ASSERT(zv && zv->zv_open_count > 0);

	if (end > zv->zv_volsize)
		return (SET_ERROR(EIO));

	/*
	 * Align the request to volume block boundaries when a secure erase is
	 * not required.  This will prevent dnode_free_range() from zeroing out
	 * the unaligned parts which is slow (read-modify-write) and useless
	 * since we are not freeing any space by doing so.
	 */
	if (!bio_is_secure_erase(bio)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}

	if (start >= end)
		return (0);

	rl = zfs_range_lock(&zv->zv_range_lock, start, size, RL_WRITER);
	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}

	zfs_range_unlock(rl);

	return (error);
}
Developer ID: alek-p, Project: zfs, Lines of code: 48, Source file: zvol.c


Example 7: copy_memlist_filter

/*
 * Copy in a memory list from boot to kernel, with a filter function
 * to remove pages. The filter function can increase the address and/or
 * decrease the size to filter out pages.  It will also align addresses and
 * sizes to PAGESIZE.
 */
void
copy_memlist_filter(
	struct memlist *src,
	struct memlist **dstp,
	void (*filter)(uint64_t *, uint64_t *))
{
	struct memlist *dst, *prev;
	uint64_t addr;
	uint64_t size;
	uint64_t eaddr;

	dst = *dstp;
	prev = dst;

	/*
	 * Move through the memlist applying a filter against
	 * each range of memory. Note that we may apply the
	 * filter multiple times against each memlist entry.
	 */
	for (; src; src = src->ml_next) {
		addr = P2ROUNDUP(src->ml_address, PAGESIZE);
		eaddr = P2ALIGN(src->ml_address + src->ml_size, PAGESIZE);
		while (addr < eaddr) {
			size = eaddr - addr;
			if (filter != NULL)
				filter(&addr, &size);
			if (size == 0)
				break;
			dst->ml_address = addr;
			dst->ml_size = size;
			dst->ml_next = 0;
			if (prev == dst) {
				dst->ml_prev = 0;
				dst++;
			} else {
				dst->ml_prev = prev;
				prev->ml_next = dst;
				dst++;
				prev++;
			}
			addr += size;
		}
	}

	*dstp = dst;
}
Developer ID: pcd1193182, Project: openzfs, Lines of code: 52, Source file: sundep.c


Example 8: zvol_discard

static void
zvol_discard(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t start = blk_rq_pos(req) << 9;
    uint64_t end = start + blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (end > zv->zv_volsize) {
        error = EIO;
        goto out;
    }

    /*
     * Align the request to volume block boundaries. If we don't,
     * then this will force dnode_free_range() to zero out the
     * unaligned parts, which is slow (read-modify-write) and
     * useless since we are not freeing any space by doing so.
     */
    start = P2ROUNDUP(start, zv->zv_volblocksize);
    end = P2ALIGN(end, zv->zv_volblocksize);

    if (start >= end) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end-start);

    /*
     * TODO: maybe we should add the operation to the log.
     */

    zfs_range_unlock(rl);
out:
    blk_end_request(req, -error, blk_rq_bytes(req));
    spl_fstrans_unmark(cookie);
}
Developer ID: avg-I, Project: zfs, Lines of code: 44, Source file: zvol.c


Example 9: zvol_discard

static int
zvol_discard(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	int error;
	rl_t *rl;

	if (end > zv->zv_volsize)
		return (SET_ERROR(EIO));

	/*
	 * Align the request to volume block boundaries when REQ_SECURE is
	 * available, but not requested. If we don't, then this will force
	 * dnode_free_range() to zero out the unaligned parts, which is slow
	 * (read-modify-write) and useless since we are not freeing any space
	 * by doing so. Kernels that do not support REQ_SECURE (2.6.32 through
	 * 2.6.35) will not receive this optimization.
	 */
#ifdef REQ_SECURE
	if (!(bio->bi_rw & REQ_SECURE)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}
#endif

	if (start >= end)
		return (0);

	rl = zfs_range_lock(&zv->zv_znode, start, size, RL_WRITER);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, size);

	/*
	 * TODO: maybe we should add the operation to the log.
	 */

	zfs_range_unlock(rl);

	return (error);
}
Developer ID: koplover, Project: zfs, Lines of code: 44, Source file: zvol.c


Example 10: fletcher_4_byteswap

void
fletcher_4_byteswap(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	const fletcher_4_ops_t *ops;
	uint64_t p2size = P2ALIGN(size, 64);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (size == 0) {
		ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
	} else if (p2size == 0) {
		ops = &fletcher_4_scalar_ops;
		fletcher_4_byteswap_impl(ops, buf, size, zcp);
	} else {
		ops = fletcher_4_impl_get();
		fletcher_4_byteswap_impl(ops, buf, p2size, zcp);

		if (p2size < size)
			fletcher_4_incremental_byteswap((char *)buf + p2size,
			    size - p2size, zcp);
	}
}
Developer ID: SageCloud, Project: zfs, Lines of code: 22, Source file: zfs_fletcher.c


Example 11: fletcher_4_byteswap

/*ARGSUSED*/
void
fletcher_4_byteswap(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	const uint64_t p2size = P2ALIGN(size, 64);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (size == 0 || p2size == 0) {
		ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);

		if (size > 0)
			fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
			    buf, size);
	} else {
		fletcher_4_byteswap_impl(buf, p2size, zcp);

		if (p2size < size)
			fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
			    (char *)buf + p2size, size - p2size);
	}
}
Developer ID: Ramzec, Project: zfs, Lines of code: 23, Source file: zfs_fletcher.c


Example 12: dmu_buf_hold_array_by_dnode

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
	dmu_buf_t **dbp;
	uint64_t blkid, nblks, i;
	uint32_t dbuf_flags;
	int err;
	zio_t *zio;

	ASSERT(length <= DMU_MAX_ACCESS);

	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT;
	if (flags & DMU_READ_NO_PREFETCH || length > zfetch_array_rd_sz)
		dbuf_flags |= DB_RF_NOPREFETCH;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
		    P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
	} else {
Developer ID: yaoyutian, Project: freebsd, Lines of code: 28, Source file: dmu.c


Example 13: kgrep_range_basic

static int
kgrep_range_basic(uintptr_t base, uintptr_t lim, void *kg_arg)
{
	kgrep_data_t *kg = kg_arg;
	size_t pagesize = kg->kg_pagesize;
	uintptr_t pattern = kg->kg_pattern;
	uintptr_t *page = kg->kg_page;
	uintptr_t *page_end = &page[pagesize / sizeof (uintptr_t)];
	uintptr_t *pos;

	uintptr_t addr, offset;
	int seen = 0;

	/*
	 * page-align everything, to simplify the loop
	 */
	base = P2ALIGN(base, pagesize);
	lim = P2ROUNDUP(lim, pagesize);

	for (addr = base; addr < lim; addr += pagesize) {
		if (mdb_vread(page, pagesize, addr) == -1)
			continue;
		seen = 1;

		for (pos = page; pos < page_end; pos++) {
			if (*pos != pattern)
				continue;

			offset = (caddr_t)pos - (caddr_t)page;
			kgrep_cb(addr + offset, NULL, kg->kg_cbtype);
		}
	}
	if (seen)
		kg->kg_seen = 1;

	return (WALK_NEXT);
}
Developer ID: metricinc, Project: illumos-gate, Lines of code: 37, Source file: kgrep.c


Example 14: dmu_buf_hold_array_by_dnode

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
    uint64_t length, int read, const void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dsl_pool_t *dp = NULL;
	dmu_buf_t **dbp;
	uint64_t blkid, nblks, i;
	uint32_t flags;
	int err;
	zio_t *zio;
	hrtime_t start;

	ASSERT(length <= DMU_MAX_ACCESS);

	flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT;
	if (length > zfetch_array_rd_sz)
		flags |= DB_RF_NOPREFETCH;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
		    P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
	} else {
Developer ID: YaroslavLitvinov, Project: zfs-port, Lines of code: 30, Source file: dmu.c


Example 15: vdev_cache_allocate

/*
 * Allocate an entry in the cache.  At the point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL)
			return (NULL);
		ASSERT3U(ve->ve_hits, !=, 0);
		vdev_cache_evict(vc, ve);
	}
Developer ID: pcd1193182, Project: openzfs, Lines of code: 29, Source file: vdev_cache.c


Example 16: dnode_next_offset

				blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
				minlvl = restarted ? 1 : 2;
				restarted = B_TRUE;
				error = dnode_next_offset(DMU_META_DNODE(os),
				    DNODE_FIND_HOLE, &offset, minlvl,
				    blkfill, 0);
				if (error == 0) {
					object = offset >> DNODE_SHIFT;
				}
			}
			/*
			 * Note: if "restarted", we may find a L0 that
			 * is not suitably aligned.
			 */
			os->os_obj_next_chunk =
			    P2ALIGN(object, dnodes_per_chunk) +
			    dnodes_per_chunk;
			(void) atomic_swap_64(cpuobj, object);
			mutex_exit(&os->os_obj_lock);
		}

		/*
		 * XXX We should check for an i/o error here and return
		 * up to our caller.  Actually we should pre-read it in
		 * dmu_tx_assign(), but there is currently no mechanism
		 * to do so.
		 */
		(void) dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
		    dn_slots, FTAG, &dn);
		if (dn != NULL) {
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
Developer ID: bprotopopov, Project: zfs, Lines of code: 31, Source file: dmu_object.c


Example 17: log_roll_read

/*
 * Find something to roll, then if we don't have cached roll buffers
 * covering all the deltas in that MAPBLOCK then read the master
 * and overlay the deltas.
 * returns;
 *	0 if successful
 *	1 on finding nothing to roll
 *	2 on error
 */
int
log_roll_read(ml_unit_t *ul, rollbuf_t *rbs, int nmblk, caddr_t roll_bufs,
    int *retnbuf)
{
	offset_t	mof;
	buf_t		*bp;
	rollbuf_t	*rbp;
	mt_map_t	*logmap = ul->un_logmap;
	daddr_t		mblkno;
	int		i;
	int		error;
	int		nbuf;

	/*
	 * Make sure there is really something to roll
	 */
	mof = 0;
	if (!logmap_next_roll(logmap, &mof)) {
		return (1);
	}

	/*
	 * build some master blocks + deltas to roll forward
	 */
	rw_enter(&logmap->mtm_rwlock, RW_READER);
	nbuf = 0;
	do {
		mof = mof & (offset_t)MAPBLOCKMASK;
		mblkno = lbtodb(mof);

		/*
		 * Check for the case of a new delta to a set up buffer
		 */
		for (i = 0, rbp = rbs; i < nbuf; ++i, ++rbp) {
			if (P2ALIGN(rbp->rb_bh.b_blkno,
			    MAPBLOCKSIZE / DEV_BSIZE) == mblkno) {
				TNF_PROBE_0(trans_roll_new_delta, "lufs",
				    /* CSTYLED */);
				trans_roll_new_delta++;
				/* Flush out the current set of buffers */
				goto flush_bufs;
			}
		}

		/*
		 * Work out what to roll next. If it isn't cached then read
		 * it asynchronously from the master.
		 */
		bp = &rbp->rb_bh;
		bp->b_blkno = mblkno;
		bp->b_flags = B_READ;
		bp->b_un.b_addr = roll_bufs + (nbuf << MAPBLOCKSHIFT);
		bp->b_bufsize = MAPBLOCKSIZE;
		if (top_read_roll(rbp, ul)) {
			/* logmap deltas were in use */
			if (nbuf == 0) {
				/*
				 * On first buffer wait for the logmap user
				 * to finish by grabbing the logmap lock
				 * exclusively rather than spinning
				 */
				rw_exit(&logmap->mtm_rwlock);
				lrr_wait++;
				rw_enter(&logmap->mtm_rwlock, RW_WRITER);
				rw_exit(&logmap->mtm_rwlock);
				return (1);
			}
			/* we have at least one buffer - flush it */
			goto flush_bufs;
		}
		if ((bp->b_flags & B_INVAL) == 0) {
			nbuf++;
		}
		mof += MAPBLOCKSIZE;
	} while ((nbuf < nmblk) && logmap_next_roll(logmap, &mof));
Developer ID: MatiasNAmendola, Project: AuroraUX-SunOS, Lines of code: 84, Source file: lufs_thread.c


Example 18: exec_init

/*
 * Construct a stack for init containing the arguments to it, then
 * pass control to exec_common.
 */
int
exec_init(const char *initpath, const char *args)
{
	caddr32_t ucp;
	caddr32_t *uap;
	caddr32_t *argv;
	caddr32_t exec_fnamep;
	char *scratchargs;
	int i, sarg;
	size_t argvlen, alen;
	boolean_t in_arg;
	int argc = 0;
	int error = 0, count = 0;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	int brand_action;

	if (args == NULL)
		args = "";

	alen = strlen(initpath) + 1 + strlen(args) + 1;
	scratchargs = kmem_alloc(alen, KM_SLEEP);
	(void) snprintf(scratchargs, alen, "%s %s", initpath, args);

	/*
	 * We do a quick two state parse of the string to sort out how big
	 * argc should be.
	 */
	in_arg = B_FALSE;
	for (i = 0; i < strlen(scratchargs); i++) {
		if (scratchargs[i] == ' ' || scratchargs[i] == '\0') {
			if (in_arg) {
				in_arg = B_FALSE;
				argc++;
			}
		} else {
			in_arg = B_TRUE;
		}
	}
	argvlen = sizeof (caddr32_t) * (argc + 1);
	argv = kmem_zalloc(argvlen, KM_SLEEP);

	/*
	 * We pull off a bit of a hack here.  We work our way through the
	 * args string, putting nulls at the ends of space delimited tokens
	 * (boot args don't support quoting at this time).  Then we just
	 * copy the whole mess to userland in one go.  In other words, we
	 * transform this: "init -s -r\0" into this on the stack:
	 *
	 *	-0x00 \0
	 *	-0x01 r
	 *	-0x02 -  <--------.
	 *	-0x03 \0	  |
	 *	-0x04 s		  |
	 *	-0x05 -  <------. |
	 *	-0x06 \0	| |
	 *	-0x07 t		| |
	 *	-0x08 i 	| |
	 *	-0x09 n		| |
	 *	-0x0a i  <---.  | |
	 *	-0x10 NULL   |  | |	(argv[3])
	 *	-0x14   -----|--|-'	(argv[2])
	 *	-0x18  ------|--'	(argv[1])
	 *	-0x1c -------'		(argv[0])
	 *
	 * Since we know the value of ucp at the beginning of this process,
	 * we can trivially compute the argv[] array which we also need to
	 * place in userland: argv[i] = ucp - sarg(i), where ucp is the
	 * stack ptr, and sarg is the string index of the start of the
	 * argument.
	 */
	ucp = (caddr32_t)(uintptr_t)p->p_usrstack;

	argc = 0;
	in_arg = B_FALSE;
	sarg = 0;

	for (i = 0; i < alen; i++) {
		if (scratchargs[i] == ' ' || scratchargs[i] == '\0') {
			if (in_arg == B_TRUE) {
				in_arg = B_FALSE;
				scratchargs[i] = '\0';
				argv[argc++] = ucp - (alen - sarg);
			}
		} else if (in_arg == B_FALSE) {
			in_arg = B_TRUE;
			sarg = i;
		}
	}
	ucp -= alen;
	error |= copyout(scratchargs, (caddr_t)(uintptr_t)ucp, alen);

	uap = (caddr32_t *)P2ALIGN((uintptr_t)ucp, sizeof (caddr32_t));
	uap--;	/* advance to be below the word we're in */
	uap -= (argc + 1);	/* advance argc words down, plus one for NULL */
	error |= copyout(argv, uap, argvlen);
//......... remainder of this code omitted .........
Developer ID: apprisi, Project: illumos-gate, Lines of code: 101, Source file: main.c


Example 19: dmu_zfetch

/*
 * This is the prefetch entry point.  It calls all of the other dmu_zfetch
 * routines to create, delete, find, or operate upon prefetch streams.
 */
void
dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
{
	zstream_t	zst;
	zstream_t	*newstream;
	int		fetched;
	int		inserted;
	unsigned int	blkshft;
	uint64_t	blksz;

	if (zfs_prefetch_disable)
		return;

	/* files that aren't ln2 blocksz are only one block -- nothing to do */
	if (!zf->zf_dnode->dn_datablkshift)
		return;

	/* convert offset and size, into blockid and nblocks */
	blkshft = zf->zf_dnode->dn_datablkshift;
	blksz = (1 << blkshft);

	bzero(&zst, sizeof (zstream_t));
	zst.zst_offset = offset >> blkshft;
	zst.zst_len = (P2ROUNDUP(offset + size, blksz) -
	    P2ALIGN(offset, blksz)) >> blkshft;

	fetched = dmu_zfetch_find(zf, &zst, prefetched);
	if (!fetched) {
		fetched = dmu_zfetch_colinear(zf, &zst);
	}

	if (!fetched) {
		newstream = dmu_zfetch_stream_reclaim(zf);

		/*
		 * we still couldn't find a stream, drop the lock, and allocate
		 * one if possible.  Otherwise, give up and go home.
		 */
		if (newstream == NULL) {
			uint64_t	maxblocks;
			uint32_t	max_streams;
			uint32_t	cur_streams;

			cur_streams = zf->zf_stream_cnt;
			maxblocks = zf->zf_dnode->dn_maxblkid;

			max_streams = MIN(zfetch_max_streams,
			    (maxblocks / zfetch_block_cap));
			if (max_streams == 0) {
				max_streams++;
			}

			if (cur_streams >= max_streams) {
				return;
			}

			newstream = kmem_zalloc(sizeof (zstream_t), KM_SLEEP);
		}

		newstream->zst_offset = zst.zst_offset;
		newstream->zst_len = zst.zst_len;
		newstream->zst_stride = zst.zst_len;
		newstream->zst_ph_offset = zst.zst_len + zst.zst_offset;
		newstream->zst_cap = zst.zst_len;
		newstream->zst_direction = ZFETCH_FORWARD;
		newstream->zst_last = lbolt;

		mutex_init(&newstream->zst_lock, NULL, MUTEX_DEFAULT, NULL);

		rw_enter(&zf->zf_rwlock, RW_WRITER);
		inserted = dmu_zfetch_stream_insert(zf, newstream);
		rw_exit(&zf->zf_rwlock);

		if (!inserted) {
			mutex_destroy(&newstream->zst_lock);
			kmem_free(newstream, sizeof (zstream_t));
		}
	}
}
Developer ID: BjoKaSH, Project: mac-zfs, Lines of code: 83, Source file: dmu_zfetch.c


Example 20: slice_small_get_slice_from_row

static inline slice_t *
slice_small_get_slice_from_row(void *buf, small_allocatable_row_t **row)
{
	(*row) = (small_allocatable_row_t *)buf;
	return (slice_t*)P2ALIGN((uint64_t)buf, (uint64_t)PAGE_SIZE);
}
Developer ID: lundman, Project: osx-spl-crypto, Lines of code: 6, Source file: spl-bmalloc.c



Note: The P2ALIGN function examples in this article were compiled from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow each project's license. Do not repost without permission.

