
C++ round_down Function Code Examples


This article collects typical usage examples of the round_down function in C++. If you are wondering what round_down does, how to call it, or what real-world uses of it look like, the hand-picked code examples below may help.



A total of 20 round_down code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
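Most of the snippets below come from the Linux kernel, where round_down is not a C++ standard library function but a macro from the kernel headers. As a rough sketch of what these examples assume (paraphrased, the exact definition can vary between kernel versions), the helpers look approximately like this and only work when the alignment is a power of two:

/* Sketch of the kernel-style rounding helpers the examples below assume;
 * paraphrased rather than copied verbatim, and y must be a power of two. */
#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
#define round_up(x, y)     ((((x) - 1) | __round_mask(x, y)) + 1)
#define round_down(x, y)   ((x) & ~__round_mask(x, y))

/* For example: round_down(8292, 4096) == 8192 and round_up(100, 4096) == 4096. */

A few of the non-kernel examples near the end (the box.cpp snippets, for instance) define their own round_down overloads with different semantics, such as rounding real-valued coordinates down to integer coordinates.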

Example 1: truncate_pagecache_range

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
Developer ID: spacex, Project: kernel-centos7, Lines of code: 36, Source file: truncate.c
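As a quick worked example of the rounding directions in Example 1 (hypothetical numbers, assuming PAGE_SIZE = 4096): with lstart = 100 and lend = 8291, unmap_start = round_up(100, 4096) = 4096 and unmap_end = round_down(8292, 4096) - 1 = 8191, so only pages lying entirely inside the hole are unmapped, while truncate_inode_pages_range() still receives the original byte range [100, 8291].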


Example 2: add_identity_map

/*
 * Adds the specified range to what will become the new identity mappings.
 * Once all ranges have been added, the new mapping is activated by calling
 * finalize_identity_maps() below.
 */
void add_identity_map(unsigned long start, unsigned long size)
{
	struct x86_mapping_info mapping_info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.context	= &pgt_data,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
	};
	unsigned long end = start + size;

	/* Make sure we have a top level page table ready to use. */
	if (!level4p)
		prepare_level4();

	/* Align boundary to 2M. */
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	if (start >= end)
		return;

	/* Build the mapping. */
	kernel_ident_mapping_init(&mapping_info, (pgd_t *)level4p,
				  start, end);
}
Developer ID: 1314cc, Project: linux, Lines of code: 28, Source file: pagetable.c
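For instance (hypothetical values, assuming PMD_SIZE = 2 MiB = 0x200000): a request with start = 0x3ffff000 and size = 0x2000 gives end = 0x40001000; after alignment, start = round_down(0x3ffff000, 0x200000) = 0x3fe00000 and end = round_up(0x40001000, 0x200000) = 0x40200000, so the identity mapping is widened outwards to whole 2 MiB units.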


Example 3: print_shadow_for_address

static void print_shadow_for_address(const void *addr)
{
	int i;
	const void *shadow = kasan_mem_to_shadow(addr);
	const void *shadow_row;

	shadow_row = (void *)round_down((unsigned long)shadow,
					SHADOW_BYTES_PER_ROW)
		- SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
		const void *kaddr = kasan_shadow_to_mem(shadow_row);
		char buffer[4 + (BITS_PER_LONG/8)*2];
		char shadow_buf[SHADOW_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
			(i == 0) ? ">%p: " : " %p: ", kaddr);
		/*
		 * We should not pass a shadow pointer to generic
		 * function, because generic functions may try to
		 * access kasan mapping for the passed address.
		 */
		memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
			shadow_buf, SHADOW_BYTES_PER_ROW, 0);

		if (row_is_guilty(shadow_row, shadow))
			pr_err("%*c\n",
				shadow_pointer_offset(shadow_row, shadow),
				'^');

		shadow_row += SHADOW_BYTES_PER_ROW;
	}
}
Developer ID: HarryWei, Project: linux, Lines of code: 37, Source file: report.c


Example 4: chacha_stream_xor

static int chacha_stream_xor(struct skcipher_request *req,
			     struct chacha_ctx *ctx, u8 *iv)
{
	struct skcipher_walk walk;
	u32 state[16];
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	crypto_chacha_init(state, ctx, iv);

	while (walk.nbytes > 0) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);

		chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
			       nbytes, ctx->nrounds);
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}
Developer ID: avagin, Project: linux, Lines of code: 24, Source file: chacha_generic.c


Example 5: _find_next_bit

/*
 * This is a common helper function for find_next_bit and
 * find_next_zero_bit.  The difference is the "invert" argument, which
 * is XORed with each fetched word before searching it for one bits.
 */
static unsigned long _find_next_bit(const unsigned long *addr,
		unsigned long nbits, unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (!nbits || start >= nbits)
		return nbits;

	tmp = addr[start / BITS_PER_LONG] ^ invert;

	/* Handle 1st word. */
	tmp &= BITMAP_FIRST_WORD_MASK(start);
	start = round_down(start, BITS_PER_LONG);

	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;

		tmp = addr[start / BITS_PER_LONG] ^ invert;
	}

	return min(start + __ffs(tmp), nbits);
}
Developer ID: 020gzh, Project: linux, Lines of code: 29, Source file: find_bit.c
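In the kernel versions these examples come from, find_next_bit() passes invert = 0UL and find_next_zero_bit() passes invert = ~0UL into this helper, so clear bits show up as set bits in tmp and the same search loop serves both cases.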


Example 6: page_to_pfn

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary for ensuring page poisoning
	 * works as expected when enabled
	 */
	if (unlikely(!base))
		return NULL;
#endif
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}
Developer ID: AshishNamdev, Project: linux, Lines of code: 24, Source file: page_ext.c


Example 7: ccu_data_offsets_valid

static bool ccu_data_offsets_valid(struct ccu_data *ccu)
{
	struct ccu_policy *ccu_policy = &ccu->policy;
	u32 limit;

	limit = ccu->range - sizeof(u32);
	limit = round_down(limit, sizeof(u32));
	if (ccu_policy_exists(ccu_policy)) {
		if (ccu_policy->enable.offset > limit) {
			pr_err("%s: bad policy enable offset for %s "
					"(%u > %u)\n", __func__,
				ccu->name, ccu_policy->enable.offset, limit);
			return false;
		}
		if (ccu_policy->control.offset > limit) {
			pr_err("%s: bad policy control offset for %s "
					"(%u > %u)\n", __func__,
				ccu->name, ccu_policy->control.offset, limit);
			return false;
		}
	}

	return true;
}
Developer ID: 01org, Project: thunderbolt-software-kernel-tree, Lines of code: 24, Source file: clk-kona-setup.c


Example 8: cxl_afu_read_err_buffer

/*
 * afu_eb_read:
 * Called from sysfs and reads the afu error info buffer. The h/w only supports
 * 4/8 bytes aligned access. So in case the requested offset/count aren't 8 byte
 * aligned the function uses a bounce buffer which can be max PAGE_SIZE.
 */
ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* calculate aligned read window */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* max we can copy in one read is PAGE_SIZE */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* use bounce buffer for copy */
	tbuf = (void *)__get_free_page(GFP_TEMPORARY);
	if (!tbuf)
		return -ENOMEM;

	/* perform aligned read from the mmio region */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}
Developer ID: greguu, Project: linux-4.2.3-c3x00, Lines of code: 42, Source file: pci.c
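To make the alignment arithmetic concrete (hypothetical values): a request with off = 5 and count = 10 yields aligned_start = round_down(5, 8) = 0, aligned_end = round_up(15, 8) = 16 and aligned_length = 16; the MMIO copy then reads bytes [0, 16) into the bounce buffer, and the caller's data is taken from tbuf + (off & 0x7) = tbuf + 5.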


Example 9: olpc_ofw_detect

/* detect OLPC (One Laptop per Child) */
void __init olpc_ofw_detect(void)
{
	struct olpc_ofw_header *hdr = &boot_params.olpc_ofw_header;
	unsigned long start;

	/* ensure OFW booted us by checking for "OFW " string */
	if (hdr->ofw_magic != OLPC_OFW_SIG)
		return;

	olpc_ofw_cif = (int (*)(int *))hdr->cif_handler;

	if ((unsigned long)olpc_ofw_cif < OFW_MIN) {
		printk(KERN_ERR "OFW detected, but cif has invalid address 0x%lx - disabling.\n",
				(unsigned long)olpc_ofw_cif);
		olpc_ofw_cif = NULL;
		return;
	}

	/* determine where OFW starts in memory */
	start = round_down((unsigned long)olpc_ofw_cif, OFW_BOUND);
	printk(KERN_INFO "OFW detected in memory, cif @ 0x%lx (reserving top %ldMB)\n",
			(unsigned long)olpc_ofw_cif, (-start) >> 20);
	reserve_top_address(-start);
}
Developer ID: hephaex, Project: linux-3.2, Lines of code: 25, Source file: olpc_ofw.c


Example 10: kasan_init

void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * At first we should unmap early shadow (clear_pgds() call below).
	 * However, instrumented code couldn't execute without shadow memory.
	 * tmp_pg_dir used to keep early shadow mapped until full shadow
	 * setup will be finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(_text)));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
	 * kasan_populate_zero_shadow() from replacing the page table entries
	 * (PMD or PTE) at the edges of the shadow region for the kernel
	 * image.
	 */
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes in
		 * advance to slightly speed up fastpath. In some rare cases
		 * we could cross boundary of mapped shadow, so we just map
		 * some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				(unsigned long)kasan_mem_to_shadow(end) + 1,
				pfn_to_nid(virt_to_pfn(start)));
	}
Developer ID: AshishNamdev, Project: linux, Lines of code: 66, Source file: kasan_init.c


Example 11: LOG_TRACE

template <  typename IN_PORT_TYPE > int file_descriptor_sink_i_base::_forecastAndProcess( bool &eos, typename  std::vector< gr_istream< IN_PORT_TYPE > > &istreams )
{
    typedef typename std::vector< gr_istream< IN_PORT_TYPE > >   _IStreamList;

    typename _IStreamList::iterator istream = istreams.begin();
    int nout = 0;
    bool dataReady = false;
    if ( !eos ) {
        uint64_t max_items_avail = 0;
        for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
            LOG_TRACE( file_descriptor_sink_i_base, "GET MAX ITEMS: STREAM:" << idx << " NITEMS/SCALARS:"
                      << istream->nitems() << "/" << istream->_data.size() );
            max_items_avail = std::max( istream->nitems(), max_items_avail );
        }

        //
        // calc number of output items to produce
        //
        noutput_items = (int) (max_items_avail * gr_sptr->relative_rate ());
        noutput_items = round_down (noutput_items, gr_sptr->output_multiple ());

        if ( noutput_items <= 0  ) {
           LOG_TRACE( file_descriptor_sink_i_base, "DATA CHECK - MAX ITEMS  NOUTPUT/MAX_ITEMS:" <<   noutput_items << "/" << max_items_avail);
           return -1;
        }

        if ( gr_sptr->fixed_rate() ) {
            istream = istreams.begin();
            for ( int i=0; istream != istreams.end(); i++, istream++ ) {
                int t_noutput_items = gr_sptr->fixed_rate_ninput_to_noutput( istream->nitems() );
                if ( gr_sptr->output_multiple_set() ) {
                    t_noutput_items = round_up(t_noutput_items, gr_sptr->output_multiple());
                }
                if ( t_noutput_items > 0 ) {
                    if ( noutput_items == 0 ) {
                        noutput_items = t_noutput_items;
                    }
                    if ( t_noutput_items <= noutput_items ) {
                        noutput_items = t_noutput_items;
                    }
                }
            }
            LOG_TRACE( file_descriptor_sink_i_base, " FIXED FORECAST NOUTPUT/output_multiple == " 
                      << noutput_items  << "/" << gr_sptr->output_multiple());
        }

        //
        // ask the block how much input they need to produce noutput_items...
        // if enough data is available to process then set the dataReady flag
        //
        int32_t  outMultiple = gr_sptr->output_multiple();
        while ( !dataReady && noutput_items >= outMultiple  ) {
            //
            // ask the block how much input they need to produce noutput_items...
            //
            gr_sptr->forecast(noutput_items, _ninput_items_required);

            LOG_TRACE( file_descriptor_sink_i_base, "--> FORECAST IN/OUT " << _ninput_items_required[0]  << "/" << noutput_items  );

            istream = istreams.begin();
            uint32_t dr_cnt=0;
            for ( int idx=0 ; noutput_items > 0 && istream != istreams.end(); idx++, istream++ ) {
                // check if buffer has enough elements
                _input_ready[idx] = false;
                if ( istream->nitems() >= (uint64_t)_ninput_items_required[idx] ) {
                    _input_ready[idx] = true;
                    dr_cnt++;
                }
                LOG_TRACE( file_descriptor_sink_i_base, "ISTREAM DATACHECK NELMS/NITEMS/REQ/READY:" << 
                          istream->nelems() << "/" << istream->nitems() << "/" << 
                          _ninput_items_required[idx] << "/" << _input_ready[idx]);
            }
    
            if ( dr_cnt < istreams.size() ) {
                if ( outMultiple > 1 ) {
                    noutput_items -= outMultiple;
                } else {
                    noutput_items /= 2;
                }
            } else {
                dataReady = true;
            }
            LOG_TRACE( file_descriptor_sink_i_base, " TRIM FORECAST NOUTPUT/READY " << noutput_items << "/" << dataReady );
        }

        // check if data is ready...
        if ( !dataReady ) {
            LOG_TRACE( file_descriptor_sink_i_base, "DATA CHECK - NOT ENOUGH DATA  AVAIL/REQ:" 
                      <<   _istreams[0].nitems() << "/" << _ninput_items_required[0] );
            return -1;
        }

        // reset looping variables
        int  ritems = 0;
        int  nitems = 0;

        // reset caching vectors
        _output_items.clear();
        _input_items.clear();
        _ninput_items.clear();
//......... part of the code is omitted here .........
Developer ID: 54AndyN, Project: integration-gnuhawk, Lines of code: 101, Source file: file_descriptor_sink_i_base.cpp


Example 12: mx_cma_region_reserve


//......... part of the code is omitted here .........
            reg->alignment = PAGE_SIZE;
        }

        if (reg->start) {
            if (!memblock_is_region_reserved(reg->start, reg->size)
                    && (memblock_reserve(reg->start, reg->size) == 0))
                reg->reserved = 1;
            else
                pr_err("S5P/CMA: Failed to reserve '%s'\n",
                       reg->name);
            continue;
        }

        paddr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
                                       reg->size, reg->alignment);
        if (paddr != MEMBLOCK_ERROR) {
            if (memblock_reserve(paddr, reg->size)) {
                pr_err("S5P/CMA: Failed to reserve '%s'\n",
                       reg->name);
                continue;
            }
            reg->start = paddr;
            reg->reserved = 1;
            pr_info("name = %s, paddr = 0x%x, size = %d\n", reg->name, paddr, reg->size);
        } else {
            pr_err("S5P/CMA: No free space in memory for '%s'\n",
                   reg->name);
        }

        if (cma_early_region_register(reg)) {
            pr_err("S5P/CMA: Failed to register '%s'\n",
                   reg->name);
            memblock_free(reg->start, reg->size);
        } else {
            paddr_last = min(paddr, paddr_last);
        }
    }

    if (regions_secure && regions_secure->size) {
        size_t size_secure = 0;
        size_t align_secure, size_region2, aug_size, order_region2;

        for (reg = regions_secure; reg->size != 0; reg++)
            size_secure += reg->size;

        reg--;

        /* Entire secure regions will be merged into 2
         * consecutive regions. */
        align_secure = 1 <<
                       (get_order((size_secure + 1) / 2) + PAGE_SHIFT);
        /* Calculation of a subregion size */
        size_region2 = size_secure - align_secure;
        order_region2 = get_order(size_region2) + PAGE_SHIFT;
        if (order_region2 < 20)
            order_region2 = 20; /* 1MB */
        order_region2 -= 3; /* divide by 8 */
        size_region2 = ALIGN(size_region2, 1 << order_region2);

        aug_size = align_secure + size_region2 - size_secure;
        if (aug_size > 0)
            reg->size += aug_size;

        size_secure = ALIGN(size_secure, align_secure);

        if (paddr_last >= memblock.current_limit) {
            paddr_last = memblock_find_in_range(0,
                                                MEMBLOCK_ALLOC_ACCESSIBLE,
                                                size_secure, reg->alignment);
        } else {
            paddr_last -= size_secure;
            paddr_last = round_down(paddr_last, align_secure);
        }

        if (paddr_last) {
            while (memblock_reserve(paddr_last, size_secure))
                paddr_last -= align_secure;

            do {
                reg->start = paddr_last;
                reg->reserved = 1;
                paddr_last += reg->size;

                if (cma_early_region_register(reg)) {
                    memblock_free(reg->start, reg->size);
                    pr_err("S5P/CMA: "
                           "Failed to register secure region "
                           "'%s'\n", reg->name);
                } else {
                    size_secure -= reg->size;
                }
            } while (reg-- != regions_secure);

            if (size_secure > 0)
                memblock_free(paddr_last, size_secure);
        } else {
            pr_err("S5P/CMA: Failed to reserve secure regions\n");
        }
    }
}
Developer ID: gcrisis, Project: android_kernel_mx2, Lines of code: 101, Source file: mx_cma.c


Example 13: _rtld_map_object


//......... part of the code is omitted here .........
 			dbg(("%s: PT_DYNAMIC %p", obj->path, obj->dynamic));
			break;
		}

		++phdr;
	}
	phdr = (Elf_Phdr *) ((caddr_t)ehdr + ehdr->e_phoff);
	obj->entry = (void *)(uintptr_t)ehdr->e_entry;
	if (!obj->dynamic) {
		_rtld_error("%s: not dynamically linked", path);
		goto bad;
	}
	if (nsegs != 2) {
		_rtld_error("%s: wrong number of segments (%d != 2)", path,
		    nsegs);
		goto bad;
	}

	/*
	 * Map the entire address space of the object as a file
	 * region to stake out our contiguous region and establish a
	 * base for relocation.  We use a file mapping so that
	 * the kernel will give us whatever alignment is appropriate
	 * for the platform we're running on.
	 *
	 * We map it using the text protection, map the data segment
	 * into the right place, then map an anon segment for the bss
	 * and unmap the gaps left by padding to alignment.
	 */

#ifdef MAP_ALIGNED
	base_alignment = segs[0]->p_align;
#endif
	base_offset = round_down(segs[0]->p_offset);
	base_vaddr = round_down(segs[0]->p_vaddr);
	base_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_memsz);
	text_vlimit = round_up(segs[0]->p_vaddr + segs[0]->p_memsz);
	text_flags = protflags(segs[0]->p_flags);
	data_offset = round_down(segs[1]->p_offset);
	data_vaddr = round_down(segs[1]->p_vaddr);
	data_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_filesz);
	data_flags = protflags(segs[1]->p_flags);
#ifdef RTLD_LOADER
	clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
#endif

	obj->textsize = text_vlimit - base_vaddr;
	obj->vaddrbase = base_vaddr;
	obj->isdynamic = ehdr->e_type == ET_DYN;

	obj->phdr_loaded = false;
	for (i = 0; i < nsegs; i++) {
		if (phdr_vaddr != EA_UNDEF &&
		    segs[i]->p_vaddr <= phdr_vaddr &&
		    segs[i]->p_memsz >= phdr_memsz) {
			obj->phdr_loaded = true;
			break;
		}
		if (segs[i]->p_offset <= ehdr->e_phoff &&
		    segs[i]->p_memsz >= phsize) {
			phdr_vaddr = segs[i]->p_vaddr + ehdr->e_phoff;
			phdr_memsz = phsize;
			obj->phdr_loaded = true;
			break;
		}
	}
Developer ID: Sciumo, Project: minix, Lines of code: 67, Source file: map_object.c


Example 14: init_stack

// Set up the initial stack page for the new child process with envid 'child'
// using the arguments array pointed to by 'argv',
// which is a null-terminated array of pointers to '\0'-terminated strings.
//
// On success, returns 0 and sets *init_esp
// to the initial stack pointer with which the child should start.
// Returns < 0 on failure.
static int
init_stack(envid_t child, const char **argv, uintptr_t *init_esp)
{
	size_t string_size;
	int argc, i, r;
	char *string_store;
	uintptr_t *argv_store;

	// Count the number of arguments (argc)
	// and the total amount of space needed for strings (string_size).
	string_size = 0;
	for (argc = 0; argv[argc] != 0; argc++)
		string_size += strlen(argv[argc]) + 1;

	// Determine where to place the strings and the argv array.
	// We set up the 'string_store' and 'argv_store' pointers to point
	// into the temporary page at UTEMP.
	// Later, we'll remap that page into the child environment
	// at (USTACKTOP - PGSIZE).
	
	// strings is the topmost thing on the stack.
	string_store = (char *) UTEMP + PGSIZE - string_size;
	
	// argv is below that.  There's one argument pointer per argument, plus
	// a null pointer.
	argv_store = (uintptr_t*) (round_down(string_store, 4) - 4 * (argc + 1));
	
	// Make sure that argv, strings, and the 2 words that hold 'argc'
	// and 'argv' themselves will all fit in a single stack page.
	if ((void*) (argv_store - 2) < (void*) UTEMP)
		return -E_NO_MEM;

	// Allocate a page at UTEMP.
	if ((r = sys_page_alloc(0, (void*) UTEMP, PTE_P|PTE_U|PTE_W)) < 0)
		return r;

	// Replace this with your code to:
	//
	//	* Initialize 'argv_store[i]' to point to argument string i,
	//	  for all 0 <= i < argc.
	//	  Also, copy the argument strings from 'argv' into the
	//	  newly-allocated stack page.
	//	  Hint: Copy the argument strings into string_store.
	//	  Hint: Make sure that argv_store uses addresses valid in the
	//	  CHILD'S environment!  The string_store variable itself
	//	  points into page UTEMP, but the child environment will have
	//	  this page mapped at USTACKTOP - PGSIZE.  Check out the
	//	  utemp_addr_to_ustack_addr function defined above.
	//
	for(i = 0; i < argc; i++){
		argv_store[i] = UTEMP2USTACK(string_store);
		strcpy(string_store,argv[i]);
		string_store += strlen(argv[i])+1;
	}
	//	* Set 'argv_store[argc]' to 0 to null-terminate the args array.
	//
	argv_store[argc] = 0;
	//	* Push two more words onto the child's stack below 'args',
	//	  containing the argc and argv parameters to be passed
	//	  to the child's umain() function.
	//	  argv should be below argc on the stack.
	//	  (Again, argv should use an address valid in the child's
	//	  environment.)
	//
	argv_store[-1] = UTEMP2USTACK(argv_store);
	argv_store[-2] = argc;
	//	* Set *init_esp to the initial stack pointer for the child,
	//	  (Again, use an address valid in the child's environment.)
	//
	// LAB 4: Your code here.
	//*init_esp = USTACKTOP;	// Change this!
	*init_esp = UTEMP2USTACK(argv_store-2);
	// After completing the stack, map it into the child's address space
	// and unmap it from ours!
	if ((r = sys_page_map(0, (void*) UTEMP, child, (void*) (USTACKTOP - PGSIZE), PTE_P | PTE_U | PTE_W)) < 0)
		goto error;
	if ((r = sys_page_unmap(0, (void*) UTEMP)) < 0)
		goto error;

	return 0;

error:
	sys_page_unmap(0, (void*) UTEMP);
	return r;
}
Developer ID: donguoxing, Project: Lab3, Lines of code: 92, Source file: spawn.c


Example 15: s5p_cma_region_reserve


//......... part of the code is omitted here .........
			size_t aug_size;

			align_secure = 1 <<
				(get_order((size_secure + 1) / 2) + PAGE_SHIFT);
			/* Calculation of a subregion size */
			size_region2 = size_secure - align_secure;
			order_region2 = get_order(size_region2) + PAGE_SHIFT;
			if (order_region2 < 20)
				order_region2 = 20; /* 1MB */
			order_region2 -= 3; /* divide by 8 */
			size_region2 = ALIGN(size_region2, 1 << order_region2);

			aug_size = align_secure + size_region2 - size_secure;
			if (aug_size > 0) {
				reg->size += aug_size;
				size_secure += aug_size;
				pr_debug("S5P/CMA: "
					"Augmented size of '%s' by %#x B.\n",
					reg->name, aug_size);
			}
		} else
			size_secure = ALIGN(size_secure, align_secure);

		pr_info("S5P/CMA: "
			"Reserving %#x for secure region aligned by %#x.\n",
						size_secure, align_secure);

		if (paddr_last >= memblock.current_limit) {
			paddr_last = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					size_secure, reg->alignment);
		} else {
			paddr_last -= size_secure;
			paddr_last = round_down(paddr_last, align_secure);
		}

		if (paddr_last) {
			pr_info("S5P/CMA: "
				"Reserved 0x%08x/0x%08x for 'secure_region'\n",
				paddr_last, size_secure);
#ifndef CONFIG_DMA_CMA
			while (memblock_reserve(paddr_last, size_secure))
				paddr_last -= align_secure;
#else
			if (!reg->start) {
				while (memblock_reserve(paddr_last,
							size_secure))
					paddr_last -= align_secure;
			}
#endif
			do {
#ifndef CONFIG_DMA_CMA
				reg->start = paddr_last;
				reg->reserved = 1;
				paddr_last += reg->size;
#else
				if (reg->start) {
					reg->reserved = 1;
#if defined(CONFIG_USE_MFC_CMA) && defined(CONFIG_MACH_M0)
					if (reg->start == 0x5C100000) {
						if (memblock_reserve(0x5C100000,
								0x700000))
							panic("memblock\n");
						if (memblock_reserve(0x5F000000,
								0x200000))
							panic("memblock\n");
Developer ID: Smando87, Project: smdk4412_kernel, Lines of code: 67, Source file: reserve_mem.c


Example 16: R2ISup

Box2di R2ISup(const Box2dr & aB)
{
   return Box2di(round_down(aB._p0),round_up(aB._p1));
}
Developer ID: jakexie, Project: micmac-archeos, Lines of code: 4, Source file: box.cpp


Example 17: arch_detect_mem_map

void
arch_detect_mem_map (mmap_info_t * mm_info, 
                     mem_map_entry_t * memory_map,
                     unsigned long mbd)
{
    struct multiboot_tag * tag;
    uint32_t n = 0;

    if (mbd & 7) {
        panic("ERROR: Unaligned multiboot info struct\n");
    }

    tag = (struct multiboot_tag*)(mbd+8);
    while (tag->type != MULTIBOOT_TAG_TYPE_MMAP) {
        tag = (struct multiboot_tag*)((multiboot_uint8_t*)tag + ((tag->size+7)&~7));
    }

    if (tag->type != MULTIBOOT_TAG_TYPE_MMAP) {
        panic("ERROR: no mmap tag found\n");
    }

    multiboot_memory_map_t * mmap;

    for (mmap=((struct multiboot_tag_mmap*)tag)->entries;
            (multiboot_uint8_t*)mmap < (multiboot_uint8_t*)tag + tag->size;
            mmap = (multiboot_memory_map_t*)((ulong_t)mmap + 
                ((struct multiboot_tag_mmap*)tag)->entry_size)) {


        if (n > MAX_MMAP_ENTRIES) {
            panic("Reached memory region limit!\n");
        }

        ulong_t start,end;

        start = round_up(mmap->addr, PAGE_SIZE_4KB);
        end   = round_down(mmap->addr + mmap->len, PAGE_SIZE_4KB);

        memory_map[n].addr = start;
        memory_map[n].len  = end-start;
        memory_map[n].type = mmap->type;

        BMM_PRINT("Memory map[%u] - [%p - %p] <%s>\n", 
                n, 
                start,
                end,
                mem_region_types[memory_map[n].type]);

        if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
            mm_info->usable_ram += mmap->len;
        }

        if (end > (mm_info->last_pfn << PAGE_SHIFT)) {
            mm_info->last_pfn = end >> PAGE_SHIFT;
        }

        mm_info->total_mem += end-start;

        ++n;
        ++mm_info->num_regions;
    }
Developer ID: ChrisBeauchene, Project: EECS399, Lines of code: 61, Source file: early_mem.c


Example 18: BoxDown

static Pt2di BoxDown(Pt2dr aP,INT  *) {return round_down(aP);}
Developer ID: jakexie, Project: micmac-archeos, Lines of code: 1, Source file: box.cpp


Example 19: mmci_data_irq

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
		      host->variant->start_err |
		      MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
Developer ID: Lyude, Project: linux, Lines of code: 71, Source file: mmci.c
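The round_down(success, data->blksz) call in the error path reports only whole blocks as transferred. As a hypothetical illustration, assuming a typical power-of-two block size of 512: with success = 1300 bytes, bytes_xfered becomes round_down(1300, 512) = 1024, i.e. the two fully transferred blocks.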


Example 20: load_segment

// Load the 'ph' program segment of program 'id' into 'child's address
// space at the proper place.
//
// Returns 0 on success, < 0 on error.  It is also OK panic on error.
// There's no need to clean up page mappings after error; the caller will call
// sys_env_destroy(), which cleans up most mappings for us.
static int
load_segment(envid_t child, int id, struct Proghdr* ph,
	     struct Elf* elf, size_t elf_size)
{
	// Use the p_flags field in the Proghdr for each segment
	// to determine how to map the segment:
	//
	//    * If the ELF flags do not include ELF_PROG_FLAG_WRITE,
	//	then the segment contains text and read-only data.
	//	Use map_page() to map the contents of this segment
	//	directly into the child, so that multiple instances of
	//	the same program will share the same copy of the program text.
	//      Be sure to map the program text read-only in the child.
	//
	int err;
	if((ph->p_flags & ELF_PROG_FLAG_WRITE) == 0){
		uint32_t i;
		for(i = round_down(ph->p_offset,PGSIZE); i < round_up(ph->p_offset+ph->p_memsz,PGSIZE); i+= PGSIZE){
			void *dstva = (void *)(round_down(ph->p_va,PGSIZE)+i-round_down(ph->p_offset,PGSIZE));
			map_page(child, id, i, dstva, PTE_U|PTE_P);
		}
	}
	else{
	//    * If the ELF segment flags DO include ELF_PROG_FLAG_WRITE,
	//	then the segment contains read/write data and bss.
	//	As with load_elf(), such an ELF segment
	//	occupies p_memsz bytes in memory, but only the first
	//	p_filesz bytes of the segment are actually loaded
	//	from the executable file -- you must clear the rest to zero.
	//      For each page to be mapped for a read/write segment,
	//	allocate a page of memory at UTEMP in the current
	//	environment.  Then use map_page() to map that portion of
	//	the program at UTEMP2.  Then copy the data from UTEMP2 into
	//	the page at UTEMP, and/or use memset() to zero any non-loaded
	//	portions of the page.  (You can avoid calling memset(),
	//	if you like, because sys_page_alloc() returns zeroed pages
	//	already.)  Finally, insert the correct page mapping into
	//	the child, and unmap the page at UTEMP2.
	//	Look at load_elf() and fork() for inspiration.
	//

		uint32_t i;
		for(i = round_down(ph->p_offset,PGSIZE); i < round_up(ph->p_offset+ph->p_memsz,PGSIZE); i+= PGSIZE)
		{
			if ((err = sys_page_alloc(0, (void*) UTEMP, PTE_P|PTE_U|PTE_W)) < 0)
				return err;
			//deal with bss
			if((i - round_down(ph->p_offset,PGSIZE)) <= ph->p_filesz ){
				int size, offset;
				offset = i - round_down(ph->p_offset, PGSIZE);
				//modify the size
				size = (ph->p_filesz - offset) > PGSIZE ? PGSIZE : (ph->p_filesz - offset); 
				map_page(0, id, i, (void *)(UTEMP2), PTE_U|PTE_P);
				memcpy((void *)UTEMP, (void *)(UTEMP2), size);
			}	
			//memset((void *)(UTEMP2),0,PGSIZE);
			void *dstva = (void *)(round_down(ph->p_va,PGSIZE)+i-round_down(ph->p_offset,PGSIZE));
			sys_page_map(0,(void *)UTEMP,child,dstva,PTE_P|PTE_U|PTE_W);
			sys_page_unmap(0,(void*)UTEMP);
		}


	// Note: All of the segment addresses or lengths above
	// might be non-page-aligned, so you must deal with
	// these non-page-aligned values appropriately.
	// The ELF linker does, however, guarantee that no two segments
	// will overlap on the same page; and it guarantees that
	// PGOFF(ph->p_offset) == PGOFF(ph->p_va).
	}
	return 0;
	// LAB 4: Your code here.
	//panic("load_segment not completed!\n");
	//return -1;
}
Developer ID: donguoxing, Project: Lab3, Lines of code: 80, Source file: spawn.c



Note: the round_down examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by their developers; copyright of the source code belongs to the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce without permission.

