
C++ ALIGN Function Code Examples


This article collects typical usage examples of the ALIGN function in C++. If you have been wondering how ALIGN is actually used, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help.



The following presents 20 code examples of the ALIGN function, sorted by popularity by default.
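
None of the listings below include the macro itself, so here is a minimal reference sketch of the kernel-style definition that all of these projects rely on (an assumed definition for illustration, not taken from any of the examples): ALIGN(x, a) rounds x up to the next multiple of a, where a must be a power of two.

#include <stdio.h>

/* Kernel-style round-up macro; `a` must be a power of two.
 * (Assumed definition for illustration; each project ships its own.) */
#define ALIGN(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	printf("%lu\n", (unsigned long)ALIGN(100UL, 64));	/* 128 */
	printf("%lu\n", (unsigned long)ALIGN(128UL, 64));	/* 128: already aligned */
	printf("%lu\n", (unsigned long)ALIGN(1UL, 4096));	/* 4096: one page */
	return 0;
}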

Example 1: cvmx_bootmem_phy_alloc

int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
			       uint64_t address_max, uint64_t alignment,
			       uint32_t flags)
{

	uint64_t head_addr;
	uint64_t ent_addr;
	/* points to previous list entry; NULL means the current entry is the head of the list */
	uint64_t prev_addr = 0;
	uint64_t new_ent_addr = 0;
	uint64_t desired_min_addr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
		     "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
		     (unsigned long long)req_size,
		     (unsigned long long)address_min,
		     (unsigned long long)address_max,
		     (unsigned long long)alignment);
#endif

	if (cvmx_bootmem_desc->major_version > 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
			     "version: %d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		goto error_out;
	}

	/*
	 * Do a variety of checks to validate the arguments.  The
	 * allocator code will later assume that these checks have
	 * been made.  We validate that the requested constraints are
	 * not self-contradictory before we look through the list of
	 * available memory.
	 */

	/* 0 is not a valid req_size for this allocator */
	if (!req_size)
		goto error_out;

	/* Round req_size up to mult of minimum alignment bytes */
	req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
		~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);

	/*
	 * Convert !0 address_min and 0 address_max to special case of
	 * range that specifies an exact memory block to allocate.  Do
	 * this before other checks and adjustments so that this
	 * transformation will be validated.
	 */
	if (address_min && !address_max)
		address_max = address_min + req_size;
	else if (!address_min && !address_max)
		address_max = ~0ull;  /* If no limits given, use max limits */


	/*
	 * Enforce minimum alignment (this also keeps the minimum free block
	 * req_size the same as the alignment req_size).
	 */
	if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
		alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;

	/*
	 * Adjust address minimum based on requested alignment (round
	 * up to meet alignment).  Do this here so we can reject
	 * impossible requests up front. (NOP for address_min == 0)
	 */
	if (alignment)
		address_min = ALIGN(address_min, alignment);

	/*
	 * Reject inconsistent args.  We have adjusted these, so this
	 * may fail due to our internal changes even if this check
	 * would pass for the values the user supplied.
	 */
	if (req_size > address_max - address_min)
		goto error_out;

	/* Walk through the list entries - first fit found is returned */

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_lock();
	head_addr = cvmx_bootmem_desc->head_addr;
	ent_addr = head_addr;
	for (; ent_addr;
	     prev_addr = ent_addr,
	     ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
		uint64_t usable_base, usable_max;
		uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);

		if (cvmx_bootmem_phy_get_next(ent_addr)
		    && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
			cvmx_dprintf("Internal bootmem_alloc() error: ent: "
				"0x%llx, next: 0x%llx\n",
				(unsigned long long)ent_addr,
				(unsigned long long)
				cvmx_bootmem_phy_get_next(ent_addr));
//......... part of the code omitted here .........
Author: CSCLOG, Project: beaglebone, Lines: 101, Source: cvmx-bootmem.c
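
A note on this first example: the manual rounding of req_size near the top, (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) & ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1), is exactly the bit trick that the later ALIGN(address_min, alignment) call encapsulates. A small self-contained check of that equivalence (the alignment value 16 is an assumption made for this sketch):

#include <assert.h>
#include <stdint.h>

#define CVMX_BOOTMEM_ALIGNMENT_SIZE	((uint64_t)16)	/* assumed value, for illustration only */
#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
	for (uint64_t req_size = 1; req_size <= 4096; req_size++) {
		uint64_t manual = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
				  ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
		assert(manual == ALIGN(req_size, CVMX_BOOTMEM_ALIGNMENT_SIZE));
	}
	return 0;
}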


Example 2: intel_compute_size

unsigned int
intel_compute_size(struct intel_screen_private *intel,
                   int w, int h, int bpp, unsigned usage,
                   uint32_t *tiling, int *stride)
{
	int pitch, size;

	if (*tiling != I915_TILING_NONE) {
		/* First check whether tiling is necessary. */
		pitch = (w * bpp  + 7) / 8;
		pitch = ALIGN(pitch, 64);
		size = pitch * ALIGN (h, 2);
		if (INTEL_INFO(intel)->gen < 040) {
			/* Gen 2/3 has a maximum stride for tiling of
			 * 8192 bytes.
			 */
			if (pitch > KB(8))
				*tiling = I915_TILING_NONE;

			/* Narrower than half a tile? */
			if (pitch < 256)
				*tiling = I915_TILING_NONE;

			/* Older hardware requires fences to be pot size
			 * aligned with a minimum of 1 MiB, so causes
			 * massive overallocation for small textures.
			 */
			if (size < 1024*1024/2 && !intel->has_relaxed_fencing)
				*tiling = I915_TILING_NONE;
		} else if (!(usage & INTEL_CREATE_PIXMAP_DRI2) && size <= 4096) {
			/* Disable tiling beneath a page size, we will not see
			 * any benefit from reducing TLB misses and instead
			 * just incur extra cost when we require a fence.
			 */
			*tiling = I915_TILING_NONE;
		}
	}

	pitch = (w * bpp + 7) / 8;
	if (!(usage & INTEL_CREATE_PIXMAP_DRI2) && pitch <= 256)
		*tiling = I915_TILING_NONE;

	if (*tiling != I915_TILING_NONE) {
		int aligned_h, tile_height;

		if (IS_GEN2(intel))
			tile_height = 16;
		else if (*tiling == I915_TILING_X)
			tile_height = 8;
		else
			tile_height = 32;
		aligned_h = ALIGN(h, tile_height);

		*stride = intel_get_fence_pitch(intel,
						ALIGN(pitch, 512),
						*tiling);

		/* Round the object up to the size of the fence it will live in
		 * if necessary.  We could potentially make the kernel allocate
		 * a larger aperture space and just bind the subset of pages in,
		 * but this is easier and also keeps us out of trouble (as much)
		 * with drm_intel_bufmgr_check_aperture().
		 */
		size = intel_get_fence_size(intel, *stride * aligned_h);

		if (size > intel->max_tiling_size)
			*tiling = I915_TILING_NONE;
	}

	if (*tiling == I915_TILING_NONE) {
		/* We only require a 64 byte alignment for scanouts, but
		 * a 256 byte alignment for sharing with PRIME.
		 */
		*stride = ALIGN(pitch, 256);
		/* Round the height up so that the GPU's access to a 2x2 aligned
		 * subspan doesn't address an invalid page offset beyond the
		 * end of the GTT.
		 */
		size = *stride * ALIGN(h, 2);
	}

	return size;
}
Author: 01org, Project: iotg-lin-gfx-ddx, Lines: 83, Source: intel_memory.c
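
To see the untiled branch of intel_compute_size in isolation: for a linear surface the pitch is padded to 256 bytes (the alignment needed for sharing with PRIME) and the height to 2 rows (so 2x2 subspan access stays inside the buffer). The same arithmetic as a standalone sketch, using an illustrative 1366x768, 32 bpp surface (numbers assumed, not from the article):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	int w = 1366, h = 768, bpp = 32;	/* illustrative surface */
	int pitch  = (w * bpp + 7) / 8;		/* bytes per row before padding: 5464 */
	int stride = ALIGN(pitch, 256);		/* padded row length: 5632 */
	int size   = stride * ALIGN(h, 2);	/* total allocation: 4325376 */
	printf("pitch=%d stride=%d size=%d\n", pitch, stride, size);
	return 0;
}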


Example 3: cache


//......... part of the code omitted here .........
					if( tri->verts[j].xyz[k] > entityDef->localReferenceBounds[1][k] + CHECK_BOUNDS_EPSILON
							|| tri->verts[j].xyz[k] < entityDef->localReferenceBounds[0][k] - CHECK_BOUNDS_EPSILON )
					{
						common->Printf( "bad referenceBounds on %s:%s\n", entityDef->parms.hModel->Name(), shader->GetName() );
						break;
					}
				}
				if( k != 3 )
				{
					break;
				}
			}
		}
		
		// view frustum culling for the precise surface bounds, which is tighter
		// than the entire entity reference bounds
		// If the entire model wasn't visible, there is no need to check the
		// individual surfaces.
		const bool surfaceDirectlyVisible = modelIsVisible && !idRenderMatrix::CullBoundsToMVP( vEntity->mvp, tri->bounds );
		
		// RB: added check whether GPU skinning is available at all
		const bool gpuSkinned = ( tri->staticModelWithJoints != NULL && r_useGPUSkinning.GetBool() && glConfig.gpuSkinningAvailable );
		// RB end
		
		//--------------------------
		// base drawing surface
		//--------------------------
		drawSurf_t* baseDrawSurf = NULL;
		if( surfaceDirectlyVisible )
		{
			// make sure we have an ambient cache and all necessary normals / tangents
			if( !vertexCache.CacheIsCurrent( tri->indexCache ) )
			{
				tri->indexCache = vertexCache.AllocIndex( tri->indexes, ALIGN( tri->numIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
			}
			
			if( !vertexCache.CacheIsCurrent( tri->ambientCache ) )
			{
				// we are going to use it for drawing, so make sure we have the tangents and normals
				if( shader->ReceivesLighting() && !tri->tangentsCalculated )
				{
					assert( tri->staticModelWithJoints == NULL );
					R_DeriveTangents( tri );
					
					// RB: this was hit by parametric particle models ..
					//assert( false );	// this should no longer be hit
					// RB end
				}
				tri->ambientCache = vertexCache.AllocVertex( tri->verts, ALIGN( tri->numVerts * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN ) );
			}
			
			// add the surface for drawing
			// we can re-use some of the values for light interaction surfaces
			baseDrawSurf = ( drawSurf_t* )R_FrameAlloc( sizeof( *baseDrawSurf ), FRAME_ALLOC_DRAW_SURFACE );
			baseDrawSurf->frontEndGeo = tri;
			baseDrawSurf->space = vEntity;
			baseDrawSurf->scissorRect = vEntity->scissorRect;
			baseDrawSurf->extraGLState = 0;
			baseDrawSurf->renderZFail = 0;
			
			R_SetupDrawSurfShader( baseDrawSurf, shader, renderEntity );
			
			// Check for deformations (eyeballs, flares, etc)
			const deform_t shaderDeform = shader->Deform();
			if( shaderDeform != DFRM_NONE )
			{
Author: slacka, Project: RBDOOM-3-BFG, Lines: 67, Source: tr_frontend_addmodels.cpp


Example 4: zynq_load

int zynq_load(Xilinx_desc *desc, const void *buf, size_t bsize)
{
	unsigned long ts; /* Timestamp */
	u32 partialbit = 0;
	u32 i, control, isr_status, status, swap, diff;
	u32 *buf_start;

	/* Detect if we are going working with partial or full bitstream */
	if (bsize != desc->size) {
		printf("%s: Working with partial bitstream\n", __func__);
		partialbit = 1;
	}

	buf_start = check_data((u8 *)buf, bsize, &swap);
	if (!buf_start)
		return FPGA_FAIL;

	/* Check if the data is offset from the start */
	diff = (u32)buf_start - (u32)buf;
	if (diff) {
		printf("%s: Bitstream is not validated yet (diff %x)\n",
		       __func__, diff);
		return FPGA_FAIL;
	}

	if ((u32)buf < SZ_1M) {
		printf("%s: Bitstream has to be placed up to 1MB (%x)\n",
		       __func__, (u32)buf);
		return FPGA_FAIL;
	}

	if ((u32)buf != ALIGN((u32)buf, ARCH_DMA_MINALIGN)) {
		u32 *new_buf = (u32 *)ALIGN((u32)buf, ARCH_DMA_MINALIGN);

		printf("%s: Align buffer at %x to %x(swap %d)\n", __func__,
		       (u32)buf_start, (u32)new_buf, swap);

		for (i = 0; i < (bsize/4); i++)
			new_buf[i] = load_word(&buf_start[i], swap);

		swap = SWAP_DONE;
		buf = new_buf;
	} else if (swap != SWAP_DONE) {
		/* For bitstream which are aligned */
		u32 *new_buf = (u32 *)buf;

		printf("%s: Bitstream is not swapped(%d) - swap it\n", __func__,
		       swap);

		for (i = 0; i < (bsize/4); i++)
			new_buf[i] = load_word(&buf_start[i], swap);

		swap = SWAP_DONE;
	}

	/* Clear loopback bit */
	clrbits_le32(&devcfg_base->mctrl, DEVCFG_MCTRL_PCAP_LPBK);

	if (!partialbit) {
		zynq_slcr_devcfg_disable();

		/* Setting PCFG_PROG_B signal to high */
		control = readl(&devcfg_base->ctrl);
		writel(control | DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);
		/* Setting PCFG_PROG_B signal to low */
		writel(control & ~DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);

		/* Polling the PCAP_INIT status for Reset */
		ts = get_timer(0);
		while (readl(&devcfg_base->status) & DEVCFG_STATUS_PCFG_INIT) {
			if (get_timer(ts) > CONFIG_SYS_FPGA_WAIT) {
				printf("%s: Timeout wait for INIT to clear\n",
				       __func__);
				return FPGA_FAIL;
			}
		}

		/* Setting PCFG_PROG_B signal to high */
		writel(control | DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);

		/* Polling the PCAP_INIT status for Set */
		ts = get_timer(0);
		while (!(readl(&devcfg_base->status) &
			DEVCFG_STATUS_PCFG_INIT)) {
			if (get_timer(ts) > CONFIG_SYS_FPGA_WAIT) {
				printf("%s: Timeout wait for INIT to set\n",
				       __func__);
				return FPGA_FAIL;
			}
		}
	}

	isr_status = readl(&devcfg_base->int_sts);

	/* Clear it all, so if Boot ROM comes back, it can proceed */
	writel(0xFFFFFFFF, &devcfg_base->int_sts);

	if (isr_status & DEVCFG_ISR_FATAL_ERROR_MASK) {
		debug("%s: Fatal errors in PCAP 0x%X\n", __func__, isr_status);

//......... part of the code omitted here .........
Author: Bing0, Project: u-boot, Lines: 101, Source: zynqpl.c


Example 5: ubifs_wbuf_sync_nolock

/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @buf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function does
 * not necessarily write all @c->max_write_size bytes to the flash. Instead,
 * if the write-buffer is only partially filled with data, only the used part
 * of the write-buffer (aligned on @c->min_io_size boundary) is synchronized.
 * This way we waste less space.
 */
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
	struct ubifs_info *c = wbuf->c;
	int err, dirt, sync_len;

	cancel_wbuf_timer_nolock(wbuf);
	if (!wbuf->used || wbuf->lnum == -1)
		/* Write-buffer is empty or not seeked */
		return 0;

	dbg_io("LEB %d:%d, %d bytes, jhead %s",
	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
	ubifs_assert(!(wbuf->avail & 7));
	ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size);
	ubifs_assert(wbuf->size >= c->min_io_size);
	ubifs_assert(wbuf->size <= c->max_write_size);
	ubifs_assert(wbuf->size % c->min_io_size == 0);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->ro_error)
		return -EROFS;

	/*
	 * Do not write whole write buffer but write only the minimum necessary
	 * amount of min. I/O units.
	 */
	sync_len = ALIGN(wbuf->used, c->min_io_size);
	dirt = sync_len - wbuf->used;
	if (dirt)
		ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
	err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
			    sync_len, wbuf->dtype);
	if (err) {
		ubifs_err("cannot write %d bytes to LEB %d:%d",
			  sync_len, wbuf->lnum, wbuf->offs);
		dbg_dump_stack();
		return err;
	}

	spin_lock(&wbuf->lock);
	wbuf->offs += sync_len;
	/*
	 * Now @wbuf->offs is not necessarily aligned to @c->max_write_size.
	 * But our goal is to optimize writes and make sure we write in
	 * @c->max_write_size chunks and to @c->max_write_size-aligned offset.
	 * Thus, if @wbuf->offs is not aligned to @c->max_write_size now, make
	 * sure that @wbuf->offs + @wbuf->size is aligned to
	 * @c->max_write_size. This way we make sure that after next
	 * write-buffer flush we are again at the optimal offset (aligned to
	 * @c->max_write_size).
	 */
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

	if (wbuf->sync_callback)
		err = wbuf->sync_callback(c, wbuf->lnum,
					  c->leb_size - wbuf->offs, dirt);
	return err;
}
Author: 119-org, Project: hi3518-osdrv, Lines: 82, Source: io.c
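
The key ALIGN use here is sync_len = ALIGN(wbuf->used, c->min_io_size): only whole minimum-I/O units are written to the flash, and the gap between the used bytes and that rounded length is padded. The same arithmetic in isolation, with illustrative numbers (min_io_size depends on the flash device):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	int min_io_size = 512;				/* illustrative; device dependent */
	int used = 1300;				/* bytes currently in the write-buffer */
	int sync_len = ALIGN(used, min_io_size);	/* 1536: amount actually written */
	int dirt = sync_len - used;			/* 236: bytes that get padded */
	printf("sync_len=%d dirt=%d\n", sync_len, dirt);
	return 0;
}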


Example 6: esp6_input

static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	struct ipv6_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;

	int hdr_len = skb->h.raw - skb->nh.raw;
	int nfrags;
	unsigned char *tmp_hdr = NULL;
	int ret = 0;

	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
		ret = -EINVAL;
		goto out_nofree;
	}

	if (elen <= 0 || (elen & (blksize-1))) {
		ret = -EINVAL;
		goto out_nofree;
	}

	tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
	if (!tmp_hdr) {
		ret = -ENOMEM;
		goto out_nofree;
	}
	memcpy(tmp_hdr, skb->nh.raw, hdr_len);

	/* If integrity check is required, do this. */
        if (esp->auth.icv_full_len) {
		u8 sum[esp->auth.icv_full_len];
		u8 sum1[alen];

		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);

		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
			BUG();

		if (unlikely(memcmp(sum, sum1, alen))) {
			x->stats.integrity_failed++;
			ret = -EINVAL;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ipv6_esp_hdr*)skb->data;
	iph = skb->nh.ipv6h;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));

        {
		u8 nexthdr[2];
		struct scatterlist *sg = &esp->sgbuf[0];
		u8 padlen;

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg) {
				ret = -ENOMEM;
				goto out;
			}
		}
		skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);

		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
			BUG();

		padlen = nexthdr[0];
		if (padlen+2 >= elen) {
			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
			ret = -EINVAL;
			goto out;
		}
		/* ... check padding bits here. Silly. :-) */ 

		pskb_trim(skb, skb->len - alen - padlen - 2);
		skb->h.raw = skb_pull(skb, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen);
		skb->nh.raw += sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
		memcpy(skb->nh.raw, tmp_hdr, hdr_len);
		skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
		ret = nexthdr[1];
	}

out:
//......... part of the code omitted here .........
Author: nighthawk149, Project: fvs318g-cfw, Lines: 101, Source: esp6.c


Example 7: iwl_pcie_get_cmd_index

static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
				    struct iwl_txq *txq,
				    struct iwl_device_cmd *dev_cmd,
				    struct sk_buff *skb,
				    struct iwl_cmd_meta *out_meta,
				    int hdr_len,
				    int tx_cmd_len,
				    bool pad)
{
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;

	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);

	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
					skb->data + hdr_len,
					tb2_len);
	}

	if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	return tfd;

out_err:
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}
Author: AlexShiLucky, Project: linux, Lines: 69, Source: tx-gen2.c


Example 8: MSG

h264enc *h264enc_new(const struct h264enc_params *p)
{
	h264enc *c;
	int i;

	/* check parameter validity */
	if (!IS_ALIGNED(p->src_width, 16) || !IS_ALIGNED(p->src_height, 16) ||
		!IS_ALIGNED(p->width, 2) || !IS_ALIGNED(p->height, 2) ||
		p->width > p->src_width || p->height > p->src_height)
	{
		MSG("invalid picture size");
		return NULL;
	}

	if (p->qp == 0 || p->qp > 47)
	{
		MSG("invalid QP");
		return NULL;
	}

	if (p->src_format != H264_FMT_NV12 && p->src_format != H264_FMT_NV16)
	{
		MSG("invalid color format");
		return NULL;
	}

	/* allocate memory for h264enc structure */
	c = calloc(1, sizeof(*c));
	if (c == NULL)
	{
		MSG("can't allocate h264enc data");
		return NULL;
	}

	/* copy parameters */
	c->mb_width = DIV_ROUND_UP(p->width, 16);
	c->mb_height = DIV_ROUND_UP(p->height, 16);
	c->mb_stride = p->src_width / 16;

	c->crop_right = (c->mb_width * 16 - p->width) / 2;
	c->crop_bottom = (c->mb_height * 16 - p->height) / 2;

	c->profile_idc = p->profile_idc;
	c->level_idc = p->level_idc;

	c->entropy_coding_mode_flag = p->entropy_coding_mode ? 1 : 0;
	c->pic_init_qp = p->qp;
	c->keyframe_interval = p->keyframe_interval;

	c->write_sps_pps = 1;
	c->current_frame_num = 0;

	/* allocate input buffer */
	c->input_color_format = p->src_format;
	switch (c->input_color_format)
	{
	case H264_FMT_NV12:
		c->input_buffer_size = p->src_width * (p->src_height + p->src_height / 2);
		break;
	case H264_FMT_NV16:
		c->input_buffer_size = p->src_width * p->src_height * 2;
		break;
	}

	c->luma_buffer = ve_malloc(c->input_buffer_size);
	if (c->luma_buffer == NULL)
		goto nomem;

	c->chroma_buffer = c->luma_buffer + p->src_width * p->src_height;

	/* allocate bytestream output buffer */
	c->bytestream_buffer_size = 1 * 1024 * 1024;
	c->bytestream_buffer = ve_malloc(c->bytestream_buffer_size);
	if (c->bytestream_buffer == NULL)
		goto nomem;

	/* allocate reference picture memory */
	unsigned int luma_size = ALIGN(c->mb_width * 16, 32) * ALIGN(c->mb_height * 16, 32);
	unsigned int chroma_size = ALIGN(c->mb_width * 16, 32) * ALIGN(c->mb_height * 8, 32);
	for (i = 0; i < 2; i++)
	{
		c->ref_picture[i].luma_buffer = ve_malloc(luma_size + chroma_size);
		c->ref_picture[i].chroma_buffer = c->ref_picture[i].luma_buffer + luma_size;
		c->ref_picture[i].extra_buffer = ve_malloc(luma_size / 4);
		if (c->ref_picture[i].luma_buffer == NULL || c->ref_picture[i].extra_buffer == NULL)
			goto nomem;
	}

	/* allocate unknown purpose buffers */
	c->extra_buffer_frame = ve_malloc(ALIGN(c->mb_width, 4) * c->mb_height * 8);
	c->extra_buffer_line = ve_malloc(c->mb_width * 32);
	if (c->extra_buffer_frame == NULL || c->extra_buffer_line == NULL)
		goto nomem;

	return c;

nomem:
	MSG("can't allocate VE memory");
	h264enc_free(c);
	return NULL;
//......... part of the code omitted here .........
Author: aRUNTU, Project: FFmpeg-Cedrus, Lines: 101, Source: h264enc.c
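
The reference-picture allocation above rounds the macroblock-aligned width and height up to multiples of 32 pixels before sizing the luma and chroma buffers. That sizing, reduced to standalone arithmetic for an illustrative 1280x720 source (values assumed, not taken from the article):

#include <stdio.h>

#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned w = 1280, h = 720;			/* illustrative source size */
	unsigned mb_width  = DIV_ROUND_UP(w, 16);	/* 80 macroblocks */
	unsigned mb_height = DIV_ROUND_UP(h, 16);	/* 45 macroblocks */
	unsigned luma   = ALIGN(mb_width * 16, 32) * ALIGN(mb_height * 16, 32);
	unsigned chroma = ALIGN(mb_width * 16, 32) * ALIGN(mb_height * 8, 32);
	printf("luma=%u bytes, chroma=%u bytes\n", luma, chroma);	/* 942080, 491520 */
	return 0;
}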


Example 9: sdma_v3_0_init_microcode

/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.smu_load) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		printk(KERN_ERR
		       "sdma_v3_0: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}
Author: forgivemyheart, Project: linux, Lines: 80, Source: sdma_v3_0.c


Example 10: wl1271_boot_upload_nvs

static int wl1271_boot_upload_nvs(struct wl1271 *wl)
{
	size_t nvs_len, burst_len;
	int i;
	u32 dest_addr, val;
	u8 *nvs_ptr, *nvs, *nvs_aligned;

	nvs = wl->nvs;
	if (nvs == NULL)
		return -ENODEV;

	nvs_ptr = nvs;

	nvs_len = wl->nvs_len;

	/* Update the device MAC address into the nvs */
	nvs[11] = wl->mac_addr[0];
	nvs[10] = wl->mac_addr[1];
	nvs[6] = wl->mac_addr[2];
	nvs[5] = wl->mac_addr[3];
	nvs[4] = wl->mac_addr[4];
	nvs[3] = wl->mac_addr[5];

	/*
	 * Layout before the actual NVS tables:
	 * 1 byte : burst length.
	 * 2 bytes: destination address.
	 * n bytes: data to burst copy.
	 *
	 * This is ended by a 0 length, then the NVS tables.
	 */

	/* FIXME: Do we need to check here whether the LSB is 1? */
	while (nvs_ptr[0]) {
		burst_len = nvs_ptr[0];
		dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));

		/* FIXME: Due to our new wl1271_translate_reg_addr function,
		   we need to add the REGISTER_BASE to the destination */
		dest_addr += REGISTERS_BASE;

		/* We move our pointer to the data */
		nvs_ptr += 3;

		for (i = 0; i < burst_len; i++) {
			val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
			       | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));

			wl1271_debug(DEBUG_BOOT,
				     "nvs burst write 0x%x: 0x%x",
				     dest_addr, val);
			wl1271_reg_write32(wl, dest_addr, val);

			nvs_ptr += 4;
			dest_addr += 4;
		}
	}

	/*
	 * We've reached the first zero length, the first NVS table
	 * is 7 bytes further.
	 */
	nvs_ptr += 7;
	nvs_len -= nvs_ptr - nvs;
	nvs_len = ALIGN(nvs_len, 4);

	/* FIXME: The driver sets the partition here, but this is not needed,
	   since it sets to the same one as currently in use */
	/* Now we must set the partition correctly */
	wl1271_set_partition(wl,
			     part_table[PART_WORK].mem.start,
			     part_table[PART_WORK].mem.size,
			     part_table[PART_WORK].reg.start,
			     part_table[PART_WORK].reg.size);

	/* Copy the NVS tables to a new block to ensure alignment */
	nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);

	/* And finally we upload the NVS tables */
	/* FIXME: In wl1271, we upload everything at once.
	   No endianness handling needed here?! The ref driver doesn't do
	   anything about it at this point */
	wl1271_spi_mem_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len);

	kfree(nvs_aligned);
	return 0;
}
Author: mikuhatsune001, Project: linux2.6.32, Lines: 87, Source: wl1271_boot.c


Example 11: intelfb_create

static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	struct device *device = &dev->pdev->dev;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
						      8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->aperture_base = dev->mode_config.fb_base;
	if (!IS_GEN2(dev))
		info->aperture_size = pci_resource_len(dev->pdev, 2);
	else
		info->aperture_size = pci_resource_len(dev->pdev, 0);

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base =
		ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
			   size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
		      fb->width, fb->height,
		      obj->gtt_offset, obj);


//......... part of the code omitted here .........
Author: daveti, Project: prov-kernel, Lines: 101, Source: intel_fb.c


Example 12: do_iommu_domain_map

static int do_iommu_domain_map(struct hisi_iommu_domain *hisi_domain,struct scatterlist *sgl,
		struct iommu_map_format *format, struct map_result *result)
{
	int ret;
	unsigned long phys_len, iova_size;
	unsigned long iova_start;

	struct gen_pool *pool;
	struct iommu_domain *domain;
	struct scatterlist *sg;
	struct tile_format fmt;
	/* calculate whole phys mem length */
	for (phys_len = 0, sg = sgl; sg; sg = sg_next(sg)) {
		phys_len += (unsigned long)ALIGN(sg->length, PAGE_SIZE);
	}

	/* get io virtual address size */
	if (format->is_tile) {
		unsigned long lines;
		unsigned long body_size;
		body_size = phys_len - format->header_size;
		lines = body_size / (format->phys_page_line * PAGE_SIZE);

		/*header need more lines virtual space*/
		if ( format->header_size ){
			unsigned long header_size;
			header_size = ALIGN(format->header_size ,format->virt_page_line * PAGE_SIZE);
			lines +=  header_size / (format->virt_page_line * PAGE_SIZE);
		}

		iova_size = lines * format->virt_page_line * PAGE_SIZE ;
	} else {
		iova_size = phys_len;
	}

	/* alloc iova */
	pool = hisi_domain->iova_pool;
	domain = hisi_domain->domain;
	iova_start = hisi_alloc_iova(pool,iova_size,hisi_domain->range.align);
	if (!iova_start) {
		printk("[%s]hisi_alloc_iova alloc 0x%lx failed!\n", __func__, iova_size);
		printk("[%s]dump iova pool begain--------------------------\n", __func__);
		printk("iova available: 0x%x\n",(unsigned int)hisi_iommu_iova_available());
		printk("alloc count: %d, free count: %d\n",
				dbg_inf.alloc_iova_count, dbg_inf.free_iova_count);
		printk("[%s]dump iova pool end   --------------------------\n", __func__);
		return -EINVAL;
	}

	if (0x100000000 < (iova_start + iova_size)) {
		pr_err("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! "
				"hisi iommu can not deal with iova 0x%lx size 0x%lx\n",
				iova_start, iova_size);
	}

	/* do map */
	if (format->is_tile) {
		fmt.is_tile = format->is_tile;
		fmt.phys_page_line = format->phys_page_line;
		fmt.virt_page_line = format->virt_page_line;
		fmt.header_size = format->header_size ;
		ret = iommu_map_tile(domain, iova_start, sgl, iova_size, 0,&fmt);
	} else {
		ret = iommu_map_range(domain, iova_start,sgl,(size_t)iova_size,format->prot);
	}

	if (ret) {
		printk(KERN_ERR "[%s]map failed!\n", __func__);
		hisi_free_iova(pool, iova_start, iova_size);
		return ret;
	}else {
		/* out put result */
		result->iova_start = iova_start;
		result->iova_size = iova_size;
	}
	return 0;
}
Author: XePeleato, Project: android_kernel_huawei_venus, Lines: 77, Source: ion-iommu-map.c


Example 13: mdss_mdp_get_plane_sizes

int mdss_mdp_get_plane_sizes(u32 format, u32 w, u32 h,
			     struct mdss_mdp_plane_sizes *ps, u32 bwc_mode)
{
	struct mdss_mdp_format_params *fmt;
	int i, rc;
	u32 bpp, ystride0_off, ystride1_off;
	if (ps == NULL)
		return -EINVAL;

	if ((w > MAX_IMG_WIDTH) || (h > MAX_IMG_HEIGHT))
		return -ERANGE;

	fmt = mdss_mdp_get_format_params(format);
	if (!fmt)
		return -EINVAL;

	bpp = fmt->bpp;
	memset(ps, 0, sizeof(struct mdss_mdp_plane_sizes));

	if (bwc_mode) {
		rc = mdss_mdp_get_rau_strides(w, h, fmt, ps);
		if (rc)
			return rc;
		ystride0_off = DIV_ROUND_UP(h, ps->rau_h[0]);
		ystride1_off = DIV_ROUND_UP(h, ps->rau_h[1]);
		ps->plane_size[0] = (ps->ystride[0] * ystride0_off) +
				    (ps->ystride[1] * ystride1_off);
		ps->ystride[0] += ps->ystride[1];
		ps->ystride[1] = 2;
		ps->plane_size[1] = ps->rau_cnt * ps->ystride[1] *
				   (ystride0_off + ystride1_off);
	} else {
		if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
			ps->num_planes = 1;
			ps->plane_size[0] = w * h * bpp;
			ps->ystride[0] = w * bpp;
		} else if (format == MDP_Y_CBCR_H2V2_VENUS) {
			int cf = COLOR_FMT_NV12;
			ps->num_planes = 2;
			ps->ystride[0] = VENUS_Y_STRIDE(cf, w);
			ps->ystride[1] = VENUS_UV_STRIDE(cf, w);
			ps->plane_size[0] = VENUS_Y_SCANLINES(cf, h) *
				ps->ystride[0];
			ps->plane_size[1] = VENUS_UV_SCANLINES(cf, h) *
				ps->ystride[1];
		} else {
			u8 hmap[] = { 1, 2, 1, 2 };
			u8 vmap[] = { 1, 1, 2, 2 };
			u8 horiz, vert, stride_align, height_align;

			horiz = hmap[fmt->chroma_sample];
			vert = vmap[fmt->chroma_sample];

			switch (format) {
			case MDP_Y_CR_CB_GH2V2:
				stride_align = 16;
				height_align = 1;
				break;
			default:
				stride_align = 1;
				height_align = 1;
				break;
			}

			ps->ystride[0] = ALIGN(w, stride_align);
			ps->ystride[1] = ALIGN(w / horiz, stride_align);
			ps->plane_size[0] = ps->ystride[0] *
				ALIGN(h, height_align);
			ps->plane_size[1] = ps->ystride[1] * (h / vert);

			if (fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR) {
				ps->num_planes = 2;
				ps->plane_size[1] *= 2;
				ps->ystride[1] *= 2;
			} else { /* planar */
				ps->num_planes = 3;
				ps->plane_size[2] = ps->plane_size[1];
				ps->ystride[2] = ps->ystride[1];
			}
		}
	}
	for (i = 0; i < ps->num_planes; i++)
		ps->total_size += ps->plane_size[i];

	return 0;
}
Author: grzmot22, Project: android_kernel_msm, Lines: 86, Source: mdss_mdp_util.c
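
In the planar branches the essential pattern is: pad each row to the stride alignment with ALIGN, then multiply by the (possibly sub-sampled) number of rows. A reduced sketch of the pseudo-planar (NV12-like) case with illustrative numbers and a 16-byte stride alignment (both are assumptions for this sketch; in the function above the alignment depends on the format):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	unsigned w = 1080, h = 1920;		/* illustrative frame size */
	unsigned stride_align = 16;		/* assumed; format dependent in the driver */
	unsigned horiz = 2, vert = 2;		/* 4:2:0 chroma subsampling */
	unsigned ystride0 = ALIGN(w, stride_align);		/* luma row: 1088 */
	unsigned ystride1 = ALIGN(w / horiz, stride_align) * 2;	/* interleaved Cb/Cr row: 1088 */
	unsigned plane0 = ystride0 * h;				/* 2088960 */
	unsigned plane1 = ystride1 * (h / vert);		/* 1044480 */
	printf("luma=%u chroma=%u total=%u\n", plane0, plane1, plane0 + plane1);
	return 0;
}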


Example 14: cvmx_bootmem_phy_named_block_alloc

int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
					   uint64_t max_addr,
					   uint64_t alignment,
					   char *name,
					   uint32_t flags)
{
	int64_t addr_allocated;
	struct cvmx_bootmem_named_block_desc *named_block_desc_ptr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: "
		     "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
		     (unsigned long long)size,
		     (unsigned long long)min_addr,
		     (unsigned long long)max_addr,
		     (unsigned long long)alignment,
		     name);
#endif
	if (cvmx_bootmem_desc->major_version != 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
			     "%d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		return -1;
	}

	/*
	 * Take lock here, as name lookup/block alloc/name add need to
	 * be atomic.
	 */
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));

	/* Get pointer to first available named block descriptor */
	named_block_desc_ptr =
		cvmx_bootmem_phy_named_block_find(NULL,
						  flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);

	/*
	 * Check to see if name already in use, return error if name
	 * not available or no more room for blocks.
	 */
	if (cvmx_bootmem_phy_named_block_find(name,
					      flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) {
		if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
			cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
		return -1;
	}


	/*
	 * Round size up to a multiple of the minimum alignment bytes. We need
	 * the actual size allocated to allow for blocks to be
	 * coalesced when they are freed.  The alloc routine does the
	 * same rounding up on all allocations.
	 */
	size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);

	addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
						alignment,
						flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
	if (addr_allocated >= 0) {
		named_block_desc_ptr->base_addr = addr_allocated;
		named_block_desc_ptr->size = size;
		strncpy(named_block_desc_ptr->name, name,
			cvmx_bootmem_desc->named_block_name_len);
		named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;
	}

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
	return addr_allocated;
}
Author: CSCLOG, Project: beaglebone, Lines: 74, Source: cvmx-bootmem.c


Example 15: big_key_preparse

/*
 * Preparse a big key
 */
int big_key_preparse(struct key_preparsed_payload *prep)
{
	struct path *path = (struct path *)&prep->payload.data[big_key_path];
	struct file *file;
	u8 *enckey;
	u8 *data = NULL;
	ssize_t written;
	size_t datalen = prep->datalen;
	int ret;

	ret = -EINVAL;
	if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
		goto error;

	/* Set an arbitrary quota */
	prep->quotalen = 16;

	prep->payload.data[big_key_len] = (void *)(unsigned long)datalen;

	if (datalen > BIG_KEY_FILE_THRESHOLD) {
		/* Create a shmem file to store the data in.  This will permit the data
		 * to be swapped out if needed.
		 *
		 * File content is stored encrypted with randomly generated key.
		 */
		size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));

		/* prepare aligned data to encrypt */
		data = kmalloc(enclen, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		memcpy(data, prep->data, datalen);
		memset(data + datalen, 0x00, enclen - datalen);

		/* generate random key */
		enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
		if (!enckey) {
			ret = -ENOMEM;
			goto error;
		}

		ret = big_key_gen_enckey(enckey);
		if (ret)
			goto err_enckey;

		/* encrypt aligned data */
		ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey);
		if (ret)
			goto err_enckey;

		/* save aligned data to file */
		file = shmem_kernel_file_setup("", enclen, 0);
		if (IS_ERR(file)) {
			ret = PTR_ERR(file);
			goto err_enckey;
		}

		written = kernel_write(file, data, enclen, 0);
		if (written != enclen) {
			ret = written;
			if (written >= 0)
				ret = -ENOMEM;
			goto err_fput;
		}

		/* Pin the mount and dentry to the key so that we can open it again
		 * later
		 */
		prep->payload.data[big_key_data] = enckey;
		*path = file->f_path;
		path_get(path);
		fput(file);
		kfree(data);
	} else {
		/* Just store the data in a buffer */
		void *data = kmalloc(datalen, GFP_KERNEL);

		if (!data)
			return -ENOMEM;

		prep->payload.data[big_key_data] = data;
		memcpy(data, prep->data, prep->datalen);
	}
	return 0;

err_fput:
	fput(file);
err_enckey:
	kfree(enckey);
error:
	kfree(data);
	return ret;
}
Author: AK101111, Project: linux, Lines: 97, Source: big_key.c


Example 16: prepare_constant_buffer


//......... part of the code omitted here .........
       */
      assert(MAX_CLIP_PLANES == 6);
      for (j = 0; j < MAX_CLIP_PLANES; j++) {
	 if (ctx->Transform.ClipPlanesEnabled & (1<<j)) {
	    buf[offset + i * 4 + 0] = ctx->Transform._ClipUserPlane[j][0];
	    buf[offset + i * 4 + 1] = ctx->Transform._ClipUserPlane[j][1];
	    buf[offset + i * 4 + 2] = ctx->Transform._ClipUserPlane[j][2];
	    buf[offset + i * 4 + 3] = ctx->Transform._ClipUserPlane[j][3];
	    i++;
	 }
      }
   }

   /* vertex shader constants */
   if (brw->curbe.vs_size) {
      GLuint offset = brw->curbe.vs_start * 16;
      GLuint nr = brw->vs.prog_data->nr_params / 4;

      /* Load the subset of push constants that will get used when
       * we also have a pull constant buffer.
       */
      for (i = 0; i < vp->program.Base.Parameters->NumParameters; i++) {
	 if (brw->vs.constant_map[i] != -1) {
	    assert(brw->vs.constant_map[i] <= nr);
	    memcpy(buf + offset + brw->vs.constant_map[i] * 4,
		   vp->program.Base.Parameters->ParameterValues[i],
		   4 * sizeof(float));
	 }
      }
   }

   if (0) {
      for (i = 0; i < sz*16; i+=4) 
	 printf("curbe %d.%d: %f %f %f %f\n", i/8, i&4,
		buf[i+0], buf[i+1], buf[i+2], buf[i+3]);

      printf("last_buf %p buf %p sz %d/%d cmp %d\n",
	     brw->curbe.last_buf, buf,
	     bufsz, brw->curbe.last_bufsz,
	     brw->curbe.last_buf ? memcmp(buf, brw->curbe.last_buf, bufsz) : -1);
   }

   if (brw->curbe.curbe_bo != NULL &&
       bufsz == brw->curbe.last_bufsz &&
       memcmp(buf, brw->curbe.last_buf, bufsz) == 0) {
      /* constants have not changed */
   } else
//......... the rest of the code is omitted here .........