本文整理汇总了C++中pgprot_writecombine函数的典型用法代码示例。如果您正苦于以下问题:C++ pgprot_writecombine函数的具体用法?C++ pgprot_writecombine怎么用?C++ pgprot_writecombine使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pgprot_writecombine函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: rockchip_gem_alloc_iommu
/*
 * Allocate backing pages for a rockchip GEM object and map them through
 * the IOMMU.
 *
 * @rk_obj:     object to back with pages
 * @alloc_kmap: when true, also vmap() the pages into the kernel with a
 *              write-combined protection so the CPU can touch the buffer
 *
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far is released again (reverse order via the goto chain).
 */
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		/* Write-combined kernel mapping of the page array. */
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);
	return ret;
}
开发者ID:AlexShiLucky,项目名称:linux,代码行数:32,代码来源:rockchip_drm_gem.c
示例2: mips_dma_mmap
/*
 * mmap helper for MIPS DMA buffers: remaps the pages backing @cpu_addr
 * into @vma.  The mapping is write-combined when DMA_ATTR_WRITE_COMBINE
 * is requested, otherwise fully uncached.  Returns 0 on success,
 * -ENXIO when the requested window does not fit inside the buffer.
 */
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	void *cpu_addr, dma_addr_t dma_addr, size_t size,
	unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)cpu_addr;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	/* Non-coherent devices were handed an uncached alias of the buffer;
	 * convert back to the cached address before looking up the page. */
	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);
	pfn = page_to_pfn(virt_to_page((void *)addr));

	if (attrs & DMA_ATTR_WRITE_COMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Buffers from a per-device coherent pool are mapped by the core. */
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Only remap when the requested window lies inside the buffer. */
	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}
开发者ID:12zz,项目名称:linux,代码行数:33,代码来源:dma-default.c
示例3: dma_alloc_writecombine
/*
 * dma_alloc_writecombine - allocate a DMA region with write-combine
 * attributes, in much the same way as dma_alloc_coherent().
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	pgprot_t wc_prot = pgprot_writecombine(pgprot_kernel);

	return __dma_alloc(dev, size, handle, gfp, wc_prot,
			   __builtin_return_address(0));
}
开发者ID:12019,项目名称:mediatek,代码行数:11,代码来源:dma-mapping.c
示例4: arch_dma_mmap_pgprot
/*
 * Pick the page protection used when mmap'ing a DMA buffer: coherent
 * devices keep the default protection unless the caller explicitly asked
 * for write-combining; everything else gets a write-combined mapping.
 */
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	bool keep_default = dev_is_dma_coherent(dev) &&
			    !(attrs & DMA_ATTR_WRITE_COMBINE);

	return keep_default ? prot : pgprot_writecombine(prot);
}
开发者ID:bristot,项目名称:linux,代码行数:7,代码来源:dma-mapping.c
示例5: imx_iram_audio_playback_mmap
/*
 * Enable user-space access to the IRAM playback buffer by remapping its
 * physical pages into the calling process' address space.  The mapping
 * is write-combined and marked VM_IO.
 */
static int imx_iram_audio_playback_mmap(struct snd_pcm_substream *substream,
					struct vm_area_struct *area)
{
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	unsigned long off;
	unsigned long phys;
	unsigned long size;
	int ret = 0;

	area->vm_ops = &snd_mxc_audio_playback_vm_ops;
	area->vm_private_data = substream;

	off = area->vm_pgoff << PAGE_SHIFT;	/* byte offset requested by mmap() */
	phys = buf->addr + off;
	size = area->vm_end - area->vm_start;

	/* The requested window must lie entirely inside the IRAM buffer. */
	if (off + size > SND_RAM_SIZE)
		return -EINVAL;

	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	area->vm_flags |= VM_IO;
	ret =
	    remap_pfn_range(area, area->vm_start, phys >> PAGE_SHIFT,
			    size, area->vm_page_prot);
	/* Perform the vm_open() the VM layer would otherwise have done. */
	if (ret == 0)
		area->vm_ops->open(area);

	return ret;
}
开发者ID:fwmfee,项目名称:linux-legacy,代码行数:32,代码来源:imx-pcm.c
示例6: drm_gem_mmap_obj
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	/* Write-combined mapping of the (device) memory backing the object. */
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);

	return 0;
}
开发者ID:electrikjesus,项目名称:kernel_intel-uefi,代码行数:56,代码来源:drm_gem.c
示例7: __get_dma_pgprot
/*
 * Choose the page protection for a DMA mapping: a coherent mapping with
 * no explicit write-combine request keeps the given protection; every
 * other combination is mapped write-combined.
 */
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	if (coherent && !dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return prot;
	return pgprot_writecombine(prot);
}
开发者ID:shri360,项目名称:kernel-moto-c-plus,代码行数:7,代码来源:dma-mapping.c
示例8: exynos_mem_mmap
int exynos_mem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct exynos_mem *mem = (struct exynos_mem *)filp->private_data;
bool cacheable = mem->cacheable;
dma_addr_t start = vma->vm_pgoff << PAGE_SHIFT;
u32 pfn = vma->vm_pgoff;
u32 size = vma->vm_end - vma->vm_start;
if (!cma_is_registered_region(start, size)) {
pr_err("[%s] handling non-cma region (%#[email protected]%#x)is prohibited\n",
__func__, size, start);
return -EINVAL;
}
if (!cacheable)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_flags |= VM_RESERVED;
vma->vm_ops = &exynos_mem_ops;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
pr_err("writable mapping must be shared\n");
return -EINVAL;
}
if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
pr_err("mmap fail\n");
return -EINVAL;
}
vma->vm_ops->open(vma);
return 0;
}
开发者ID:Thinkware-Device,项目名称:willow,代码行数:34,代码来源:exynos_mem.c
示例9: ion_cp_heap_map_user
/*
 * Map a content-protected ION buffer into user space.
 *
 * The mapping is created only while the heap is unprotected; when the
 * heap is protected the function falls through and returns the initial
 * -EAGAIN.  Uncached buffers are mapped write-combined.  umap_count
 * tracks live user mappings so the claimed region can be released later.
 */
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma, unsigned long flags)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		/* Claim the underlying memory region before handing it out. */
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(flags))
			vma->vm_page_prot = pgprot_writecombine(
							vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		/* Failure releases the region claim; success counts the map. */
		if (ret_value)
			ion_cp_release_region(cp_heap);
		else
			++cp_heap->umap_count;
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}
开发者ID:fikus011,项目名称:primou-kernel-HELLBOY,代码行数:31,代码来源:ion_cp_heap.c
示例10: pci_mmap_page_range
/*
 * Map a PCI memory-space range into user space.  The range is mapped
 * write-combined only when requested AND EFI reports the physical range
 * supports WC; otherwise it is mapped uncached.  I/O space cannot be
 * mapped on this platform at all.
 */
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped. But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	/*
	 * Leave vm_pgoff as-is, the PCI space address is the physical
	 * address on this platform.
	 */
	if (write_combine && efi_range_is_wc(vma->vm_start,
					     vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
开发者ID:mrtos,项目名称:Logitech-Revue,代码行数:33,代码来源:pci.c
示例11: mali_mmap
/**
 * mmap handler for the Mali device file: allocates GPU memory for the
 * requested range (block allocator first, OS allocator as fallback) and
 * wires it into @vma with a write-combined, IO-style mapping.
 *
 * @note munmap handling is done by the vma close handler.
 */
int mali_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mali_session_data *session;
	mali_mem_allocation *descriptor;
	u32 size = vma->vm_end - vma->vm_start;
	u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;	/* GPU VA encoded in the offset */

	session = (struct mali_session_data *)filp->private_data;
	if (NULL == session) {
		MALI_PRINT_ERROR(("mmap called without any session data available\n"));
		return -EFAULT;
	}

	MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
			     (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
			     (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));

	/* Set some bits which indicate that, the memory is IO memory, meaning
	 * that no paging is to be performed and the memory should not be
	 * included in crash dumps. And that the memory is reserved, meaning
	 * that it's present and can never be paged out (see also previous
	 * entry)
	 */
	vma->vm_flags |= VM_IO;
	vma->vm_flags |= VM_DONTCOPY;
	vma->vm_flags |= VM_PFNMAP;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
	vma->vm_flags |= VM_RESERVED;
#else
	vma->vm_flags |= VM_DONTDUMP;
	vma->vm_flags |= VM_DONTEXPAND;
#endif

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &mali_kernel_vm_ops;	/* Operations used on any memory system */

	/* Try the dedicated block allocator first, then fall back to OS pages. */
	descriptor = mali_mem_block_alloc(mali_addr, size, vma, session);
	if (NULL == descriptor) {
		descriptor = mali_mem_os_alloc(mali_addr, size, vma, session);
		if (NULL == descriptor) {
			MALI_DEBUG_PRINT(3, ("MMAP failed\n"));
			return -ENOMEM;
		}
	}

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	vma->vm_private_data = (void *)descriptor;

	/* Put on descriptor map; on failure release the OS allocation under
	 * the session memory lock. */
	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &descriptor->id)) {
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_os_release(descriptor);
		_mali_osk_mutex_signal(session->memory_lock);
		return -EFAULT;
	}

	return 0;
}
开发者ID:HuaweiHonor4C,项目名称:kernel_hi6210sft_mm,代码行数:60,代码来源:mali_memory.c
示例12: hv_cdev_mmap
/*
 * Map the device's physical window into user space.
 *
 * vma->vm_pgoff gives the page offset into the device region; the
 * requested span must fit inside priv->dev_size.  hv_mmap_type selects
 * the caching policy of the mapping (0 = default/cached,
 * 1 = write-combine, 2 = uncached).
 */
static int hv_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	hv_cdev_private *priv = filp->private_data;
	int res;
	/* vm_pgoff = the offset of the area in the file, in pages;
	 * shift by PAGE_SHIFT to get the physical address offset.
	 * off is decided by the user's mmap() offset parameter. */
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	phys_addr_t physical = priv->phys_start + off;
	unsigned long vsize = vma->vm_end - vma->vm_start;
	unsigned long psize = priv->dev_size - off;

	PINFO("%s: enter\n", __func__);
	PINFO("off=%lu, physical=%p, vsize=%lu, psize=%lu\n",
	      off, (void *)physical, vsize, psize);

	if (vsize > psize) {
		PERR("%s: requested vma size exceeds disk size\n", __func__);
		return -EINVAL;
	}

	vma->vm_ops = &hv_cdev_vm_ops;

	switch (hv_mmap_type) {
	case 0:
	default:
		break;
	case 1:
		/* Fixed: pgprot_writecombine() returns the new protection;
		 * the result was previously discarded, so the mapping stayed
		 * cached no matter what hv_mmap_type requested. */
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		break;
	case 2:
		/* Fixed: same dropped-return bug for the uncached variant. */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		break;
	}

	vma->vm_flags |= VM_LOCKED;	/* locked from swap */

	/* Fixed: shift before casting so a >32-bit physical address is not
	 * truncated to int prior to the PAGE_SHIFT. */
	PDEBUG("phys_start=%p, page_frame_num=%d\n",
	       (void *)priv->phys_start, (int)(priv->phys_start >> PAGE_SHIFT));

	/* Remap the physical address of the device into user-space VM. */
	res = remap_pfn_range(vma,
			      vma->vm_start,
			      physical >> PAGE_SHIFT, /* = pfn */
			      vsize,
			      vma->vm_page_prot);
	if (res) {
		/* Fixed: "/n" typo in the log message's newline escape. */
		PERR("%s: error from remap_pfn_range()\n", __func__);
		return -EAGAIN;
	} else
		PDEBUG("%s: Physical mem remapped to user VA\n", __func__);

	return 0;
}
开发者ID:ssg10,项目名称:linux_char_driver_and_app,代码行数:59,代码来源:hv_cdev.c
示例13: drm_gem_mmap
/*
 * mmap routine for GEM objects: looks up the object via the fake offset
 * stored in vma->vm_pgoff (offset_hash); unknown offsets fall back to the
 * legacy drm_mmap() path.  On success the VMA is set up with the driver's
 * gem_vm_ops and a write-combined protection.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	/* Not a GEM fake offset: hand off to the legacy map path. */
	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping; released by the matching vm_close. */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
开发者ID:romanbb,项目名称:android_kernel_lge_d851,代码行数:59,代码来源:drm_gem.c
示例14: drm_gem_mmap
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	/* Offsets not in the hash belong to the legacy drm_mmap() path. */
	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
开发者ID:174high,项目名称:dell-driver,代码行数:73,代码来源:drm_gem.c
示例15: kbase_cpu_mmap
static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, u32 nr_pages)
{
struct kbase_cpu_mapping *map;
u64 start_off = vma->vm_pgoff - reg->start_pfn;
phys_addr_t *page_array;
int err = 0;
int i;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
WARN_ON(1);
err = -ENOMEM;
goto out;
}
/*
* VM_DONTCOPY - don't make this mapping available in fork'ed processes
* VM_DONTEXPAND - disable mremap on this region
* VM_IO - disables paging
* VM_DONTDUMP - Don't include in core dumps (3.7 only)
* VM_MIXEDMAP - Support mixing struct page*s and raw pfns.
* This is needed to support using the dedicated and
* the OS based memory backends together.
*/
/*
* This will need updating to propagate coherency flags
* See MIDBASE-1057
*/
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO | VM_MIXEDMAP;
#else
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO | VM_MIXEDMAP;
#endif
vma->vm_ops = &kbase_vm_ops;
vma->vm_private_data = reg;
page_array = kbase_get_phy_pages(reg);
if (!(reg->flags & KBASE_REG_CPU_CACHED)) {
/* We can't map vmalloc'd memory uncached.
* Other memory will have been returned from
* kbase_phy_pages_alloc which should have done the cache
* maintenance necessary to support an uncached mapping
*/
BUG_ON(kaddr);
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
if (!kaddr) {
for (i = 0; i < nr_pages; i++) {
err = vm_insert_mixed(vma, vma->vm_start + (i << PAGE_SHIFT), page_array[i + start_off] >> PAGE_SHIFT);
WARN_ON(err);
if (err)
break;
}
} else {
开发者ID:Hani-K,项目名称:H-Vitamin,代码行数:58,代码来源:mali_kbase_mem_linux.c
示例16: __get_dma_pgprot
/*
 * Select the page protection for a DMA mapping.  Strongly-ordered
 * requests win and are mapped uncached; otherwise a coherent mapping
 * without an explicit write-combine request keeps the given protection,
 * and everything else is mapped write-combined.
 */
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
		return pgprot_noncached(prot);

	if (coherent && !dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return prot;

	return pgprot_writecombine(prot);
}
开发者ID:D5T,项目名称:android_kernel_nubia_NX549J,代码行数:9,代码来源:dma-mapping.c
示例17: pscnv_mmap
/*
 * mmap dispatcher for the pscnv device file.  The mmap offset encodes
 * what is being mapped: offsets below 1<<31 go to the legacy drm_mmap()
 * path, offsets below 1<<32 map a channel, and anything above carries a
 * GEM handle in its upper 32 bits.  VRAM objects are remapped from the
 * framebuffer BAR with a write-combined protection; SYSRAM objects are
 * faulted in on demand via pscnv_sysram_ops.
 */
int pscnv_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct pscnv_bo *bo;
	int ret;

	if (vma->vm_pgoff * PAGE_SIZE < (1ull << 31))
		return drm_mmap(filp, vma);

	if (vma->vm_pgoff * PAGE_SIZE < (1ull << 32))
		return pscnv_chan_mmap(filp, vma);

	/* GEM handle lives in the upper 32 bits of the byte offset. */
	obj = drm_gem_object_lookup(dev, priv, (vma->vm_pgoff * PAGE_SIZE) >> 32);
	if (!obj)
		return -ENOENT;
	bo = obj->driver_private;

	if (vma->vm_end - vma->vm_start > bo->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	switch (bo->flags & PSCNV_GEM_MEMTYPE_MASK) {
	case PSCNV_GEM_VRAM_SMALL:
	case PSCNV_GEM_VRAM_LARGE:
		if ((ret = dev_priv->vm->map_user(bo))) {
			drm_gem_object_unreference_unlocked(obj);
			return ret;
		}

		vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
		vma->vm_ops = &pscnv_vram_ops;
		vma->vm_private_data = obj;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_file = filp;

		return remap_pfn_range(vma, vma->vm_start,
				(dev_priv->fb_phys + bo->map1->start) >> PAGE_SHIFT,
				vma->vm_end - vma->vm_start, PAGE_SHARED);
	case PSCNV_GEM_SYSRAM_SNOOP:
	case PSCNV_GEM_SYSRAM_NOSNOOP:
		/* XXX */
		vma->vm_flags |= VM_RESERVED;
		vma->vm_ops = &pscnv_sysram_ops;
		vma->vm_private_data = obj;
		vma->vm_file = filp;

		return 0;
	default:
		drm_gem_object_unreference_unlocked(obj);
		return -ENOSYS;
	}
}
开发者ID:Aeternam,项目名称:gdev,代码行数:57,代码来源:pscnv_vm.c
示例18: phys_mem_access_prot
/*
 * Decide the protection used when user space maps physical memory
 * (e.g. via /dev/mem): non-RAM pfns are always mapped uncached; valid
 * RAM is mapped write-combined when the file was opened O_SYNC, and
 * with the caller's protection otherwise.
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (pfn_valid(pfn)) {
		if (file->f_flags & O_SYNC)
			return pgprot_writecombine(vma_prot);
		return vma_prot;
	}

	return pgprot_noncached(vma_prot);
}
开发者ID:austriancoder,项目名称:linux,代码行数:9,代码来源:mmu.c
示例19: axi_dma_mmap
/*
 * Map the DMA buffer's physical range (mem_info.phy_base) into user
 * space with a write-combined protection.  Returns 0 on success or
 * -EAGAIN when the remap fails.
 */
static int axi_dma_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;
	unsigned long pfn = mem_info.phy_base >> PAGE_SHIFT;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, pfn, len, vma->vm_page_prot))
		return -EAGAIN;

	vma->vm_flags &= ~VM_IO;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
开发者ID:v3best,项目名称:YunSDR,代码行数:10,代码来源:v3best-dma-adc.c
示例20: __attribute__
/*
 * Translate hwmem cache hint flags into a page protection.  Cached
 * allocations keep *pgprot untouched; write-combine hints get a WC
 * protection; everything else is mapped uncached.  Weak so a platform
 * can override the policy.
 */
void __attribute__((weak)) cachi_set_pgprot_cache_options(
	enum hwmem_alloc_flags cache_settings, pgprot_t *pgprot)
{
	if (cache_settings & HWMEM_ALLOC_HINT_CACHED)
		return;	/* keep the (cacheable) protection as-is */

	if (cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE)
		*pgprot = pgprot_writecombine(*pgprot);
	else
		*pgprot = pgprot_noncached(*pgprot);
}
开发者ID:CallMeVentus,项目名称:i9070_kernel_CoCore-P,代码行数:10,代码来源:cache_handler.c
注:本文中的pgprot_writecombine函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论