
C++ down_read_trylock Function Code Examples


This article collects typical usage examples of the down_read_trylock function from C/C++ (Linux kernel) code. If you have been wondering exactly what down_read_trylock does, how to call it, or what it looks like in real code, the hand-picked examples below should help.



Twenty code examples of down_read_trylock are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
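
Before diving into the examples, here is the basic contract: down_read_trylock() attempts to take the read side of an rw_semaphore without sleeping, returning 1 on success and 0 on failure, and every successful acquisition must be paired with up_read(). The minimal module sketch below illustrates this; the names demo_sem and demo_read_init are hypothetical and not taken from any of the projects quoted in this article.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_sem);		/* statically initialized rw_semaphore */

static int __init demo_read_init(void)
{
	/* Non-blocking attempt: 1 means the read lock was taken, 0 means it is held for write. */
	if (down_read_trylock(&demo_sem)) {
		pr_info("demo: acquired the read lock without blocking\n");
		up_read(&demo_sem);	/* every successful trylock needs a matching release */
	} else {
		pr_info("demo: semaphore busy, giving up instead of sleeping\n");
	}
	return 0;
}

static void __exit demo_read_exit(void)
{
}

module_init(demo_read_init);
module_exit(demo_read_exit);
MODULE_LICENSE("GPL");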

Example 1: down_read_trylock_init

int __init down_read_trylock_init(void) 
{
	int ret;
	init_rwsem( &rwsem );   	//initialize the read/write semaphore
	printk("<0>after init_rwsem, count: %ld\n",rwsem.count);

	if( EXEC_DOWN_WRITE )
		down_write( &rwsem );   //a writer acquires the semaphore

	ret = down_read_trylock( &rwsem ); 	//a reader tries to acquire the semaphore
	if(ret)
	{
		printk("<0>first down_read_trylock, count: %ld\n",rwsem.count);
		ret = down_read_trylock( &rwsem );   
		printk("<0>second down_read_trylock, count: %ld\n",rwsem.count);

		while( rwsem.count != 0 )
		{
			up_read( &rwsem );     	//the reader releases the semaphore
		}
	}	
	else
		printk("<0>down_read_trylock failed!\n");

	return 0;
}
Developer ID: fjrti, Project: snippets, Lines: 26, Source: down_read_trylock.c


Example 2: ocfs2_readpages

static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	int ret, err = -EIO;
	struct inode *inode = mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start;
	struct page *last;

	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return err;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ocfs2_inode_unlock(inode, 0);
		return err;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_unlock;

	last = list_entry(pages->prev, struct page, lru);
	start = (loff_t)last->index << PAGE_CACHE_SHIFT;
	if (start >= i_size_read(inode))
		goto out_unlock;

	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
	up_read(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 0);

	return err;
}
Developer ID: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines: 34, Source: aops.c


Example 3: ocfs2_readpage

static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem) == 0) {
		ret = AOP_TRUNCATED_PAGE;
		goto out_meta_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock.  We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here.  Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user_page(page, 0, PAGE_SIZE, KM_USER0);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	ret = ocfs2_data_lock_with_page(inode, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out_alloc;
	}

	ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

	ocfs2_data_unlock(inode, 0);
out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_meta_unlock:
	ocfs2_meta_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	mlog_exit(ret);
	return ret;
}
Developer ID: robacklin, Project: nxc2620, Lines: 60, Source: aops.c


Example 4: nilfs_test_bdev_super2

static int nilfs_test_bdev_super2(struct super_block *s, void *data)
{
	struct nilfs_super_data *sd = data;
	int ret;

	if (s->s_bdev != sd->bdev)
		return 0;

	if (!((s->s_flags | sd->flags) & MS_RDONLY))
		return 1; /* Reuse an old R/W-mode super_block */

	if (s->s_flags & sd->flags & MS_RDONLY) {
		if (down_read_trylock(&s->s_umount)) {
			ret = s->s_root &&
				(sd->cno == NILFS_SB(s)->s_snapshot_cno);
			up_read(&s->s_umount);
			/*
			 * This path is locked with sb_lock by sget().
			 * So, drop_super() causes deadlock.
			 */
			return ret;
		}
	}
	return 0;
}
Developer ID: mikeberkelaar, Project: grhardened, Lines: 25, Source: super.c


Example 5: xfs_sync_worker

/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	/*
	 * We shouldn't write/force the log if we are in the mount/unmount
	 * process or on a read only filesystem. The workqueue still needs to be
	 * active in both cases, however, because it is used for inode reclaim
	 * during these times.  Use the s_umount semaphore to provide exclusion
	 * with unmount.
	 */
	if (down_read_trylock(&mp->m_super->s_umount)) {
		if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
			/* dgc: errors ignored here */
			if (mp->m_super->s_frozen == SB_UNFROZEN &&
			    xfs_log_need_covered(mp))
				error = xfs_fs_log_dummy(mp);
			else
				xfs_log_force(mp, 0);

			/* start pushing all the metadata that is currently
			 * dirty */
			xfs_ail_push_all(mp->m_ail);
		}
		up_read(&mp->m_super->s_umount);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}
Developer ID: AsherBond, Project: ceph-client, Lines: 39, Source: xfs_sync.c


Example 6: unuse_mm

static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
{
	struct vm_area_struct *vma;

	if (!down_read_trylock(&mm->mmap_sem)) {
		/*
		 * Activate page so shrink_cache is unlikely to unmap its
		 * ptes while lock is dropped, so swapoff can make progress.
		 */
		activate_page(page);
		unlock_page(page);
		down_read(&mm->mmap_sem);
		lock_page(page);
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma && unuse_vma(vma, entry, page))
			break;
	}
	up_read(&mm->mmap_sem);
	/*
	 * Currently unuse_mm cannot fail, but leave error handling
	 * at call sites for now, since we change it from time to time.
	 */
	return 0;
}
Developer ID: acassis, Project: emlinux-ssd1935, Lines: 26, Source: swapfile.c


Example 7: try_to_get_nonexclusive_access

/**
 * try_to_get_nonexclusive_access - try to get nonexclusive access to a file
 * @uf_info: unix file specific part of inode to obtain access to
 *
 * Non-blocking version of nonexclusive access obtaining.
 */
int try_to_get_nonexclusive_access(struct unix_file_info *uf_info)
{
	int result;

	result = down_read_trylock(&uf_info->latch);
	if (result)
		nea_grabbed(uf_info);
	return result;
}
Developer ID: mgross029, Project: android_kernel, Lines: 15, Source: tail_conversion.c


Example 8: l4x_evict_tasks

void l4x_evict_tasks(struct task_struct *exclude)
{
	struct task_struct *p;
	int cnt = 0;

	rcu_read_lock();
	for_each_process(p) {
		l4_cap_idx_t t;
		struct mm_struct *mm;

		if (p == exclude)
			continue;

		task_lock(p);
		mm = p->mm;
		if (!mm) {
			task_unlock(p);
			continue;
		}

		t = ACCESS_ONCE(mm->context.task);

		if (l4_is_invalid_cap(t)) {
			task_unlock(p);
			continue;
		}

		if (down_read_trylock(&mm->mmap_sem)) {
			struct vm_area_struct *vma;
			for (vma = mm->mmap; vma; vma = vma->vm_next)
				if (vma->vm_flags & VM_LOCKED) {
					t = L4_INVALID_CAP;
					break;
				}
			up_read(&mm->mmap_sem);
			if (!vma)
				if (cmpxchg(&mm->context.task, t, L4_INVALID_CAP) != t)
					t = L4_INVALID_CAP;
		} else
			t = L4_INVALID_CAP;

		task_unlock(p);

		if (!l4_is_invalid_cap(t)) {
			l4lx_task_delete_task(t);
			l4lx_task_number_free(t);

			if (++cnt > 10)
				break;
		}
	}
	rcu_read_unlock();

	if (cnt == 0)
		pr_info_ratelimited("l4x-evict: Found no process to free.\n");
}
Developer ID: michas2, Project: l4re-snapshot, Lines: 56, Source: process_gen.c


Example 9: cfs_access_process_vm

static int cfs_access_process_vm(struct task_struct *tsk,
				 struct mm_struct *mm,
				 unsigned long addr,
				 void *buf, int len, int write)
{
	/* Just copied from the kernel, for kernels that don't
	 * have access_process_vm() exported */
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	/* Avoid deadlocks on mmap_sem if called from sys_mmap_pgoff(),
	 * which is already holding mmap_sem for writes.  If some other
	 * thread gets the write lock in the meantime, this thread will
	 * block, but at least it won't deadlock on itself.  LU-1735 */
	if (down_read_trylock(&mm->mmap_sem) == 0)
		return -EDEADLK;

	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, rc, offset;
		void *maddr;

		rc = get_user_pages(tsk, mm, addr, 1,
				     write, 1, &page, &vma);
		if (rc <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);

	return buf - old_buf;
}
Developer ID: Keeper-of-the-Keys, Project: Lustre, Lines: 52, Source: linux-curproc.c


Example 10: foo_show

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
	char *buf)
{
	size_t count;

	/* down_read_trylock returns zero on failure */
	if (!down_read_trylock(&rw_sem))
		return -ERESTARTSYS;

	count = sprintf(buf, "%s\n", foo_kbuff);

	up_read(&rw_sem);
	return count;
}
Developer ID: rahulraghu94, Project: linux-device-drivers, Lines: 14, Source: rwlock.c
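
Example 10 shows only the reader side of a sysfs attribute pair. As a sketch of the matching writer side (assuming the rw_sem and foo_kbuff names from the example above, with foo_kbuff taken to be a fixed-size char array; this store handler is hypothetical, not code from the original project), a blocking down_write() would serialize updates against concurrent readers:

static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
	const char *buf, size_t count)
{
	/* Writers sleep until every reader has called up_read(). */
	down_write(&rw_sem);

	strncpy(foo_kbuff, buf, sizeof(foo_kbuff) - 1);
	foo_kbuff[sizeof(foo_kbuff) - 1] = '\0';	/* keep the buffer NUL-terminated */

	up_write(&rw_sem);
	return count;
}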


Example 11: ocfs2_readpage

static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
			     (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ret = AOP_TRUNCATED_PAGE;
		unlock_page(page);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	return ret;
}
Developer ID: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines: 49, Source: aops.c


Example 12: siw_qp_llp_data_ready

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
static void siw_qp_llp_data_ready(struct sock *sk, int bytes)
#else
static void siw_qp_llp_data_ready(struct sock *sk)
#endif
{
	struct siw_qp		*qp;

	read_lock(&sk->sk_callback_lock);

	if (unlikely(!sk->sk_user_data || !sk_to_qp(sk))) {
		dprint(DBG_ON, " No QP: %p\n", sk->sk_user_data);
		goto done;
	}
	qp = sk_to_qp(sk);

	if (down_read_trylock(&qp->state_lock)) {
		read_descriptor_t	rd_desc = {.arg.data = qp, .count = 1};

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (before tcp_read_sock)=%d, bytes=%x\n",
			QP_ID(qp), qp->attrs.state, bytes);
#else
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (before tcp_read_sock)=%d\n",
			QP_ID(qp), qp->attrs.state);
#endif

		if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
			/*
			 * Implements data receive operation during
			 * socket callback. TCP gracefully catches
			 * the case where there is nothing to receive
			 * (not calling siw_tcp_rx_data() then).
			 */
			tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (after tcp_read_sock)=%d, bytes=%x\n",
			QP_ID(qp), qp->attrs.state, bytes);
#else
		dprint(DBG_SK|DBG_RX, "(QP%d): "
			"state (after tcp_read_sock)=%d\n",
			QP_ID(qp), qp->attrs.state);
#endif

		up_read(&qp->state_lock);
	} else {
//......... some code omitted here .........
Developer ID: asaf-levy, Project: softiwarp, Lines: 47, Source: siw_qp.c


Example 13: ocfs2_readpages

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	int ret, err = -EIO;
	struct inode *inode = mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start;
	struct page *last;

	/*
	 * Use the nonblocking flag for the dlm code to avoid page
	 * lock inversion, but don't bother with retrying.
	 */
	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return err;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ocfs2_inode_unlock(inode, 0);
		return err;
	}

	/*
	 * Don't bother with inline-data. There isn't anything
	 * to read-ahead in that case anyway...
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_unlock;

	/*
	 * Check whether a remote node truncated this file - we just
	 * drop out in that case as it's not worth handling here.
	 */
	last = list_entry(pages->prev, struct page, lru);
	start = (loff_t)last->index << PAGE_CACHE_SHIFT;
	if (start >= i_size_read(inode))
		goto out_unlock;

	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
	up_read(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 0);

	return err;
}
Developer ID: 458941968, Project: mini2440-kernel-2.6.29, Lines: 55, Source: aops.c


Example 14: oom_reap_task_mm

/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!down_read_trylock(&mm->mmap_sem)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_sem for reading because it serializes against the
	 * down_write();up_write() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* failed to reap part of the address space. Try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	up_read(&mm->mmap_sem);

	return ret;
}
Developer ID: avagin, Project: linux, Lines: 45, Source: oom_kill.c


Example 15: mem_free_percent

static ssize_t mem_free_percent(void)
{
	unsigned long mem_used_pages = 0;
	u64 val = 0;
	int i = 0;
	struct zram *zram = NULL;
	struct zram_meta *meta = NULL;
	unsigned long total_zram_pages = totalram_pages*total_mem_usage_percent/100;

	for (i = 0; i < zram_get_num_devices(); i++) {

		zram = &zram_devices[i];
		if(!zram || !zram->disk)
		{
			continue;
		}

		if( !down_read_trylock(&zram->init_lock) )
			return -1;

		if(zram->init_done)
		{
			meta = zram->meta;
			if (meta && meta->mem_pool)
			{
				val += zs_get_total_pages(meta->mem_pool);
			}
		}

		up_read(&zram->init_lock);
	}

	mem_used_pages = val;

	pr_debug("ZRAM:totalram_pages:%lu,total_zram_pages:%lu,mem_used_pages:%lu\r\n", totalram_pages, total_zram_pages,mem_used_pages);

	return (mem_used_pages >= total_zram_pages) ? 0 : ((total_zram_pages - mem_used_pages)*100/total_zram_pages);
}
Developer ID: NhlalukoG, Project: android_kernel_samsung_goyave3g, Lines: 38, Source: zram_sysfs.c


Example 16: unuse_mm

static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
{
	struct vm_area_struct *vma;
	int ret = 0;

	if (!down_read_trylock(&mm->mmap_sem)) {
		/*
		 * Activate page so shrink_inactive_list is unlikely to unmap
		 * its ptes while lock is dropped, so swapoff can make progress.
		 */
		activate_page(page);
		unlock_page(page);
		down_read(&mm->mmap_sem);
		lock_page(page);
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
			break;
	}
	up_read(&mm->mmap_sem);
	return (ret < 0)? ret: 0;
}
Developer ID: deepikateriar, Project: Onlive-Source-Backup, Lines: 23, Source: swapfile.c


Example 17: process_get

static int
process_get(struct request_state* req)
{
	struct ub_entry* e;
	struct sk_buff* skb;
	
	while (!down_read_trylock(&rwlock))
		continue;
	e = ub_cache_find(req->key, req->len_key);

	if (!e)
	{
		req->err = -EUBKEYNOTFOUND;
		up_read(&rwlock);
		return req->err;
	}

	/* If we found a suitable ub_entry* e,
	   it will contain the skb to be emitted on the wire.
	   Clone the copy from the hash table under the lock so that headers can be added
	   and responsibility for it can be handed over to the NIC during the send process.
	   This keeps the critical region under the read lock as short and contained as possible. */
	skb = skb_clone(e->skb, GFP_ATOMIC);
	if (unlikely(!skb))
	{
		req->err = -EUBKEYNOTFOUND;
		up_read(&rwlock);
		return req->err;
	}
	up_read(&rwlock);

	/* We should have found an entry, and this means we have a pointer to an skb
	   within the ub_entry struct which we can now use to send directly on the 
	   wire (after pushing some headers on the front). */
	req->skb_tx = skb;
	return 0;
}
Developer ID: jonathanmarvens, Project: unbuckle, Lines: 37, Source: core.c


Example 18: cpufreq_interactive_timer

static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;
	unsigned long mod_min_sample_time;
	int i, max_load;
	unsigned int max_freq;
	unsigned int boosted_freq;
	struct cpufreq_interactive_cpuinfo *picpu;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	pcpu->last_evaluated_jiffy = get_jiffies_64();
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	pcpu->prev_load = cpu_load;
	boosted = boost_val || now < boostpulse_endtime;
	boosted_freq = max(hispeed_freq, pcpu->policy->min);

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < boosted_freq) {
			new_freq = boosted_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq > freq_calc_thresh)
				new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < boosted_freq)
				new_freq = boosted_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);

		if (new_freq > freq_calc_thresh)
			new_freq = pcpu->policy->max * cpu_load / 100;

		if (sync_freq && new_freq < sync_freq) {

			max_load = 0;
			max_freq = 0;

			for_each_online_cpu(i) {
				picpu = &per_cpu(cpuinfo, i);

				if (i == data || picpu->prev_load <
						up_threshold_any_cpu_load)
					continue;

				max_load = max(max_load, picpu->prev_load);
				max_freq = max(max_freq, picpu->target_freq);
			}

			if (max_freq > up_threshold_any_cpu_freq ||
				max_load >= up_threshold_any_cpu_load)
				new_freq = sync_freq;
		}
	}
//......... some code omitted here .........
Developer ID: lurepheonix, Project: kernel_asus_a500cg, Lines: 81, Source: cpufreq_interactive_pro.c


Example 19: do_page_fault

static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm  = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	if (esr & ESR_LNX_EXEC) {
		vm_flags = VM_EXEC;
	} else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	}

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
				      addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
				      addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
			 * starvation.
			 */
			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			mm_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
//......... some code omitted here .........
Developer ID: Deepflex, Project: android_kernel_elephone_p9000, Lines: 101, Source: fault.c


Example 20: __oom_reap_task

static bool __oom_reap_task(struct task_struct *tsk)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	struct mm_struct *mm = NULL;
	struct task_struct *p;
	struct zap_details details = {.check_swap_entries = true,
				      .ignore_dirty = true};
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * __oom_reap_task		exit_mm
	 *   atomic_inc_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	/*
	 * Make sure we find the associated mm_struct even when the particular
	 * thread has already terminated and cleared its mm.
	 * We might race with the exit path, so consider our work done if there
	 * is no mm.
	 */
	p = find_lock_task_mm(tsk);
	if (!p)
		goto unlock_oom;
	mm = p->mm;
	atomic_inc(&mm->mm_users);
	task_unlock(p);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		goto unlock_oom;
	}

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (is_vm_hugetlb_page(vma))
			continue;

		/*
		 * mlocked VMAs require explicit munlocking before unmap.
		 * Let's keep it simple here and skip such VMAs.
		 */
		if (vma->vm_flags & VM_LOCKED)
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 &details);
	}
	tlb_finish_mmu(&tlb, 0, -1);
	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	/*
	 * This task can be safely ignored because we cannot do much more
	 * to release its memory.
	 */
	set_bit(MMF_OOM_REAPED, &mm->flags);
unlock_oom:
	mutex_unlock(&oom_lock);
	/*
	 * Drop our reference but make sure the mmput slow path is called from a
	 * different context because we shouldn't risk getting stuck there and
	 * put the oom_reaper out of the way.
	 */
	if (mm)
		mmput_async(mm);
	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;

//......... some code omitted here .........
Developer ID: 513855417, Project: linux, Lines: 101, Source: oom_kill.c



Note: the down_read_trylock examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's license before redistributing or reusing the code; do not repost without permission.

