/*
* receive a message from an RxRPC socket
* - we need to be careful about two or more threads calling recvmsg
* simultaneously
*/
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct rxrpc_skb_priv *sp;
struct rxrpc_call *call = NULL, *continue_call = NULL;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct sk_buff *skb;
long timeo;
int copy, ret, ullen, offset, copied = 0;
u32 abort_code;
DEFINE_WAIT(wait);
_enter(",,,%zu,%d", len, flags);
if (flags & (MSG_OOB | MSG_TRUNC))
return -EOPNOTSUPP;
ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);
timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
msg->msg_flags |= MSG_MORE;
lock_sock(&rx->sk);
for (;;) {
/* return immediately if a client socket has no outstanding
* calls */
if (RB_EMPTY_ROOT(&rx->calls)) {
if (copied)
goto out;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
release_sock(&rx->sk);
if (continue_call)
rxrpc_put_call(continue_call);
return -ENODATA;
}
}
/* get the next message on the Rx queue */
skb = skb_peek(&rx->sk.sk_receive_queue);
if (!skb) {
/* nothing remains on the queue */
if (copied &&
(msg->msg_flags & MSG_PEEK || timeo == 0))
goto out;
/* wait for a message to turn up */
release_sock(&rx->sk);
prepare_to_wait_exclusive(rx->sk), &wait,
TASK_INTERRUPTIBLE);
ret = sock_error(&rx->sk);
if (ret)
goto wait_error;
if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
if (signal_pending(current))
goto wait_interrupted;
timeo = schedule_timeout(timeo);
}
finish_wait(sk_sleep(rx->sk), &wait);
lock_sock(&rx->sk);
continue;
}
peek_next_packet:
sp = rxrpc_skb(skb);
call = sp->call;
ASSERT(call != NULL);
_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);
/* make sure we wait for the state to be updated in this call */
spin_lock_bh(&call->lock);
spin_unlock_bh(&call->lock);
if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
_debug("packet from released call");
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
continue;
}
/* determine whether to continue last data receive */
if (continue_call) {
_debug("maybe cont");
if (call != continue_call ||
skb->mark != RXRPC_SKB_MARK_DATA) {
release_sock(&rx->sk);
rxrpc_put_call(continue_call);
_leave(" = %d [noncont]", copied);
return copied;
}
}
//......... part of the code omitted here .........
/*
 * lnet_acceptor - acceptor kernel thread main loop.
 *
 * Listens on the LNet acceptor port, then accepts incoming connections,
 * validates the peer, reads the connection-request magic and hands the
 * socket to lnet_accept().
 *
 * @arg: non-zero (cast through long_ptr_t) when connections are only
 *       accepted from privileged (reserved) source ports.
 *
 * Returns the listen-setup error code when startup fails, 0 on a clean
 * shutdown requested via pta_shutdown.
 */
static int
lnet_acceptor(void *arg)
{
	struct socket *newsock;
	int rc;
	__u32 magic;
	__u32 peer_ip;
	int peer_port;
	int secure = (int)((long_ptr_t)arg);

	LASSERT(lnet_acceptor_state.pta_sock == NULL);

	/* Service thread: must not be killed by signals. */
	cfs_block_allsigs();

	rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock, 0, accept_port,
			      accept_backlog);
	if (rc != 0) {
		if (rc == -EADDRINUSE)
			LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n",
					   accept_port);
		else
			LCONSOLE_ERROR_MSG(0x123, "Can't start acceptor on port %d: unexpected error %d\n",
					   accept_port, rc);
		lnet_acceptor_state.pta_sock = NULL;
	} else {
		LCONSOLE(0, "Accept %s, port %d\n", accept_type, accept_port);
	}

	/* set init status and unblock parent (which waits on pta_signal) */
	lnet_acceptor_state.pta_shutdown = rc;
	complete(&lnet_acceptor_state.pta_signal);

	if (rc != 0)
		return rc;

	while (!lnet_acceptor_state.pta_shutdown) {
		rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock);
		if (rc != 0) {
			if (rc != -EAGAIN) {
				/* Unexpected accept failure: pause for a
				 * second to avoid spinning on a persistent
				 * error. */
				CWARN("Accept error %d: pausing...\n", rc);
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(cfs_time_seconds(1));
			}
			continue;
		}

		/* the acceptor thread may have been woken up for shutdown
		 * while it was blocked in accept */
		if (lnet_acceptor_state.pta_shutdown) {
			sock_release(newsock);
			break;
		}

		rc = lnet_sock_getaddr(newsock, 1, &peer_ip, &peer_port);
		if (rc != 0) {
			CERROR("Can't determine new connection's address\n");
			goto failed;
		}

		/* In secure mode only privileged (reserved) source ports
		 * are trusted. */
		if (secure && peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
			CERROR("Refusing connection from %pI4h: insecure port %d\n",
			       &peer_ip, peer_port);
			goto failed;
		}

		rc = lnet_sock_read(newsock, &magic, sizeof(magic),
				    accept_timeout);
		if (rc != 0) {
			CERROR("Error %d reading connection request from %pI4h\n",
			       rc, &peer_ip);
			goto failed;
		}

		/* lnet_accept() takes over the socket on success. */
		rc = lnet_accept(newsock, magic);
		if (rc != 0)
			goto failed;
		continue;

failed:
		sock_release(newsock);
	}

	sock_release(lnet_acceptor_state.pta_sock);
	lnet_acceptor_state.pta_sock = NULL;

	CDEBUG(D_NET, "Acceptor stopping\n");

	/* unblock lnet_acceptor_stop() */
	complete(&lnet_acceptor_state.pta_signal);
	return 0;
}
/* Author: DenisLug, project: mptcp, lines: 93, source file: acceptor.c */
/* Example 6: init_port */
/*
 * init_port - reserve the serial port's I/O (or memory-mapped) region,
 * initialize the UART and timing parameters, and auto-detect receiver
 * polarity when 'sense' was left at -1.
 *
 * Returns 0 on success, -EBUSY when the region is already claimed
 * (typically by the regular serial driver).
 */
static int init_port(void)
{
	int i, nlow, nhigh;

	/* Reserve io region. */
#if defined(LIRC_ALLOW_MMAPPED_IO)
	/* Future MMAP-Developers: Attention!
	   For memory mapped I/O you *might* need to use ioremap() first,
	   for the NSLU2 it's done in boot code. */
	if (((iommap != 0)
	     && (request_mem_region(iommap, 8 << ioshift,
				    LIRC_DRIVER_NAME) == NULL))
	    || ((iommap == 0)
		&& (request_region(io, 8, LIRC_DRIVER_NAME) == NULL)))
#else
	if (request_region(io, 8, LIRC_DRIVER_NAME) == NULL)
#endif
	{
		printk(KERN_ERR LIRC_DRIVER_NAME
		       ": port %04x already in use\n", io);
		printk(KERN_WARNING LIRC_DRIVER_NAME
		       ": use 'setserial /dev/ttySX uart none'\n");
		printk(KERN_WARNING LIRC_DRIVER_NAME
		       ": or compile the serial port driver as module and\n");
		printk(KERN_WARNING LIRC_DRIVER_NAME
		       ": make sure this module is loaded first\n");
		return -EBUSY;
	}

	hardware_init_port();

	/* Initialize pulse/space widths */
	init_timing_params(duty_cycle, freq);

	/* If pin is high, then this must be an active low receiver. */
	if (sense == -1) {
		/* wait 1/2 sec for the power supply */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 2);

		/* probe 9 times every 0.04s, collect "votes" for
		   active high/low */
		nlow = 0;
		nhigh = 0;
		for (i = 0; i < 9; i++) {
			if (sinp(UART_MSR) & hardware[type].signal_pin)
				nlow++;
			else
				nhigh++;
			/*
			 * schedule_timeout() only sleeps when the task state
			 * is not TASK_RUNNING, and each return leaves us
			 * RUNNING again — so the state must be reset before
			 * every sleep or the probes all happen back-to-back.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 25);
		}
		sense = (nlow >= nhigh ? 1 : 0);
		printk(KERN_INFO LIRC_DRIVER_NAME ": auto-detected active "
		       "%s receiver\n", sense ? "low" : "high");
	} else {
		printk(KERN_INFO LIRC_DRIVER_NAME ": Manually using active "
		       "%s receiver\n", sense ? "low" : "high");
	}

	return 0;
}
/*
 * cifs_oplock_thread - kernel thread that services queued oplock breaks.
 *
 * Periodically drains GlobalOplock_Q: for each entry it flushes dirty
 * pages of the affected inode, invalidates cached data when read caching
 * is no longer permitted, and sends an oplock release to the server
 * (unless the session needs reconnection, in which case the server has
 * already dropped the oplock).  Exits when a SIGTERM is pending.
 */
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16 netfid;
	int rc;

	daemonize("cifsoplockd");
	allow_signal(SIGTERM);

	oplockThread = current;
	do {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1 * HZ);
		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			/* nothing queued: sleep much longer before polling
			 * again */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39 * HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			if (oplock_item) {
				cFYI(1, ("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_sem across
				   the call */
				/* down(&inode->i_sem);*/
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if (CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* up(&inode->i_sem);*/
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));
				/* releasing a stale oplock after recent reconnection
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if session
				   to server still is disconnected since oplock
				   already released by the server in that case */
				if (pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */ , 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						0 /* wait flag */);
					cFYI(1, ("Oplock release rc = %d ", rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
		}
	} while (!signal_pending(current));

	/*
	 * Clear the global thread pointer BEFORE exiting:
	 * complete_and_exit() never returns, so assigning after it (as the
	 * original code did) was unreachable dead code and left
	 * 'oplockThread' pointing at a dead task.
	 */
	oplockThread = NULL;
	complete_and_exit(&cifs_oplock_exited, 0);
}
static int
zpios_thread_main(void *data)
{
thread_data_t *thr = (thread_data_t *)data;
run_args_t *run_args = thr->run_args;
zpios_time_t t;
dmu_obj_t obj;
__u64 offset;
__u32 chunk_size;
zpios_region_t *region;
char *buf;
unsigned int random_int;
int chunk_noise = run_args->chunk_noise;
int chunk_noise_tmp = 0;
int thread_delay = run_args->thread_delay;
int thread_delay_tmp = 0;
int i, rc = 0;
if (chunk_noise) {
get_random_bytes(&random_int, sizeof (unsigned int));
chunk_noise_tmp = (random_int % (chunk_noise * 2))-chunk_noise;
}
/*
* It's OK to vmem_alloc() this memory because it will be copied
* in to the slab and pointers to the slab copy will be setup in
* the bio when the IO is submitted. This of course is not ideal
* since we want a zero-copy IO path if possible. It would be nice
* to have direct access to those slab entries.
*/
chunk_size = run_args->chunk_size + chunk_noise_tmp;
buf = (char *)vmem_alloc(chunk_size, KM_SLEEP);
ASSERT(buf);
/* Trivial data verification pattern for now. */
if (run_args->flags & DMU_VERIFY)
memset(buf, 'z', chunk_size);
/* Write phase */
mutex_enter(&thr->lock);
thr->stats.wr_time.start = zpios_timespec_now();
mutex_exit(&thr->lock);
while (zpios_get_work_item(run_args, &obj, &offset,
&chunk_size, ®ion, DMU_WRITE)) {
if (thread_delay) {
get_random_bytes(&random_int, sizeof (unsigned int));
thread_delay_tmp = random_int % thread_delay;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(thread_delay_tmp); /* In jiffies */
}
t.start = zpios_timespec_now();
rc = zpios_dmu_write(run_args, obj.os, obj.obj,
offset, chunk_size, buf);
t.stop = zpios_timespec_now();
t.delta = zpios_timespec_sub(t.stop, t.start);
if (rc) {
zpios_print(run_args->file, "IO error while doing "
"dmu_write(): %d\n", rc);
break;
}
mutex_enter(&thr->lock);
thr->stats.wr_data += chunk_size;
thr->stats.wr_chunks++;
thr->stats.wr_time.delta = zpios_timespec_add(
thr->stats.wr_time.delta, t.delta);
mutex_exit(&thr->lock);
mutex_enter(®ion->lock);
region->stats.wr_data += chunk_size;
region->stats.wr_chunks++;
region->stats.wr_time.delta = zpios_timespec_add(
region->stats.wr_time.delta, t.delta);
/* First time region was accessed */
if (region->init_offset == offset)
region->stats.wr_time.start = t.start;
mutex_exit(®ion->lock);
}
mutex_enter(&run_args->lock_ctl);
run_args->threads_done++;
mutex_exit(&run_args->lock_ctl);
mutex_enter(&thr->lock);
thr->rc = rc;
thr->stats.wr_time.stop = zpios_timespec_now();
mutex_exit(&thr->lock);
wake_up(&run_args->waitq);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
/* Check if we should exit */
mutex_enter(&thr->lock);
rc = thr->rc;
//......... part of the code omitted here .........
/* Author: akatrevorjay, project: zfs, lines: 101, source file: pios.c */
/* Example 14: vmw_fallback_wait */
/**
 * vmw_fallback_wait - poll-based fallback wait for a fence sequence (used
 * when a fence IRQ is not available).
 * @dev_priv:      device private state
 * @lazy:          sleep one jiffy between polls instead of busy-spinning
 * @fifo_idle:     wait for FIFO idle and block command submission meanwhile
 * @sequence:      fence sequence number to wait for
 * @interruptible: allow the wait to be interrupted by signals
 * @timeout:       maximum wait in jiffies
 *
 * Returns 0 normally, or -ERESTARTSYS when interrupted by a signal.
 * NOTE(review): on device-lockup timeout an error is logged but 0 is
 * still returned — confirm callers expect that.
 */
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t sequence,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	/* Pick the exit predicate: FIFO idleness vs. fence signalled. */
	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_fence_signaled;

	/**
	 * Block command submission while waiting for idle.
	 */
	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	/* Snapshot the current fence sequence before we start polling. */
	signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
	ret = 0;

	for (;;) {
		/* prepare_to_wait() re-arms the task state on every
		 * iteration, so the schedule_timeout() below really
		 * sleeps. */
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, sequence))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */
			/* Busy-spin path: yield the CPU every 16th poll. */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		/* FIFO went idle: write the snapshotted sequence into the
		 * FIFO fence register so it reads as signalled. */
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);
	return ret;
}
/* Please leave a comment */