This article collects typical usage examples of the C++ function req_put. If you are struggling with questions such as what exactly req_put does and how to use it, the curated code examples below may help.
A total of 20 code examples of req_put are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
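The req_put function itself is not reproduced on this page, so as orientation for the call sites below, here is a minimal sketch of what a proxy-style req_put typically does, inferred from the twemproxy/dynomite examples that follow. The names msg->peer, rsp_put, and msg_put are taken from those examples; the body is an illustrative assumption, not the actual nc_request.c implementation.
/*
 * Hypothetical sketch: release a request message and, if present, its
 * paired response back to the message pool. The real twemproxy req_put()
 * in nc_request.c may differ in detail.
 */
static void
req_put(struct msg *msg)
{
    struct msg *pmsg;                  /* paired response, if one exists */

    ASSERT(msg->request);

    pmsg = msg->peer;
    if (pmsg != NULL) {
        /* break the request <-> response link before freeing either side */
        msg->peer = NULL;
        pmsg->peer = NULL;
        rsp_put(pmsg);
    }

    msg_put(msg);                      /* return the request to the pool */
}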
Example 1: req_recv_next
struct msg *
req_recv_next(struct context *ctx, struct conn *conn, bool alloc)
{
struct msg *msg;
ASSERT(conn->client && !conn->proxy);
if (conn->eof) {
msg = conn->rmsg;
/* client sent eof before sending the entire request */
if (msg != NULL) {
conn->rmsg = NULL;
ASSERT(msg->peer == NULL);
ASSERT(msg->request && !msg->done);
log_error("eof c %d discarding incomplete req %"PRIu64" len "
"%"PRIu32"", conn->sd, msg->id, msg->mlen);
req_put(msg);
}
/*
* TCP half-close enables the client to terminate its half of the
* connection (i.e. the client no longer sends data), but it still
* is able to receive data from the proxy. The proxy closes its
* half (by sending the second FIN) when the client has no
* outstanding requests
*/
if (!conn->active(conn)) {
conn->done = 1;
log_debug(LOG_INFO, "c %d is done", conn->sd);
}
return NULL;
}
msg = conn->rmsg;
if (msg != NULL) {
ASSERT(msg->request);
return msg;
}
if (!alloc) {
return NULL;
}
msg = req_get(conn);
if (msg != NULL) {
conn->rmsg = msg;
}
return msg;
}
Developer ID: wooparadog, Project: twemproxy, Lines of code: 54, Source file: nc_request.c
Example 2: mtp_tunnel_complete_in
static void mtp_tunnel_complete_in(struct usb_endpoint *ept, struct usb_request *req)
{
struct mtp_tunnel_context *ctxt = req->context;
if (req->status != 0)
ctxt->error = 1;
req_put(ctxt, &ctxt->tx_idle, req);
wake_up(&ctxt->write_wq);
}
Developer ID: Soaa-, Project: -lightspeed, Lines of code: 11, Source file: mtp_tunnel.c
Example 3: acc_complete_in
static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
{
struct acc_dev *dev = _acc_dev;
if (req->status == -ESHUTDOWN) {
pr_debug("acc_complete_in set disconnected");
acc_set_disconnected(dev);
}
req_put(dev, &dev->tx_idle, req);
wake_up(&dev->write_wq);
}
Developer ID: moonlightly, Project: android_kernel_htc_primou, Lines of code: 12, Source file: f_accessory.c
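Examples 2, 3, 9, 10, and 18 come from Linux USB gadget function drivers, where req_put plays a different role: rather than freeing a proxy request message, it parks an idle struct usb_request on a driver-owned list under a spinlock, and req_get takes one back off. The sketch below illustrates that idiom; struct gadget_ctx and its lock field are hypothetical stand-ins for the drivers' own context structures (acc_dev, mtp_tunnel_context, and so on), not code from this page.
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/usb/gadget.h>

struct gadget_ctx {                     /* hypothetical minimal context */
    spinlock_t lock;
};

/* Park an idle usb_request on the given list (e.g. tx_idle or rx_reqs). */
static void req_put(struct gadget_ctx *ctxt, struct list_head *head,
                    struct usb_request *req)
{
    unsigned long flags;

    spin_lock_irqsave(&ctxt->lock, flags);
    list_add_tail(&req->list, head);
    spin_unlock_irqrestore(&ctxt->lock, flags);
}

/* Take a request off the list, or return NULL if none is available. */
static struct usb_request *req_get(struct gadget_ctx *ctxt,
                                   struct list_head *head)
{
    unsigned long flags;
    struct usb_request *req = NULL;

    spin_lock_irqsave(&ctxt->lock, flags);
    if (!list_empty(head)) {
        req = list_first_entry(head, struct usb_request, list);
        list_del(&req->list);
    }
    spin_unlock_irqrestore(&ctxt->lock, flags);
    return req;
}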
Example 4: rsp_make_error
static struct msg *
rsp_make_error(struct context *ctx, struct conn *conn, struct msg *msg)
{
struct msg *pmsg; /* peer message (response) */
struct msg *cmsg, *nmsg; /* current and next message (request) */
uint64_t id;
err_t err;
ASSERT(conn->client && !conn->proxy);
ASSERT(msg->request && req_error(conn, msg));
ASSERT(msg->owner == conn);
id = msg->frag_id;
if (id != 0) {
for (err = 0, cmsg = TAILQ_NEXT(msg, c_tqe);
cmsg != NULL && cmsg->frag_id == id;
cmsg = nmsg) {
nmsg = TAILQ_NEXT(cmsg, c_tqe);
/* dequeue request (error fragment) from client outq */
conn->dequeue_outq(ctx, conn, cmsg);
if (err == 0 && cmsg->err != 0) {
err = cmsg->err;
}
req_put(cmsg);
}
} else {
err = msg->err;
}
pmsg = msg->peer;
if (pmsg != NULL) {
ASSERT(!pmsg->request && pmsg->peer == msg);
msg->peer = NULL;
pmsg->peer = NULL;
rsp_put(pmsg);
}
#if 1 //shenzheng 2014-12-4 common
//attention: the new error macro we defined must be a negative number.
if(err >= 0)
{
#endif
return msg_get_error(conn->redis, err);
#if 1 //shenzheng 2014-12-4 common
}
else
{
return msg_get_error_other(conn->redis, err);
}
#endif
}
Developer ID: 7758285, Project: twemproxy-vip, Lines of code: 52, Source file: nc_response.c
Example 5: rsp_filter
static bool
rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
struct msg *pmsg;
ASSERT(!conn->client && !conn->proxy);
if(conn->is_Select_Msg){
conn->is_Select_Msg = 0;
rsp_put(msg);
log_debug(LOG_VERB," select success rsp %"PRIu64" len %"PRIu32" on s %d ", msg->id,
msg->mlen, conn->sd);
//ignore first response
return true;
}
if (msg_empty(msg)) {
ASSERT(conn->rmsg == NULL);
log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id,
conn->sd);
rsp_put(msg);
return true;
}
pmsg = TAILQ_FIRST(&conn->omsg_q);
if (pmsg == NULL) {
log_error("filter stray rsp %"PRIu64" len %"PRIu32" on s %d", msg->id,
msg->mlen, conn->sd);
rsp_put(msg);
errno = EINVAL;
conn->err = errno;
return true;
}
ASSERT(pmsg->peer == NULL);
ASSERT(pmsg->request && !pmsg->done);
if (pmsg->swallow) {
conn->dequeue_outq(ctx, conn, pmsg);
pmsg->done = 1;
log_debug(LOG_INFO, "swallow rsp %"PRIu64" len %"PRIu32" of req "
"%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id,
conn->sd);
rsp_put(msg);
req_put(pmsg);
return true;
}
return false;
}
Developer ID: Niteesh, Project: twemproxy, Lines of code: 51, Source file: nc_response.c
Example 6: req_filter
static bool
req_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
ASSERT(conn->client && !conn->proxy);
if (msg_empty(msg)) {
ASSERT(conn->rmsg == NULL);
log_debug(LOG_VERB, "filter empty req %"PRIu64" from c %d", msg->id,
conn->sd);
req_put(msg);
return true;
}
/*
* Handle "quit\r\n", which is the protocol way of doing a
* passive close
*/
if (msg->quit) {
ASSERT(conn->rmsg == NULL);
log_debug(LOG_INFO, "filter quit req %"PRIu64" from c %d", msg->id,
conn->sd);
conn->eof = 1;
conn->recv_ready = 0;
req_put(msg);
return true;
}
/*
* if this conn is not authenticated, we will mark it as noforward,
* and handle it in the redis_reply handler.
*
*/
if (conn->need_auth) {
msg->noforward = 1;
}
return false;
}
Developer ID: YongMan, Project: r3proxy, Lines of code: 38, Source file: nc_request.c
Example 7: rsp_filter
static bool
rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
struct msg *pmsg;
ASSERT(!conn->client && !conn->proxy);
if (msg_empty(msg)) {
ASSERT(conn->rmsg == NULL);
log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id,
conn->sd);
rsp_put(msg);
return true;
}
pmsg = TAILQ_FIRST(&conn->omsg_q);
if (pmsg == NULL) {
log_debug(LOG_VERB, "filter stray rsp %"PRIu64" len %"PRIu32" on s %d",
msg->id, msg->mlen, conn->sd);
rsp_put(msg);
return true;
}
if (pmsg->noreply) {
conn->dequeue_outq(ctx, conn, pmsg);
rsp_put(pmsg);
rsp_put(msg);
return true;
}
ASSERT(pmsg->peer == NULL);
ASSERT(pmsg->request && !pmsg->done);
if (pmsg->swallow) {
conn->dequeue_outq(ctx, conn, pmsg);
pmsg->done = 1;
if (log_loggable(LOG_DEBUG)) {
log_debug(LOG_DEBUG, "swallow rsp %"PRIu64" len %"PRIu32" of req "
"%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id,
conn->sd);
}
rsp_put(msg);
req_put(pmsg);
return true;
}
return false;
}
Developer ID: amimimor, Project: dynomite, Lines of code: 50, Source file: dyn_response.c
Example 8: dnode_rsp_swallow
static void
dnode_rsp_swallow(struct context *ctx, struct conn *peer_conn,
struct msg *req, struct msg *rsp)
{
peer_conn->dequeue_outq(ctx, peer_conn, req);
req->done = 1;
log_debug(LOG_VERB, "conn %p swallow %p", peer_conn, req);
if (rsp) {
log_debug(LOG_INFO, "dyn: swallow rsp %"PRIu64" len %"PRIu32" of req "
"%"PRIu64" on s %d", rsp->id, rsp->mlen, req->id,
peer_conn->sd);
dnode_rsp_put(rsp);
}
req_put(req);
}
Developer ID: hushi55, Project: dynomite, Lines of code: 15, Source file: dyn_dnode_response.c
Example 9: mtp_in_complete
static void mtp_in_complete(struct usb_ep *ep, struct usb_request *req)
{
mtp_debug("status is %d %p %d\n", req->status, req, req->actual);
if (req->status == -ECONNRESET)
usb_ep_fifo_flush(ep);
if (req->status != 0) {
g_usb_mtp_context.error = 1;
mtp_err("status is %d %p len=%d\n",
req->status, req, req->actual);
}
req_put(&g_usb_mtp_context.tx_reqs, req);
wake_up(&g_usb_mtp_context.tx_wq);
}
Developer ID: atarii, Project: BDA-ACTV, Lines of code: 15, Source file: f_mtp.c
Example 10: start_out_receive
static void start_out_receive(void)
{
struct usb_request *req;
int ret;
/* if we have idle read requests, get them queued */
while ((req = req_get(&g_usb_mtp_context.rx_reqs))) {
req->length = BULK_BUFFER_SIZE;
ret = usb_ep_queue(g_usb_mtp_context.bulk_out, req, GFP_ATOMIC);
if (ret < 0) {
mtp_err("error %d\n", ret);
g_usb_mtp_context.error = 1;
req_put(&g_usb_mtp_context.rx_reqs, req);
}
}
}
Developer ID: atarii, Project: BDA-ACTV, Lines of code: 16, Source file: f_mtp.c
Example 11: rsp_filter
static bool
rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
struct msg *pmsg;
ASSERT(!conn->client && !conn->proxy);
if (msg_empty(msg)) {
ASSERT(conn->rmsg == NULL);
log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id,
conn->sd);
rsp_put(msg);
return true;
}
pmsg = TAILQ_FIRST(&conn->omsg_q);
if (pmsg == NULL) {
log_debug(LOG_ERR, "filter stray rsp %"PRIu64" len %"PRIu32" on s %d",
msg->id, msg->mlen, conn->sd);
rsp_put(msg);
return true;
}
ASSERT(pmsg->peer == NULL);
ASSERT(pmsg->request && !pmsg->done);
/* establish msg <-> pmsg (response <-> request) link */
msg->peer = pmsg;
pmsg->peer = msg;
if (pmsg->swallow) {
if (pmsg->pre_swallow != NULL) {
pmsg->pre_swallow(ctx, conn, msg);
}
conn->dequeue_outq(ctx, conn, pmsg);
pmsg->done = 1;
log_debug(LOG_INFO, "swallow rsp %"PRIu64" len %"PRIu32" of req "
"%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id,
conn->sd);
req_put(pmsg);
return true;
}
return false;
}
Developer ID: crask, Project: redisproxy, Lines of code: 47, Source file: nc_response.c
Example 12: req_forward
static void
req_forward(struct context *ctx, struct conn *c_conn, struct msg *msg)
{
rstatus_t status;
struct conn *s_conn;
struct server_pool *pool;
uint8_t *key;
uint32_t keylen;
struct keypos *kpos;
ASSERT(c_conn->client && !c_conn->proxy);
/* enqueue message (request) into client outq, if response is expected */
if (!msg->noreply) {
c_conn->enqueue_outq(ctx, c_conn, msg);
}
pool = c_conn->owner;
ASSERT(array_n(msg->keys) > 0);
kpos = array_get(msg->keys, 0);
key = kpos->start;
keylen = (uint32_t)(kpos->end - kpos->start);
s_conn = msg->routing(ctx, pool, msg, key, keylen);
if (s_conn == NULL) {
req_forward_error(ctx, c_conn, msg);
return;
}
ASSERT(!s_conn->client && !s_conn->proxy);
status = req_enqueue(ctx, s_conn, c_conn, msg);
if (status != NC_OK) {
req_put(msg);
return;
}
req_forward_stats(ctx, s_conn->owner, msg);
log_debug(LOG_VERB, "forward from c %d to s %d req %"PRIu64" len %"PRIu32
" type %d with key '%.*s'", c_conn->sd, s_conn->sd, msg->id,
msg->mlen, msg->type, keylen, key);
return;
}
Developer ID: YongMan, Project: r3proxy, Lines of code: 44, Source file: nc_request.c
Example 13: rsp_make_error
static struct msg *
rsp_make_error(struct context *ctx, struct conn *conn, struct msg *msg)
{
struct msg *pmsg; /* peer message (response) */
struct msg *cmsg, *nmsg; /* current and next message (request) */
uint64_t id;
err_t err;
ASSERT((conn->type == CONN_CLIENT) ||
(conn->type == CONN_DNODE_PEER_CLIENT));
ASSERT(msg->request && req_error(conn, msg));
ASSERT(msg->owner == conn);
id = msg->frag_id;
if (id != 0) {
for (err = 0, cmsg = TAILQ_NEXT(msg, c_tqe);
cmsg != NULL && cmsg->frag_id == id;
cmsg = nmsg) {
nmsg = TAILQ_NEXT(cmsg, c_tqe);
/* dequeue request (error fragment) from client outq */
conn_dequeue_outq(ctx, conn, cmsg);
if (err == 0 && cmsg->err != 0) {
err = cmsg->err;
}
req_put(cmsg);
}
} else {
err = msg->err;
}
pmsg = msg->selected_rsp;
if (pmsg != NULL) {
ASSERT(!pmsg->request && pmsg->peer == msg);
msg->selected_rsp = NULL;
pmsg->peer = NULL;
rsp_put(pmsg);
}
return msg_get_error(conn, msg->dyn_error, err);
}
Developer ID: DynomiteDB, Project: dynomite, Lines of code: 42, Source file: dyn_response.c
Example 14: rsp_make_error
static struct msg *
rsp_make_error(struct context *ctx, struct conn *conn, struct msg *msg)
{
struct msg *pmsg; /* peer message (response) */
struct msg *cmsg, *nmsg; /* current and next message (request) */
uint64_t id;
err_t err;
ASSERT(conn->client && !conn->proxy);
ASSERT(msg->request && req_error(conn, msg));
ASSERT(msg->owner == conn);
id = msg->frag_id;
/* discard every msg that belongs to the same fragment */
if (id != 0) {
for (err = 0, cmsg = TAILQ_NEXT(msg, c_tqe);
cmsg != NULL && cmsg->frag_id == id;
cmsg = nmsg) {
nmsg = TAILQ_NEXT(cmsg, c_tqe);
/* dequeue request (error fragment) from client outq */
conn->dequeue_outq(ctx, conn, cmsg);
if (err == 0 && cmsg->err != 0) {
err = cmsg->err;
}
req_put(cmsg);
}
} else {
err = msg->err;
}
pmsg = msg->peer;
if (pmsg != NULL) {
ASSERT(!pmsg->request && pmsg->peer == msg);
msg->peer = NULL;
pmsg->peer = NULL;
rsp_put(pmsg);
}
return msg_get_error(conn->redis, err);
}
Developer ID: XiaoxiaoxiaoCoder, Project: twemproxy, Lines of code: 42, Source file: nc_response.c
Example 15: dnode_rsp_filter
static bool
dnode_rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
struct msg *pmsg;
ASSERT(!conn->dnode_client && !conn->dnode_server);
if (msg_empty(msg)) {
ASSERT(conn->rmsg == NULL);
log_debug(LOG_VERB, "dyn: filter empty rsp %"PRIu64" on s %d", msg->id,
conn->sd);
dnode_rsp_put(msg);
return true;
}
pmsg = TAILQ_FIRST(&conn->omsg_q);
if (pmsg == NULL) {
log_debug(LOG_INFO, "dyn: filter stray rsp %"PRIu64" len %"PRIu32" on s %d noreply %d",
msg->id, msg->mlen, conn->sd, msg->noreply);
dnode_rsp_put(msg);
return true;
}
ASSERT(pmsg->peer == NULL);
ASSERT(pmsg->request && !pmsg->done);
if (pmsg->swallow) {
conn->dequeue_outq(ctx, conn, pmsg);
pmsg->done = 1;
log_debug(LOG_INFO, "dyn: swallow rsp %"PRIu64" len %"PRIu32" of req "
"%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id,
conn->sd);
dnode_rsp_put(msg);
req_put(pmsg);
return true;
}
return false;
}
Developer ID: mbrukman, Project: netflix-dynomite, Lines of code: 40, Source file: dyn_dnode_response.c
Example 16: server_close
void
server_close(struct context *ctx, struct conn *conn)
{
rstatus_t status;
struct msg *msg, *nmsg; /* current and next message */
struct conn *c_conn; /* peer client connection */
ASSERT(!conn->client && !conn->proxy);
server_close_stats(ctx, conn->owner, conn->err, conn->eof,
conn->connected);
if (conn->sd < 0) {
server_failure(ctx, conn->owner);
conn->unref(conn);
conn_put(conn);
return;
}
for (msg = TAILQ_FIRST(&conn->imsg_q); msg != NULL; msg = nmsg) {
nmsg = TAILQ_NEXT(msg, s_tqe);
/* dequeue the message (request) from server inq */
conn->dequeue_inq(ctx, conn, msg);
/*
* Don't send any error response, if
* 1. request is tagged as noreply or,
* 2. client has already closed its connection
*/
if (msg->swallow || msg->noreply) {
log_debug(LOG_INFO, "close s %d swallow req %"PRIu64" len %"PRIu32
" type %d", conn->sd, msg->id, msg->mlen, msg->type);
req_put(msg);
} else {
c_conn = msg->owner;
//ASSERT(c_conn->client && !c_conn->proxy);
msg->done = 1;
msg->error = 1;
msg->err = conn->err;
msg->dyn_error = STORAGE_CONNECTION_REFUSE;
if (req_done(c_conn, TAILQ_FIRST(&c_conn->omsg_q))) {
event_add_out(ctx->evb, msg->owner);
}
log_debug(LOG_INFO, "close s %d schedule error for req %"PRIu64" "
"len %"PRIu32" type %d from c %d%c %s", conn->sd, msg->id,
msg->mlen, msg->type, c_conn->sd, conn->err ? ':' : ' ',
conn->err ? strerror(conn->err): " ");
}
}
ASSERT(TAILQ_EMPTY(&conn->imsg_q));
for (msg = TAILQ_FIRST(&conn->omsg_q); msg != NULL; msg = nmsg) {
nmsg = TAILQ_NEXT(msg, s_tqe);
/* dequeue the message (request) from server outq */
conn->dequeue_outq(ctx, conn, msg);
if (msg->swallow) {
log_debug(LOG_INFO, "close s %d swallow req %"PRIu64" len %"PRIu32
" type %d", conn->sd, msg->id, msg->mlen, msg->type);
req_put(msg);
} else {
c_conn = msg->owner;
//ASSERT(c_conn->client && !c_conn->proxy);
msg->done = 1;
msg->error = 1;
msg->err = conn->err;
if (req_done(c_conn, TAILQ_FIRST(&c_conn->omsg_q))) {
event_add_out(ctx->evb, msg->owner);
}
log_debug(LOG_INFO, "close s %d schedule error for req %"PRIu64" "
"len %"PRIu32" type %d from c %d%c %s", conn->sd, msg->id,
msg->mlen, msg->type, c_conn->sd, conn->err ? ':' : ' ',
conn->err ? strerror(conn->err): " ");
}
}
ASSERT(TAILQ_EMPTY(&conn->omsg_q));
msg = conn->rmsg;
if (msg != NULL) {
conn->rmsg = NULL;
ASSERT(!msg->request);
ASSERT(msg->peer == NULL);
rsp_put(msg);
log_debug(LOG_INFO, "close s %d discarding rsp %"PRIu64" len %"PRIu32" "
"in error", conn->sd, msg->id, msg->mlen);
}
ASSERT(conn->smsg == NULL);
//......... remainder of this function omitted .........
Developer ID: mbrukman, Project: netflix-dynomite, Lines of code: 101, Source file: dyn_server.c
Example 17: rsp_filter
static bool
rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg)
{
struct msg *pmsg;
ASSERT(!conn->client && !conn->proxy);
if (msg_empty(msg)) {
ASSERT(conn->rmsg == NULL);
log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id,
conn->sd);
rsp_put(msg);
return true;
}
pmsg = TAILQ_FIRST(&conn->omsg_q);
if (pmsg == NULL) {
log_debug(LOG_ERR, "filter stray rsp %"PRIu64" len %"PRIu32" on s %d",
msg->id, msg->mlen, conn->sd);
rsp_put(msg);
/*
* Memcached server can respond with an error response before it has
* received the entire request. This is most commonly seen for set
* requests that exceed item_size_max. IMO, this behavior of memcached
* is incorrect. The right behavior for update requests that are over
* item_size_max would be to either:
* - close the connection Or,
* - read the entire item_size_max data and then send CLIENT_ERROR
*
* We handle this stray packet scenario in nutcracker by closing the
* server connection which would end up sending SERVER_ERROR to all
* clients that have requests pending on this server connection. The
* fix is aggressive, but not doing so would lead to clients getting
* out of sync with the server and as a result clients end up getting
* responses that don't correspond to the right request.
*
* See: https://github.com/twitter/twemproxy/issues/149
*/
conn->err = EINVAL;
conn->done = 1;
return true;
}
ASSERT(pmsg->peer == NULL);
ASSERT(pmsg->request && !pmsg->done);
if (pmsg->swallow) {
conn->swallow_msg(conn, pmsg, msg);
conn->dequeue_outq(ctx, conn, pmsg);
pmsg->done = 1;
log_debug(LOG_INFO, "swallow rsp %"PRIu64" len %"PRIu32" of req "
"%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id,
conn->sd);
rsp_put(msg);
req_put(pmsg);
return true;
}
return false;
}
Developer ID: YongMan, Project: r3proxy, Lines of code: 63, Source file: nc_response.c
Example 18: adb_read
static ssize_t adb_read(struct file *fp, char __user *buf,
size_t count, loff_t *pos)
{
struct adb_context *ctxt = &_context;
struct usb_request *req;
int r = count, xfer;
int ret;
unsigned MaxPacketSize;
DBG("adb_read(%d)\n", count);
if (_lock(&ctxt->read_excl))
return -EBUSY;
/* we will block until we're online */
while (!(ctxt->online || ctxt->error)) {
DBG("adb_read: waiting for online state\n");
ret = wait_event_interruptible(ctxt->read_wq, (ctxt->online || ctxt->error));
if (ret < 0) {
_unlock(&ctxt->read_excl);
return ret;
}
}
MaxPacketSize = usb_ept_get_max_packet(ctxt->out);
if (MaxPacketSize > 512)
MaxPacketSize = 512;
while (count > 0) {
if (ctxt->error) {
r = -EIO;
break;
}
/* if we have idle read requests, get them queued */
while ((req = req_get(ctxt, &ctxt->rx_idle))) {
requeue_req:
req->length = MaxPacketSize;
ret = usb_ept_queue_xfer(ctxt->out, req);
if (ret < 0) {
DBG("adb_read: failed to queue req %p (%d)\n", req, ret);
r = -EIO;
ctxt->error = 1;
req_put(ctxt, &ctxt->rx_idle, req);
goto fail;
} else {
DBG("%s(): rx %p queue\n", __func__, req);
}
}
/* if we have data pending, give it to userspace */
if (ctxt->read_count > 0) {
xfer = (ctxt->read_count < count) ? ctxt->read_count : count;
if (copy_to_user(buf, ctxt->read_buf, xfer)) {
r = -EFAULT;
break;
}
ctxt->read_buf += xfer;
ctxt->read_count -= xfer;
buf += xfer;
count -= xfer;
/* if we've emptied the buffer, release the request */
if (ctxt->read_count == 0) {
req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
ctxt->read_req = 0;
}
continue;
}
/* wait for a request to complete */
req = 0;
ret = wait_event_interruptible(ctxt->read_wq,
((req = req_get(ctxt, &ctxt->rx_done)) || ctxt->error));
if (req != 0) {
/* if we got a 0-len one we need to put it back into
** service. if we made it the current read req we'd
** be stuck forever
*/
if (req->actual == 0)
goto requeue_req;
ctxt->read_req = req;
ctxt->read_count = req->actual;
ctxt->read_buf = req->buf;
DBG("%s(): rx %p %d\n", __func__, req, req->actual);
}
if (ret < 0) {
r = ret;
break;
}
}
fail:
_unlock(&ctxt->read_excl);
return r;
}
Developer ID: Soaa-, Project: -lightspeed, Lines of code: 99, Source file: adb.c
Example 19: dnode_req_put
void
dnode_req_put(struct msg *msg)
{
req_put(msg);
}
Developer ID: amit-git, Project: dynomite, Lines of code: 5, Source file: dyn_dnode_request.c
Example 20: mtp_function_bind
static int
mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
{
int n, rc, id;
struct usb_ep *ep;
struct usb_request *req;
struct proc_dir_entry *mtp_proc = NULL;
spin_lock_init(&g_usb_mtp_context.lock);
g_usb_mtp_context.cdev = c->cdev;
/* allocate interface ID(s) */
id = usb_interface_id(c, f);
if (id < 0)
return id;
intf_desc.bInterfaceNumber = id;
/* Find all the endpoints we will use */
ep = usb_ep_autoconfig(g_usb_mtp_context.cdev->gadget,
&fs_bulk_in_desc);
if (!ep) {
mtp_err("auto-configure hs_bulk_in_desc error\n");
goto autoconf_fail;
}
ep->driver_data = &g_usb_mtp_context;
g_usb_mtp_context.bulk_in = ep;
ep = usb_ep_autoconfig(g_usb_mtp_context.cdev->gadget,
&fs_bulk_out_desc);
if (!ep) {
mtp_err("auto-configure hs_bulk_out_desc error\n");
goto autoconf_fail;
}
ep->driver_data = &g_usb_mtp_context;
g_usb_mtp_context.bulk_out = ep;
ep = usb_ep_autoconfig(g_usb_mtp_context.cdev->gadget,
&fs_intr_in_desc);
if (!ep) {
mtp_err("auto-configure hs_intr_in_desc error\n");
goto autoconf_fail;
}
ep->driver_data = &g_usb_mtp_context;
g_usb_mtp_context.intr_in = ep;
if (gadget_is_dualspeed(g_usb_mtp_context.cdev->gadget)) {
/* Assume endpoint addresses are the same for both speeds */
hs_bulk_in_desc.bEndpointAddress =
fs_bulk_in_desc.bEndpointAddress;
hs_bulk_out_desc.bEndpointAddress =
fs_bulk_out_desc.bEndpointAddress;
hs_intr_in_desc.bEndpointAddress =
fs_intr_in_desc.bEndpointAddress;
}
rc = -ENOMEM;
for (n = 0; n < MAX_BULK_RX_REQ_NUM; n++) {
req = req_new(g_usb_mtp_context.bulk_out, BULK_BUFFER_SIZE);
if (!req)
goto autoconf_fail;
pending_reqs[n] = req;
req->complete = mtp_out_complete;
req_put(&g_usb_mtp_context.rx_reqs, req);
}
for (n = 0; n < MAX_BULK_TX_REQ_NUM; n++) {
req = req_new(g_usb_mtp_context.bulk_in, BULK_BUFFER_SIZE);
if (!req)
goto autoconf_fail;
req->complete = mtp_in_complete;
req_put(&g_usb_mtp_context.tx_reqs, req);
}
for (n = 0; n < MAX_CTL_RX_REQ_NUM; n++)
ctl_req_put(&g_usb_mtp_context.ctl_rx_reqs, &ctl_reqs[n]);
g_usb_mtp_context.int_tx_req =
req_new(g_usb_mtp_context.intr_in, BULK_BUFFER_SIZE);
if (!g_usb_mtp_context.int_tx_req)
goto autoconf_fail;
g_usb_mtp_context.intr_in_busy = 0;
g_usb_mtp_context.int_tx_req->complete = mtp_int_complete;
g_usb_mtp_context.ctl_tx_req =
req_new(g_usb_mtp_context.cdev->gadget->ep0, 512);
if (!g_usb_mtp_context.ctl_tx_req)
goto autoconf_fail;
misc_register(&mtp_device);
mtp_proc = create_proc_entry("mtpctl", 0666, 0);
if (!mtp_proc) {
mtp_err("creating /proc/mtpctl failed\n");
goto autoconf_fail;
}
mtp_proc->proc_fops = &mtp_ctl_fops;
return 0;
//......... remainder of this function omitted .........
Developer ID: atarii, Project: BDA-ACTV, Lines of code: 101, Source file: f_mtp.c
Note: The req_put examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.