//......... part of the code omitted here .........
type = eh->ether_type;
break;
}
default:
if_printf(ifp, "can't handle af%d\n", dst->sa_family);
senderr(EAFNOSUPPORT);
}
if (lle != NULL && (lle->la_flags & LLE_IFADDR)) {
update_mbuf_csumflags(m, m);
return (if_simloop(ifp, m, dst->sa_family, 0));
}
/*
* Add local net header. If no space in first mbuf,
* allocate another.
*/
M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
if (m == NULL)
senderr(ENOBUFS);
eh = mtod(m, struct ether_header *);
(void)memcpy(&eh->ether_type, &type,
sizeof(eh->ether_type));
(void)memcpy(eh->ether_dhost, edst, sizeof (edst));
if (hdrcmplt)
(void)memcpy(eh->ether_shost, esrc,
sizeof(eh->ether_shost));
else
(void)memcpy(eh->ether_shost, IF_LLADDR(ifp),
sizeof(eh->ether_shost));
/*
* If a simplex interface, and the packet is being sent to our
* Ethernet address or a broadcast address, loopback a copy.
* XXX To make a simplex device behave exactly like a duplex
* device, we should copy in the case of sending to our own
* ethernet address (thus letting the original actually appear
* on the wire). However, we don't do that here for security
* reasons and compatibility with the original behavior.
*/
if ((ifp->if_flags & IFF_SIMPLEX) && loop_copy &&
((t = pf_find_mtag(m)) == NULL || !t->routed)) {
if (m->m_flags & M_BCAST) {
struct mbuf *n;
/*
* Because if_simloop() modifies the packet, we need a
* writable copy through m_dup() instead of a readonly
* one as m_copy[m] would give us. The alternative would
* be to modify if_simloop() to handle the readonly mbuf,
* but performancewise it is mostly equivalent (trading
* extra data copying vs. extra locking).
*
* XXX This is a local workaround. A number of less
* often used kernel parts suffer from the same bug.
* See PR kern/105943 for a proposed general solution.
*/
if ((n = m_dup(m, M_NOWAIT)) != NULL) {
update_mbuf_csumflags(m, n);
(void)if_simloop(ifp, n, dst->sa_family, hlen);
} else
ifp->if_iqdrops++;
} else if (bcmp(eh->ether_dhost, eh->ether_shost,
ETHER_ADDR_LEN) == 0) {
update_mbuf_csumflags(m, m);
(void) if_simloop(ifp, m, dst->sa_family, hlen);
return (0); /* XXX */
}
}
/*
* Bridges require special output handling.
*/
if (ifp->if_bridge) {
BRIDGE_OUTPUT(ifp, m, error);
return (error);
}
#if defined(INET) || defined(INET6)
if (ifp->if_carp &&
(error = (*carp_output_p)(ifp, m, dst)))
goto bad;
#endif
/* Handle ng_ether(4) processing, if any */
if (IFP2AC(ifp)->ac_netgraph != NULL) {
KASSERT(ng_ether_output_p != NULL,
("ng_ether_output_p is NULL"));
if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
bad: if (m != NULL)
m_freem(m);
return (error);
}
if (m == NULL)
return (0);
}
/* Continue with link-layer output */
return ether_output_frame(ifp, m);
}
static int
udp6_send(struct socket *so, int flags, struct mbuf *m,
struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
struct inpcb *inp;
struct inpcbinfo *pcbinfo;
int error = 0;
pcbinfo = get_inpcbinfo(so->so_proto->pr_protocol);
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("udp6_send: inp == NULL"));
INP_WLOCK(inp);
if (addr) {
if (addr->sa_len != sizeof(struct sockaddr_in6)) {
error = EINVAL;
goto bad;
}
if (addr->sa_family != AF_INET6) {
error = EAFNOSUPPORT;
goto bad;
}
}
#ifdef INET
if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
int hasv4addr;
struct sockaddr_in6 *sin6 = 0;
if (addr == 0)
hasv4addr = (inp->inp_vflag & INP_IPV4);
else {
sin6 = (struct sockaddr_in6 *)addr;
hasv4addr = IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)
? 1 : 0;
}
if (hasv4addr) {
struct pr_usrreqs *pru;
/*
* XXXRW: We release UDP-layer locks before calling
* udp_send() in order to avoid recursion. However,
* this does mean there is a short window where inp's
* fields are unstable. Could this lead to a
* potential race in which the factors causing us to
* select the UDPv4 output routine are invalidated?
*/
INP_WUNLOCK(inp);
if (sin6)
in6_sin6_2_sin_in_sock(addr);
pru = inetsw[ip_protox[IPPROTO_UDP]].pr_usrreqs;
/* addr will just be freed in sendit(). */
return ((*pru->pru_send)(so, flags, m, addr, control,
td));
}
}
#endif
#ifdef MAC
mac_inpcb_create_mbuf(inp, m);
#endif
INP_HASH_WLOCK(pcbinfo);
error = udp6_output(inp, m, addr, control, td);
INP_HASH_WUNLOCK(pcbinfo);
#ifdef INET
#endif
INP_WUNLOCK(inp);
return (error);
bad:
INP_WUNLOCK(inp);
m_freem(m);
return (error);
}
static
void
testa(struct array *a)
{
int testarray[TESTSIZE];
int i, j, n, r, *p;
for (i=0; i<TESTSIZE; i++) {
testarray[i]=i;
}
n = array_num(a);
KASSERT(n==0);
for (i=0; i<TESTSIZE; i++) {
r = array_add(a, &testarray[i], NULL);
KASSERT(r==0);
n = array_num(a);
KASSERT(n==i+1);
}
n = array_num(a);
KASSERT(n==TESTSIZE);
for (i=0; i<TESTSIZE; i++) {
p = array_get(a, i);
KASSERT(*p == i);
}
n = array_num(a);
KASSERT(n==TESTSIZE);
for (j=0; j<TESTSIZE*4; j++) {
i = random()%TESTSIZE;
p = array_get(a, i);
KASSERT(*p == i);
}
n = array_num(a);
KASSERT(n==TESTSIZE);
for (i=0; i<TESTSIZE; i++) {
array_set(a, i, &testarray[TESTSIZE-i-1]);
}
for (i=0; i<TESTSIZE; i++) {
p = array_get(a, i);
KASSERT(*p == TESTSIZE-i-1);
}
r = array_setsize(a, TESTSIZE/2);
KASSERT(r==0);
for (i=0; i<TESTSIZE/2; i++) {
p = array_get(a, i);
KASSERT(*p == TESTSIZE-i-1);
}
array_remove(a, 1);
for (i=1; i<TESTSIZE/2 - 1; i++) {
p = array_get(a, i);
KASSERT(*p == TESTSIZE-i-2);
}
p = array_get(a, 0);
KASSERT(*p == TESTSIZE-1);
array_setsize(a, 2);
p = array_get(a, 0);
KASSERT(*p == TESTSIZE-1);
p = array_get(a, 1);
KASSERT(*p == TESTSIZE-3);
array_set(a, 1, NULL);
array_setsize(a, 2);
p = array_get(a, 0);
KASSERT(*p == TESTSIZE-1);
p = array_get(a, 1);
KASSERT(p==NULL);
array_setsize(a, TESTSIZE*10);
p = array_get(a, 0);
KASSERT(*p == TESTSIZE-1);
p = array_get(a, 1);
KASSERT(p==NULL);
}
Developer: Adam-Koza, Project: A3, Lines: 83, Source file: arraytest.c
Example 9: ofw_bus_search_intrmap
/*
* Map an interrupt using the firmware reg, interrupt-map and
* interrupt-map-mask properties.
* The interrupt property to be mapped must be of size intrsz, and pointed to
* by intr. The regs property of the node for which the mapping is done must
* be passed as regs. This property is an array of register specifications;
* the size of the address part of such a specification must be passed as
* physsz. Only the first element of the property is used.
* imap and imapsz hold the interrupt-map property and its size.
* imapmsk is a pointer to the interrupt-map-mask property, which must have
* a size of physsz + intrsz; it may be NULL, in which case a full mask is
* assumed.
* maskbuf must point to a buffer of length physsz + intrsz.
* The interrupt is returned in result, which must point to a buffer of length
* rintrsz (which gives the expected size of the mapped interrupt).
* Returns the number of cells in the interrupt if a mapping was found, 0 otherwise.
*/
int
ofw_bus_search_intrmap(void *intr, int intrsz, void *regs, int physsz,
void *imap, int imapsz, void *imapmsk, void *maskbuf, void *result,
int rintrsz, phandle_t *iparent)
{
phandle_t parent;
uint8_t *ref = maskbuf;
uint8_t *uiintr = intr;
uint8_t *uiregs = regs;
uint8_t *uiimapmsk = imapmsk;
uint8_t *mptr;
pcell_t paddrsz;
pcell_t pintrsz;
int i, tsz;
if (imapmsk != NULL) {
for (i = 0; i < physsz; i++)
ref[i] = uiregs[i] & uiimapmsk[i];
for (i = 0; i < intrsz; i++)
ref[physsz + i] = uiintr[i] & uiimapmsk[physsz + i];
} else {
bcopy(regs, ref, physsz);
bcopy(intr, ref + physsz, intrsz);
}
mptr = imap;
i = imapsz;
paddrsz = 0;
while (i > 0) {
bcopy(mptr + physsz + intrsz, &parent, sizeof(parent));
#ifndef OFW_IMAP_NO_IPARENT_ADDR_CELLS
/*
* Find if we need to read the parent address data.
* CHRP-derived OF bindings, including ePAPR-compliant FDTs,
* use this as an optional part of the specifier.
*/
if (OF_getencprop(OF_node_from_xref(parent),
"#address-cells", &paddrsz, sizeof(paddrsz)) == -1)
paddrsz = 0; /* default */
paddrsz *= sizeof(pcell_t);
#endif
if (OF_searchencprop(OF_node_from_xref(parent),
"#interrupt-cells", &pintrsz, sizeof(pintrsz)) == -1)
pintrsz = 1; /* default */
pintrsz *= sizeof(pcell_t);
/* Compute the map stride size. */
tsz = physsz + intrsz + sizeof(phandle_t) + paddrsz + pintrsz;
KASSERT(i >= tsz, ("ofw_bus_search_intrmap: truncated map"));
if (bcmp(ref, mptr, physsz + intrsz) == 0) {
bcopy(mptr + physsz + intrsz + sizeof(parent) + paddrsz,
result, MIN(rintrsz, pintrsz));
if (iparent != NULL)
*iparent = parent;
return (pintrsz/sizeof(pcell_t));
}
mptr += tsz;
i -= tsz;
}
return (0);
}
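For orientation, a caller gathers the child's reg and interrupts properties plus the parent's interrupt-map and interrupt-map-mask, then hands them to the routine above. The sketch below is illustrative only: the buffer sizes, the default of two address cells, and the example_* name are assumptions, not code from the original driver.
static int
example_map_intr(phandle_t node, pcell_t *mapped, int rintrsz)
{
	pcell_t reg[8], intr[4], imap[128], imapmsk[12], addrc;
	uint8_t maskbuf[sizeof(reg) + sizeof(intr)];
	phandle_t parent, iparent;
	void *msk;
	int physsz, intrsz, imapsz;
	parent = OF_parent(node);
	/* physsz is the byte size of the address part of one reg entry. */
	if (OF_getencprop(parent, "#address-cells", &addrc,
	    sizeof(addrc)) == -1)
		addrc = 2;			/* assumed default */
	physsz = addrc * sizeof(pcell_t);
	/* Property sizes come back in bytes; -1 means the property is missing. */
	if (OF_getprop(node, "reg", reg, sizeof(reg)) < physsz ||
	    (intrsz = OF_getprop(node, "interrupts", intr,
	    sizeof(intr))) <= 0 ||
	    (imapsz = OF_getprop(parent, "interrupt-map", imap,
	    sizeof(imap))) <= 0)
		return (0);
	/* A missing mask is passed as NULL, so a full mask is assumed. */
	msk = imapmsk;
	if (OF_getprop(parent, "interrupt-map-mask", imapmsk,
	    sizeof(imapmsk)) <= 0)
		msk = NULL;
	return (ofw_bus_search_intrmap(intr, intrsz, reg, physsz,
	    imap, imapsz, msk, maskbuf, mapped, rintrsz, &iparent));
}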
/**
* Parse the next core entry from the EROM table and produce a bcma_corecfg
* to be owned by the caller.
*
* @param erom EROM read state.
* @param[out] result On success, the core's device info. The caller inherits
* ownership of this allocation.
*
* @return If successful, returns 0. If the end of the EROM table is hit,
* ENOENT will be returned. On error, returns a non-zero error value.
*/
int
bcma_erom_parse_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
{
struct bcma_corecfg *cfg;
struct bcma_erom_core core;
uint8_t first_region_type;
bus_size_t initial_offset;
u_int core_index;
int core_unit;
int error;
cfg = NULL;
initial_offset = bcma_erom_tell(erom);
/* Parse the next core entry */
if ((error = bcma_erom_parse_core(erom, &core)))
return (error);
/* Determine the core's index and unit numbers */
bcma_erom_reset(erom);
core_unit = 0;
core_index = 0;
for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
struct bcma_erom_core prev_core;
/* Parse next core */
if ((error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE)))
return (error);
if ((error = bcma_erom_parse_core(erom, &prev_core)))
return (error);
/* Is earlier unit? */
if (core.vendor == prev_core.vendor &&
core.device == prev_core.device)
{
core_unit++;
}
/* Seek to next core */
if ((error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE)))
return (error);
}
/* We already parsed the core descriptor */
if ((error = erom_skip_core(erom)))
return (error);
/* Allocate our corecfg */
cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
core.device, core.rev);
if (cfg == NULL)
return (ENOMEM);
/* These are 5-bit values in the EROM table, and should never be able
* to overflow BCMA_PID_MAX. */
KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
("unsupported wport count"));
if (bootverbose) {
EROM_LOG(erom,
"core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
core_index,
bhnd_vendor_name(core.vendor),
bhnd_find_core_name(core.vendor, core.device),
core.device, core.rev, core_unit);
}
cfg->num_master_ports = core.num_mport;
cfg->num_dev_ports = 0; /* determined below */
cfg->num_bridge_ports = 0; /* determined below */
cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
/* Parse Master Port Descriptors */
for (uint8_t i = 0; i < core.num_mport; i++) {
struct bcma_mport *mport;
struct bcma_erom_mport mpd;
/* Parse the master port descriptor */
error = bcma_erom_parse_mport(erom, &mpd);
if (error)
goto failed;
/* Initialize a new bus mport structure */
mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
if (mport == NULL) {
error = ENOMEM;
//......... part of the code omitted here .........
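Given the return contract documented above (0 on success, ENOENT once the end of the EROM table is reached, any other value on error), enumeration reduces to a simple loop. A minimal sketch follows; it assumes bcma_free_corecfg() is the release counterpart of bcma_alloc_corecfg(), and the example_* name is illustrative.
static int
example_count_cores(struct bcma_erom *erom, u_int *ncores)
{
	struct bcma_corecfg *cfg;
	int error;
	*ncores = 0;
	while ((error = bcma_erom_parse_corecfg(erom, &cfg)) == 0) {
		(*ncores)++;
		/* Assumed counterpart to bcma_alloc_corecfg(). */
		bcma_free_corecfg(cfg);
	}
	/* ENOENT only marks the end of the table, not a failure. */
	return (error == ENOENT ? 0 : error);
}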
//......... part of the code omitted here .........
CURVNET_SET_QUIET(ifp->if_vnet);
if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
if (ETHER_IS_BROADCAST(eh->ether_dhost))
m->m_flags |= M_BCAST;
else
m->m_flags |= M_MCAST;
ifp->if_imcasts++;
}
#ifdef MAC
/*
* Tag the mbuf with an appropriate MAC label before any other
* consumers can get to it.
*/
mac_ifnet_create_mbuf(ifp, m);
#endif
/*
* Give bpf a chance at the packet.
*/
ETHER_BPF_MTAP(ifp, m);
/*
* If the CRC is still on the packet, trim it off. We do this once
* and once only in case we are re-entered. Nothing else on the
* Ethernet receive path expects to see the FCS.
*/
if (m->m_flags & M_HASFCS) {
m_adj(m, -ETHER_CRC_LEN);
m->m_flags &= ~M_HASFCS;
}
if (!(ifp->if_capenable & IFCAP_HWSTATS))
ifp->if_ibytes += m->m_pkthdr.len;
/* Allow monitor mode to claim this frame, after stats are updated. */
if (ifp->if_flags & IFF_MONITOR) {
m_freem(m);
CURVNET_RESTORE();
return;
}
/* Handle input from a lagg(4) port */
if (ifp->if_type == IFT_IEEE8023ADLAG) {
KASSERT(lagg_input_p != NULL,
("%s: if_lagg not loaded!", __func__));
m = (*lagg_input_p)(ifp, m);
if (m != NULL)
ifp = m->m_pkthdr.rcvif;
else {
CURVNET_RESTORE();
return;
}
}
/*
* If the hardware did not process an 802.1Q tag, do this now,
* to allow 802.1P priority frames to be passed to the main input
* path correctly.
* TODO: Deal with Q-in-Q frames, but not arbitrary nesting levels.
*/
if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_VLAN) {
struct ether_vlan_header *evl;
if (m->m_len < sizeof(*evl) &&
(m = m_pullup(m, sizeof(*evl))) == NULL) {
#ifdef DIAGNOSTIC
if_printf(ifp, "cannot pullup VLAN header\n");
#endif
ifp->if_ierrors++;
m_freem(m);
CURVNET_RESTORE();
return;
}
evl = mtod(m, struct ether_vlan_header *);
m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
m->m_flags |= M_VLANTAG;
bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
ETHER_HDR_LEN - ETHER_TYPE_LEN);
m_adj(m, ETHER_VLAN_ENCAP_LEN);
eh = mtod(m, struct ether_header *);
}
M_SETFIB(m, ifp->if_fib);
/* Allow ng_ether(4) to claim this frame. */
if (IFP2AC(ifp)->ac_netgraph != NULL) {
KASSERT(ng_ether_input_p != NULL,
("%s: ng_ether_input_p is NULL", __func__));
m->m_flags &= ~M_PROMISC;
(*ng_ether_input_p)(ifp, &m);
if (m == NULL) {
CURVNET_RESTORE();
return;
}
eh = mtod(m, struct ether_header *);
}
static void
uvm_unloanpage(struct vm_page **ploans, int npages)
{
struct vm_page *pg;
kmutex_t *slock;
mutex_enter(&uvm_pageqlock);
while (npages-- > 0) {
pg = *ploans++;
/*
* do a little dance to acquire the object or anon lock
* as appropriate. we are locking in the wrong order,
* so we have to do a try-lock here.
*/
slock = NULL;
while (pg->uobject != NULL || pg->uanon != NULL) {
if (pg->uobject != NULL) {
slock = pg->uobject->vmobjlock;
} else {
slock = pg->uanon->an_lock;
}
if (mutex_tryenter(slock)) {
break;
}
/* XXX Better than yielding but inadequate. */
kpause("livelock", false, 1, &uvm_pageqlock);
slock = NULL;
}
/*
* drop our loan. if page is owned by an anon but
* PQ_ANON is not set, the page was loaned to the anon
* from an object which dropped ownership, so resolve
* this by turning the anon's loan into real ownership
* (ie. decrement loan_count again and set PQ_ANON).
* after all this, if there are no loans left, put the
* page back on a paging queue (if the page is owned by
* an anon) or free it (if the page is now unowned).
*/
KASSERT(pg->loan_count > 0);
pg->loan_count--;
if (pg->uobject == NULL && pg->uanon != NULL &&
(pg->pqflags & PQ_ANON) == 0) {
KASSERT(pg->loan_count > 0);
pg->loan_count--;
pg->pqflags |= PQ_ANON;
}
if (pg->loan_count == 0 && pg->uobject == NULL &&
pg->uanon == NULL) {
KASSERT((pg->flags & PG_BUSY) == 0);
uvm_pagefree(pg);
}
if (slock != NULL) {
mutex_exit(slock);
}
}
mutex_exit(&uvm_pageqlock);
}
/*
* uvm_loanbreak: break loan on a uobj page
*
* => called with uobj locked
* => the page should be busy
* => return value:
* newly allocated page if succeeded
*/
struct vm_page *
uvm_loanbreak(struct vm_page *uobjpage)
{
struct vm_page *pg;
#ifdef DIAGNOSTIC
struct uvm_object *uobj = uobjpage->uobject;
#endif
KASSERT(uobj != NULL);
KASSERT(mutex_owned(uobj->vmobjlock));
KASSERT(uobjpage->flags & PG_BUSY);
/* alloc new un-owned page */
pg = uvm_pagealloc(NULL, 0, NULL, 0);
if (pg == NULL)
return NULL;
/*
* copy the data from the old page to the new
* one and clear the fake flags on the new page (keep it busy).
* force a reload of the old page by clearing it from all
* pmaps.
* transfer dirtiness of the old page to the new page.
* then lock the page queues to rename the pages.
*/
uvm_pagecopy(uobjpage, pg); /* old -> new */
pg->flags &= ~PG_FAKE;
pmap_page_protect(uobjpage, VM_PROT_NONE);
if ((uobjpage->flags & PG_CLEAN) != 0 && !pmap_clear_modify(uobjpage)) {
pmap_clear_modify(pg);
pg->flags |= PG_CLEAN;
} else {
/* uvm_pagecopy marked it dirty */
KASSERT((pg->flags & PG_CLEAN) == 0);
/* an object with a dirty page should be dirty. */
KASSERT(!UVM_OBJ_IS_CLEAN(uobj));
}
if (uobjpage->flags & PG_WANTED)
wakeup(uobjpage);
/* uobj still locked */
uobjpage->flags &= ~(PG_WANTED|PG_BUSY);
UVM_PAGE_OWN(uobjpage, NULL);
mutex_enter(&uvm_pageqlock);
/*
* replace uobjpage with new page.
*/
uvm_pagereplace(uobjpage, pg);
/*
* if the page is no longer referenced by
* an anon (i.e. we are breaking an O->K
* loan), then remove it from any pageq's.
*/
if (uobjpage->uanon == NULL)
uvm_pagedequeue(uobjpage);
/*
* at this point we have absolutely no
* control over uobjpage
*/
/* install new page */
uvm_pageactivate(pg);
mutex_exit(&uvm_pageqlock);
/*
* done! loan is broken and "pg" is
* PG_BUSY. it can now replace uobjpage.
*/
return pg;
}
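The header comment above pins down the calling contract: the object is locked, the page is PG_BUSY, and a NULL return means the page allocation failed. A schematic caller might look like the sketch below; the back-out-and-wait path, the wait-channel name, and the example_* name are illustrative assumptions, not the actual fault-handler code.
static struct vm_page *
example_break_loan(struct uvm_object *uobj, struct vm_page *uobjpage)
{
	struct vm_page *pg;
	KASSERT(mutex_owned(uobj->vmobjlock));
	KASSERT(uobjpage->flags & PG_BUSY);
	pg = uvm_loanbreak(uobjpage);
	if (pg == NULL) {
		/* Out of memory: drop the lock, wait, let the caller retry. */
		mutex_exit(uobj->vmobjlock);
		uvm_wait("loanbrk");
		return NULL;
	}
	/* "pg" has replaced uobjpage and is still PG_BUSY. */
	return pg;
}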
Developer: ryo, Project: netbsd-src, Lines: 84, Source file: uvm_loan.c
Example 18: make_established
/*
* Completes some final bits of initialization for just established connections
* and changes their state to TCPS_ESTABLISHED.
*
* The ISNs are from after the exchange of SYNs, i.e., the true ISN + 1.
*/
void
make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
uint16_t opt)
{
struct inpcb *inp = toep->inp;
struct socket *so = inp->inp_socket;
struct tcpcb *tp = intotcpcb(inp);
long bufsize;
uint32_t iss = be32toh(snd_isn) - 1; /* true ISS */
uint32_t irs = be32toh(rcv_isn) - 1; /* true IRS */
uint16_t tcpopt = be16toh(opt);
struct flowc_tx_params ftxp;
INP_WLOCK_ASSERT(inp);
KASSERT(tp->t_state == TCPS_SYN_SENT ||
tp->t_state == TCPS_SYN_RECEIVED,
("%s: TCP state %s", __func__, tcpstates[tp->t_state]));
CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p",
__func__, toep->tid, toep, inp);
tp->t_state = TCPS_ESTABLISHED;
tp->t_starttime = ticks;
TCPSTAT_INC(tcps_connects);
tp->irs = irs;
tcp_rcvseqinit(tp);
tp->rcv_wnd = toep->rx_credits << 10;
tp->rcv_adv += tp->rcv_wnd;
tp->last_ack_sent = tp->rcv_nxt;
/*
* If we were unable to send all rx credits via opt0, save the remainder
* in rx_credits so that they can be handed over with the next credit
* update.
*/
SOCKBUF_LOCK(&so->so_rcv);
bufsize = select_rcv_wnd(so);
SOCKBUF_UNLOCK(&so->so_rcv);
toep->rx_credits = bufsize - tp->rcv_wnd;
tp->iss = iss;
tcp_sendseqinit(tp);
tp->snd_una = iss + 1;
tp->snd_nxt = iss + 1;
tp->snd_max = iss + 1;
assign_rxopt(tp, tcpopt);
SOCKBUF_LOCK(&so->so_snd);
if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
bufsize = V_tcp_autosndbuf_max;
else
bufsize = sbspace(&so->so_snd);
SOCKBUF_UNLOCK(&so->so_snd);
ftxp.snd_nxt = tp->snd_nxt;
ftxp.rcv_nxt = tp->rcv_nxt;
ftxp.snd_space = bufsize;
ftxp.mss = tp->t_maxseg;
send_flowc_wr(toep, &ftxp);
soisconnected(so);
}