* terms and conditions of the copyright.
*/
-#include <slirp.h>
+#include "qemu/osdep.h"
+#include "slirp.h"
+#include "qemu/timer.h"
-int if_mtu, if_mru;
-int if_comp;
-int if_maxlinkhdr;
-int if_queued = 0; /* Number of packets queued so far */
-int if_thresh = 10; /* Number of packets queued before we start sending
- * (to prevent allocing too many mbufs) */
-
-struct mbuf if_fastq; /* fast queue (for interactive data) */
-struct mbuf if_batchq; /* queue for non-interactive data */
-struct mbuf *next_m; /* Pointer to next mbuf to output */
-
-#define ifs_init(ifm) ((ifm)->ifs_next = (ifm)->ifs_prev = (ifm))
-
-void
-ifs_insque(ifm, ifmhead)
- struct mbuf *ifm, *ifmhead;
+static void
+ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
{
    ifm->ifs_next = ifmhead->ifs_next;
    ifmhead->ifs_next = ifm;
    ifm->ifs_prev = ifmhead;
    ifm->ifs_next->ifs_prev = ifm;
}
-void
-ifs_remque(ifm)
- struct mbuf *ifm;
+static void
+ifs_remque(struct mbuf *ifm)
{
ifm->ifs_prev->ifs_next = ifm->ifs_next;
ifm->ifs_next->ifs_prev = ifm->ifs_prev;
}
void
-if_init()
-{
-#if 0
- /*
- * Set if_maxlinkhdr to 48 because it's 40 bytes for TCP/IP,
- * and 8 bytes for PPP, but need to have it on an 8byte boundary
- */
-#ifdef USE_PPP
- if_maxlinkhdr = 48;
-#else
- if_maxlinkhdr = 40;
-#endif
-#else
- /* 2 for alignment, 14 for ethernet, 40 for TCP/IP */
- if_maxlinkhdr = 2 + 14 + 40;
-#endif
- if_mtu = 1500;
- if_mru = 1500;
- if_comp = IF_AUTOCOMP;
- if_fastq.ifq_next = if_fastq.ifq_prev = &if_fastq;
- if_batchq.ifq_next = if_batchq.ifq_prev = &if_batchq;
- // sl_compress_init(&comp_s);
- next_m = &if_batchq;
-}
-
-#if 0
-/*
- * This shouldn't be needed since the modem is blocking and
- * we don't expect any signals, but what the hell..
- */
-inline int
-writen(fd, bptr, n)
- int fd;
- char *bptr;
- int n;
+if_init(Slirp *slirp)
{
- int ret;
- int total;
-
- /* This should succeed most of the time */
- ret = send(fd, bptr, n,0);
- if (ret == n || ret <= 0)
- return ret;
-
- /* Didn't write everything, go into the loop */
- total = ret;
- while (n > total) {
- ret = send(fd, bptr+total, n-total,0);
- if (ret <= 0)
- return ret;
- total += ret;
- }
- return total;
+ slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
+ slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
+ slirp->next_m = (struct mbuf *) &slirp->if_batchq;
}
-/*
- * if_input - read() the tty, do "top level" processing (ie: check for any escapes),
- * and pass onto (*ttyp->if_input)
- *
- * XXXXX Any zeros arriving by themselves are NOT placed into the arriving packet.
- */
-#define INBUFF_SIZE 2048 /* XXX */
-void
-if_input(ttyp)
- struct ttys *ttyp;
-{
- u_char if_inbuff[INBUFF_SIZE];
- int if_n;
-
- DEBUG_CALL("if_input");
- DEBUG_ARG("ttyp = %lx", (long)ttyp);
-
- if_n = recv(ttyp->fd, (char *)if_inbuff, INBUFF_SIZE,0);
-
- DEBUG_MISC((dfd, " read %d bytes\n", if_n));
-
- if (if_n <= 0) {
- if (if_n == 0 || (errno != EINTR && errno != EAGAIN)) {
- if (ttyp->up)
- link_up--;
- tty_detached(ttyp, 0);
- }
- return;
- }
- if (if_n == 1) {
- if (*if_inbuff == '0') {
- ttyp->ones = 0;
- if (++ttyp->zeros >= 5)
- slirp_exit(0);
- return;
- }
- if (*if_inbuff == '1') {
- ttyp->zeros = 0;
- if (++ttyp->ones >= 5)
- tty_detached(ttyp, 0);
- return;
- }
- }
- ttyp->ones = ttyp->zeros = 0;
-
- (*ttyp->if_input)(ttyp, if_inbuff, if_n);
-}
-#endif
-
/*
* if_output: Queue packet into an output queue.
* There are 2 output queue's, if_fastq and if_batchq.
* it'll temporarily get downgraded to the batchq)
*/
void
-if_output(so, ifm)
- struct socket *so;
- struct mbuf *ifm;
+if_output(struct socket *so, struct mbuf *ifm)
{
+ Slirp *slirp = ifm->slirp;
struct mbuf *ifq;
int on_fastq = 1;
DEBUG_CALL("if_output");
- DEBUG_ARG("so = %lx", (long)so);
- DEBUG_ARG("ifm = %lx", (long)ifm);
+ DEBUG_ARG("so = %p", so);
+ DEBUG_ARG("ifm = %p", ifm);
/*
* First remove the mbuf from m_usedlist,
* We mustn't put this packet back on the fastq (or we'll send it out of order)
* XXX add cache here?
*/
- for (ifq = if_batchq.ifq_prev; ifq != &if_batchq; ifq = ifq->ifq_prev) {
+ for (ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
+ (struct quehead *) ifq != &slirp->if_batchq;
+ ifq = ifq->ifq_prev) {
if (so == ifq->ifq_so) {
/* A match! */
ifm->ifq_so = so;
/* No match, check which queue to put it on */
if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
- ifq = if_fastq.ifq_prev;
+ ifq = (struct mbuf *) slirp->if_fastq.qh_rlink;
on_fastq = 1;
/*
* Check if this packet is a part of the last
ifs_insque(ifm, ifq->ifs_prev);
goto diddit;
}
- } else
- ifq = if_batchq.ifq_prev;
+ } else {
+ ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
+ /* Set next_m if the queue was empty so far */
+ if ((struct quehead *) slirp->next_m == &slirp->if_batchq) {
+ slirp->next_m = ifm;
+ }
+ }
/* Create a new doubly linked list for this session */
ifm->ifq_so = so;
insque(ifm, ifq);
diddit:
- ++if_queued;
-
if (so) {
/* Update *_queued */
so->so_queued++;
remque(ifm->ifs_next);
/* ...And insert in the new. That'll teach ya! */
- insque(ifm->ifs_next, &if_batchq);
+ insque(ifm->ifs_next, &slirp->if_batchq);
}
}
#ifndef FULL_BOLT
 /*
  * This prevents us from malloc()ing too many mbufs
  */
- if (link_up) {
- /* if_start will check towrite */
- if_start();
- }
+ if_start(ifm->slirp);
#endif
}
/*
* Send a packet
- * We choose a packet based on it's position in the output queues;
+ * We choose a packet based on its position in the output queues;
* If there are packets on the fastq, they are sent FIFO, before
* everything else. Otherwise we choose the first packet from the
* batchq and send it. the next packet chosen will be from the session
* from the second session, then one packet from the third, then back
* to the first, etc. etc.
*/
-void
-if_start(void)
+void if_start(Slirp *slirp)
{
- struct mbuf *ifm, *ifqt;
-
- DEBUG_CALL("if_start");
-
- if (if_queued == 0)
- return; /* Nothing to do */
-
- again:
- /* check if we can really output */
- if (!slirp_can_output())
- return;
-
- /*
- * See which queue to get next packet from
- * If there's something in the fastq, select it immediately
- */
- if (if_fastq.ifq_next != &if_fastq) {
- ifm = if_fastq.ifq_next;
- } else {
- /* Nothing on fastq, see if next_m is valid */
- if (next_m != &if_batchq)
- ifm = next_m;
- else
- ifm = if_batchq.ifq_next;
-
- /* Set which packet to send on next iteration */
- next_m = ifm->ifq_next;
- }
- /* Remove it from the queue */
- ifqt = ifm->ifq_prev;
- remque(ifm);
- --if_queued;
-
- /* If there are more packets for this session, re-queue them */
- if (ifm->ifs_next != /* ifm->ifs_prev != */ ifm) {
- insque(ifm->ifs_next, ifqt);
- ifs_remque(ifm);
- }
-
- /* Update so_queued */
- if (ifm->ifq_so) {
- if (--ifm->ifq_so->so_queued == 0)
- /* If there's no more queued, reset nqueued */
- ifm->ifq_so->so_nqueued = 0;
- }
-
- /* Encapsulate the packet for sending */
- if_encap(ifm->m_data, ifm->m_len);
+ uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+ bool from_batchq, next_from_batchq;
+ struct mbuf *ifm, *ifm_next, *ifqt;
+
+ DEBUG_CALL("if_start");
+
+ if (slirp->if_start_busy) {
+ return;
+ }
+ slirp->if_start_busy = true;
+
+ if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
+ ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
+ next_from_batchq = false;
+ } else if ((struct quehead *) slirp->next_m != &slirp->if_batchq) {
+ /* Nothing on fastq, pick up from batchq via next_m */
+ ifm_next = slirp->next_m;
+ next_from_batchq = true;
+ } else {
+ ifm_next = NULL;
+ }
+
+ while (ifm_next) {
+ ifm = ifm_next;
+ from_batchq = next_from_batchq;
+
+ ifm_next = ifm->ifq_next;
+ if ((struct quehead *) ifm_next == &slirp->if_fastq) {
+ /* No more packets in fastq, switch to batchq */
+ ifm_next = slirp->next_m;
+ next_from_batchq = true;
+ }
+ if ((struct quehead *) ifm_next == &slirp->if_batchq) {
+ /* end of batchq */
+ ifm_next = NULL;
+ }
+
+ /* Try to send packet unless it already expired */
+ if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
+ /* Packet is delayed due to pending ARP or NDP resolution */
+ continue;
+ }
+
+ if (ifm == slirp->next_m) {
+ /* Set which packet to send on next iteration */
+ slirp->next_m = ifm->ifq_next;
+ }
+
+ /* Remove it from the queue */
+ ifqt = ifm->ifq_prev;
+ remque(ifm);
+
+ /* If there are more packets for this session, re-queue them */
+ if (ifm->ifs_next != ifm) {
+ struct mbuf *next = ifm->ifs_next;
+
+ insque(next, ifqt);
+ ifs_remque(ifm);
+
+ if (!from_batchq) {
+ /* Next packet in fastq is from the same session */
+ ifm_next = next;
+ next_from_batchq = false;
+ } else if ((struct quehead *) slirp->next_m == &slirp->if_batchq) {
+ /* Set next_m and ifm_next if the session packet is now the
+ * only one on batchq */
+ slirp->next_m = ifm_next = next;
+ }
+ }
+
+ /* Update so_queued */
+ if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
+ /* If there's no more queued, reset nqueued */
+ ifm->ifq_so->so_nqueued = 0;
+ }
m_free(ifm);
+ }
- if (if_queued)
- goto again;
+ slirp->if_start_busy = false;
}