/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include "slirp.h"

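/*
 * Insert ifm into a session's circular list of mbufs,
 * immediately after ifmhead.
 */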
static void
ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
{
    ifm->ifs_next = ifmhead->ifs_next;
    ifmhead->ifs_next = ifm;
    ifm->ifs_prev = ifmhead;
    ifm->ifs_next->ifs_prev = ifm;
}

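/*
 * Unlink ifm from its session's circular list of mbufs.
 */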
static void
ifs_remque(struct mbuf *ifm)
{
    ifm->ifs_prev->ifs_next = ifm->ifs_next;
    ifm->ifs_next->ifs_prev = ifm->ifs_prev;
}

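/*
 * Initialize both output queues as empty, self-referencing circular lists.
 */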
void
if_init(Slirp *slirp)
{
    slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
    slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
}

/*
 * if_output: Queue packet into an output queue.
 * There are 2 output queues, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of doubly linked lists
 * of mbufs, each list belonging to one "session" (socket).  This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc.  Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (e.g. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq)
 */
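/*
 * Rough sketch of the resulting structure (illustrative): the queue head
 * chains the lead mbuf of each session via ifq_next/ifq_prev, and each
 * lead mbuf chains the rest of its session's packets via ifs_next/ifs_prev:
 *
 *   if_batchq <-> [sess A, pkt 1] <-> [sess B, pkt 1] <-> ...   (ifq_*)
 *                       ^                   ^
 *                  [A, pkt 2..n]       [B, pkt 2..n]            (ifs_*)
 */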
void
if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("ifm = %p", ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     * XXX add cache here?
     */
    if (so) {
        for (ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
             (struct quehead *) ifq != &slirp->if_batchq;
             ifq = ifq->ifq_prev) {
            if (so == ifq->ifq_so) {
                /* A match! */
                ifm->ifq_so = so;
                ifs_insque(ifm, ifq->ifs_prev);
                goto diddit;
            }
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = (struct mbuf *) slirp->if_fastq.qh_rlink;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
    }

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq.  A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new.  That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }

    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
}

/*
 * Send one packet from each session.
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Then we choose the first packet from each
 * batchq session (socket) and send it.
 * For example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third.
 */
void if_start(Slirp *slirp)
{
    uint64_t now = slirp->cb->clock_get_ns(slirp->opaque);
    bool from_batchq = false;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

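    /* Guard against re-entering if_start() while a flush is already in progress */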
    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    struct mbuf *batch_head = NULL;
    if (slirp->if_batchq.qh_link != &slirp->if_batchq) {
        batch_head = (struct mbuf *) slirp->if_batchq.qh_link;
    }

    if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
        ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
    } else if (batch_head) {
        /* Nothing on fastq, pick up from batchq */
        ifm_next = batch_head;
        from_batchq = true;
    } else {
        ifm_next = NULL;
    }

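    /* Drain the fastq FIFO first, then the lead packet of each batchq session */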
    while (ifm_next) {
        ifm = ifm_next;

        ifm_next = ifm->ifq_next;
        if ((struct quehead *) ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = batch_head;
            from_batchq = true;
        }
        if ((struct quehead *) ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP or NDP resolution */
            continue;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            insque(next, ifqt);
            ifs_remque(ifm);
            if (!from_batchq) {
                ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    slirp->if_start_busy = false;
}