/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */
7df7482b | 8 | #include "qemu/osdep.h" |
a9c94277 | 9 | #include "slirp.h" |
1de7afc9 | 10 | #include "qemu/timer.h" |
f0cbd3ec | 11 | |
674bb261 | 12 | static void |
a5f1b965 | 13 | ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead) |
f0cbd3ec FB |
14 | { |
15 | ifm->ifs_next = ifmhead->ifs_next; | |
16 | ifmhead->ifs_next = ifm; | |
17 | ifm->ifs_prev = ifmhead; | |
18 | ifm->ifs_next->ifs_prev = ifm; | |
19 | } | |
20 | ||
674bb261 | 21 | static void |
a5f1b965 | 22 | ifs_remque(struct mbuf *ifm) |
f0cbd3ec FB |
23 | { |
24 | ifm->ifs_prev->ifs_next = ifm->ifs_next; | |
25 | ifm->ifs_next->ifs_prev = ifm->ifs_prev; | |
26 | } | |
27 | ||
28 | void | |
460fec67 | 29 | if_init(Slirp *slirp) |
f0cbd3ec | 30 | { |
67e3eee4 ST |
31 | slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq; |
32 | slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq; | |
33 | slirp->next_m = (struct mbuf *) &slirp->if_batchq; | |
f0cbd3ec FB |
34 | } |
35 | ||
/*
 * if_output: Queue packet into an output queue.
 * There are 2 output queue's, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of double linked lists
 * of mbufs, each list belonging to one "session" (socket).  This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc.  Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (eg. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq)
 */
void
if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    /* NOTE(review): on_fastq stays 1 on the batchq/session-match paths too,
     * so the downgrade check below can fire for batchq packets; harmless
     * (re-inserts into batchq) and matches upstream behavior. */
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("ifm = %p", ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     * XXX add cache here?
     */
    /* Walk batchq backwards (qh_rlink = tail) looking for this socket. */
    for (ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
         (struct quehead *) ifq != &slirp->if_batchq;
         ifq = ifq->ifq_prev) {
        if (so == ifq->ifq_so) {
            /* A match! Append to the tail of that session's sub-list. */
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        /* Interactive traffic (TOS low-delay) goes on the fastq tail. */
        ifq = (struct mbuf *) slirp->if_fastq.qh_rlink;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
        /* Set next_m if the queue was empty so far */
        if ((struct quehead *) slirp->next_m == &slirp->if_batchq) {
            slirp->next_m = ifm;
        }
    }

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq.  A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new.  That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }

#ifndef FULL_BOLT
    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
#endif
}
144 | ||
/*
 * Send a packet
 * We choose a packet based on its position in the output queues;
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Otherwise we choose the first packet from the
 * batchq and send it.  the next packet chosen will be from the session
 * after this one, then the session after that one, and so on..  So,
 * for example, if there are 3 ftp session's fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third, then back
 * to the first, etc. etc.
 */
void if_start(Slirp *slirp)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    bool from_batchq, next_from_batchq;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

    /* Guard against reentrancy: if_encap() can call back into if_start()
     * (e.g. via if_output on an ARP reply), so bail out if already active. */
    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    /* Pick the first candidate: fastq has absolute priority, then the
     * batchq round-robin cursor (next_m); sentinel == queue head means empty. */
    if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
        ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
        next_from_batchq = false;
    } else if ((struct quehead *) slirp->next_m != &slirp->if_batchq) {
        /* Nothing on fastq, pick up from batchq via next_m */
        ifm_next = slirp->next_m;
        next_from_batchq = true;
    } else {
        ifm_next = NULL;
    }

    while (ifm_next) {
        ifm = ifm_next;
        from_batchq = next_from_batchq;

        /* Compute the following candidate before we unlink ifm. */
        ifm_next = ifm->ifq_next;
        if ((struct quehead *) ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = slirp->next_m;
            next_from_batchq = true;
        }
        if ((struct quehead *) ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP or NDP resolution;
             * leave it queued and move on to the next candidate. */
            continue;
        }

        if (ifm == slirp->next_m) {
            /* Set which packet to send on next iteration */
            slirp->next_m = ifm->ifq_next;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            /* Promote the session's next packet into ifm's old queue slot. */
            insque(next, ifqt);
            ifs_remque(ifm);

            if (!from_batchq) {
                /* Next packet in fastq is from the same session */
                ifm_next = next;
                next_from_batchq = false;
            } else if ((struct quehead *) slirp->next_m == &slirp->if_batchq) {
                /* Set next_m and ifm_next if the session packet is now the
                 * only one on batchq */
                slirp->next_m = ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        /* Packet was sent (or dropped as expired): release the mbuf. */
        m_free(ifm);
    }

    slirp->if_start_busy = false;
}