/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include <slirp.h>
#include "qemu-timer.h"

11 | static void | |
12 | ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead) | |
13 | { | |
14 | ifm->ifs_next = ifmhead->ifs_next; | |
15 | ifmhead->ifs_next = ifm; | |
16 | ifm->ifs_prev = ifmhead; | |
17 | ifm->ifs_next->ifs_prev = ifm; | |
18 | } | |
19 | ||
20 | static void | |
21 | ifs_remque(struct mbuf *ifm) | |
22 | { | |
23 | ifm->ifs_prev->ifs_next = ifm->ifs_next; | |
24 | ifm->ifs_next->ifs_prev = ifm->ifs_prev; | |
25 | } | |
26 | ||
27 | void | |
28 | if_init(Slirp *slirp) | |
29 | { | |
30 | slirp->if_fastq.ifq_next = slirp->if_fastq.ifq_prev = &slirp->if_fastq; | |
31 | slirp->if_batchq.ifq_next = slirp->if_batchq.ifq_prev = &slirp->if_batchq; | |
32 | slirp->next_m = &slirp->if_batchq; | |
33 | } | |
34 | ||
/*
 * if_output: Queue packet into an output queue.
 * There are 2 output queue's, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of double linked lists
 * of mbufs, each list belonging to one "session" (socket). This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc. Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (eg. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq)
 */
void
if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    /*
     * NOTE(review): on_fastq starts at 1 and is never cleared on the
     * batchq paths (the session-match goto below, and the plain batchq
     * else-branch).  The "downgrade" block at the bottom can therefore
     * fire for packets that are already on the batchq, re-inserting the
     * session at the batchq tail.  This matches the original BSD slirp
     * behaviour -- presumably a latent quirk; confirm before changing.
     */
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %lx", (long)so);
    DEBUG_ARG("ifm = %lx", (long)ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     * XXX add cache here?
     */
    for (ifq = slirp->if_batchq.ifq_prev; ifq != &slirp->if_batchq;
         ifq = ifq->ifq_prev) {
        if (so == ifq->ifq_so) {
            /* A match!  Append to the tail of this session's packet ring. */
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = slirp->if_fastq.ifq_prev;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else
        ifq = slirp->if_batchq.ifq_prev;

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    slirp->if_queued++;

    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq. A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            /* ifm->ifs_next is the session's ring head, i.e. the mbuf
             * that actually sits on the output queue. */
            remque(ifm->ifs_next);

            /* ...And insert in the new. That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }

#ifndef FULL_BOLT
    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
#endif
}
139 | ||
140 | /* | |
141 | * Send a packet | |
142 | * We choose a packet based on it's position in the output queues; | |
143 | * If there are packets on the fastq, they are sent FIFO, before | |
144 | * everything else. Otherwise we choose the first packet from the | |
145 | * batchq and send it. the next packet chosen will be from the session | |
146 | * after this one, then the session after that one, and so on.. So, | |
147 | * for example, if there are 3 ftp session's fighting for bandwidth, | |
148 | * one packet will be sent from the first session, then one packet | |
149 | * from the second session, then one packet from the third, then back | |
150 | * to the first, etc. etc. | |
151 | */ | |
152 | void if_start(Slirp *slirp) | |
153 | { | |
154 | uint64_t now = qemu_get_clock_ns(rt_clock); | |
155 | int requeued = 0; | |
156 | bool from_batchq = false; | |
157 | struct mbuf *ifm, *ifqt; | |
158 | ||
159 | DEBUG_CALL("if_start"); | |
160 | ||
161 | while (slirp->if_queued) { | |
162 | /* check if we can really output */ | |
163 | if (!slirp_can_output(slirp->opaque)) | |
164 | return; | |
165 | ||
166 | /* | |
167 | * See which queue to get next packet from | |
168 | * If there's something in the fastq, select it immediately | |
169 | */ | |
170 | if (slirp->if_fastq.ifq_next != &slirp->if_fastq) { | |
171 | ifm = slirp->if_fastq.ifq_next; | |
172 | } else { | |
173 | /* Nothing on fastq, see if next_m is valid */ | |
174 | if (slirp->next_m != &slirp->if_batchq) { | |
175 | ifm = slirp->next_m; | |
176 | } else { | |
177 | ifm = slirp->if_batchq.ifq_next; | |
178 | } | |
179 | ||
180 | from_batchq = true; | |
181 | } | |
182 | ||
183 | slirp->if_queued--; | |
184 | ||
185 | /* Try to send packet unless it already expired */ | |
186 | if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) { | |
187 | /* Packet is delayed due to pending ARP resolution */ | |
188 | requeued++; | |
189 | continue; | |
190 | } | |
191 | ||
192 | if (from_batchq) { | |
193 | /* Set which packet to send on next iteration */ | |
194 | slirp->next_m = ifm->ifq_next; | |
195 | } | |
196 | ||
197 | /* Remove it from the queue */ | |
198 | ifqt = ifm->ifq_prev; | |
199 | remque(ifm); | |
200 | ||
201 | /* If there are more packets for this session, re-queue them */ | |
202 | if (ifm->ifs_next != ifm) { | |
203 | insque(ifm->ifs_next, ifqt); | |
204 | ifs_remque(ifm); | |
205 | } | |
206 | ||
207 | /* Update so_queued */ | |
208 | if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) { | |
209 | /* If there's no more queued, reset nqueued */ | |
210 | ifm->ifq_so->so_nqueued = 0; | |
211 | } | |
212 | ||
213 | m_free(ifm); | |
214 | ||
215 | } | |
216 | ||
217 | slirp->if_queued = requeued; | |
218 | } |