/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "qemu-aio.h"

/*
 * An AsyncContext protects the callbacks of AIO requests and Bottom Halves
 * against interfering with each other. A typical example is qcow2: it accepts
 * asynchronous requests, but for manipulating its metadata it relies on
 * bdrv_read/write being synchronous and not triggering any callbacks.
 *
 * However, these functions are often emulated using AIO, which means that AIO
 * callbacks must be run - but at the same time we must not run the callbacks
 * of other requests, as they might start to modify metadata and corrupt the
 * internal state of the caller of bdrv_read/write.
 *
 * To achieve the desired semantics we switch into a new AsyncContext.
 * Callbacks must only be run if they belong to the current AsyncContext.
 * Otherwise they need to be queued until their own context is active again.
 * This is how you can make qemu_aio_wait() wait only for your own callbacks.
 *
 * The AsyncContexts form a stack. When you leave an AsyncContext, you always
 * return to the old ("parent") context.
 */
struct AsyncContext {
    /* Consecutive number of the AsyncContext (position in the stack) */
    int id;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* Link to parent context */
    struct AsyncContext *parent;
};

/* The currently active AsyncContext */
static struct AsyncContext *async_context = &(struct AsyncContext) { 0 };

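/*
 * An illustrative sketch of the intended usage (the qcow2 scenario from the
 * comment above; bs, buf and the surrounding variables are placeholders, not
 * part of this file):
 *
 *     async_context_push();
 *
 *     // Only callbacks created after the push belong to this context, so
 *     // qemu_aio_wait() inside the emulated synchronous bdrv_read completes
 *     // our own requests without running anyone else's callbacks.
 *     ret = bdrv_read(bs, sector_num, buf, nb_sectors);
 *
 *     async_context_pop();
 */
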
/*
 * Enter a new AsyncContext. Already scheduled Bottom Halves and AIO callbacks
 * won't be called until this context is left again.
 */
void async_context_push(void)
{
    struct AsyncContext *new = qemu_mallocz(sizeof(*new));
    new->parent = async_context;
    new->id = async_context->id + 1;
    async_context = new;
}
72 | ||
73 | /* Run queued AIO completions and destroy Bottom Half */ | |
74 | static void bh_run_aio_completions(void *opaque) | |
75 | { | |
76 | QEMUBH **bh = opaque; | |
77 | qemu_bh_delete(*bh); | |
78 | qemu_free(bh); | |
79 | qemu_aio_process_queue(); | |
80 | } | |
/*
 * Leave the currently active AsyncContext. All Bottom Halves belonging to the
 * old context are executed before changing the context.
 */
void async_context_pop(void)
{
    struct AsyncContext *old = async_context;
    QEMUBH **bh;

    /* Flush the bottom halves, we don't want to lose them */
    while (qemu_bh_poll());

    /* Switch back to the parent context */
    async_context = async_context->parent;
    qemu_free(old);

    if (async_context == NULL) {
        abort();
    }

    /* Schedule a BH to run any queued AIO completions as soon as possible */
    bh = qemu_malloc(sizeof(*bh));
    *bh = qemu_bh_new(bh_run_aio_completions, bh);
    qemu_bh_schedule(*bh);
}
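
/*
 * Design note (a reading of the code above, not authoritative): the queued
 * AIO completions are not processed inline in async_context_pop() but are
 * deferred to a Bottom Half. That hands them back to the main loop, so the
 * caller of async_context_pop() can finish its own critical section before
 * any callbacks belonging to the parent context get to run.
 */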
106 | ||
107 | /* | |
108 | * Returns the ID of the currently active AsyncContext | |
109 | */ | |
110 | int get_async_context_id(void) | |
111 | { | |
112 | return async_context->id; | |
113 | } | |
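
/*
 * A hypothetical sketch of how an AIO backend might use this ID (the request
 * structure and its fields are assumptions for illustration, not part of this
 * file): each request records the context it was issued in, and its
 * completion is only delivered while that same context is active.
 *
 *     req->async_context_id = get_async_context_id();
 *     ...
 *     if (req->async_context_id != get_async_context_id()) {
 *         // wrong context: keep the completion queued for later
 *         return;
 *     }
 *     req->cb(req->opaque, ret);
 */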

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    QEMUBHFunc *cb;
    void *opaque;
    int scheduled;
    int idle;
    int deleted;
    QEMUBH *next;
};
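
/*
 * The life cycle of a BH, as a minimal sketch (my_cb and my_state stand in
 * for a real callback and its data):
 *
 *     QEMUBH *bh = qemu_bh_new(my_cb, my_state); // created, not yet scheduled
 *     qemu_bh_schedule(bh);   // my_cb(my_state) runs on the next bh poll
 *     ...
 *     qemu_bh_delete(bh);     // marked deleted; freed later by qemu_bh_poll()
 */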
126 | ||
4f999d05 KW |
127 | QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque) |
128 | { | |
129 | QEMUBH *bh; | |
130 | bh = qemu_mallocz(sizeof(QEMUBH)); | |
131 | bh->cb = cb; | |
132 | bh->opaque = opaque; | |
9a1e9481 KW |
133 | bh->next = async_context->first_bh; |
134 | async_context->first_bh = bh; | |
4f999d05 KW |
135 | return bh; |
136 | } | |
137 | ||
138 | int qemu_bh_poll(void) | |
139 | { | |
7887f620 | 140 | QEMUBH *bh, **bhp, *next; |
4f999d05 KW |
141 | int ret; |
142 | ||
143 | ret = 0; | |
7887f620 KW |
144 | for (bh = async_context->first_bh; bh; bh = next) { |
145 | next = bh->next; | |
4f999d05 KW |
146 | if (!bh->deleted && bh->scheduled) { |
147 | bh->scheduled = 0; | |
148 | if (!bh->idle) | |
149 | ret = 1; | |
150 | bh->idle = 0; | |
151 | bh->cb(bh->opaque); | |
152 | } | |
153 | } | |
154 | ||
155 | /* remove deleted bhs */ | |
9a1e9481 | 156 | bhp = &async_context->first_bh; |
4f999d05 KW |
157 | while (*bhp) { |
158 | bh = *bhp; | |
159 | if (bh->deleted) { | |
160 | *bhp = bh->next; | |
161 | qemu_free(bh); | |
162 | } else | |
163 | bhp = &bh->next; | |
164 | } | |
165 | ||
166 | return ret; | |
167 | } | |
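
/*
 * Note on deletion (an observation about the code above, offered as a
 * sketch): a callback is allowed to delete its own BH while qemu_bh_poll()
 * is still walking the list. That is why qemu_bh_delete() below only marks
 * the BH and the actual qemu_free() is deferred to the removal pass in
 * qemu_bh_poll(). A hypothetical one-shot pattern:
 *
 *     static void one_shot_cb(void *opaque)
 *     {
 *         QEMUBH *bh = opaque;
 *         qemu_bh_delete(bh);    // safe: freed later by qemu_bh_poll()
 *     }
 */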
168 | ||
169 | void qemu_bh_schedule_idle(QEMUBH *bh) | |
170 | { | |
171 | if (bh->scheduled) | |
172 | return; | |
173 | bh->scheduled = 1; | |
174 | bh->idle = 1; | |
175 | } | |
176 | ||
177 | void qemu_bh_schedule(QEMUBH *bh) | |
178 | { | |
179 | if (bh->scheduled) | |
180 | return; | |
181 | bh->scheduled = 1; | |
182 | bh->idle = 0; | |
183 | /* stop the currently executing CPU to execute the BH ASAP */ | |
184 | qemu_notify_event(); | |
185 | } | |
186 | ||
187 | void qemu_bh_cancel(QEMUBH *bh) | |
188 | { | |
189 | bh->scheduled = 0; | |
190 | } | |
191 | ||
192 | void qemu_bh_delete(QEMUBH *bh) | |
193 | { | |
194 | bh->scheduled = 0; | |
195 | bh->deleted = 1; | |
196 | } | |
197 | ||
198 | void qemu_bh_update_timeout(int *timeout) | |
199 | { | |
200 | QEMUBH *bh; | |
201 | ||
9a1e9481 | 202 | for (bh = async_context->first_bh; bh; bh = bh->next) { |
4f999d05 KW |
203 | if (!bh->deleted && bh->scheduled) { |
204 | if (bh->idle) { | |
205 | /* idle bottom halves will be polled at least | |
206 | * every 10ms */ | |
207 | *timeout = MIN(10, *timeout); | |
208 | } else { | |
209 | /* non-idle bottom halves will be executed | |
210 | * immediately */ | |
211 | *timeout = 0; | |
212 | break; | |
213 | } | |
214 | } | |
215 | } | |
216 | } | |
217 |
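
/*
 * How a main loop might consume this, as a rough sketch (the select-based
 * loop and the 5000ms default are assumptions for illustration, not part of
 * this file):
 *
 *     int timeout = 5000;
 *     qemu_bh_update_timeout(&timeout); // 0 if a BH is pending, <=10 if idle
 *     tv.tv_sec = timeout / 1000;
 *     tv.tv_usec = (timeout % 1000) * 1000;
 *     select(nfds, &rfds, &wfds, NULL, &tv);
 *     qemu_bh_poll();
 */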